diff --git a/.github/generate_change_log.sh b/.github/generate_change_log.sh index ea8cee0191..ab8ffff689 100755 --- a/.github/generate_change_log.sh +++ b/.github/generate_change_log.sh @@ -24,10 +24,6 @@ TESTNET_ZIP_SUM="$(checksum ./testnet.zip)" LINUX_BIN_SUM="$(checksum ./linux/geth)" MAC_BIN_SUM="$(checksum ./macos/geth)" WINDOWS_BIN_SUM="$(checksum ./windows/geth.exe)" -ARM5_BIN_SUM="$(checksum ./arm5/geth-linux-arm-5)" -ARM6_BIN_SUM="$(checksum ./arm6/geth-linux-arm-6)" -ARM7_BIN_SUM="$(checksum ./arm7/geth-linux-arm-7)" -ARM64_BIN_SUM="$(checksum ./arm64/geth-linux-arm64)" OUTPUT=$(cat <<-END ## Changelog\n ${CHANGE_LOG}\n @@ -39,10 +35,6 @@ ${CHANGE_LOG}\n | geth_linux | ${LINUX_BIN_SUM} |\n | geth_mac | ${MAC_BIN_SUM} |\n | geth_windows | ${WINDOWS_BIN_SUM} |\n -| geth_linux_arm5 | ${ARM5_BIN_SUM} |\n -| geth_linux_arm6 | ${ARM6_BIN_SUM} |\n -| geth_linux_arm7 | ${ARM7_BIN_SUM} |\n -| geth_linux_arm64 | ${ARM64_BIN_SUM} |\n END ) diff --git a/.github/release.env b/.github/release.env index 62e94fa4bd..387d81fce3 100644 --- a/.github/release.env +++ b/.github/release.env @@ -1,2 +1,2 @@ -MAINNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.8/mainnet.zip" -TESTNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.8/testnet.zip" +MAINNET_FILE_URL="https://github.com/binance-chain/bsc/releases/latest/download/mainnet.zip" +TESTNET_FILE_URL="https://github.com/binance-chain/bsc/releases/latest/download/testnet.zip" diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index b0fb9833b3..a3cb964df0 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -15,7 +15,7 @@ jobs: unit-test: strategy: matrix: - go-version: [1.16.x] + go-version: [1.17.x] os: [ubuntu-18.04] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml new file mode 100644 index 0000000000..1b7d8fd9fb --- /dev/null +++ b/.github/workflows/docker-release.yml @@ -0,0 +1,51 @@ +name: Docker + +on: + push: + # Publish `v1.2.3` tags as releases. + tags: + - v* + +env: + IMAGE_NAME: bsc + +jobs: + # Push image to GitHub Packages. + push: + runs-on: ubuntu-latest + if: github.event_name == 'push' + + steps: + - uses: actions/checkout@v2 + + - name: Build image + run: | + docker build . 
\ + --label "org.opencontainers.image.source=${{ secrets.IMAGE_SOURCE }}" \ + --label "org.opencontainers.image.revision=$(git rev-parse HEAD)" \ + --label "org.opencontainers.image.version=$(git describe --tags --abbrev=0)" \ + --label "org.opencontainers.image.licenses=LGPL-3.0,GPL-3.0" \ + -f ./Dockerfile -t "${IMAGE_NAME}" + + - name: Log into registry + run: echo "${{ secrets.PACKAGE_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin + + - name: Push image + run: | + IMAGE_ID=ghcr.io/${{ github.repository }} + + # Change all uppercase to lowercase + IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + # Strip git ref prefix from version + VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + # Strip "v" prefix from tag name + [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//') + # Use Docker `latest` tag convention + [ "$VERSION" == "master" ] && VERSION=latest + echo IMAGE_ID=$IMAGE_ID + echo VERSION=$VERSION + docker tag $IMAGE_NAME $IMAGE_ID:$VERSION + docker tag $IMAGE_NAME $IMAGE_ID:latest + docker push $IMAGE_ID:$VERSION + docker push $IMAGE_ID:latest + \ No newline at end of file diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 288749ae41..b947844ee2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -15,7 +15,7 @@ jobs: unit-test: strategy: matrix: - go-version: [1.16.x] + go-version: [1.17.x] os: [ubuntu-18.04] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 87583d8b54..cd9f5d1935 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -10,7 +10,7 @@ jobs: name: Build Release strategy: matrix: - go-version: [1.16.x] + go-version: [1.17.x] os: [ubuntu-18.04, macos-11, windows-2019] runs-on: ${{ matrix.os }} steps: @@ -45,19 +45,6 @@ jobs: - name: Build Binary for ${{matrix.os}} run: make geth - # ============================== - # Cross Compile for ARM - # ============================== - - - name: Build Binary for ARM - if: matrix.os == 'ubuntu-18.04' - env: - GOPATH: /home/runner/work/woodpecker/go - run: | - mkdir -p $GOPATH/src/github.com/binance-chain/bsc/ - cp -r ./* $GOPATH/src/github.com/binance-chain/bsc/ - cd $GOPATH/src/github.com/binance-chain/bsc/ && make geth-linux-arm - # ============================== # Upload artifacts # ============================== @@ -83,34 +70,6 @@ jobs: name: windows path: ./build/bin/geth.exe - - name: Upload ARM-5 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm5 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-5 - - - name: Upload ARM-6 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm6 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-6 - - - name: Upload ARM-7 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm7 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-7 - - - name: Upload ARM-64 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm64 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm64 - release: name: Release needs: build @@ -143,30 +102,6 @@ jobs: with: name: windows path: ./windows - - - name: Download Artifacts - uses: 
actions/download-artifact@v2 - with: - name: arm5 - path: ./arm5 - - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm6 - path: ./arm6 - - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm7 - path: ./arm7 - - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm64 - path: ./arm64 - name: Download Config File run: | @@ -226,46 +161,6 @@ jobs: asset_path: ./windows/geth.exe asset_name: geth_windows.exe asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 5 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm5/geth-linux-arm-5 - asset_name: geth-linux-arm-5 - asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 6 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm6/geth-linux-arm-6 - asset_name: geth-linux-arm-6 - asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 7 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm7/geth-linux-arm-7 - asset_name: geth-linux-arm-7 - asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 64 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm64/geth-linux-arm64 - asset_name: geth-linux-arm64 - asset_content_type: application/octet-stream - name: Upload Release Asset - MAINNET.ZIP uses: actions/upload-release-asset@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 63b49e9772..66b8ca4162 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,7 +11,7 @@ jobs: name: Build Release strategy: matrix: - go-version: [1.16.x] + go-version: [1.17.x] os: [ubuntu-18.04, macos-11, windows-2019] runs-on: ${{ matrix.os }} steps: @@ -46,19 +46,6 @@ jobs: - name: Build Binary for ${{matrix.os}} run: make geth - # ============================== - # Cross Compile for ARM - # ============================== - - - name: Build Binary for ARM - if: matrix.os == 'ubuntu-18.04' - env: - GOPATH: /home/runner/work/woodpecker/go - run: | - mkdir -p $GOPATH/src/github.com/binance-chain/bsc/ - cp -r ./* $GOPATH/src/github.com/binance-chain/bsc/ - cd $GOPATH/src/github.com/binance-chain/bsc/ && make geth-linux-arm - # ============================== # Upload artifacts # ============================== @@ -84,34 +71,6 @@ jobs: name: windows path: ./build/bin/geth.exe - - name: Upload ARM-5 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm5 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-5 - - - name: Upload ARM-6 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm6 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-6 - - - name: Upload ARM-7 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm7 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-7 - - - name: Upload ARM-64 Build - uses: actions/upload-artifact@v2 - if: matrix.os == 'ubuntu-18.04' - with: - name: arm64 - path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm64 - release: name: Release needs: build @@ -145,30 +104,6 @@ jobs: name: windows path: ./windows - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm5 - path: ./arm5 - - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm6 - path: ./arm6 - - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm7 - path: ./arm7 - - - name: Download Artifacts - uses: actions/download-artifact@v2 - with: - name: arm64 - path: ./arm64 - - name: Download Config File run: | . ./.github/release.env @@ -236,46 +171,6 @@ jobs: asset_name: geth_windows.exe asset_content_type: application/octet-stream - - name: Upload Release Asset - Linux ARM 5 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm5/geth-linux-arm-5 - asset_name: geth-linux-arm-5 - asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 6 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm6/geth-linux-arm-6 - asset_name: geth-linux-arm-6 - asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 7 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm7/geth-linux-arm-7 - asset_name: geth-linux-arm-7 - asset_content_type: application/octet-stream - - - name: Upload Release Asset - Linux ARM 64 - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./arm64/geth-linux-arm64 - asset_name: geth-linux-arm64 - asset_content_type: application/octet-stream - - name: Upload Release Asset - MAINNET.ZIP uses: actions/upload-release-asset@v1 env: diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 8847aeba4d..fb0b01fbe7 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -15,7 +15,7 @@ jobs: unit-test: strategy: matrix: - go-version: [1.16.x] + go-version: [1.17.x] os: [ubuntu-18.04] runs-on: ${{ matrix.os }} steps: @@ -47,6 +47,5 @@ jobs: env: ANDROID_HOME: "" # Skip android test run: | - go clean -testcache make test diff --git a/.gitmodules b/.gitmodules index 32bdb3b6e5..241c169c47 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,8 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests + shallow = true +[submodule "evm-benchmarks"] + path = tests/evm-benchmarks + url = https://github.com/ipsilon/evm-benchmarks + shallow = true diff --git a/.golangci.yml b/.golangci.yml index 18b325e206..4950b98c21 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,7 @@ # This file configures github.com/golangci/golangci-lint. run: - timeout: 3m + timeout: 20m tests: true # default is true. 
Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ diff --git a/.travis.yml b/.travis.yml index 7406f31fe7..197d56748f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ jobs: allow_failures: - stage: build os: osx - go: 1.15.x + go: 1.17.x env: - azure-osx - azure-ios @@ -16,7 +16,7 @@ jobs: - stage: lint os: linux dist: bionic - go: 1.16.x + go: 1.17.x env: - lint git: @@ -24,12 +24,48 @@ jobs: script: - go run build/ci.go lint + # These builders create the Docker sub-images for multi-arch push and each + # will attempt to push the multi-arch image if they are the last builder + - stage: build + if: type = push + os: linux + arch: amd64 + dist: bionic + go: 1.17.x + env: + - docker + services: + - docker + git: + submodules: false # avoid cloning ethereum/tests + before_install: + - export DOCKER_CLI_EXPERIMENTAL=enabled + script: + - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go + + - stage: build + if: type = push + os: linux + arch: arm64 + dist: bionic + go: 1.17.x + env: + - docker + services: + - docker + git: + submodules: false # avoid cloning ethereum/tests + before_install: + - export DOCKER_CLI_EXPERIMENTAL=enabled + script: + - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go + # This builder does the Ubuntu PPA upload - stage: build if: type = push os: linux dist: bionic - go: 1.16.x + go: 1.17.x env: - ubuntu-ppa - GO111MODULE=on @@ -54,7 +90,7 @@ jobs: os: linux dist: bionic sudo: required - go: 1.16.x + go: 1.17.x env: - azure-linux - GO111MODULE=on @@ -84,36 +120,6 @@ jobs: - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - # This builder does the Linux Azure MIPS xgo uploads - - stage: build - if: type = push - os: linux - dist: bionic - services: - - docker - go: 1.16.x - env: - - azure-linux-mips - - GO111MODULE=on - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY signify SIGNIFY_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - # This builder does the Android Maven and Azure uploads - stage: build if: type = push @@ -156,7 +162,7 @@ jobs: - stage: build if: type = push os: osx - go: 1.16.x + go: 1.17.x 
env: - azure-osx - azure-ios @@ -188,7 +194,7 @@ jobs: os: linux arch: amd64 dist: bionic - go: 1.16.x + go: 1.17.x env: - GO111MODULE=on script: @@ -199,7 +205,7 @@ jobs: os: linux arch: arm64 dist: bionic - go: 1.16.x + go: 1.17.x env: - GO111MODULE=on script: @@ -208,7 +214,7 @@ jobs: - stage: build os: linux dist: bionic - go: 1.15.x + go: 1.16.x env: - GO111MODULE=on script: @@ -219,7 +225,7 @@ jobs: if: type = cron os: linux dist: bionic - go: 1.16.x + go: 1.17.x env: - azure-purge - GO111MODULE=on @@ -227,3 +233,15 @@ jobs: submodules: false # avoid cloning ethereum/tests script: - go run build/ci.go purge -store gethstore/builds -days 14 + + # This builder executes race tests + - stage: build + if: type = cron + os: linux + dist: bionic + go: 1.17.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -race -coverage $TEST_PACKAGES + diff --git a/CHANGELOG.md b/CHANGELOG.md index deefb9d12a..62fb769d76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## v1.1.12 + +FEATURE +* [\#862](https://github.com/bnb-chain/bsc/pull/862) Pruning AncientDB inline at runtime +* [\#926](https://github.com/bnb-chain/bsc/pull/926) Separate Processing and State Verification on BSC + +IMPROVEMENT +* [\#816](https://github.com/bnb-chain/bsc/pull/816) merge go-ethereum v1.10.15 +* [\#950](https://github.com/bnb-chain/bsc/pull/950) code optimizations for state prefetcher +* [\#972](https://github.com/bnb-chain/bsc/pull/972) redesign triePrefetcher to make it thread safe +* [\#998](https://github.com/bnb-chain/bsc/pull/998) update dockerfile with a few enhancement +* [\#1015](https://github.com/bnb-chain/bsc/pull/1015) disable noisy logs since system transaction will cause gas capping + +BUGFIX +* [\#932](https://github.com/bnb-chain/bsc/pull/932) fix account root was not set correctly when committing mpt during pipeline commit +* [\#953](https://github.com/bnb-chain/bsc/pull/953) correct logic for eip check of NewEVMInterpreter +* [\#958](https://github.com/bnb-chain/bsc/pull/958) define DiscReason as uint8 +* [\#959](https://github.com/bnb-chain/bsc/pull/959) update some packages' version +* [\#983](https://github.com/bnb-chain/bsc/pull/983) fix nil pointer issue when stopping mining new block +* [\#1002](https://github.com/bnb-chain/bsc/pull/1002) Fix pipecommit active statedb +* [\#1005](https://github.com/bnb-chain/bsc/pull/1005) freezer batch compatible offline prunblock command +* [\#1007](https://github.com/bnb-chain/bsc/pull/1007) missing contract upgrades and incorrect behavior when miners enable pipecommit +* [\#1009](https://github.com/bnb-chain/bsc/pull/1009) resolve the concurrent cache read and write issue for fast node +* [\#1011](https://github.com/bnb-chain/bsc/pull/1011) Incorrect merkle root issue when enabling pipecommit with miner +* [\#1013](https://github.com/bnb-chain/bsc/pull/1013) tools broken because of writting metadata when open a readyonly db +* [\#1014](https://github.com/bnb-chain/bsc/pull/1014) fast node can not recover from force kill or panic +* [\#1019](https://github.com/bnb-chain/bsc/pull/1019) memory leak issue with diff protocol +* [\#1020](https://github.com/bnb-chain/bsc/pull/1020) remove diffhash patch introduced from separate node +* [\#1024](https://github.com/bnb-chain/bsc/pull/1024) verify node is not treated as verify node + ## v1.1.11 UPGRADE diff --git a/Dockerfile b/Dockerfile index 437fb2737a..409ba02b39 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,55 @@ +# Support setting various labels on the final image 
+ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + # Build Geth in a stock Go builder container -FROM golang:1.16-alpine as builder +FROM golang:1.17-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git bash ADD . /go-ethereum -RUN cd /go-ethereum && make geth +RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth # Pull Geth into a second stage deploy alpine container -FROM alpine:latest +FROM alpine:3.16.0 + +ARG BSC_USER=bsc +ARG BSC_USER_UID=1000 +ARG BSC_USER_GID=1000 + +ENV BSC_HOME=/bsc +ENV HOME=${BSC_HOME} +ENV DATA_DIR=/data + +ENV PACKAGES ca-certificates~=20211220 jq~=1.6 \ + bash~=5.1.16-r2 bind-tools~=9.16.29-r0 tini~=0.19.0 \ + grep~=3.7 curl==7.83.1-r2 sed~=4.8-r0 + +RUN apk add --no-cache $PACKAGES \ + && rm -rf /var/cache/apk/* \ + && addgroup -g ${BSC_USER_GID} ${BSC_USER} \ + && adduser -u ${BSC_USER_UID} -G ${BSC_USER} --shell /sbin/nologin --no-create-home -D ${BSC_USER} \ + && addgroup ${BSC_USER} tty \ + && sed -i -e "s/bin\/sh/bin\/bash/" /etc/passwd + +RUN echo "[ ! -z \"\$TERM\" -a -r /etc/motd ] && cat /etc/motd" >> /etc/bash/bashrc + +WORKDIR ${BSC_HOME} -RUN apk add --no-cache ca-certificates curl jq tini COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ +COPY docker-entrypoint.sh ./ + +RUN chmod +x docker-entrypoint.sh \ + && mkdir -p ${DATA_DIR} \ + && chown -R ${BSC_USER_UID}:${BSC_USER_GID} ${BSC_HOME} ${DATA_DIR} + +VOLUME ${DATA_DIR} + +USER ${BSC_USER_UID}:${BSC_USER_GID} + +# rpc ws graphql EXPOSE 8545 8546 8547 30303 30303/udp -ENTRYPOINT ["geth"] \ No newline at end of file + +ENTRYPOINT ["/sbin/tini", "--", "./docker-entrypoint.sh"] diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 483afad8c3..3ae5377e4f 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,10 +1,15 @@ +# Support setting various labels on the final image +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + # Build Geth in a stock Go builder container -FROM golang:1.16-alpine as builder +FROM golang:1.17-alpine as builder -RUN apk add --no-cache make gcc musl-dev linux-headers git +RUN apk add --no-cache gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make all +RUN cd /go-ethereum && go run build/ci.go install # Pull all binaries into a second stage deploy alpine container FROM alpine:latest @@ -13,3 +18,10 @@ RUN apk add --no-cache ca-certificates COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/ EXPOSE 8545 8546 30303 30303/udp + +# Add some metadata labels to help programatic image consumption +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + +LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM" diff --git a/Makefile b/Makefile index 03f92e6f43..2cfb5def84 100644 --- a/Makefile +++ b/Makefile @@ -3,10 +3,7 @@ # don't need to bother with make. 
.PHONY: geth android ios geth-cross evm all test truffle-test clean -.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le -.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 -.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64 -.PHONY: geth-windows geth-windows-386 geth-windows-amd64 +.PHONY: docker GOBIN = ./build/bin GO ?= latest @@ -64,94 +61,5 @@ devtools: @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' -# Cross Compilation Targets (xgo) - -geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios - @echo "Full cross compilation done:" - @ls -ld $(GOBIN)/geth-* - -geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le - @echo "Linux cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* - -geth-linux-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth - @echo "Linux 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep 386 - -geth-linux-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth - @echo "Linux amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep amd64 - -geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 - @echo "Linux ARM cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm - -geth-linux-arm-5: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth - @echo "Linux ARMv5 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-5 - -geth-linux-arm-6: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth - @echo "Linux ARMv6 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-6 - -geth-linux-arm-7: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth - @echo "Linux ARMv7 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-7 - -geth-linux-arm64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth - @echo "Linux ARM64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm64 - -geth-linux-mips: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips - -geth-linux-mipsle: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPSle cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mipsle - -geth-linux-mips64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64 - -geth-linux-mips64le: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64le cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64le - -geth-darwin: geth-darwin-386 geth-darwin-amd64 - @echo "Darwin cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* - -geth-darwin-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth - @echo "Darwin 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep 386 - -geth-darwin-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth - @echo "Darwin amd64 cross 
compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep amd64 - -geth-windows: geth-windows-386 geth-windows-amd64 - @echo "Windows cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* - -geth-windows-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth - @echo "Windows 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep 386 - -geth-windows-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth - @echo "Windows amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep amd64 +docker: + docker build --pull -t bnb-chain/bsc:latest -f Dockerfile . diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index 5ca7c241db..cd2f4d7978 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -34,6 +34,7 @@ type ABI struct { Constructor Method Methods map[string]Method Events map[string]Event + Errors map[string]Error // Additional "special" functions introduced in solidity v0.6.0. // It's separated from the original default fallback. Each contract @@ -157,12 +158,13 @@ func (abi *ABI) UnmarshalJSON(data []byte) error { } abi.Methods = make(map[string]Method) abi.Events = make(map[string]Event) + abi.Errors = make(map[string]Error) for _, field := range fields { switch field.Type { case "constructor": abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil) case "function": - name := abi.overloadedMethodName(field.Name) + name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok }) abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs) case "fallback": // New introduced function type in v0.6.0, check more detail @@ -182,8 +184,10 @@ func (abi *ABI) UnmarshalJSON(data []byte) error { } abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil) case "event": - name := abi.overloadedEventName(field.Name) + name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok }) abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs) + case "error": + abi.Errors[field.Name] = NewError(field.Name, field.Inputs) default: return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name) } @@ -191,36 +195,6 @@ func (abi *ABI) UnmarshalJSON(data []byte) error { return nil } -// overloadedMethodName returns the next available name for a given function. -// Needed since solidity allows for function overload. -// -// e.g. if the abi contains Methods send, send1 -// overloadedMethodName would return send2 for input send. -func (abi *ABI) overloadedMethodName(rawName string) string { - name := rawName - _, ok := abi.Methods[name] - for idx := 0; ok; idx++ { - name = fmt.Sprintf("%s%d", rawName, idx) - _, ok = abi.Methods[name] - } - return name -} - -// overloadedEventName returns the next available name for a given event. -// Needed since solidity allows for event overload. -// -// e.g. if the abi contains events received, received1 -// overloadedEventName would return received2 for input received. -func (abi *ABI) overloadedEventName(rawName string) string { - name := rawName - _, ok := abi.Events[name] - for idx := 0; ok; idx++ { - name = fmt.Sprintf("%s%d", rawName, idx) - _, ok = abi.Events[name] - } - return name -} - // MethodById looks up a method by the 4-byte id, // returns nil if none found. 
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { @@ -277,3 +251,20 @@ func UnpackRevert(data []byte) (string, error) { } return unpacked[0].(string), nil } + +// overloadedName returns the next available name for a given thing. +// Needed since solidity allows for overloading. +// +// e.g. if the abi contains Methods send, send1 +// overloadedName would return send2 for input send. +// +// overloadedName works for methods, events and errors. +func overloadedName(rawName string, isAvail func(string) bool) string { + name := rawName + ok := isAvail(name) + for idx := 0; ok; idx++ { + name = fmt.Sprintf("%s%d", rawName, idx) + ok = isAvail(name) + } + return name +} diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index a022ec5f9d..cc8dfc61c3 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -295,6 +295,20 @@ func TestOverloadedMethodSignature(t *testing.T) { check("bar0", "bar(uint256,uint256)", false) } +func TestCustomErrors(t *testing.T) { + json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]` + abi, err := JSON(strings.NewReader(json)) + if err != nil { + t.Fatal(err) + } + check := func(name string, expect string) { + if abi.Errors[name].Sig != expect { + t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig) + } + } + check("MyError", "MyError(uint256)") +} + func TestMultiPack(t *testing.T) { abi, err := JSON(strings.NewReader(jsondata)) if err != nil { diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go index e6d5245596..e6c117fe5f 100644 --- a/accounts/abi/argument.go +++ b/accounts/abi/argument.go @@ -81,13 +81,7 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { if len(arguments) != 0 { return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") } - // Nothing to unmarshal, return default variables - nonIndexedArgs := arguments.NonIndexed() - defaultVars := make([]interface{}, len(nonIndexedArgs)) - for index, arg := range nonIndexedArgs { - defaultVars[index] = reflect.New(arg.Type.GetType()) - } - return defaultVars, nil + return make([]interface{}, 0), nil } return arguments.UnpackValues(data) } @@ -137,7 +131,7 @@ func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{ dst := reflect.ValueOf(v).Elem() src := reflect.ValueOf(marshalledValues) - if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct { + if dst.Kind() == reflect.Struct { return set(dst.Field(0), src) } return set(dst, src) diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index b8065e8488..a4307a9529 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -17,6 +17,7 @@ package bind import ( + "context" "crypto/ecdsa" "errors" "io" @@ -74,6 +75,7 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account } return tx.WithSignature(signer, signature) }, + Context: context.Background(), }, nil } @@ -97,6 +99,7 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { } return tx.WithSignature(signer, signature) }, + Context: context.Background(), } } @@ -133,6 +136,7 @@ func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accou } return tx.WithSignature(signer, signature) }, + Context: context.Background(), }, nil } @@ -156,6 +160,7 @@ func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*Tr } return 
tx.WithSignature(signer, signature) }, + Context: context.Background(), }, nil } @@ -170,5 +175,6 @@ func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account) } return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id }, + Context: context.Background(), } } diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index eed6a44bbc..c16990f395 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -32,12 +32,12 @@ var ( // have any code associated with it (i.e. suicided). ErrNoCode = errors.New("no contract code at given address") - // This error is raised when attempting to perform a pending state action + // ErrNoPendingState is raised when attempting to perform a pending state action // on a backend that doesn't implement PendingContractCaller. ErrNoPendingState = errors.New("backend does not support pending state") - // This error is returned by WaitDeployed if contract creation leaves an - // empty contract behind. + // ErrNoCodeAfterDeploy is returned by WaitDeployed if contract creation leaves + // an empty contract behind. ErrNoCodeAfterDeploy = errors.New("no contract code after deployment") ) @@ -47,7 +47,8 @@ type ContractCaller interface { // CodeAt returns the code of the given account. This is needed to differentiate // between contract internal errors and the local chain being out of sync. CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) - // ContractCall executes an Ethereum contract call with the specified data as the + + // CallContract executes an Ethereum contract call with the specified data as the // input. CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) } @@ -58,6 +59,7 @@ type ContractCaller interface { type PendingContractCaller interface { // PendingCodeAt returns the code of the given account in the pending state. PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) + // PendingCallContract executes an Ethereum contract call against the pending state. PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) } @@ -67,19 +69,31 @@ type PendingContractCaller interface { // used when the user does not provide some needed values, but rather leaves it up // to the transactor to decide. type ContractTransactor interface { + // HeaderByNumber returns a block header from the current canonical chain. If + // number is nil, the latest known header is returned. + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + // PendingCodeAt returns the code of the given account in the pending state. PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) + // PendingNonceAt retrieves the current pending nonce associated with an account. PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) + // SuggestGasPrice retrieves the currently suggested gas price to allow a timely // execution of a transaction. SuggestGasPrice(ctx context.Context) (*big.Int, error) + + // SuggestGasTipCap retrieves the currently suggested 1559 priority fee to allow + // a timely execution of a transaction. + SuggestGasTipCap(ctx context.Context) (*big.Int, error) + // EstimateGas tries to estimate the gas needed to execute a specific // transaction based on the current pending state of the backend blockchain. 
// There is no guarantee that this is the true gas limit requirement as other // transactions may be added or removed by miners, but it should provide a basis // for setting a reasonable default. EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) + // SendTransaction injects the transaction into the pending pool for execution. SendTransaction(ctx context.Context, tx *types.Transaction) error } diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index a7db3195fc..ff7c7e97f3 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -86,7 +86,7 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis config: genesis.Config, events: filters.NewEventSystem(&filterBackend{database, blockchain}, false), } - backend.rollback() + backend.rollback(blockchain.CurrentBlock()) return backend } @@ -112,7 +112,9 @@ func (b *SimulatedBackend) Commit() { if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil { panic(err) // This cannot happen unless the simulator is wrong, fail in that case } - b.rollback() + // Using the last inserted block here makes it possible to build on a side + // chain after a fork. + b.rollback(b.pendingBlock) } // Rollback aborts all pending transactions, reverting to the last committed state. @@ -120,22 +122,49 @@ func (b *SimulatedBackend) Rollback() { b.mu.Lock() defer b.mu.Unlock() - b.rollback() + b.rollback(b.blockchain.CurrentBlock()) } -func (b *SimulatedBackend) rollback() { - blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) +func (b *SimulatedBackend) rollback(parent *types.Block) { + blocks, _ := core.GenerateChain(b.config, parent, ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) b.pendingBlock = blocks[0] b.pendingState, _ = state.New(b.pendingBlock.Root(), b.blockchain.StateCache(), nil) } +// Fork creates a side-chain that can be used to simulate reorgs. +// +// This function should be called with the ancestor block where the new side +// chain should be started. Transactions (old and new) can then be applied on +// top and Commit-ed. +// +// Note, the side-chain will only become canonical (and trigger the events) when +// it becomes longer. Until then CallContract will still operate on the current +// canonical chain. +// +// There is a % chance that the side chain becomes canonical at the same length +// to simulate live network behavior. +func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error { + b.mu.Lock() + defer b.mu.Unlock() + + if len(b.pendingBlock.Transactions()) != 0 { + return errors.New("pending block dirty") + } + block, err := b.blockByHash(ctx, parent) + if err != nil { + return err + } + b.rollback(block) + return nil +} + // stateByBlockNumber retrieves a state by a given blocknumber. 
func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) { if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 { return b.blockchain.State() } - block, err := b.blockByNumberNoLock(ctx, blockNumber) + block, err := b.blockByNumber(ctx, blockNumber) if err != nil { return nil, err } @@ -201,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common defer b.mu.Unlock() receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config) + if receipt == nil { + return nil, ethereum.NotFound + } return receipt, nil } @@ -228,6 +260,11 @@ func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (* b.mu.Lock() defer b.mu.Unlock() + return b.blockByHash(ctx, hash) +} + +// blockByHash retrieves a block based on the block hash without Locking. +func (b *SimulatedBackend) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { if hash == b.pendingBlock.Hash() { return b.pendingBlock, nil } @@ -246,12 +283,12 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) ( b.mu.Lock() defer b.mu.Unlock() - return b.blockByNumberNoLock(ctx, number) + return b.blockByNumber(ctx, number) } -// blockByNumberNoLock retrieves a block from the database by number, caching it +// blockByNumber retrieves a block from the database by number, caching it // (associated with its hash) if found without Lock. -func (b *SimulatedBackend) blockByNumberNoLock(ctx context.Context, number *big.Int) (*types.Block, error) { +func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 { return b.blockchain.CurrentBlock(), nil } @@ -428,6 +465,18 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated // chain doesn't have miners, we just return a gas price of 1 for any call. func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if b.pendingBlock.Header().BaseFee != nil { + return b.pendingBlock.Header().BaseFee, nil + } + return big.NewInt(1), nil +} + +// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated +// chain doesn't have miners, we just return a gas tip of 1 for any call. +func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { return big.NewInt(1), nil } @@ -448,8 +497,19 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } else { hi = b.pendingBlock.GasLimit() } + // Normalize the max fee per gas the call is willing to spend. + var feeCap *big.Int + if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { + return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } else if call.GasPrice != nil { + feeCap = call.GasPrice + } else if call.GasFeeCap != nil { + feeCap = call.GasFeeCap + } else { + feeCap = common.Big0 + } // Recap the highest gas allowance with account's balance. 
- if call.GasPrice != nil && call.GasPrice.BitLen() != 0 { + if feeCap.BitLen() != 0 { balance := b.pendingState.GetBalance(call.From) // from can't be nil available := new(big.Int).Set(balance) if call.Value != nil { @@ -458,14 +518,14 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } available.Sub(available, call.Value) } - allowance := new(big.Int).Div(available, call.GasPrice) + allowance := new(big.Int).Div(available, feeCap) if allowance.IsUint64() && hi > allowance.Uint64() { transfer := call.Value if transfer == nil { transfer = new(big.Int) } log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, - "sent", transfer, "gasprice", call.GasPrice, "fundable", allowance) + "sent", transfer, "feecap", feeCap, "fundable", allowance) hi = allowance.Uint64() } } @@ -527,10 +587,38 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs // callContract implements common code between normal and pending contract calls. // state is modified during execution, make sure to copy it if necessary. func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) { - // Ensure message is initialized properly. - if call.GasPrice == nil { - call.GasPrice = big.NewInt(1) + // Gas prices post 1559 need to be initialized + if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { + return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } + head := b.blockchain.CurrentHeader() + if !b.blockchain.Config().IsLondon(head.Number) { + // If there's no basefee, then it must be a non-1559 execution + if call.GasPrice == nil { + call.GasPrice = new(big.Int) + } + call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice + } else { + // A basefee is provided, necessitating 1559-type execution + if call.GasPrice != nil { + // User specified the legacy gas field, convert to 1559 gas typing + call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice + } else { + // User specified 1559 gas feilds (or none), use those + if call.GasFeeCap == nil { + call.GasFeeCap = new(big.Int) + } + if call.GasTipCap == nil { + call.GasTipCap = new(big.Int) + } + // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes + call.GasPrice = new(big.Int) + if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 { + call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap) + } + } } + // Ensure message is initialized properly. if call.Gas == 0 { call.Gas = 50000000 } @@ -547,31 +635,33 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. - vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{}) + vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true}) gasPool := new(core.GasPool).AddGas(math.MaxUint64) return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb() } // SendTransaction updates the pending block to include the given transaction. -// It panics if the transaction is invalid. func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { b.mu.Lock() defer b.mu.Unlock() - // Check transaction validity. 
- block := b.blockchain.CurrentBlock() + // Get the last block + block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash()) + if err != nil { + return fmt.Errorf("could not fetch parent") + } + // Check transaction validity signer := types.MakeSigner(b.blockchain.Config(), block.Number()) sender, err := types.Sender(signer, tx) if err != nil { - panic(fmt.Errorf("invalid transaction: %v", err)) + return fmt.Errorf("invalid transaction: %v", err) } nonce := b.pendingState.GetNonce(sender) if tx.Nonce() != nonce { - panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)) + return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce) } - - // Include tx in chain. + // Include tx in chain blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { for _, tx := range b.pendingBlock.Transactions() { block.AddTxWithChain(b.blockchain, tx) @@ -713,9 +803,11 @@ type callMsg struct { func (m callMsg) From() common.Address { return m.CallMsg.From } func (m callMsg) Nonce() uint64 { return 0 } -func (m callMsg) CheckNonce() bool { return false } +func (m callMsg) IsFake() bool { return true } func (m callMsg) To() *common.Address { return m.CallMsg.To } func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } +func (m callMsg) GasFeeCap() *big.Int { return m.CallMsg.GasFeeCap } +func (m callMsg) GasTipCap() *big.Int { return m.CallMsg.GasTipCap } func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } func (m callMsg) Value() *big.Int { return m.CallMsg.Value } func (m callMsg) Data() []byte { return m.CallMsg.Data } diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 64ddf8bb2c..8a0cbe3357 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -21,6 +21,7 @@ import ( "context" "errors" "math/big" + "math/rand" "reflect" "strings" "testing" @@ -58,9 +59,12 @@ func TestSimulatedBackend(t *testing.T) { } // generate a transaction and confirm you can retrieve it + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + code := `6060604052600a8060106000396000f360606040526008565b00` var gas uint64 = 3000000 - tx := types.NewContractCreation(0, big.NewInt(0), gas, big.NewInt(1), common.FromHex(code)) + tx := types.NewContractCreation(0, big.NewInt(0), gas, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, key) err = sim.SendTransaction(context.Background(), tx) @@ -110,14 +114,14 @@ var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 func simTestBackend(testAddr common.Address) *SimulatedBackend { return NewSimulatedBackend( core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, + testAddr: {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) } func TestNewSimulatedBackend(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - expectedBal := big.NewInt(10000000000) + expectedBal := big.NewInt(10000000000000000) sim := simTestBackend(testAddr) defer sim.Close() @@ -136,7 +140,7 @@ func TestNewSimulatedBackend(t *testing.T) { } } -func TestSimulatedBackend_AdjustTime(t *testing.T) { +func TestAdjustTime(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -153,11 +157,15 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { } } -func 
TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { +func TestNewAdjustTimeFail(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) + // Create tx and send - tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -178,7 +186,7 @@ func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { t.Errorf("adjusted time not equal to a minute. prev: %v, new: %v", prevTime, newTime) } // Put a transaction after adjusting time - tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -191,9 +199,9 @@ func TestNewSimulatedBackend_AdjustTimeFail(t *testing.T) { } } -func TestSimulatedBackend_BalanceAt(t *testing.T) { +func TestBalanceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - expectedBal := big.NewInt(10000000000) + expectedBal := big.NewInt(10000000000000000) sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -208,7 +216,7 @@ func TestSimulatedBackend_BalanceAt(t *testing.T) { } } -func TestSimulatedBackend_BlockByHash(t *testing.T) { +func TestBlockByHash(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -229,7 +237,7 @@ func TestSimulatedBackend_BlockByHash(t *testing.T) { } } -func TestSimulatedBackend_BlockByNumber(t *testing.T) { +func TestBlockByNumber(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -264,7 +272,7 @@ func TestSimulatedBackend_BlockByNumber(t *testing.T) { } } -func TestSimulatedBackend_NonceAt(t *testing.T) { +func TestNonceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -281,7 +289,10 @@ func TestSimulatedBackend_NonceAt(t *testing.T) { } // create a signed transaction to send - tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -314,7 +325,7 @@ func TestSimulatedBackend_NonceAt(t *testing.T) { } } -func TestSimulatedBackend_SendTransaction(t *testing.T) { +func TestSendTransaction(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -322,7 +333,10 @@ func TestSimulatedBackend_SendTransaction(t *testing.T) { bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx 
:= types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -345,19 +359,22 @@ func TestSimulatedBackend_SendTransaction(t *testing.T) { } } -func TestSimulatedBackend_TransactionByHash(t *testing.T) { +func TestTransactionByHash(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := NewSimulatedBackend( core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, + testAddr: {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer sim.Close() bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -396,7 +413,7 @@ func TestSimulatedBackend_TransactionByHash(t *testing.T) { } } -func TestSimulatedBackend_EstimateGas(t *testing.T) { +func TestEstimateGas(t *testing.T) { /* pragma solidity ^0.6.4; contract GasEstimation { @@ -479,7 +496,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) { GasPrice: big.NewInt(0), Value: nil, Data: common.Hex2Bytes("b9b046f9"), - }, 0, errors.New("invalid opcode: opcode 0xfe not defined"), nil}, + }, 0, errors.New("invalid opcode: INVALID"), nil}, {"Valid", ethereum.CallMsg{ From: addr, @@ -514,7 +531,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) { } } -func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { +func TestEstimateGasWithPrice(t *testing.T) { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -533,7 +550,7 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { To: &recipient, Gas: 0, GasPrice: big.NewInt(0), - Value: big.NewInt(1000), + Value: big.NewInt(100000000000), Data: nil, }, 21000, nil}, @@ -541,8 +558,8 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { From: addr, To: &recipient, Gas: 0, - GasPrice: big.NewInt(1000), - Value: big.NewInt(1000), + GasPrice: big.NewInt(100000000000), + Value: big.NewInt(100000000000), Data: nil, }, 21000, nil}, @@ -560,28 +577,51 @@ func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) { To: &recipient, Gas: 0, GasPrice: big.NewInt(2e14), // gascost = 4.2ether - Value: big.NewInt(1000), + Value: big.NewInt(100000000000), Data: nil, }, 21000, errors.New("gas required exceeds allowance (10999)")}, // 10999=(2.2ether-1000wei)/(2e14) - } - for _, c := range cases { + + {"EstimateEIP1559WithHighFees", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether + GasTipCap: big.NewInt(1), + Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether + Data: nil, + }, params.TxGas, nil}, + + {"EstimateEIP1559WithSuperHighFees", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether + GasTipCap: big.NewInt(1), + Value: big.NewInt(1e17 + 1), // the remaining balance for fee is 2.1ether + Data: nil, + }, params.TxGas, errors.New("gas required exceeds allowance (20999)")}, // 20999=(2.2ether-0.1ether-1wei)/(1e14) + } + for i, c := range cases { 
got, err := sim.EstimateGas(context.Background(), c.message) if c.expectError != nil { if err == nil { - t.Fatalf("Expect error, got nil") + t.Fatalf("test %d: expect error, got nil", i) } if c.expectError.Error() != err.Error() { - t.Fatalf("Expect error, want %v, got %v", c.expectError, err) + t.Fatalf("test %d: expect error, want %v, got %v", i, c.expectError, err) } continue } + if c.expectError == nil && err != nil { + t.Fatalf("test %d: didn't expect error, got %v", i, err) + } if got != c.expect { - t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got) + t.Fatalf("test %d: gas estimation mismatch, want %d, got %d", i, c.expect, got) } } } -func TestSimulatedBackend_HeaderByHash(t *testing.T) { +func TestHeaderByHash(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -602,7 +642,7 @@ func TestSimulatedBackend_HeaderByHash(t *testing.T) { } } -func TestSimulatedBackend_HeaderByNumber(t *testing.T) { +func TestHeaderByNumber(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -649,7 +689,7 @@ func TestSimulatedBackend_HeaderByNumber(t *testing.T) { } } -func TestSimulatedBackend_TransactionCount(t *testing.T) { +func TestTransactionCount(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -668,9 +708,11 @@ func TestSimulatedBackend_TransactionCount(t *testing.T) { if count != 0 { t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count) } - // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -699,7 +741,7 @@ func TestSimulatedBackend_TransactionCount(t *testing.T) { } } -func TestSimulatedBackend_TransactionInBlock(t *testing.T) { +func TestTransactionInBlock(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -723,9 +765,11 @@ func TestSimulatedBackend_TransactionInBlock(t *testing.T) { if pendingNonce != uint64(0) { t.Errorf("expected pending nonce of 0 got %v", pendingNonce) } - // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -762,7 +806,7 @@ func TestSimulatedBackend_TransactionInBlock(t *testing.T) { } } -func TestSimulatedBackend_PendingNonceAt(t *testing.T) { +func TestPendingNonceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -780,7 +824,10 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + 
head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -803,7 +850,7 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } // make a new transaction with a nonce of 1 - tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err = types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -824,7 +871,7 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } } -func TestSimulatedBackend_TransactionReceipt(t *testing.T) { +func TestTransactionReceipt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -832,7 +879,10 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) { bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -855,7 +905,7 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) { } } -func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { +func TestSuggestGasPrice(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, @@ -866,12 +916,12 @@ func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { if err != nil { t.Errorf("could not get gas price: %v", err) } - if gasPrice.Uint64() != uint64(1) { - t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64()) + if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() { + t.Errorf("gas price was not expected value of %v. actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64()) } } -func TestSimulatedBackend_PendingCodeAt(t *testing.T) { +func TestPendingCodeAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -907,7 +957,7 @@ func TestSimulatedBackend_PendingCodeAt(t *testing.T) { } } -func TestSimulatedBackend_CodeAt(t *testing.T) { +func TestCodeAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -946,7 +996,7 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: // receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} -func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { +func TestPendingAndCallContract(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -1030,7 +1080,7 @@ contract Reverter { } } }*/ -func TestSimulatedBackend_CallContractRevert(t *testing.T) { +func TestCallContractRevert(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) defer sim.Close() @@ -1114,3 +1164,175 @@ func TestSimulatedBackend_CallContractRevert(t *testing.T) { sim.Commit() } } + +// TestFork check that the chain length after a reorg is correct. +// Steps: +// 1. Save the current block which will serve as parent for the fork. +// 2. Mine n blocks with n ∈ [0, 20]. +// 3. Assert that the chain length is n. +// 4. Fork by using the parent block as ancestor. +// 5. Mine n+1 blocks which should trigger a reorg. +// 6. Assert that the chain length is n+1. +// Since Commit() was called 2n+1 times in total, +// having a chain length of just n+1 means that a reorg occurred. +func TestFork(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parent := sim.blockchain.CurrentBlock() + // 2. + n := int(rand.Int31n(21)) + for i := 0; i < n; i++ { + sim.Commit() + } + // 3. + if sim.blockchain.CurrentBlock().NumberU64() != uint64(n) { + t.Error("wrong chain length") + } + // 4. + sim.Fork(context.Background(), parent.Hash()) + // 5. + for i := 0; i < n+1; i++ { + sim.Commit() + } + // 6. 
+ if sim.blockchain.CurrentBlock().NumberU64() != uint64(n+1) { + t.Error("wrong chain length") + } +} + +/* +Example contract to test event emission: + +pragma solidity >=0.7.0 <0.9.0; +contract Callable { + event Called(); + function Call() public { emit Called(); } +} +*/ +const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +const callableBin = "6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806334e2292114602d575b600080fd5b60336035565b005b7f81fab7a4a0aa961db47eefc81f143a5220e8c8495260dd65b1356f1d19d3c7b860405160405180910390a156fea2646970667358221220029436d24f3ac598ceca41d4d712e13ced6d70727f4cdc580667de66d2f51d8b64736f6c63430008010033" + +// TestForkLogsReborn check that the simulated reorgs +// correctly remove and reborn logs. +// Steps: +// 1. Deploy the Callable contract. +// 2. Set up an event subscription. +// 3. Save the current block which will serve as parent for the fork. +// 4. Send a transaction. +// 5. Check that the event was included. +// 6. Fork by using the parent block as ancestor. +// 7. Mine two blocks to trigger a reorg. +// 8. Check that the event was removed. +// 9. Re-send the transaction and mine a block. +// 10. Check that the event was reborn. +func TestForkLogsReborn(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parsed, _ := abi.JSON(strings.NewReader(callableAbi)) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + _, _, contract, err := bind.DeployContract(auth, parsed, common.FromHex(callableBin), sim) + if err != nil { + t.Errorf("deploying contract: %v", err) + } + sim.Commit() + // 2. + logs, sub, err := contract.WatchLogs(nil, "Called") + if err != nil { + t.Errorf("watching logs: %v", err) + } + defer sub.Unsubscribe() + // 3. + parent := sim.blockchain.CurrentBlock() + // 4. + tx, err := contract.Transact(auth, "Call") + if err != nil { + t.Errorf("transacting: %v", err) + } + sim.Commit() + // 5. + log := <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if log.Removed { + t.Error("Event should be included") + } + // 6. + if err := sim.Fork(context.Background(), parent.Hash()); err != nil { + t.Errorf("forking: %v", err) + } + // 7. + sim.Commit() + sim.Commit() + // 8. + log = <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if !log.Removed { + t.Error("Event should be removed") + } + // 9. + if err := sim.SendTransaction(context.Background(), tx); err != nil { + t.Errorf("sending transaction: %v", err) + } + sim.Commit() + // 10. + log = <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if log.Removed { + t.Error("Event should be included") + } +} + +// TestForkResendTx checks that re-sending a TX after a fork +// is possible and does not cause a "nonce mismatch" panic. +// Steps: +// 1. Save the current block which will serve as parent for the fork. +// 2. Send a transaction. +// 3. Check that the TX is included in block 1. +// 4. Fork by using the parent block as ancestor. +// 5. Mine a block, Re-send the transaction and mine another one. +// 6. Check that the TX is now included in block 2. 
+func TestForkResendTx(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parent := sim.blockchain.CurrentBlock() + // 2. + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + _tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) + tx, _ := types.SignTx(_tx, types.HomesteadSigner{}, testKey) + sim.SendTransaction(context.Background(), tx) + sim.Commit() + // 3. + receipt, _ := sim.TransactionReceipt(context.Background(), tx.Hash()) + if h := receipt.BlockNumber.Uint64(); h != 1 { + t.Errorf("TX included in wrong block: %d", h) + } + // 4. + if err := sim.Fork(context.Background(), parent.Hash()); err != nil { + t.Errorf("forking: %v", err) + } + // 5. + sim.Commit() + if err := sim.SendTransaction(context.Background(), tx); err != nil { + t.Errorf("sending transaction: %v", err) + } + sim.Commit() + // 6. + receipt, _ = sim.TransactionReceipt(context.Background(), tx.Hash()) + if h := receipt.BlockNumber.Uint64(); h != 2 { + t.Errorf("TX included in wrong block: %d", h) + } +} diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 55aca31a1a..f4e5a2a900 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -21,6 +21,8 @@ import ( "errors" "fmt" "math/big" + "strings" + "sync" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -49,9 +51,11 @@ type TransactOpts struct { Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state) Signer SignerFn // Method to use for signing the transaction (mandatory) - Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) - GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) - GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) + Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) + GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) + GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle) + GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle) + GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) @@ -74,6 +78,29 @@ type WatchOpts struct { Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) } +// MetaData collects all metadata for a bound contract. +type MetaData struct { + mu sync.Mutex + Sigs map[string]string + Bin string + ABI string + ab *abi.ABI +} + +func (m *MetaData) GetAbi() (*abi.ABI, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.ab != nil { + return m.ab, nil + } + if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil { + return nil, err + } else { + m.ab = &parsed + } + return m.ab, nil +} + // BoundContract is the base wrapper object that reflects a contract on the // Ethereum network. It contains a collection of methods that are used by the // higher level contract bindings to operate. 
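
Note on the next hunk: it splits transaction construction into createDynamicTx and createLegacyTx. When opts.GasFeeCap is left nil, the dynamic path falls back to gasFeeCap = gasTipCap + 2*baseFee, which leaves headroom for the base fee to rise before the transaction is included. A minimal sketch of that fallback, assuming only math/big; fallbackFeeCap is an illustrative name, not a helper added by this patch:

package main

import (
	"fmt"
	"math/big"
)

// fallbackFeeCap mirrors the default used by createDynamicTx below:
// gasFeeCap = gasTipCap + 2*head.BaseFee when no explicit cap is supplied.
func fallbackFeeCap(gasTipCap, baseFee *big.Int) *big.Int {
	return new(big.Int).Add(
		gasTipCap,
		new(big.Int).Mul(baseFee, big.NewInt(2)),
	)
}

func main() {
	// A suggested tip of 5 wei and a base fee of 100 wei give a cap of 205 wei,
	// the value asserted by TestTransactGasFee later in this diff.
	fmt.Println(fallbackFeeCap(big.NewInt(5), big.NewInt(100)))
}
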
@@ -204,57 +231,158 @@ func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) return c.transact(opts, &c.address, nil) } -// transact executes an actual transaction invocation, first deriving any missing -// authorization fields, and then scheduling the transaction for execution. -func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { - var err error - - // Ensure a valid value field and resolve the account nonce +func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Address, input []byte, head *types.Header) (*types.Transaction, error) { + // Normalize value value := opts.Value if value == nil { value = new(big.Int) } - var nonce uint64 - if opts.Nonce == nil { - nonce, err = c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From) + // Estimate TipCap + gasTipCap := opts.GasTipCap + if gasTipCap == nil { + tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context)) if err != nil { - return nil, fmt.Errorf("failed to retrieve account nonce: %v", err) + return nil, err } - } else { - nonce = opts.Nonce.Uint64() + gasTipCap = tip + } + // Estimate FeeCap + gasFeeCap := opts.GasFeeCap + if gasFeeCap == nil { + gasFeeCap = new(big.Int).Add( + gasTipCap, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + } + if gasFeeCap.Cmp(gasTipCap) < 0 { + return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap) + } + // Estimate GasLimit + gasLimit := opts.GasLimit + if opts.GasLimit == 0 { + var err error + gasLimit, err = c.estimateGasLimit(opts, contract, input, nil, gasTipCap, gasFeeCap, value) + if err != nil { + return nil, err + } + } + // create the transaction + nonce, err := c.getNonce(opts) + if err != nil { + return nil, err + } + baseTx := &types.DynamicFeeTx{ + To: contract, + Nonce: nonce, + GasFeeCap: gasFeeCap, + GasTipCap: gasTipCap, + Gas: gasLimit, + Value: value, + Data: input, + } + return types.NewTx(baseTx), nil +} + +func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { + if opts.GasFeeCap != nil || opts.GasTipCap != nil { + return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet") + } + // Normalize value + value := opts.Value + if value == nil { + value = new(big.Int) } - // Figure out the gas allowance and gas price values + // Estimate GasPrice gasPrice := opts.GasPrice if gasPrice == nil { - gasPrice, err = c.transactor.SuggestGasPrice(ensureContext(opts.Context)) + price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context)) if err != nil { - return nil, fmt.Errorf("failed to suggest gas price: %v", err) + return nil, err } + gasPrice = price } + // Estimate GasLimit gasLimit := opts.GasLimit - if gasLimit == 0 { - // Gas estimation cannot succeed without code for method invocations - if contract != nil { - if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil { - return nil, err - } else if len(code) == 0 { - return nil, ErrNoCode - } - } - // If the contract surely has code (or code is not needed), estimate the transaction - msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input} - gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg) + if opts.GasLimit == 0 { + var err error + gasLimit, err = c.estimateGasLimit(opts, contract, input, gasPrice, nil, nil, value) 
if err != nil { - return nil, fmt.Errorf("failed to estimate gas needed: %v", err) + return nil, err + } + } + // create the transaction + nonce, err := c.getNonce(opts) + if err != nil { + return nil, err + } + baseTx := &types.LegacyTx{ + To: contract, + Nonce: nonce, + GasPrice: gasPrice, + Gas: gasLimit, + Value: value, + Data: input, + } + return types.NewTx(baseTx), nil +} + +func (c *BoundContract) estimateGasLimit(opts *TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) { + if contract != nil { + // Gas estimation cannot succeed without code for method invocations. + if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil { + return 0, err + } else if len(code) == 0 { + return 0, ErrNoCode } } - // Create the transaction, sign it and schedule it for execution - var rawTx *types.Transaction - if contract == nil { - rawTx = types.NewContractCreation(nonce, value, gasLimit, gasPrice, input) + msg := ethereum.CallMsg{ + From: opts.From, + To: contract, + GasPrice: gasPrice, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Value: value, + Data: input, + } + return c.transactor.EstimateGas(ensureContext(opts.Context), msg) +} + +func (c *BoundContract) getNonce(opts *TransactOpts) (uint64, error) { + if opts.Nonce == nil { + return c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From) } else { - rawTx = types.NewTransaction(nonce, c.address, value, gasLimit, gasPrice, input) + return opts.Nonce.Uint64(), nil } +} + +// transact executes an actual transaction invocation, first deriving any missing +// authorization fields, and then scheduling the transaction for execution. +func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { + if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) { + return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } + // Create the transaction + var ( + rawTx *types.Transaction + err error + ) + if opts.GasPrice != nil { + rawTx, err = c.createLegacyTx(opts, contract, input) + } else { + // Only query for basefee if gasPrice not specified + if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil { + return nil, errHead + } else if head.BaseFee != nil { + rawTx, err = c.createDynamicTx(opts, contract, input, head) + } else { + // Chain is not London ready -> use legacy transaction + rawTx, err = c.createLegacyTx(opts, contract, input) + } + } + if err != nil { + return nil, err + } + // Sign the transaction and schedule it for execution if opts.Signer == nil { return nil, errors.New("no signer to authorize the transaction with") } @@ -353,6 +481,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter // UnpackLog unpacks a retrieved log into the provided output structure. func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error { + if log.Topics[0] != c.abi.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } if len(log.Data) > 0 { if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil { return err @@ -369,6 +500,9 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) // UnpackLogIntoMap unpacks a retrieved log into the provided map. 
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error { + if log.Topics[0] != c.abi.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } if len(log.Data) > 0 { if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil { return err @@ -387,7 +521,7 @@ func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event strin // user specified it as such. func ensureContext(ctx context.Context) context.Context { if ctx == nil { - return context.TODO() + return context.Background() } return ctx } diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go index c4740f68b7..08ba18f95e 100644 --- a/accounts/abi/bind/base_test.go +++ b/accounts/abi/bind/base_test.go @@ -31,8 +31,49 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" ) +func mockSign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil } + +type mockTransactor struct { + baseFee *big.Int + gasTipCap *big.Int + gasPrice *big.Int + suggestGasTipCapCalled bool + suggestGasPriceCalled bool +} + +func (mt *mockTransactor) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + return &types.Header{BaseFee: mt.baseFee}, nil +} + +func (mt *mockTransactor) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + return []byte{1}, nil +} + +func (mt *mockTransactor) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + return 0, nil +} + +func (mt *mockTransactor) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + mt.suggestGasPriceCalled = true + return mt.gasPrice, nil +} + +func (mt *mockTransactor) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + mt.suggestGasTipCapCalled = true + return mt.gasTipCap, nil +} + +func (mt *mockTransactor) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { + return 0, nil +} + +func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transaction) error { + return nil +} + type mockCaller struct { codeAtBlockNumber *big.Int callContractBlockNumber *big.Int @@ -110,7 +151,7 @@ const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16 func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) { hash := crypto.Keccak256Hash([]byte("testName")) topics := []common.Hash{ - common.HexToHash("0x0"), + crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")), hash, } mockLog := newMockLog(topics, common.HexToHash("0x0")) @@ -135,7 +176,7 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { } hash := crypto.Keccak256Hash(sliceBytes) topics := []common.Hash{ - common.HexToHash("0x0"), + crypto.Keccak256Hash([]byte("received(string[],address,uint256,bytes)")), hash, } mockLog := newMockLog(topics, common.HexToHash("0x0")) @@ -160,7 +201,7 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) { } hash := crypto.Keccak256Hash(arrBytes) topics := []common.Hash{ - common.HexToHash("0x0"), + crypto.Keccak256Hash([]byte("received(address[2],address,uint256,bytes)")), hash, } mockLog := newMockLog(topics, common.HexToHash("0x0")) @@ -187,7 +228,7 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) { var functionTy [24]byte copy(functionTy[:], functionTyBytes[0:24]) topics := []common.Hash{ - common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), + 
crypto.Keccak256Hash([]byte("received(function,address,uint256,bytes)")), common.BytesToHash(functionTyBytes), } mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) @@ -208,7 +249,7 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) { bytes := []byte{1, 2, 3, 4, 5} hash := crypto.Keccak256Hash(bytes) topics := []common.Hash{ - common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), + crypto.Keccak256Hash([]byte("received(bytes,address,uint256,bytes)")), hash, } mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) @@ -226,6 +267,51 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) { unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } +func TestTransactGasFee(t *testing.T) { + assert := assert.New(t) + + // GasTipCap and GasFeeCap + // When opts.GasTipCap and opts.GasFeeCap are nil + mt := &mockTransactor{baseFee: big.NewInt(100), gasTipCap: big.NewInt(5)} + bc := bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil) + opts := &bind.TransactOpts{Signer: mockSign} + tx, err := bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(5), tx.GasTipCap()) + assert.Equal(big.NewInt(205), tx.GasFeeCap()) + assert.Nil(opts.GasTipCap) + assert.Nil(opts.GasFeeCap) + assert.True(mt.suggestGasTipCapCalled) + + // Second call to Transact should use latest suggested GasTipCap + mt.gasTipCap = big.NewInt(6) + mt.suggestGasTipCapCalled = false + tx, err = bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(6), tx.GasTipCap()) + assert.Equal(big.NewInt(206), tx.GasFeeCap()) + assert.True(mt.suggestGasTipCapCalled) + + // GasPrice + // When opts.GasPrice is nil + mt = &mockTransactor{gasPrice: big.NewInt(5)} + bc = bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil) + opts = &bind.TransactOpts{Signer: mockSign} + tx, err = bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(5), tx.GasPrice()) + assert.Nil(opts.GasPrice) + assert.True(mt.suggestGasPriceCalled) + + // Second call to Transact should use latest suggested GasPrice + mt.gasPrice = big.NewInt(6) + mt.suggestGasPriceCalled = false + tx, err = bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(6), tx.GasPrice()) + assert.True(mt.suggestGasPriceCalled) +} + func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) { received := make(map[string]interface{}) if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil { diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index 0e98709b14..ff69a78c64 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -88,6 +88,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] transactIdentifiers = make(map[string]bool) eventIdentifiers = make(map[string]bool) ) + + for _, input := range evmABI.Constructor.Inputs { + if hasStruct(input.Type) { + bindStructType[lang](input.Type, structs) + } + } + for _, original := range evmABI.Methods { // Normalize the method for capital cases and non-anonymous inputs/outputs normalized := original diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index d0958cb62f..992497993a 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -298,7 +298,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := 
bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy an interaction tester contract and call a transaction on it @@ -353,7 +353,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tuple tester contract and execute a structured call on it @@ -399,7 +399,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tuple tester contract and execute a structured call on it @@ -457,7 +457,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a slice tester contract and execute a n array call on it @@ -505,7 +505,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a default method invoker contract and execute its default method @@ -571,7 +571,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a structs method invoker contract and execute its default method @@ -703,7 +703,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a funky gas pattern contract @@ -753,7 +753,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) 
defer sim.Close() // Deploy a sender tester contract and execute a structured call on it @@ -828,7 +828,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a underscorer tester contract and execute a structured call on it @@ -922,7 +922,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy an eventer contract @@ -1112,7 +1112,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() //deploy the test contract @@ -1247,7 +1247,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() _, _, contract, err := DeployTuple(auth, sim) @@ -1389,7 +1389,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() //deploy the test contract @@ -1454,7 +1454,7 @@ var bindTests = []struct { // Initialize test accounts key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // deploy the test contract @@ -1544,7 +1544,7 @@ var bindTests = []struct { addr := crypto.PubkeyToAddress(key.PublicKey) // Deploy registrar contract - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) @@ -1606,7 +1606,7 @@ var bindTests = []struct { addr := crypto.PubkeyToAddress(key.PublicKey) // Deploy registrar contract - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer 
sim.Close() transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) @@ -1668,7 +1668,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tester contract and execute a structured call on it @@ -1728,7 +1728,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 1000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000) defer sim.Close() opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) @@ -1785,6 +1785,176 @@ var bindTests = []struct { nil, nil, }, + // Test resolving single struct argument + { + `NewSingleStructArgument`, + ` + pragma solidity ^0.8.0; + + contract NewSingleStructArgument { + struct MyStruct{ + uint256 a; + uint256 b; + } + event StructEvent(MyStruct s); + function TestEvent() public { + emit StructEvent(MyStruct({a: 1, b: 2})); + } + } + `, + []string{"608060405234801561001057600080fd5b50610113806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806324ec1d3f14602d575b600080fd5b60336035565b005b7fb4b2ff75e30cb4317eaae16dd8a187dd89978df17565104caa6c2797caae27d460405180604001604052806001815260200160028152506040516078919060ba565b60405180910390a1565b6040820160008201516096600085018260ad565b50602082015160a7602085018260ad565b50505050565b60b48160d3565b82525050565b600060408201905060cd60008301846082565b92915050565b600081905091905056fea26469706673582212208823628796125bf9941ce4eda18da1be3cf2931b231708ab848e1bd7151c0c9a64736f6c63430008070033"}, + []string{`[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"indexed":false,"internalType":"struct Test.MyStruct","name":"s","type":"tuple"}],"name":"StructEvent","type":"event"},{"inputs":[],"name":"TestEvent","outputs":[],"stateMutability":"nonpayable","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, _, d, err := DeployNewSingleStructArgument(user, sim) + if err != nil { + t.Fatalf("Failed to deploy contract %v", err) + } + sim.Commit() + + _, err = d.TestEvent(user) + if err != nil { + t.Fatalf("Failed to call contract %v", err) + } + sim.Commit() + + it, err := d.FilterStructEvent(nil) + if err != nil { + t.Fatalf("Failed to filter contract event %v", err) + } + var count int + for it.Next() { + if it.Event.S.A.Cmp(big.NewInt(1)) != 0 { + t.Fatal("Unexpected contract event") + } + if it.Event.S.B.Cmp(big.NewInt(2)) != 0 { + t.Fatal("Unexpected contract event") + } + count += 1 + } + if count 
!= 1 { + t.Fatal("Unexpected contract event number") + } + `, + nil, + nil, + nil, + nil, + }, + // Test errors introduced in v0.8.4 + { + `NewErrors`, + ` + pragma solidity >0.8.4; + + contract NewErrors { + error MyError(uint256); + error MyError1(uint256); + error MyError2(uint256, uint256); + error MyError3(uint256 a, uint256 b, uint256 c); + function Error() public pure { + revert MyError3(1,2,3); + } + } + `, + []string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"}, + []string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, tx, contract, err := DeployNewErrors(user, sim) + if err != nil { + t.Fatal(err) + } + sim.Commit() + _, err = bind.WaitDeployed(nil, sim, tx) + if err != nil { + t.Error(err) + } + if err := contract.Error(new(bind.CallOpts)); err == nil { + t.Fatalf("expected contract to throw error") + } + // TODO (MariusVanDerWijden unpack error using abigen + // once that is implemented + `, + nil, + nil, + nil, + nil, + }, + { + name: `ConstructorWithStructParam`, + contract: ` + pragma solidity >=0.8.0 <0.9.0; + + contract ConstructorWithStructParam { + struct StructType { + uint256 field; + } + + constructor(StructType memory st) {} + } + `, + bytecode: 
[]string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`}, + abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`}, + imports: ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + tester: ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, tx, _, err := DeployConstructorWithStructParam(user, sim, ConstructorWithStructParamStructType{Field: big.NewInt(42)}) + if err != nil { + t.Fatalf("DeployConstructorWithStructParam() got err %v; want nil err", err) + } + sim.Commit() + + if _, err = bind.WaitDeployed(nil, sim, tx); err != nil { + t.Logf("Deployment tx: %+v", tx) + t.Errorf("bind.WaitDeployed(nil, %T, ) got err %v; want nil err", sim, err) + } + `, + }, } // Tests that packages generated by the binder can be successfully compiled and @@ -1808,22 +1978,23 @@ func TestGolangBindings(t *testing.T) { } // Generate the test suite for all the contracts for i, tt := range bindTests { - var types []string - if tt.types != nil { - types = tt.types - } else { - types = []string{tt.name} - } - // Generate the binding and create a Go source file in the workspace - bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } - if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) - } - // Generate the test file with the injected test code - code := fmt.Sprintf(` + t.Run(tt.name, func(t *testing.T) { + var types []string + if tt.types != nil { + types = tt.types + } else { + types = []string{tt.name} + } + // Generate the binding and create a Go source file in the workspace + bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) + if err != nil { + t.Fatalf("test %d: failed to generate binding: %v", i, err) + } + if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { + t.Fatalf("test 
%d: failed to write binding: %v", i, err) + } + // Generate the test file with the injected test code + code := fmt.Sprintf(` package bindtest import ( @@ -1835,9 +2006,10 @@ func TestGolangBindings(t *testing.T) { %s } `, tt.imports, tt.name, tt.tester) - if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { - t.Fatalf("test %d: failed to write tests: %v", i, err) - } + if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { + t.Fatalf("test %d: failed to write tests: %v", i, err) + } + }) } // Convert the package to go modules and use the current source for go-ethereum moder := exec.Command(gocmd, "mod", "init", "bindtest") diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index e9bdd3d414..492bad8c57 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -90,6 +90,7 @@ package {{.Package}} import ( "math/big" "strings" + "errors" ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -101,6 +102,7 @@ import ( // Reference imports to suppress errors if they are not otherwise used. var ( + _ = errors.New _ = big.NewInt _ = strings.NewReader _ = ethereum.NotFound @@ -120,32 +122,48 @@ var ( {{end}} {{range $contract := .Contracts}} + // {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract. + var {{.Type}}MetaData = &bind.MetaData{ + ABI: "{{.InputABI}}", + {{if $contract.FuncSigs -}} + Sigs: map[string]string{ + {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}", + {{end}} + }, + {{end -}} + {{if .InputBin -}} + Bin: "0x{{.InputBin}}", + {{end}} + } // {{.Type}}ABI is the input ABI used to generate the binding from. - const {{.Type}}ABI = "{{.InputABI}}" + // Deprecated: Use {{.Type}}MetaData.ABI instead. + var {{.Type}}ABI = {{.Type}}MetaData.ABI {{if $contract.FuncSigs}} + // Deprecated: Use {{.Type}}MetaData.Sigs instead. // {{.Type}}FuncSigs maps the 4-byte function signature to its string representation. - var {{.Type}}FuncSigs = map[string]string{ - {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}", - {{end}} - } + var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs {{end}} {{if .InputBin}} // {{.Type}}Bin is the compiled bytecode used for deploying new contracts. - var {{.Type}}Bin = "0x{{.InputBin}}" + // Deprecated: Use {{.Type}}MetaData.Bin instead. + var {{.Type}}Bin = {{.Type}}MetaData.Bin // Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it. 
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) { - parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI)) + parsed, err := {{.Type}}MetaData.GetAbi() if err != nil { return common.Address{}, nil, nil, err } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } {{range $pattern, $name := .Libraries}} {{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend) {{$contract.Type}}Bin = strings.Replace({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:], -1) {{end}} - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}}) + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}}) if err != nil { return common.Address{}, nil, nil, err } diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go index 118abc59a7..b931fbb04d 100644 --- a/accounts/abi/bind/util.go +++ b/accounts/abi/bind/util.go @@ -21,6 +21,7 @@ import ( "errors" "time" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty logger := log.New("hash", tx.Hash()) for { receipt, err := b.TransactionReceipt(ctx, tx.Hash()) - if receipt != nil { + if err == nil { return receipt, nil } - if err != nil { - logger.Trace("Receipt retrieval failed", "err", err) - } else { + + if errors.Is(err, ethereum.NotFound) { logger.Trace("Transaction not yet mined") + } else { + logger.Trace("Receipt retrieval failed", "err", err) } + // Wait for the next round. select { case <-ctx.Done(): diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index 9f9b7a000d..75fbc91ceb 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -56,14 +56,17 @@ func TestWaitDeployed(t *testing.T) { for name, test := range waitDeployedTests { backend := backends.NewSimulatedBackend( core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer backend.Close() - // Create the transaction. - tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code)) + // Create the transaction + head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) // Wait for it to get mined in the background. 
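
The test hunks here, like those in simulated_test.go above, replace the hard-coded 1 wei gas price with head.BaseFee + 1, because the simulated backend now enforces the London base fee. A standalone sketch of that recurring pattern, assuming go-ethereum's core/types package; suggestLegacyGasPrice is an illustrative name, not a helper added by this patch:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// suggestLegacyGasPrice returns head.BaseFee + 1: one wei above the base fee of
// the latest header, which the patch's own comments treat as "good enough" even
// though the child block's base fee is the exact target.
func suggestLegacyGasPrice(head *types.Header) *big.Int {
	return new(big.Int).Add(head.BaseFee, big.NewInt(1))
}

func main() {
	head := &types.Header{BaseFee: big.NewInt(875000000)} // example base fee in wei
	fmt.Println(suggestLegacyGasPrice(head))
}
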
@@ -99,15 +102,18 @@ func TestWaitDeployed(t *testing.T) { func TestWaitDeployedCornerCases(t *testing.T) { backend := backends.NewSimulatedBackend( core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer backend.Close() + head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + // Create a transaction to an account. code := "6060604052600a8060106000396000f360606040526008565b00" - tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code)) + tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -119,7 +125,7 @@ func TestWaitDeployedCornerCases(t *testing.T) { } // Create a transaction that is not mined. - tx = types.NewContractCreation(1, big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code)) + tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) go func() { diff --git a/accounts/abi/error.go b/accounts/abi/error.go index f0f71b6c91..3d1a4877dd 100644 --- a/accounts/abi/error.go +++ b/accounts/abi/error.go @@ -1,4 +1,4 @@ -// Copyright 2016 The go-ethereum Authors +// Copyright 2021 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -17,66 +17,75 @@ package abi import ( + "bytes" "errors" "fmt" - "reflect" -) + "strings" -var ( - errBadBool = errors.New("abi: improperly encoded boolean value") + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ) -// formatSliceString formats the reflection kind with the given slice size -// and returns a formatted string representation. -func formatSliceString(kind reflect.Kind, sliceSize int) string { - if sliceSize == -1 { - return fmt.Sprintf("[]%v", kind) - } - return fmt.Sprintf("[%d]%v", sliceSize, kind) +type Error struct { + Name string + Inputs Arguments + str string + // Sig contains the string signature according to the ABI spec. + // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the event's signature used by the + // abi definition to identify event names and types. + ID common.Hash } -// sliceTypeCheck checks that the given slice can by assigned to the reflection -// type in t. -func sliceTypeCheck(t Type, val reflect.Value) error { - if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { - return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type()) +func NewError(name string, inputs Arguments) Error { + // sanitize inputs to remove inputs without names + // and precompute string and sig representation. 
+ names := make([]string, len(inputs)) + types := make([]string, len(inputs)) + for i, input := range inputs { + if input.Name == "" { + inputs[i] = Argument{ + Name: fmt.Sprintf("arg%d", i), + Indexed: input.Indexed, + Type: input.Type, + } + } else { + inputs[i] = input + } + // string representation + names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name) + if input.Indexed { + names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name) + } + // sig representation + types[i] = input.Type.String() } - if t.T == ArrayTy && val.Len() != t.Size { - return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len())) - } + str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", ")) + sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ",")) + id := common.BytesToHash(crypto.Keccak256([]byte(sig))) - if t.Elem.T == SliceTy || t.Elem.T == ArrayTy { - if val.Len() > 0 { - return sliceTypeCheck(*t.Elem, val.Index(0)) - } + return Error{ + Name: name, + Inputs: inputs, + str: str, + Sig: sig, + ID: id, } +} - if val.Type().Elem().Kind() != t.Elem.GetType().Kind() { - return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type()) - } - return nil +func (e *Error) String() string { + return e.str } -// typeCheck checks that the given reflection value can be assigned to the reflection -// type in t. -func typeCheck(t Type, value reflect.Value) error { - if t.T == SliceTy || t.T == ArrayTy { - return sliceTypeCheck(t, value) +func (e *Error) Unpack(data []byte) (interface{}, error) { + if len(data) < 4 { + return "", errors.New("invalid data for unpacking") } - - // Check base type validity. Element types will be checked later on. - if t.GetType().Kind() != value.Kind() { - return typeErr(t.GetType().Kind(), value.Kind()) - } else if t.T == FixedBytesTy && t.Size != value.Len() { - return typeErr(t.GetType(), value.Type()) - } else { - return nil + if !bytes.Equal(data[:4], e.ID[:4]) { + return "", errors.New("invalid data for unpacking") } - -} - -// typeErr returns a formatted type casting error. -func typeErr(expected, got interface{}) error { - return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected) + return e.Inputs.Unpack(data[4:]) } diff --git a/accounts/abi/error_handling.go b/accounts/abi/error_handling.go new file mode 100644 index 0000000000..f0f71b6c91 --- /dev/null +++ b/accounts/abi/error_handling.go @@ -0,0 +1,82 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + errBadBool = errors.New("abi: improperly encoded boolean value") +) + +// formatSliceString formats the reflection kind with the given slice size +// and returns a formatted string representation. 
+func formatSliceString(kind reflect.Kind, sliceSize int) string { + if sliceSize == -1 { + return fmt.Sprintf("[]%v", kind) + } + return fmt.Sprintf("[%d]%v", sliceSize, kind) +} + +// sliceTypeCheck checks that the given slice can by assigned to the reflection +// type in t. +func sliceTypeCheck(t Type, val reflect.Value) error { + if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { + return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type()) + } + + if t.T == ArrayTy && val.Len() != t.Size { + return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len())) + } + + if t.Elem.T == SliceTy || t.Elem.T == ArrayTy { + if val.Len() > 0 { + return sliceTypeCheck(*t.Elem, val.Index(0)) + } + } + + if val.Type().Elem().Kind() != t.Elem.GetType().Kind() { + return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type()) + } + return nil +} + +// typeCheck checks that the given reflection value can be assigned to the reflection +// type in t. +func typeCheck(t Type, value reflect.Value) error { + if t.T == SliceTy || t.T == ArrayTy { + return sliceTypeCheck(t, value) + } + + // Check base type validity. Element types will be checked later on. + if t.GetType().Kind() != value.Kind() { + return typeErr(t.GetType().Kind(), value.Kind()) + } else if t.T == FixedBytesTy && t.Size != value.Len() { + return typeErr(t.GetType(), value.Type()) + } else { + return nil + } + +} + +// typeErr returns a formatted type casting error. +func typeErr(expected, got interface{}) error { + return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected) +} diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 11248e0730..35e5556d2c 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -123,15 +123,8 @@ func set(dst, src reflect.Value) error { func setSlice(dst, src reflect.Value) error { slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len()) for i := 0; i < src.Len(); i++ { - if src.Index(i).Kind() == reflect.Struct { - if err := set(slice.Index(i), src.Index(i)); err != nil { - return err - } - } else { - // e.g. [][32]uint8 to []common.Hash - if err := set(slice.Index(i), src.Index(i)); err != nil { - return err - } + if err := set(slice.Index(i), src.Index(i)); err != nil { + return err } } if dst.CanSet() { diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index ec06984936..43cd6c6457 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -290,7 +290,7 @@ func tuplePointsTo(index int, output []byte) (start int, err error) { offset := big.NewInt(0).SetBytes(output[index : index+32]) outputLen := big.NewInt(int64(len(output))) - if offset.Cmp(big.NewInt(int64(len(output)))) > 0 { + if offset.Cmp(outputLen) > 0 { return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen) } if offset.BitLen() > 63 { diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index b88f77805b..e617f8abc5 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -762,20 +762,24 @@ func TestUnpackTuple(t *testing.T) { buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1 // If the result is single tuple, use struct as return value container directly. 
- v := struct { + type v struct { A *big.Int B *big.Int - }{new(big.Int), new(big.Int)} + } + type r struct { + Result v + } + var ret0 = new(r) + err = abi.UnpackIntoInterface(ret0, "tuple", buff.Bytes()) - err = abi.UnpackIntoInterface(&v, "tuple", buff.Bytes()) if err != nil { t.Error(err) } else { - if v.A.Cmp(big.NewInt(1)) != 0 { - t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A) + if ret0.Result.A.Cmp(big.NewInt(1)) != 0 { + t.Errorf("unexpected value unpacked: want %x, got %x", 1, ret0.Result.A) } - if v.B.Cmp(big.NewInt(-1)) != 0 { - t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.B) + if ret0.Result.B.Cmp(big.NewInt(-1)) != 0 { + t.Errorf("unexpected value unpacked: want %x, got %x", -1, ret0.Result.B) } } diff --git a/accounts/accounts.go b/accounts/accounts.go index 177c6775ac..8e15c080ff 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -47,7 +47,7 @@ const ( // accounts (derived from the same seed). type Wallet interface { // URL retrieves the canonical path under which this wallet is reachable. It is - // user by upper layers to define a sorting order over all wallets from multiple + // used by upper layers to define a sorting order over all wallets from multiple // backends. URL() URL @@ -90,7 +90,7 @@ type Wallet interface { // accounts. // // Note, self derivation will increment the last component of the specified path - // opposed to decending into a child path to allow discovering accounts starting + // opposed to descending into a child path to allow discovering accounts starting // from non zero components. // // Some hardware wallets switched derivation paths through their evolution, so @@ -106,7 +106,7 @@ type Wallet interface { // or optionally with the aid of any location metadata from the embedded URL field. // // If the wallet requires additional authentication to sign the request (e.g. - // a password to decrypt the account, or a PIN code o verify the transaction), + // a password to decrypt the account, or a PIN code to verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing // the needed details via SignDataWithPassphrase, or by other means (e.g. unlock @@ -125,13 +125,13 @@ type Wallet interface { // or optionally with the aid of any location metadata from the embedded URL field. // // If the wallet requires additional authentication to sign the request (e.g. - // a password to decrypt the account, or a PIN code o verify the transaction), + // a password to decrypt the account, or a PIN code to verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing // the needed details via SignTextWithPassphrase, or by other means (e.g. unlock // the account in a keystore). // - // This method should return the signature in 'canonical' format, with v 0 or 1 + // This method should return the signature in 'canonical' format, with v 0 or 1. SignText(account Account, text []byte) ([]byte, error) // SignTextWithPassphrase is identical to Signtext, but also takes a password @@ -177,7 +177,7 @@ type Backend interface { // TextHash is a helper function that calculates a hash for the given message that can be // safely used to calculate a signature from. // -// The hash is calulcated as +// The hash is calculated as // keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). 
// // This gives context to the signed message and prevents signing of transactions. @@ -189,7 +189,7 @@ func TextHash(data []byte) []byte { // TextAndHash is a helper function that calculates a hash for the given message that can be // safely used to calculate a signature from. // -// The hash is calulcated as +// The hash is calculated as // keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. diff --git a/accounts/errors.go b/accounts/errors.go index 2fed35f9d0..727e5329be 100644 --- a/accounts/errors.go +++ b/accounts/errors.go @@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password") var ErrWalletAlreadyOpen = errors.New("wallet already open") // ErrWalletClosed is returned if a wallet is attempted to be opened the -// secodn time. +// second time. var ErrWalletClosed = errors.New("wallet closed") // AuthNeededError is returned by backends for signing requests where the user diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 5a96aad6b4..07062495de 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -29,7 +29,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" ) type ExternalBackend struct { @@ -196,6 +196,10 @@ type signTransactionResult struct { Tx *types.Transaction `json:"tx"` } +// SignTx sends the transaction to the external signer. +// If chainID is nil, or tx.ChainID is zero, the chain ID will be assigned +// by the external signer. For non-legacy transactions, the chain ID of the +// transaction overrides the chainID parameter. func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { data := hexutil.Bytes(tx.Data()) var to *common.MixedcaseAddress @@ -203,26 +207,34 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio t := common.NewMixedcaseAddress(*tx.To()) to = &t } - args := &core.SendTxArgs{ - Data: &data, - Nonce: hexutil.Uint64(tx.Nonce()), - Value: hexutil.Big(*tx.Value()), - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: hexutil.Big(*tx.GasPrice()), - To: to, - From: common.NewMixedcaseAddress(account.Address), + args := &apitypes.SendTxArgs{ + Data: &data, + Nonce: hexutil.Uint64(tx.Nonce()), + Value: hexutil.Big(*tx.Value()), + Gas: hexutil.Uint64(tx.Gas()), + To: to, + From: common.NewMixedcaseAddress(account.Address), + } + switch tx.Type() { + case types.LegacyTxType, types.AccessListTxType: + args.GasPrice = (*hexutil.Big)(tx.GasPrice()) + case types.DynamicFeeTxType: + args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap()) + args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap()) + default: + return nil, fmt.Errorf("unsupported tx type %d", tx.Type()) } // We should request the default chain id that we're operating with // (the chain we're executing on) - if chainID != nil { + if chainID != nil && chainID.Sign() != 0 { args.ChainID = (*hexutil.Big)(chainID) } - // However, if the user asked for a particular chain id, then we should - // use that instead. 
- if tx.Type() != types.LegacyTxType && tx.ChainId() != nil { - args.ChainID = (*hexutil.Big)(tx.ChainId()) - } - if tx.Type() == types.AccessListTxType { + if tx.Type() != types.LegacyTxType { + // However, if the user asked for a particular chain id, then we should + // use that instead. + if tx.ChainId().Sign() != 0 { + args.ChainID = (*hexutil.Big)(tx.ChainId()) + } accessList := tx.AccessList() args.AccessList = &accessList } diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index fe9233c046..a847545bc8 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -96,7 +96,7 @@ func TestWatchNoDir(t *testing.T) { // Create ks but not the directory that it watches. rand.Seed(time.Now().UnixNano()) - dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) + dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) list := ks.Accounts() @@ -322,7 +322,7 @@ func TestUpdatedKeyfileContents(t *testing.T) { // Create a temporary kesytore to test with rand.Seed(time.Now().UnixNano()) - dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) + dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) list := ks.Accounts() diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go index d6ef53327d..ad176040d6 100644 --- a/accounts/keystore/watch.go +++ b/accounts/keystore/watch.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris // +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris package keystore diff --git a/accounts/keystore/watch_fallback.go b/accounts/keystore/watch_fallback.go index de0e87f8a5..e40eca42fe 100644 --- a/accounts/keystore/watch_fallback.go +++ b/accounts/keystore/watch_fallback.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris) // +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris // This is the fallback implementation of directory watching. diff --git a/accounts/manager.go b/accounts/manager.go index acf41ed8e9..1e111d1948 100644 --- a/accounts/manager.go +++ b/accounts/manager.go @@ -25,6 +25,10 @@ import ( "github.com/ethereum/go-ethereum/event" ) +// managerSubBufferSize determines how many incoming wallet events +// the manager will buffer in its channel. +const managerSubBufferSize = 50 + // Config contains the settings of the global account manager. // // TODO(rjl493456442, karalabe, holiman): Get rid of this when account management @@ -33,18 +37,27 @@ type Config struct { InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed } +// newBackendEvent lets the manager know it should +// track the given backend for wallet updates. 
+type newBackendEvent struct { + backend Backend + processed chan struct{} // Informs event emitter that backend has been integrated +} + // Manager is an overarching account manager that can communicate with various // backends for signing transactions. type Manager struct { - config *Config // Global account manager configurations - backends map[reflect.Type][]Backend // Index of backends currently registered - updaters []event.Subscription // Wallet update subscriptions for all backends - updates chan WalletEvent // Subscription sink for backend wallet changes - wallets []Wallet // Cache of all wallets from all registered backends + config *Config // Global account manager configurations + backends map[reflect.Type][]Backend // Index of backends currently registered + updaters []event.Subscription // Wallet update subscriptions for all backends + updates chan WalletEvent // Subscription sink for backend wallet changes + newBackends chan newBackendEvent // Incoming backends to be tracked by the manager + wallets []Wallet // Cache of all wallets from all registered backends feed event.Feed // Wallet feed notifying of arrivals/departures quit chan chan error + term chan struct{} // Channel is closed upon termination of the update loop lock sync.RWMutex } @@ -57,7 +70,7 @@ func NewManager(config *Config, backends ...Backend) *Manager { wallets = merge(wallets, backend.Wallets()...) } // Subscribe to wallet notifications from all backends - updates := make(chan WalletEvent, 4*len(backends)) + updates := make(chan WalletEvent, managerSubBufferSize) subs := make([]event.Subscription, len(backends)) for i, backend := range backends { @@ -65,12 +78,14 @@ func NewManager(config *Config, backends ...Backend) *Manager { } // Assemble the account manager and return am := &Manager{ - config: config, - backends: make(map[reflect.Type][]Backend), - updaters: subs, - updates: updates, - wallets: wallets, - quit: make(chan chan error), + config: config, + backends: make(map[reflect.Type][]Backend), + updaters: subs, + updates: updates, + newBackends: make(chan newBackendEvent), + wallets: wallets, + quit: make(chan chan error), + term: make(chan struct{}), } for _, backend := range backends { kind := reflect.TypeOf(backend) @@ -93,6 +108,14 @@ func (am *Manager) Config() *Config { return am.config } +// AddBackend starts the tracking of an additional backend for wallet updates. +// cmd/geth assumes once this func returns the backends have been already integrated. +func (am *Manager) AddBackend(backend Backend) { + done := make(chan struct{}) + am.newBackends <- newBackendEvent{backend, done} + <-done +} + // update is the wallet event loop listening for notifications from the backends // and updating the cache of wallets. func (am *Manager) update() { @@ -122,10 +145,22 @@ func (am *Manager) update() { // Notify any listeners of the event am.feed.Send(event) - + case event := <-am.newBackends: + am.lock.Lock() + // Update caches + backend := event.backend + am.wallets = merge(am.wallets, backend.Wallets()...) + am.updaters = append(am.updaters, backend.Subscribe(am.updates)) + kind := reflect.TypeOf(backend) + am.backends[kind] = append(am.backends[kind], backend) + am.lock.Unlock() + close(event.processed) case errc := <-am.quit: // Manager terminating, return errc <- nil + // Signals event emitters the loop is not receiving values + // to prevent them from getting stuck. 
+ close(am.term) return } } @@ -133,6 +168,9 @@ func (am *Manager) update() { // Backends retrieves the backend(s) with the given type from the account manager. func (am *Manager) Backends(kind reflect.Type) []Backend { + am.lock.RLock() + defer am.lock.RUnlock() + return am.backends[kind] } diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index b4d229bc0b..2a2b83bd1b 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun // accounts. // // Note, self derivation will increment the last component of the specified path -// opposed to decending into a child path to allow discovering accounts starting +// opposed to descending into a child path to allow discovering accounts starting // from non zero components. // // Some hardware wallets switched derivation paths through their evolution, so diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index b6f1814488..382f3ddaee 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun // accounts. // // Note, self derivation will increment the last component of the specified path -// opposed to decending into a child path to allow discovering accounts starting +// opposed to descending into a child path to allow discovering accounts starting // from non zero components. // // Some hardware wallets switched derivation paths through their evolution, so diff --git a/appveyor.yml b/appveyor.yml index a72163382a..65b5f96841 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,29 +1,57 @@ -os: Visual Studio 2019 clone_depth: 5 version: "{branch}.{build}" + +image: + - Ubuntu + - Visual Studio 2019 + environment: matrix: - # We use gcc from MSYS2 because it is the most recent compiler version available on - # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is - # contained in PATH. - GETH_ARCH: amd64 - GETH_CC: C:\msys64\mingw64\bin\gcc.exe - PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH% + GETH_MINGW: 'C:\msys64\mingw64' - GETH_ARCH: 386 - GETH_CC: C:\msys64\mingw32\bin\gcc.exe - PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH% + GETH_MINGW: 'C:\msys64\mingw32' install: - git submodule update --init --depth 1 - go version - - "%GETH_CC% --version" -build_script: - - go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC% +for: + # Linux has its own script without -arch and -cc. + # The linux builder also runs lint. + - matrix: + only: + - image: Ubuntu + build_script: + - go run build/ci.go lint + - go run build/ci.go install -dlgo + test_script: + - go run build/ci.go test -dlgo -coverage -after_build: - - go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - - go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + # linux/386 is disabled. + - matrix: + exclude: + - image: Ubuntu + GETH_ARCH: 386 -test_script: - - go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage + # Windows builds for amd64 + 386. + - matrix: + only: + - image: Visual Studio 2019 + environment: + # We use gcc from MSYS2 because it is the most recent compiler version available on + # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is + # contained in PATH. 
+ GETH_CC: '%GETH_MINGW%\bin\gcc.exe' + PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%' + build_script: + - 'echo %GETH_ARCH%' + - 'echo %GETH_CC%' + - '%GETH_CC% --version' + - go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC% + after_build: + # Upload builds. Note that ci.go makes this a no-op PR builds. + - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + test_script: + - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage diff --git a/build/checksums.txt b/build/checksums.txt index 0e33e07f2c..5df27bbf61 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,33 +1,37 @@ # This file contains sha256 checksums of optional build dependencies. -b298d29de9236ca47a023e382313bcc2d2eed31dfa706b60a04103ce83a71a25 go1.16.3.src.tar.gz -6bb1cf421f8abc2a9a4e39140b7397cdae6aca3e8d36dcff39a1a77f4f1170ac go1.16.3.darwin-amd64.tar.gz -f4e96bbcd5d2d1942f5b55d9e4ab19564da4fad192012f6d7b0b9b055ba4208f go1.16.3.darwin-arm64.tar.gz -48b2d1481db756c88c18b1f064dbfc3e265ce4a775a23177ca17e25d13a24c5d go1.16.3.linux-386.tar.gz -951a3c7c6ce4e56ad883f97d9db74d3d6d80d5fec77455c6ada6c1f7ac4776d2 go1.16.3.linux-amd64.tar.gz -566b1d6f17d2bc4ad5f81486f0df44f3088c3ed47a3bec4099d8ed9939e90d5d go1.16.3.linux-arm64.tar.gz -0dae30385e3564a557dac7f12a63eedc73543e6da0f6017990e214ce8cc8797c go1.16.3.linux-armv6l.tar.gz -a3c16e1531bf9726f47911c4a9ed7cb665a6207a51c44f10ebad4db63b4bcc5a go1.16.3.windows-386.zip -a4400345135b36cb7942e52bbaf978b66814738b855eeff8de879a09fd99de7f go1.16.3.windows-amd64.zip -31ecd11d497684fa8b0f01ba784590c4c760943665fdc4fe0adaa1405c71736c go1.16.3.freebsd-386.tar.gz -ffbd920b309e62e807457b11d80e8c17fefe3ef6de423aaba4b1e270b2ca4c3d go1.16.3.freebsd-amd64.tar.gz -5eb046bbbbc7fe2591846a4303884cb5a01abb903e3e61e33459affe7874e811 go1.16.3.linux-ppc64le.tar.gz -3e8bd7bde533a73fd6fa75b5288678ef397e76c198cfb26b8ae086035383b1cf go1.16.3.linux-s390x.tar.gz +3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d go1.17.5.src.tar.gz +2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264 go1.17.5.darwin-amd64.tar.gz +111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0 go1.17.5.darwin-arm64.tar.gz +443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c go1.17.5.freebsd-386.tar.gz +17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3 go1.17.5.freebsd-amd64.tar.gz +4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f go1.17.5.linux-386.tar.gz +bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e go1.17.5.linux-amd64.tar.gz +6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e go1.17.5.linux-arm64.tar.gz +aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de go1.17.5.linux-armv6l.tar.gz +3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70 go1.17.5.linux-ppc64le.tar.gz +8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b go1.17.5.linux-s390x.tar.gz +6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe go1.17.5.windows-386.zip +671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9 go1.17.5.windows-amd64.zip +45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030 go1.17.5.windows-arm64.zip -7e9a47ab540aa3e8472fbf8120d28bed3b9d9cf625b955818e8bc69628d7187c golangci-lint-1.39.0-darwin-amd64.tar.gz 
-574daa2c9c299b01672a6daeb1873b5f12e413cdb6dc0e30f2ff163956778064 golangci-lint-1.39.0-darwin-arm64.tar.gz -6225f7014987324ab78e9b511f294e3f25be013728283c33918c67c8576d543e golangci-lint-1.39.0-freebsd-386.tar.gz -6b3e76e1e5eaf0159411c8e2727f8d533989d3bb19f10e9caa6e0b9619ee267d golangci-lint-1.39.0-freebsd-amd64.tar.gz -a301cacfff87ed9b00313d95278533c25a4527a06b040a17d969b4b7e1b8a90d golangci-lint-1.39.0-freebsd-armv7.tar.gz -25bfd96a29c3112f508d5e4fc860dbad7afce657233c343acfa20715717d51e7 golangci-lint-1.39.0-freebsd-armv6.tar.gz -9687e4ff15545cfc722b0e46107a94195166a505023b48a316579af25ad09505 golangci-lint-1.39.0-linux-armv7.tar.gz -a7fa7ab2bfc99cbe5e5bcbf5684f5a997f920afbbe2f253d2feb1001d5e3c8b3 golangci-lint-1.39.0-linux-armv6.tar.gz -c8f9634115beddb4ed9129c1f7ecd4c97c99d07aeef33e3707234097eeb51b7b golangci-lint-1.39.0-linux-mips64le.tar.gz -d1234c213b74751f1af413302dde0e9a6d4d29aecef034af7abb07dc1b6e887f golangci-lint-1.39.0-linux-arm64.tar.gz -df25d9267168323b163147acb823ab0215a8a3bb6898a4a9320afdfedde66817 golangci-lint-1.39.0-linux-386.tar.gz -1767e75fba357b7651b1a796d38453558f371c60af805505ec99e166908c04b5 golangci-lint-1.39.0-linux-ppc64le.tar.gz -25fd75bf3186b3d930ecae10185689968fd18fd8fa6f9f555d6beb04348c20f6 golangci-lint-1.39.0-linux-s390x.tar.gz -3a73aa7468087caa62673c8adea99b4e4dff846dc72707222db85f8679b40cbf golangci-lint-1.39.0-linux-amd64.tar.gz -578caceccf81739bda67dbfec52816709d03608c6878888ecdc0e186a094a41b golangci-lint-1.39.0-linux-mips64.tar.gz -494b66ba0e32c8ddf6c4f6b1d05729b110900f6017eda943057e43598c17d7a8 golangci-lint-1.39.0-windows-386.zip -52ec2e13a3cbb47147244dff8cfc35103563deb76e0459133058086fc35fb2c7 golangci-lint-1.39.0-windows-amd64.zip +d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz +e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz +14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz +337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz +6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz +878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz +42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz +6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz +2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz +08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz +c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz +3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz +f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz +1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz +8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz +5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz +e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip 
+7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip +59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip +65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip diff --git a/build/ci.go b/build/ci.go index df1cb50ab2..9ab0680591 100644 --- a/build/ci.go +++ b/build/ci.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +//go:build none // +build none /* @@ -32,7 +33,6 @@ Available commands are: nsis -- creates a Windows NSIS installer aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework - xgo [ -alltools ] [ options ] -- cross builds according to options purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore For all commands, -n prevents execution of external programs (dry run mode). @@ -54,6 +54,7 @@ import ( "path/filepath" "regexp" "runtime" + "strconv" "strings" "time" @@ -128,19 +129,13 @@ var ( // Distros for which packages are created. // Note: vivid is unsupported because there is no golang-1.6 package for it. - // Note: wily is unsupported because it was officially deprecated on Launchpad. - // Note: yakkety is unsupported because it was officially deprecated on Launchpad. - // Note: zesty is unsupported because it was officially deprecated on Launchpad. - // Note: artful is unsupported because it was officially deprecated on Launchpad. - // Note: cosmic is unsupported because it was officially deprecated on Launchpad. - // Note: disco is unsupported because it was officially deprecated on Launchpad. - // Note: eoan is unsupported because it was officially deprecated on Launchpad. + // Note: the following Ubuntu releases have been officially deprecated on Launchpad: + // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy debDistroGoBoots = map[string]string{ "trusty": "golang-1.11", "xenial": "golang-go", "bionic": "golang-go", "focal": "golang-go", - "groovy": "golang-go", "hirsute": "golang-go", } @@ -152,7 +147,7 @@ var ( // This is the version of go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.16.3" + dlgoVersion = "1.17.5" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -182,6 +177,8 @@ func main() { doLint(os.Args[2:]) case "archive": doArchive(os.Args[2:]) + case "docker": + doDocker(os.Args[2:]) case "debsrc": doDebianSource(os.Args[2:]) case "nsis": @@ -190,8 +187,6 @@ func main() { doAndroidArchive(os.Args[2:]) case "xcode": doXCodeFramework(os.Args[2:]) - case "xgo": - doXgo(os.Args[2:]) case "purge": doPurge(os.Args[2:]) default: @@ -262,6 +257,11 @@ func buildFlags(env build.Environment) (flags []string) { if runtime.GOOS == "darwin" { ld = append(ld, "-s") } + // Enforce the stacksize to 8M, which is the case on most platforms apart from + // alpine Linux. 
+ if runtime.GOOS == "linux" { + ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000") + } if len(ld) > 0 { flags = append(flags, "-ldflags", strings.Join(ld, " ")) } @@ -280,6 +280,7 @@ func doTest(cmdline []string) { coverage = flag.Bool("coverage", false, "Whether to record code coverage") verbose = flag.Bool("v", false, "Whether to log verbosely") timeout = flag.String("timeout", "10m", `Timeout of runing tests`) + race = flag.Bool("race", false, "Execute the race detector") ) flag.CommandLine.Parse(cmdline) @@ -303,6 +304,9 @@ func doTest(cmdline []string) { if *timeout != "" { gotest.Args = append(gotest.Args, []string{"-timeout", *timeout}...) } + if *race { + gotest.Args = append(gotest.Args, "-race") + } packages := []string{"./accounts/...", "./common/...", "./consensus/...", "./console/...", "./core/...", "./crypto/...", "./eth/...", "./ethclient/...", "./ethdb/...", "./event/...", "./graphql/...", "./les/...", @@ -334,10 +338,14 @@ func doLint(cmdline []string) { // downloadLinter downloads and unpacks golangci-lint. func downloadLinter(cachedir string) string { - const version = "1.39.0" + const version = "1.42.0" csdb := build.MustLoadChecksums("build/checksums.txt") - base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH) + arch := runtime.GOARCH + if arch == "arm" { + arch += "v" + os.Getenv("GOARM") + } + base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch) url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base) archivePath := filepath.Join(cachedir, base+".tar.gz") if err := csdb.DownloadFile(url, archivePath); err != nil { @@ -454,11 +462,177 @@ func maybeSkipArchive(env build.Environment) { os.Exit(0) } if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") { - log.Printf("skipping archive creation because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag) + log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag) os.Exit(0) } } +// Builds the docker images and optionally uploads them to Docker Hub. +func doDocker(cmdline []string) { + var ( + image = flag.Bool("image", false, `Whether to build and push an arch specific docker image`) + manifest = flag.String("manifest", "", `Push a multi-arch docker image for the specified architectures (usually "amd64,arm64")`) + upload = flag.String("upload", "", `Where to upload the docker image (usually "ethereum/client-go")`) + ) + flag.CommandLine.Parse(cmdline) + + // Skip building and pushing docker images for PR builds + env := build.Env() + maybeSkipArchive(env) + + // Retrieve the upload credentials and authenticate + user := getenvBase64("DOCKER_HUB_USERNAME") + pass := getenvBase64("DOCKER_HUB_PASSWORD") + + if len(user) > 0 && len(pass) > 0 { + auther := exec.Command("docker", "login", "-u", string(user), "--password-stdin") + auther.Stdin = bytes.NewReader(pass) + build.MustRun(auther) + } + // Retrieve the version infos to build and push to the following paths: + // - ethereum/client-go:latest - Pushes to the master branch, Geth only + // - ethereum/client-go:stable - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-latest - Pushes to the master branch, Geth & tools + // - ethereum/client-go:alltools-stable - Version tag publish on GitHub, Geth & tools + // - ethereum/client-go:release-. - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-release-. 
- Version tag publish on GitHub, Geth & tools + // - ethereum/client-go:v.. - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-v.. - Version tag publish on GitHub, Geth & tools + var tags []string + + switch { + case env.Branch == "master": + tags = []string{"latest"} + case strings.HasPrefix(env.Tag, "v1."): + tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version} + } + // If architecture specific image builds are requested, build and push them + if *image { + build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:TAG", *upload), ".") + build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:alltools-TAG", *upload), "-f", "Dockerfile.alltools", ".") + + // Tag and upload the images to Docker Hub + for _, tag := range tags { + gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, runtime.GOARCH) + toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, runtime.GOARCH) + + // If the image already exists (non version tag), check the build + // number to prevent overwriting a newer commit if concurrent builds + // are running. This is still a tiny bit racey if two published are + // done at the same time, but that's extremely unlikely even on the + // master branch. + for _, img := range []string{gethImage, toolImage} { + if exec.Command("docker", "pull", img).Run() != nil { + continue // Generally the only failure is a missing image, which is good + } + buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput() + if err != nil { + log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum)) + } + buildnum = bytes.TrimSpace(buildnum) + + if len(buildnum) > 0 && len(env.Buildnum) > 0 { + oldnum, err := strconv.Atoi(string(buildnum)) + if err != nil { + log.Fatalf("Failed to parse old image build number: %v", err) + } + newnum, err := strconv.Atoi(env.Buildnum) + if err != nil { + log.Fatalf("Failed to parse current build number: %v", err) + } + if oldnum > newnum { + log.Fatalf("Current build number %d not newer than existing %d", newnum, oldnum) + } else { + log.Printf("Updating %s from build %d to %d", img, oldnum, newnum) + } + } + } + build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:TAG", *upload), gethImage) + build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:alltools-TAG", *upload), toolImage) + build.MustRunCommand("docker", "push", gethImage) + build.MustRunCommand("docker", "push", toolImage) + } + } + // If multi-arch image manifest push is requested, assemble it + if len(*manifest) != 0 { + // Since different architectures are pushed by different builders, wait + // until all required images are updated. 
+ var mismatch bool + for i := 0; i < 2; i++ { // 2 attempts, second is race check + mismatch = false // hope there's no mismatch now + + for _, tag := range tags { + for _, arch := range strings.Split(*manifest, ",") { + gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, arch) + toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, arch) + + for _, img := range []string{gethImage, toolImage} { + if out, err := exec.Command("docker", "pull", img).CombinedOutput(); err != nil { + log.Printf("Required image %s unavailable: %v\nOutput: %s", img, err, out) + mismatch = true + break + } + buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput() + if err != nil { + log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum)) + } + buildnum = bytes.TrimSpace(buildnum) + + if string(buildnum) != env.Buildnum { + log.Printf("Build number mismatch on %s: want %s, have %s", img, env.Buildnum, buildnum) + mismatch = true + break + } + } + if mismatch { + break + } + } + if mismatch { + break + } + } + if mismatch { + // Build numbers mismatching, retry in a short time to + // avoid concurrent failes in both publisher images. If + // however the retry failed too, it means the concurrent + // builder is still crunching, let that do the publish. + if i == 0 { + time.Sleep(30 * time.Second) + } + continue + } + break + } + if mismatch { + log.Println("Relinquishing publish to other builder") + return + } + // Assemble and push the Geth manifest image + for _, tag := range tags { + gethImage := fmt.Sprintf("%s:%s", *upload, tag) + + var gethSubImages []string + for _, arch := range strings.Split(*manifest, ",") { + gethSubImages = append(gethSubImages, gethImage+"-"+arch) + } + build.MustRunCommand("docker", append([]string{"manifest", "create", gethImage}, gethSubImages...)...) + build.MustRunCommand("docker", "manifest", "push", gethImage) + } + // Assemble and push the alltools manifest image + for _, tag := range tags { + toolImage := fmt.Sprintf("%s:alltools-%s", *upload, tag) + + var toolSubImages []string + for _, arch := range strings.Split(*manifest, ",") { + toolSubImages = append(toolSubImages, toolImage+"-"+arch) + } + build.MustRunCommand("docker", append([]string{"manifest", "create", toolImage}, toolSubImages...)...) + build.MustRunCommand("docker", "manifest", "push", toolImage) + } + } +} + // Debian Packaging func doDebianSource(cmdline []string) { var ( @@ -1043,48 +1217,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata { } } -// Cross compilation - -func doXgo(cmdline []string) { - var ( - alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`) - ) - flag.CommandLine.Parse(cmdline) - env := build.Env() - var tc build.GoToolchain - - // Make sure xgo is available for cross compilation - build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest")) - - // If all tools building is requested, build everything the builder wants - args := append(buildFlags(env), flag.Args()...) - - if *alltools { - args = append(args, []string{"--dest", GOBIN}...) 
- for _, res := range allToolsArchiveFiles { - if strings.HasPrefix(res, GOBIN) { - // Binary tool found, cross build it explicitly - args = append(args, "./"+filepath.Join("cmd", filepath.Base(res))) - build.MustRun(xgoTool(args)) - args = args[:len(args)-1] - } - } - return - } - - // Otherwise execute the explicit cross compilation - path := args[len(args)-1] - args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...) - build.MustRun(xgoTool(args)) -} - -func xgoTool(args []string) *exec.Cmd { - cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...) - cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...) - return cmd -} - // Binary distribution cleanups func doPurge(cmdline []string) { diff --git a/cmd/abidump/main.go b/cmd/abidump/main.go index 35cbcbb0ed..4f942749df 100644 --- a/cmd/abidump/main.go +++ b/cmd/abidump/main.go @@ -23,7 +23,7 @@ import ( "os" "strings" - "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/ethereum/go-ethereum/signer/fourbyte" ) @@ -41,7 +41,7 @@ func parse(data []byte) { if err != nil { die(err) } - messages := core.ValidationMessages{} + messages := apitypes.ValidationMessages{} db.ValidateCallData(nil, data, &messages) for _, m := range messages.Messages { fmt.Printf("%v: %v\n", m.Typ, m.Message) diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 8befce88dc..3aaf898db2 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -50,10 +50,10 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/ethereum/go-ethereum/signer/fourbyte" "github.com/ethereum/go-ethereum/signer/rules" "github.com/ethereum/go-ethereum/signer/storage" - "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" @@ -657,7 +657,7 @@ func signer(c *cli.Context) error { cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) srv := rpc.NewServer() - err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false) + err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false) if err != nil { utils.Fatalf("Could not register API: %w", err) } @@ -898,7 +898,7 @@ func testExternalUI(api *core.SignerAPI) { addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899") data := `{"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"test","type":"uint8"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":"1","verifyingContract":"0xCCCcccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","test":"3","wallet":"0xcD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB","test":"2"},"contents":"Hello, Bob!"}}` //_, err := api.SignData(ctx, accounts.MimetypeTypedData, *addr, hexutil.Encode([]byte(data))) - var typedData core.TypedData + var typedData apitypes.TypedData json.Unmarshal([]byte(data), &typedData) _, err := api.SignTypedData(ctx, *addr, typedData) expectApprove("sign 712 typed data", err) @@ -923,13 +923,13 @@ func 
testExternalUI(api *core.SignerAPI) { time.Sleep(delay) data := hexutil.Bytes([]byte{}) to := common.NewMixedcaseAddress(a) - tx := core.SendTxArgs{ + tx := apitypes.SendTxArgs{ Data: &data, Nonce: 0x1, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: &to, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, } @@ -1025,7 +1025,7 @@ func GenDoc(ctx *cli.Context) { "of the work in canonicalizing and making sense of the data, and it's up to the UI to present" + "the user with the contents of the `message`" sighash, msg := accounts.TextAndHash([]byte("hello world")) - messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}} + messages := []*apitypes.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}} add("SignDataRequest", desc, &core.SignDataRequest{ Address: common.NewMixedcaseAddress(a), @@ -1055,17 +1055,17 @@ func GenDoc(ctx *cli.Context) { data := hexutil.Bytes([]byte{0x01, 0x02, 0x03, 0x04}) add("SignTxRequest", desc, &core.SignTxRequest{ Meta: meta, - Callinfo: []core.ValidationInfo{ + Callinfo: []apitypes.ValidationInfo{ {Typ: "Warning", Message: "Something looks odd, show this message as a warning"}, {Typ: "Info", Message: "User should see this as well"}, }, - Transaction: core.SendTxArgs{ + Transaction: apitypes.SendTxArgs{ Data: &data, Nonce: 0x1, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: nil, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, }}) @@ -1075,13 +1075,13 @@ func GenDoc(ctx *cli.Context) { add("SignTxResponse - approve", "Response to request to sign a transaction. This response needs to contain the `transaction`"+ ", because the UI is free to make modifications to the transaction.", &core.SignTxResponse{Approved: true, - Transaction: core.SendTxArgs{ + Transaction: apitypes.SendTxArgs{ Data: &data, Nonce: 0x4, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: nil, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, }}) diff --git a/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json b/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json new file mode 100644 index 0000000000..c5a1336860 --- /dev/null +++ b/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json @@ -0,0 +1,16 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json b/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json new file mode 100644 index 0000000000..df69231d7e --- /dev/null +++ b/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json @@ -0,0 +1,16 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxPriorityFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git 
a/cmd/clef/testdata/sign_1559_tx.json b/cmd/clef/testdata/sign_1559_tx.json new file mode 100644 index 0000000000..29355f6cf5 --- /dev/null +++ b/cmd/clef/testdata/sign_1559_tx.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxPriorityFeePerGas": "0x123", + "maxFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_bad_checksum_exp_fail.json b/cmd/clef/testdata/sign_bad_checksum_exp_fail.json new file mode 100644 index 0000000000..21ba7b3fc0 --- /dev/null +++ b/cmd/clef/testdata/sign_bad_checksum_exp_fail.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from":"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "to":"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "gas": "0x333", + "gasPrice": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": + "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_normal_exp_ok.json b/cmd/clef/testdata/sign_normal_exp_ok.json new file mode 100644 index 0000000000..7f3a9202a0 --- /dev/null +++ b/cmd/clef/testdata/sign_normal_exp_ok.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from":"0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to":"0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "gasPrice": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": + "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go index 596254df91..d67aaea1a7 100644 --- a/cmd/devp2p/dns_cloudflare.go +++ b/cmd/devp2p/dns_cloudflare.go @@ -133,7 +133,8 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) log.Info(fmt.Sprintf("Creating %s = %q", path, val)) ttl := rootTTL if path != name { - ttl = treeNodeTTL // Max TTL permitted by Cloudflare + ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare + } record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl} _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record) diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index 66deef56ea..85f28b8cb1 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -115,8 +115,9 @@ var ( ) const ( - rootTTL = 30 * 60 // 30 min - treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks + rootTTL = 30 * 60 // 30 min + treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks + treeNodeTTLCloudflare = 24 * 60 * 60 // 1 day ) // dnsSync performs dnsSyncCommand. 
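The testdata fixtures added above feed clef's external account_signTransaction API, including the EIP-1559 variants that carry maxFeePerGas and maxPriorityFeePerGas. As a rough sketch (not part of this change), the snippet below replays the sign_1559_tx.json payload over go-ethereum's rpc client; the endpoint address is an assumption, since clef is commonly served over HTTP on localhost:8550, and the result is left untyped.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumed endpoint: clef's external API exposed over HTTP (adjust to your setup).
	client, err := rpc.Dial("http://localhost:8550")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Mirrors cmd/clef/testdata/sign_1559_tx.json: a dynamic-fee request that
	// supplies both maxFeePerGas and maxPriorityFeePerGas.
	txArgs := map[string]interface{}{
		"from":                 "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192",
		"to":                   "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192",
		"gas":                  "0x333",
		"maxPriorityFeePerGas": "0x123",
		"maxFeePerGas":         "0x123",
		"nonce":                "0x0",
		"value":                "0x10",
		"data":                 "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
	}

	// The *_exp_fail.json fixtures (missing fee field, bad checksum) are expected
	// to be rejected at this call; sign_1559_tx.json should succeed.
	var signed interface{}
	if err := client.Call(&signed, "account_signTransaction", txArgs); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signed: %v\n", signed)
}
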
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go index 83c55181ad..d0d55a455d 100644 --- a/cmd/devp2p/internal/ethtest/chain.go +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -26,6 +26,7 @@ import ( "os" "strings" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" @@ -39,30 +40,41 @@ type Chain struct { chainConfig *params.ChainConfig } -func (c *Chain) WriteTo(writer io.Writer) error { - for _, block := range c.blocks { - if err := rlp.Encode(writer, block); err != nil { - return err - } - } - - return nil -} - // Len returns the length of the chain. func (c *Chain) Len() int { return len(c.blocks) } -// TD calculates the total difficulty of the chain. -func (c *Chain) TD(height int) *big.Int { // TODO later on channge scheme so that the height is included in range +// TD calculates the total difficulty of the chain at the +// chain head. +func (c *Chain) TD() *big.Int { + sum := big.NewInt(0) + for _, block := range c.blocks[:c.Len()] { + sum.Add(sum, block.Difficulty()) + } + return sum +} + +// TotalDifficultyAt calculates the total difficulty of the chain +// at the given block height. +func (c *Chain) TotalDifficultyAt(height int) *big.Int { sum := big.NewInt(0) - for _, block := range c.blocks[:height] { + if height >= c.Len() { + return sum + } + for _, block := range c.blocks[:height+1] { sum.Add(sum, block.Difficulty()) } return sum } +func (c *Chain) RootAt(height int) common.Hash { + if height < c.Len() { + return c.blocks[height].Root() + } + return common.Hash{} +} + // ForkID gets the fork id of the chain. func (c *Chain) ForkID() forkid.ID { return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len())) diff --git a/cmd/devp2p/internal/ethtest/eth66_suite.go b/cmd/devp2p/internal/ethtest/eth66_suite.go deleted file mode 100644 index 903a90c7eb..0000000000 --- a/cmd/devp2p/internal/ethtest/eth66_suite.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package ethtest - -import ( - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/p2p" -) - -// Is_66 checks if the node supports the eth66 protocol version, -// and if not, exists the test suite -func (s *Suite) Is_66(t *utesting.T) { - conn := s.dial66(t) - conn.handshake(t) - if conn.negotiatedProtoVersion < 66 { - t.Fail() - } -} - -// TestStatus_66 attempts to connect to the given node and exchange -// a status message with it on the eth66 protocol, and then check to -// make sure the chain head is correct. -func (s *Suite) TestStatus_66(t *utesting.T) { - conn := s.dial66(t) - defer conn.Close() - // get protoHandshake - conn.handshake(t) - // get status - switch msg := conn.statusExchange66(t, s.chain).(type) { - case *Status: - status := *msg - if status.ProtocolVersion != uint32(66) { - t.Fatalf("mismatch in version: wanted 66, got %d", status.ProtocolVersion) - } - t.Logf("got status message: %s", pretty.Sdump(msg)) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } -} - -// TestGetBlockHeaders_66 tests whether the given node can respond to -// an eth66 `GetBlockHeaders` request and that the response is accurate. -func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) { - conn := s.setupConnection66(t) - defer conn.Close() - // get block headers - req := ð.GetBlockHeadersPacket66{ - RequestId: 3, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: s.chain.blocks[1].Hash(), - }, - Amount: 2, - Skip: 1, - Reverse: false, - }, - } - // write message - headers, err := s.getBlockHeaders66(conn, req, req.RequestId) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - } - // check for correct headers - if !headersMatch(t, s.chain, headers) { - t.Fatal("received wrong header(s)") - } -} - -// TestSimultaneousRequests_66 sends two simultaneous `GetBlockHeader` requests -// with different request IDs and checks to make sure the node responds with the correct -// headers per request. 
-func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) { - // create two connections - conn := s.setupConnection66(t) - defer conn.Close() - // create two requests - req1 := ð.GetBlockHeadersPacket66{ - RequestId: 111, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: s.chain.blocks[1].Hash(), - }, - Amount: 2, - Skip: 1, - Reverse: false, - }, - } - req2 := ð.GetBlockHeadersPacket66{ - RequestId: 222, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: s.chain.blocks[1].Hash(), - }, - Amount: 4, - Skip: 1, - Reverse: false, - }, - } - // write first request - if err := conn.write66(req1, GetBlockHeaders{}.Code()); err != nil { - t.Fatalf("failed to write to connection: %v", err) - } - // write second request - if err := conn.write66(req2, GetBlockHeaders{}.Code()); err != nil { - t.Fatalf("failed to write to connection: %v", err) - } - // wait for responses - headers1, err := s.waitForBlockHeadersResponse66(conn, req1.RequestId) - if err != nil { - t.Fatalf("error while waiting for block headers: %v", err) - } - headers2, err := s.waitForBlockHeadersResponse66(conn, req2.RequestId) - if err != nil { - t.Fatalf("error while waiting for block headers: %v", err) - } - // check headers of both responses - if !headersMatch(t, s.chain, headers1) { - t.Fatalf("wrong header(s) in response to req1: got %v", headers1) - } - if !headersMatch(t, s.chain, headers2) { - t.Fatalf("wrong header(s) in response to req2: got %v", headers2) - } -} - -// TestBroadcast_66 tests whether a block announcement is correctly -// propagated to the given node's peer(s) on the eth66 protocol. -func (s *Suite) TestBroadcast_66(t *utesting.T) { - s.sendNextBlock66(t) -} - -// TestGetBlockBodies_66 tests whether the given node can respond to -// a `GetBlockBodies` request and that the response is accurate over -// the eth66 protocol. -func (s *Suite) TestGetBlockBodies_66(t *utesting.T) { - conn := s.setupConnection66(t) - defer conn.Close() - // create block bodies request - id := uint64(55) - req := ð.GetBlockBodiesPacket66{ - RequestId: id, - GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ - s.chain.blocks[54].Hash(), - s.chain.blocks[75].Hash(), - }, - } - if err := conn.write66(req, GetBlockBodies{}.Code()); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - - reqID, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case BlockBodies: - if reqID != req.RequestId { - t.Fatalf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID) - } - t.Logf("received %d block bodies", len(msg)) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } -} - -// TestLargeAnnounce_66 tests the announcement mechanism with a large block. 
-func (s *Suite) TestLargeAnnounce_66(t *utesting.T) { - nextBlock := len(s.chain.blocks) - blocks := []*NewBlock{ - { - Block: largeBlock(), - TD: s.fullChain.TD(nextBlock + 1), - }, - { - Block: s.fullChain.blocks[nextBlock], - TD: largeNumber(2), - }, - { - Block: largeBlock(), - TD: largeNumber(2), - }, - { - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - }, - } - - for i, blockAnnouncement := range blocks[0:3] { - t.Logf("Testing malicious announcement: %v\n", i) - sendConn := s.setupConnection66(t) - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // Invalid announcement, check that peer disconnected - switch msg := sendConn.ReadAndServe(s.chain, time.Second*8).(type) { - case *Disconnect: - case *Error: - break - default: - t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) - } - sendConn.Close() - } - // Test the last block as a valid block - s.sendNextBlock66(t) -} - -func (s *Suite) TestOldAnnounce_66(t *utesting.T) { - sendConn, recvConn := s.setupConnection66(t), s.setupConnection66(t) - defer sendConn.Close() - defer recvConn.Close() - - s.oldAnnounce(t, sendConn, recvConn) -} - -// TestMaliciousHandshake_66 tries to send malicious data during the handshake. -func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) { - conn := s.dial66(t) - defer conn.Close() - // write hello to client - pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] - handshakes := []*Hello{ - { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 66}, - }, - ID: pub0, - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - {Name: "eth", Version: 66}, - }, - ID: append(pub0, byte(0)), - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - {Name: "eth", Version: 66}, - }, - ID: append(pub0, pub0...), - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - {Name: "eth", Version: 66}, - }, - ID: largeBuffer(2), - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 66}, - }, - ID: largeBuffer(2), - }, - } - for i, handshake := range handshakes { - t.Logf("Testing malicious handshake %v\n", i) - // Init the handshake - if err := conn.Write(handshake); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // check that the peer disconnected - timeout := 20 * time.Second - // Discard one hello - for i := 0; i < 2; i++ { - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - case *Hello: - // Hello's are sent concurrently, so ignore them - continue - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } - } - // Dial for the next round - conn = s.dial66(t) - } -} - -// TestMaliciousStatus_66 sends a status package with a large total difficulty. 
-func (s *Suite) TestMaliciousStatus_66(t *utesting.T) { - conn := s.dial66(t) - defer conn.Close() - // get protoHandshake - conn.handshake(t) - status := &Status{ - ProtocolVersion: uint32(66), - NetworkID: s.chain.chainConfig.ChainID.Uint64(), - TD: largeNumber(2), - Head: s.chain.blocks[s.chain.Len()-1].Hash(), - Genesis: s.chain.blocks[0].Hash(), - ForkID: s.chain.ForkID(), - } - // get status - switch msg := conn.statusExchange(t, s.chain, status).(type) { - case *Status: - t.Logf("%+v\n", msg) - default: - t.Fatalf("expected status, got: %#v ", msg) - } - // wait for disconnect - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - return - default: - t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) - } -} - -func (s *Suite) TestTransaction_66(t *utesting.T) { - tests := []*types.Transaction{ - getNextTxFromChain(t, s), - unknownTx(t, s), - } - for i, tx := range tests { - t.Logf("Testing tx propagation: %v\n", i) - sendSuccessfulTx66(t, s, tx) - } -} - -func (s *Suite) TestMaliciousTx_66(t *utesting.T) { - badTxs := []*types.Transaction{ - getOldTxFromChain(t, s), - invalidNonceTx(t, s), - hugeAmount(t, s), - hugeGasPrice(t, s), - hugeData(t, s), - } - sendConn := s.setupConnection66(t) - defer sendConn.Close() - // set up receiving connection before sending txs to make sure - // no announcements are missed - recvConn := s.setupConnection66(t) - defer recvConn.Close() - - for i, tx := range badTxs { - t.Logf("Testing malicious tx propagation: %v\n", i) - if err := sendConn.Write(&Transactions{tx}); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - - } - // check to make sure bad txs aren't propagated - waitForTxPropagation(t, s, badTxs, recvConn) -} - -// TestZeroRequestID_66 checks that a request ID of zero is still handled -// by the node. -func (s *Suite) TestZeroRequestID_66(t *utesting.T) { - conn := s.setupConnection66(t) - defer conn.Close() - - req := ð.GetBlockHeadersPacket66{ - RequestId: 0, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Number: 0, - }, - Amount: 2, - }, - } - headers, err := s.getBlockHeaders66(conn, req, req.RequestId) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - } - if !headersMatch(t, s.chain, headers) { - t.Fatal("received wrong header(s)") - } -} - -// TestSameRequestID_66 sends two requests with the same request ID -// concurrently to a single node. 
-func (s *Suite) TestSameRequestID_66(t *utesting.T) { - conn := s.setupConnection66(t) - // create two requests with the same request ID - reqID := uint64(1234) - request1 := ð.GetBlockHeadersPacket66{ - RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Number: 1, - }, - Amount: 2, - }, - } - request2 := ð.GetBlockHeadersPacket66{ - RequestId: reqID, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Number: 33, - }, - Amount: 2, - }, - } - // write the first request - err := conn.write66(request1, GetBlockHeaders{}.Code()) - if err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // perform second request - headers2, err := s.getBlockHeaders66(conn, request2, reqID) - if err != nil { - t.Fatalf("could not get block headers: %v", err) - return - } - // wait for response to first request - headers1, err := s.waitForBlockHeadersResponse66(conn, reqID) - if err != nil { - t.Fatalf("could not get BlockHeaders response: %v", err) - } - // check if headers match - if !headersMatch(t, s.chain, headers1) || !headersMatch(t, s.chain, headers2) { - t.Fatal("received wrong header(s)") - } -} - -// TestLargeTxRequest_66 tests whether a node can fulfill a large GetPooledTransactions -// request. -func (s *Suite) TestLargeTxRequest_66(t *utesting.T) { - // send the next block to ensure the node is no longer syncing and is able to accept - // txs - s.sendNextBlock66(t) - // send 2000 transactions to the node - hashMap, txs := generateTxs(t, s, 2000) - sendConn := s.setupConnection66(t) - defer sendConn.Close() - - sendMultipleSuccessfulTxs(t, s, sendConn, txs) - // set up connection to receive to ensure node is peered with the receiving connection - // before tx request is sent - recvConn := s.setupConnection66(t) - defer recvConn.Close() - // create and send pooled tx request - hashes := make([]common.Hash, 0) - for _, hash := range hashMap { - hashes = append(hashes, hash) - } - getTxReq := ð.GetPooledTransactionsPacket66{ - RequestId: 1234, - GetPooledTransactionsPacket: hashes, - } - if err := recvConn.write66(getTxReq, GetPooledTransactions{}.Code()); err != nil { - t.Fatalf("could not write to conn: %v", err) - } - // check that all received transactions match those that were sent to node - switch msg := recvConn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { - case PooledTransactions: - for _, gotTx := range msg { - if _, exists := hashMap[gotTx.Hash()]; !exists { - t.Fatalf("unexpected tx received: %v", gotTx.Hash()) - } - } - default: - t.Fatalf("unexpected %s", pretty.Sdump(msg)) - } -} - -// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions -// request upon receiving a NewPooledTransactionHashes announcement. 
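Note: TestLargeTxRequest_66 above builds a GetPooledTransactionsPacket66 from the announced hashes and then checks every returned transaction against the set it sent. A compact sketch of that request shape and membership check, with made-up hashes standing in for the generateTxs output:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// Stand-ins for the hashes tracked in hashMap by the removed test.
	sent := map[common.Hash]struct{}{
		common.HexToHash("0x01"): {},
		common.HexToHash("0x02"): {},
	}
	hashes := make([]common.Hash, 0, len(sent))
	for h := range sent {
		hashes = append(hashes, h)
	}
	// Same request shape as in TestLargeTxRequest_66.
	req := &eth.GetPooledTransactionsPacket66{
		RequestId:                   1234,
		GetPooledTransactionsPacket: hashes,
	}
	// The test applies this membership check to each transaction in the
	// PooledTransactions response; here it trivially passes on the request itself.
	for _, h := range req.GetPooledTransactionsPacket {
		if _, ok := sent[h]; !ok {
			fmt.Println("unexpected tx hash:", h)
		}
	}
	fmt.Println("requested", len(req.GetPooledTransactionsPacket), "pooled transactions")
}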
-func (s *Suite) TestNewPooledTxs_66(t *utesting.T) { - // send the next block to ensure the node is no longer syncing and is able to accept - // txs - s.sendNextBlock66(t) - // generate 50 txs - hashMap, _ := generateTxs(t, s, 50) - // create new pooled tx hashes announcement - hashes := make([]common.Hash, 0) - for _, hash := range hashMap { - hashes = append(hashes, hash) - } - announce := NewPooledTransactionHashes(hashes) - // send announcement - conn := s.setupConnection66(t) - defer conn.Close() - if err := conn.Write(announce); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // wait for GetPooledTxs request - for { - _, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case GetPooledTransactions: - if len(msg) != len(hashes) { - t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg)) - } - return - case *NewPooledTransactionHashes, *NewBlock, *NewBlockHashes: - // ignore propagated txs and blocks from old tests - continue - default: - t.Fatalf("unexpected %s", pretty.Sdump(msg)) - } - } -} diff --git a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go deleted file mode 100644 index 3c5b22f0b5..0000000000 --- a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package ethtest - -import ( - "fmt" - "reflect" - "time" - - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/assert" -) - -func (c *Conn) statusExchange66(t *utesting.T, chain *Chain) Message { - status := &Status{ - ProtocolVersion: uint32(66), - NetworkID: chain.chainConfig.ChainID.Uint64(), - TD: chain.TD(chain.Len()), - Head: chain.blocks[chain.Len()-1].Hash(), - Genesis: chain.blocks[0].Hash(), - ForkID: chain.ForkID(), - } - return c.statusExchange(t, chain, status) -} - -func (s *Suite) dial66(t *utesting.T) *Conn { - conn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) - } - conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) - conn.ourHighestProtoVersion = 66 - return conn -} - -func (c *Conn) write66(req eth.Packet, code int) error { - payload, err := rlp.EncodeToBytes(req) - if err != nil { - return err - } - _, err = c.Conn.Write(uint64(code), payload) - return err -} - -func (c *Conn) read66() (uint64, Message) { - code, rawData, _, err := c.Conn.Read() - if err != nil { - return 0, errorf("could not read from connection: %v", err) - } - - var msg Message - - switch int(code) { - case (Hello{}).Code(): - msg = new(Hello) - - case (Ping{}).Code(): - msg = new(Ping) - case (Pong{}).Code(): - msg = new(Pong) - case (Disconnect{}).Code(): - msg = new(Disconnect) - case (Status{}).Code(): - msg = new(Status) - case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) - case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) - case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) - case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) - case (NewBlock{}).Code(): - msg = new(NewBlock) - case (NewBlockHashes{}).Code(): - msg = new(NewBlockHashes) - case (Transactions{}).Code(): - msg = new(Transactions) - case (NewPooledTransactionHashes{}).Code(): - msg = new(NewPooledTransactionHashes) - case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) - case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket66) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) - default: - msg = errorf("invalid message 
code: %d", code) - } - - if msg != nil { - if err := rlp.DecodeBytes(rawData, msg); err != nil { - return 0, errorf("could not rlp decode message: %v", err) - } - return 0, msg - } - return 0, errorf("invalid message: %s", string(rawData)) -} - -func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message { - for { - id, msg := c.readAndServe66(chain, timeout) - if id == requestID { - return msg - } - } -} - -// ReadAndServe serves GetBlockHeaders requests while waiting -// on another message from the node. -func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) { - start := time.Now() - for time.Since(start) < timeout { - c.SetReadDeadline(time.Now().Add(10 * time.Second)) - - reqID, msg := c.read66() - - switch msg := msg.(type) { - case *Ping: - c.Write(&Pong{}) - case *GetBlockHeaders: - headers, err := chain.GetHeaders(*msg) - if err != nil { - return 0, errorf("could not get headers for inbound header request: %v", err) - } - resp := ð.BlockHeadersPacket66{ - RequestId: reqID, - BlockHeadersPacket: eth.BlockHeadersPacket(headers), - } - if err := c.write66(resp, BlockHeaders{}.Code()); err != nil { - return 0, errorf("could not write to connection: %v", err) - } - default: - return reqID, msg - } - } - return 0, errorf("no message received within %v", timeout) -} - -func (s *Suite) setupConnection66(t *utesting.T) *Conn { - // create conn - sendConn := s.dial66(t) - sendConn.handshake(t) - sendConn.statusExchange66(t, s.chain) - return sendConn -} - -func (s *Suite) testAnnounce66(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { - // Announce the block. - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - s.waitAnnounce66(t, receiveConn, blockAnnouncement) -} - -func (s *Suite) waitAnnounce66(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { - for { - _, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case *NewBlock: - t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) - assert.Equal(t, - blockAnnouncement.Block.Header(), msg.Block.Header(), - "wrong block header in announcement", - ) - assert.Equal(t, - blockAnnouncement.TD, msg.TD, - "wrong TD in announcement", - ) - return - case *NewBlockHashes: - blockHashes := *msg - t.Logf("received NewBlockHashes message: %s", pretty.Sdump(blockHashes)) - assert.Equal(t, blockAnnouncement.Block.Hash(), blockHashes[0].Hash, - "wrong block hash in announcement", - ) - return - case *NewPooledTransactionHashes: - // ignore old txs being propagated - continue - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } - } -} - -// waitForBlock66 waits for confirmation from the client that it has -// imported the given block. -func (c *Conn) waitForBlock66(block *types.Block) error { - defer c.SetReadDeadline(time.Time{}) - - c.SetReadDeadline(time.Now().Add(20 * time.Second)) - // note: if the node has not yet imported the block, it will respond - // to the GetBlockHeaders request with an empty BlockHeaders response, - // so the GetBlockHeaders request must be sent again until the BlockHeaders - // response contains the desired header. 
- for { - req := eth.GetBlockHeadersPacket66{ - RequestId: 54, - GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ - Origin: eth.HashOrNumber{ - Hash: block.Hash(), - }, - Amount: 1, - }, - } - if err := c.write66(req, GetBlockHeaders{}.Code()); err != nil { - return err - } - - reqID, msg := c.read66() - // check message - switch msg := msg.(type) { - case BlockHeaders: - // check request ID - if reqID != req.RequestId { - return fmt.Errorf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID) - } - for _, header := range msg { - if header.Number.Uint64() == block.NumberU64() { - return nil - } - } - time.Sleep(100 * time.Millisecond) - case *NewPooledTransactionHashes: - // ignore old announcements - continue - default: - return fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) - } - } -} - -func sendSuccessfulTx66(t *utesting.T, s *Suite, tx *types.Transaction) { - sendConn := s.setupConnection66(t) - defer sendConn.Close() - sendSuccessfulTxWithConn(t, s, tx, sendConn) -} - -// waitForBlockHeadersResponse66 waits for a BlockHeaders message with the given expected request ID -func (s *Suite) waitForBlockHeadersResponse66(conn *Conn, expectedID uint64) (BlockHeaders, error) { - reqID, msg := conn.readAndServe66(s.chain, timeout) - switch msg := msg.(type) { - case BlockHeaders: - if reqID != expectedID { - return nil, fmt.Errorf("request ID mismatch: wanted %d, got %d", expectedID, reqID) - } - return msg, nil - default: - return nil, fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) - } -} - -func (s *Suite) getBlockHeaders66(conn *Conn, req eth.Packet, expectedID uint64) (BlockHeaders, error) { - if err := conn.write66(req, GetBlockHeaders{}.Code()); err != nil { - return nil, fmt.Errorf("could not write to connection: %v", err) - } - return s.waitForBlockHeadersResponse66(conn, expectedID) -} - -func headersMatch(t *utesting.T, chain *Chain, headers BlockHeaders) bool { - mismatched := 0 - for _, header := range headers { - num := header.Number.Uint64() - t.Logf("received header (%d): %s", num, pretty.Sdump(header.Hash())) - if !reflect.DeepEqual(chain.blocks[int(num)].Header(), header) { - mismatched += 1 - t.Logf("received wrong header: %v", pretty.Sdump(header)) - } - } - return mismatched == 0 -} - -func (s *Suite) sendNextBlock66(t *utesting.T) { - sendConn, receiveConn := s.setupConnection66(t), s.setupConnection66(t) - defer sendConn.Close() - defer receiveConn.Close() - - // create new block announcement - nextBlock := len(s.chain.blocks) - blockAnnouncement := &NewBlock{ - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - } - // send announcement and wait for node to request the header - s.testAnnounce66(t, sendConn, receiveConn, blockAnnouncement) - // wait for client to update its chain - if err := receiveConn.waitForBlock66(s.fullChain.blocks[nextBlock]); err != nil { - t.Fatal(err) - } - // update test suite chain - s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) -} diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go new file mode 100644 index 0000000000..dd9dfd8619 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -0,0 +1,789 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "fmt" + "net" + "reflect" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" +) + +var ( + pretty = spew.ConfigState{ + Indent: " ", + DisableCapacities: true, + DisablePointerAddresses: true, + SortKeys: true, + } + timeout = 20 * time.Second +) + +// Is_66 checks if the node supports the eth66 protocol version, +// and if not, exists the test suite +func (s *Suite) Is_66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.handshake(); err != nil { + t.Fatalf("handshake failed: %v", err) + } + if conn.negotiatedProtoVersion < 66 { + t.Fail() + } +} + +// dial attempts to dial the given node and perform a handshake, +// returning the created Conn if successful. +func (s *Suite) dial() (*Conn, error) { + // dial + fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) + if err != nil { + return nil, err + } + conn := Conn{Conn: rlpx.NewConn(fd, s.Dest.Pubkey())} + // do encHandshake + conn.ourKey, _ = crypto.GenerateKey() + _, err = conn.Handshake(conn.ourKey) + if err != nil { + conn.Close() + return nil, err + } + // set default p2p capabilities + conn.caps = []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + } + conn.ourHighestProtoVersion = 65 + return &conn, nil +} + +// dial66 attempts to dial the given node and perform a handshake, +// returning the created Conn with additional eth66 capabilities if +// successful +func (s *Suite) dial66() (*Conn, error) { + conn, err := s.dial() + if err != nil { + return nil, fmt.Errorf("dial failed: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) + conn.ourHighestProtoVersion = 66 + return conn, nil +} + +// dial66 attempts to dial the given node and perform a handshake, +// returning the created Conn with additional snap/1 capabilities if +// successful. +func (s *Suite) dialSnap() (*Conn, error) { + conn, err := s.dial66() + if err != nil { + return nil, fmt.Errorf("dial failed: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1}) + conn.ourHighestSnapProtoVersion = 1 + return conn, nil +} + +// peer performs both the protocol handshake and the status message +// exchange with the node in order to peer with it. 
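Note: the dial helpers added above differ only in the capability list they advertise: dial() offers eth/64 and eth/65, dial66() appends eth/66, and dialSnap() appends snap/1 on top of that. A tiny illustrative sketch of that stacking (not part of the suite):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	// dial(): baseline eth/64 + eth/65.
	base := []p2p.Cap{{Name: "eth", Version: 64}, {Name: "eth", Version: 65}}
	// dial66(): same list plus eth/66.
	eth66 := append(append([]p2p.Cap{}, base...), p2p.Cap{Name: "eth", Version: 66})
	// dialSnap(): the eth66 list plus snap/1.
	snap1 := append(append([]p2p.Cap{}, eth66...), p2p.Cap{Name: "snap", Version: 1})

	fmt.Println(base)  // [eth/64 eth/65]
	fmt.Println(eth66) // [eth/64 eth/65 eth/66]
	fmt.Println(snap1) // [eth/64 eth/65 eth/66 snap/1]
}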
+func (c *Conn) peer(chain *Chain, status *Status) error { + if err := c.handshake(); err != nil { + return fmt.Errorf("handshake failed: %v", err) + } + if _, err := c.statusExchange(chain, status); err != nil { + return fmt.Errorf("status exchange failed: %v", err) + } + return nil +} + +// handshake performs a protocol handshake with the node. +func (c *Conn) handshake() error { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(10 * time.Second)) + // write hello to client + pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] + ourHandshake := &Hello{ + Version: 5, + Caps: c.caps, + ID: pub0, + } + if err := c.Write(ourHandshake); err != nil { + return fmt.Errorf("write to connection failed: %v", err) + } + // read hello from client + switch msg := c.Read().(type) { + case *Hello: + // set snappy if version is at least 5 + if msg.Version >= 5 { + c.SetSnappy(true) + } + c.negotiateEthProtocol(msg.Caps) + if c.negotiatedProtoVersion == 0 { + return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion) + } + // If we require snap, verify that it was negotiated + if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion { + return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion) + } + return nil + default: + return fmt.Errorf("bad handshake: %#v", msg) + } +} + +// negotiateEthProtocol sets the Conn's eth protocol version to highest +// advertised capability from peer. +func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { + var highestEthVersion uint + var highestSnapVersion uint + for _, capability := range caps { + switch capability.Name { + case "eth": + if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { + highestEthVersion = capability.Version + } + case "snap": + if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion { + highestSnapVersion = capability.Version + } + } + } + c.negotiatedProtoVersion = highestEthVersion + c.negotiatedSnapProtoVersion = highestSnapVersion +} + +// statusExchange performs a `Status` message exchange with the given node. 
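Note: negotiateEthProtocol above picks, per capability name, the highest version the peer advertises that does not exceed our own highest supported version. The same rule, restated as a free function purely for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
)

// highestMutual restates the rule from negotiateEthProtocol: the highest
// version advertised by the peer for a capability name, capped at our own
// highest supported version (0 means no overlap).
func highestMutual(remote []p2p.Cap, name string, ourHighest uint) uint {
	var best uint
	for _, c := range remote {
		if c.Name == name && c.Version > best && c.Version <= ourHighest {
			best = c.Version
		}
	}
	return best
}

func main() {
	remote := []p2p.Cap{
		{Name: "eth", Version: 64}, {Name: "eth", Version: 65},
		{Name: "eth", Version: 66}, {Name: "snap", Version: 1},
	}
	fmt.Println(highestMutual(remote, "eth", 66)) // 66
	fmt.Println(highestMutual(remote, "snap", 1)) // 1
	fmt.Println(highestMutual(remote, "eth", 65)) // 65
}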
+func (c *Conn) statusExchange(chain *Chain, status *Status) (Message, error) { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(20 * time.Second)) + + // read status message from client + var message Message +loop: + for { + switch msg := c.Read().(type) { + case *Status: + if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { + return nil, fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x", + want, chain.blocks[chain.Len()-1].NumberU64(), have) + } + if have, want := msg.TD.Cmp(chain.TD()), 0; have != want { + return nil, fmt.Errorf("wrong TD in status: have %v want %v", have, want) + } + if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { + return nil, fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want) + } + if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) { + return nil, fmt.Errorf("wrong protocol version: have %v, want %v", have, want) + } + message = msg + break loop + case *Disconnect: + return nil, fmt.Errorf("disconnect received: %v", msg.Reason) + case *Ping: + c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error + // (PINGs should not be a response upon fresh connection) + default: + return nil, fmt.Errorf("bad status message: %s", pretty.Sdump(msg)) + } + } + // make sure eth protocol version is set for negotiation + if c.negotiatedProtoVersion == 0 { + return nil, fmt.Errorf("eth protocol version must be set in Conn") + } + if status == nil { + // default status message + status = &Status{ + ProtocolVersion: uint32(c.negotiatedProtoVersion), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } + } + if err := c.Write(status); err != nil { + return nil, fmt.Errorf("write to connection failed: %v", err) + } + return message, nil +} + +// createSendAndRecvConns creates two connections, one for sending messages to the +// node, and one for receiving messages from the node. +func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) { + var ( + sendConn *Conn + recvConn *Conn + err error + ) + if isEth66 { + sendConn, err = s.dial66() + if err != nil { + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + recvConn, err = s.dial66() + if err != nil { + sendConn.Close() + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + } else { + sendConn, err = s.dial() + if err != nil { + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + recvConn, err = s.dial() + if err != nil { + sendConn.Close() + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + } + return sendConn, recvConn, nil +} + +func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { + if c.negotiatedProtoVersion == 66 { + _, msg := c.readAndServe66(chain, timeout) + return msg + } + return c.readAndServe65(chain, timeout) +} + +// readAndServe serves GetBlockHeaders requests while waiting +// on another message from the node. 
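Note: when the caller passes status == nil, statusExchange above fills in a default status from the negotiated protocol version and the test chain. A sketch of that shape using the public eth.StatusPacket, which is assumed here to mirror the suite's local Status type; every concrete value is a placeholder:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// Placeholder values stand in for what the suite reads from its test chain.
	status := &eth.StatusPacket{
		ProtocolVersion: 66,            // c.negotiatedProtoVersion
		NetworkID:       1,             // chain.chainConfig.ChainID.Uint64()
		TD:              big.NewInt(0), // chain.TD()
		Head:            common.Hash{}, // chain.blocks[chain.Len()-1].Hash()
		Genesis:         common.Hash{}, // chain.blocks[0].Hash()
		ForkID:          forkid.ID{},   // chain.ForkID()
	}
	fmt.Printf("%+v\n", status)
}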
+func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message { + start := time.Now() + for time.Since(start) < timeout { + c.SetReadDeadline(time.Now().Add(5 * time.Second)) + switch msg := c.Read().(type) { + case *Ping: + c.Write(&Pong{}) + case *GetBlockHeaders: + req := *msg + headers, err := chain.GetHeaders(req) + if err != nil { + return errorf("could not get headers for inbound header request: %v", err) + } + if err := c.Write(headers); err != nil { + return errorf("could not write to connection: %v", err) + } + default: + return msg + } + } + return errorf("no message received within %v", timeout) +} + +// readAndServe66 serves eth66 GetBlockHeaders requests while waiting +// on another message from the node. +func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) { + start := time.Now() + for time.Since(start) < timeout { + c.SetReadDeadline(time.Now().Add(10 * time.Second)) + + reqID, msg := c.Read66() + + switch msg := msg.(type) { + case *Ping: + c.Write(&Pong{}) + case GetBlockHeaders: + headers, err := chain.GetHeaders(msg) + if err != nil { + return 0, errorf("could not get headers for inbound header request: %v", err) + } + resp := ð.BlockHeadersPacket66{ + RequestId: reqID, + BlockHeadersPacket: eth.BlockHeadersPacket(headers), + } + if err := c.Write66(resp, BlockHeaders{}.Code()); err != nil { + return 0, errorf("could not write to connection: %v", err) + } + default: + return reqID, msg + } + } + return 0, errorf("no message received within %v", timeout) +} + +// headersRequest executes the given `GetBlockHeaders` request. +func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bool, reqID uint64) (BlockHeaders, error) { + defer c.SetReadDeadline(time.Time{}) + c.SetReadDeadline(time.Now().Add(20 * time.Second)) + // if on eth66 connection, perform eth66 GetBlockHeaders request + if isEth66 { + return getBlockHeaders66(chain, c, request, reqID) + } + if err := c.Write(request); err != nil { + return nil, err + } + switch msg := c.readAndServe(chain, timeout).(type) { + case *BlockHeaders: + return *msg, nil + default: + return nil, fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) + } +} + +func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) { + defer c.SetReadDeadline(time.Time{}) + c.SetReadDeadline(time.Now().Add(5 * time.Second)) + if err := c.Write(msg); err != nil { + return nil, fmt.Errorf("could not write to connection: %v", err) + } + return c.ReadSnap(id) +} + +// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol. 
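Note: on the serving side, readAndServe66 above answers an inbound GetBlockHeaders by echoing the request ID into a BlockHeadersPacket66, which is what lets the remote end route the reply back to its waiter. A minimal sketch of that echo (serveHeaders66 is illustrative, not suite code):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// serveHeaders66 sketches the reply path of readAndServe66: the inbound
// request ID is echoed into the response so the remote side can match it.
func serveHeaders66(reqID uint64, headers []*types.Header) *eth.BlockHeadersPacket66 {
	return &eth.BlockHeadersPacket66{
		RequestId:          reqID,
		BlockHeadersPacket: eth.BlockHeadersPacket(headers),
	}
}

func main() {
	resp := serveHeaders66(54, nil) // an empty answer, e.g. for a still-unknown hash
	fmt.Println(resp.RequestId, len(resp.BlockHeadersPacket))
}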
+func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) { + // write request + packet := eth.GetBlockHeadersPacket(*request) + req := ð.GetBlockHeadersPacket66{ + RequestId: id, + GetBlockHeadersPacket: &packet, + } + if err := conn.Write66(req, GetBlockHeaders{}.Code()); err != nil { + return nil, fmt.Errorf("could not write to connection: %v", err) + } + // wait for response + msg := conn.waitForResponse(chain, timeout, req.RequestId) + headers, ok := msg.(BlockHeaders) + if !ok { + return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg)) + } + return headers, nil +} + +// headersMatch returns whether the received headers match the given request +func headersMatch(expected BlockHeaders, headers BlockHeaders) bool { + return reflect.DeepEqual(expected, headers) +} + +// waitForResponse reads from the connection until a response with the expected +// request ID is received. +func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message { + for { + id, msg := c.readAndServe66(chain, timeout) + if id == requestID { + return msg + } + } +} + +// sendNextBlock broadcasts the next block in the chain and waits +// for the node to propagate the block and import it into its chain. +func (s *Suite) sendNextBlock(isEth66 bool) error { + // set up sending and receiving connections + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create new block announcement + nextBlock := s.fullChain.blocks[s.chain.Len()] + blockAnnouncement := &NewBlock{ + Block: nextBlock, + TD: s.fullChain.TotalDifficultyAt(s.chain.Len()), + } + // send announcement and wait for node to request the header + if err = s.testAnnounce(sendConn, recvConn, blockAnnouncement); err != nil { + return fmt.Errorf("failed to announce block: %v", err) + } + // wait for client to update its chain + if err = s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("failed to receive confirmation of block import: %v", err) + } + // update test suite chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} + +// testAnnounce writes a block announcement to the node and waits for the node +// to propagate it. +func (s *Suite) testAnnounce(sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) error { + if err := sendConn.Write(blockAnnouncement); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + return s.waitAnnounce(receiveConn, blockAnnouncement) +} + +// waitAnnounce waits for a NewBlock or NewBlockHashes announcement from the node. 
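Note: sendNextBlock above announces the next full-chain block together with the total difficulty at its height (TotalDifficultyAt presumably includes the announced block, matching the old TD(nextBlock + 1) call). The public packet carrying such an announcement has the same two fields; a placeholder-valued sketch:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// A placeholder block standing in for s.fullChain.blocks[s.chain.Len()].
	header := &types.Header{Number: big.NewInt(1001), Difficulty: big.NewInt(2)}
	block := types.NewBlockWithHeader(header)

	// Same two fields as the suite's NewBlock announcement: the block itself
	// and the total difficulty at its height (the TD value here is made up).
	announce := &eth.NewBlockPacket{
		Block: block,
		TD:    big.NewInt(123456),
	}
	fmt.Println(announce.Block.NumberU64(), announce.TD)
}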
+func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error { + for { + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *NewBlock: + if !reflect.DeepEqual(blockAnnouncement.Block.Header(), msg.Block.Header()) { + return fmt.Errorf("wrong header in block announcement: \nexpected %v "+ + "\ngot %v", blockAnnouncement.Block.Header(), msg.Block.Header()) + } + if !reflect.DeepEqual(blockAnnouncement.TD, msg.TD) { + return fmt.Errorf("wrong TD in announcement: expected %v, got %v", blockAnnouncement.TD, msg.TD) + } + return nil + case *NewBlockHashes: + hashes := *msg + if blockAnnouncement.Block.Hash() != hashes[0].Hash { + return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) + } + return nil + case *NewPooledTransactionHashes: + // ignore tx announcements from previous tests + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } +} + +func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool) error { + defer conn.SetReadDeadline(time.Time{}) + conn.SetReadDeadline(time.Now().Add(20 * time.Second)) + // create request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: block.Hash(), + }, + Amount: 1, + } + // loop until BlockHeaders response contains desired block, confirming the + // node imported the block + for { + var ( + headers BlockHeaders + err error + ) + if isEth66 { + requestID := uint64(54) + headers, err = conn.headersRequest(req, s.chain, eth66, requestID) + } else { + headers, err = conn.headersRequest(req, s.chain, eth65, 0) + } + if err != nil { + return fmt.Errorf("GetBlockHeader request failed: %v", err) + } + // if headers response is empty, node hasn't imported block yet, try again + if len(headers) == 0 { + time.Sleep(100 * time.Millisecond) + continue + } + if !reflect.DeepEqual(block.Header(), headers[0]) { + return fmt.Errorf("wrong header returned: wanted %v, got %v", block.Header(), headers[0]) + } + return nil + } +} + +func (s *Suite) oldAnnounce(isEth66 bool) error { + sendConn, receiveConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer receiveConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := receiveConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create old block announcement + oldBlockAnnounce := &NewBlock{ + Block: s.chain.blocks[len(s.chain.blocks)/2], + TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), + } + if err := sendConn.Write(oldBlockAnnounce); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // wait to see if the announcement is propagated + switch msg := receiveConn.readAndServe(s.chain, time.Second*8).(type) { + case *NewBlock: + block := *msg + if block.Block.Hash() == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block propagated: %s", pretty.Sdump(msg)) + } + case *NewBlockHashes: + hashes := *msg + for _, hash := range hashes { + if hash.Hash == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block announced: %s", pretty.Sdump(msg)) + } + } + case *Error: + errMsg := *msg + // check to make sure error is timeout (propagation didn't come through == test successful) + if !strings.Contains(errMsg.String(), "timeout") { + return fmt.Errorf("unexpected error: %v", pretty.Sdump(msg)) + } + default: + return 
fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + return nil +} + +func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error { + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + defer conn.Close() + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + if err := conn.Write(handshake); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // check that the peer disconnected + for i := 0; i < 2; i++ { + switch msg := conn.readAndServe(s.chain, 20*time.Second).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Discard one hello as Hello's are sent concurrently + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } + // dial for the next round + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + } + return nil +} + +func (s *Suite) maliciousStatus(conn *Conn) error { + if err := conn.handshake(); err != nil { + return fmt.Errorf("handshake failed: %v", err) + } + status := &Status{ + ProtocolVersion: uint32(conn.negotiatedProtoVersion), + NetworkID: s.chain.chainConfig.ChainID.Uint64(), + TD: largeNumber(2), + Head: s.chain.blocks[s.chain.Len()-1].Hash(), + Genesis: s.chain.blocks[0].Hash(), + ForkID: s.chain.ForkID(), + } + // get status + msg, err := conn.statusExchange(s.chain, status) + if err != nil { + return fmt.Errorf("status exchange failed: %v", err) + } + switch msg := msg.(type) { + case *Status: + default: + return fmt.Errorf("expected status, got: %#v ", msg) + } + // wait for disconnect + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *Disconnect: + return nil + case *Error: + return nil + default: + return fmt.Errorf("expected disconnect, got: %s", pretty.Sdump(msg)) + } +} + +func (s *Suite) hashAnnounce(isEth66 bool) error { + // create connections + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return fmt.Errorf("failed to create connections: %v", err) + } + defer sendConn.Close() + defer recvConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create NewBlockHashes announcement + type anno struct { + Hash common.Hash // Hash of one particular block being announced + Number uint64 // Number of one particular block being announced + } + nextBlock := 
s.fullChain.blocks[s.chain.Len()] + announcement := anno{Hash: nextBlock.Hash(), Number: nextBlock.Number().Uint64()} + newBlockHash := &NewBlockHashes{announcement} + if err := sendConn.Write(newBlockHash); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + // Announcement sent, now wait for a header request + var ( + id uint64 + msg Message + blockHeaderReq GetBlockHeaders + ) + if isEth66 { + id, msg = sendConn.Read66() + switch msg := msg.(type) { + case GetBlockHeaders: + blockHeaderReq = msg + default: + return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) + } + if blockHeaderReq.Amount != 1 { + return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount) + } + if blockHeaderReq.Origin.Hash != announcement.Hash { + return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v", + pretty.Sdump(announcement), + pretty.Sdump(blockHeaderReq)) + } + if err := sendConn.Write66(ð.BlockHeadersPacket66{ + RequestId: id, + BlockHeadersPacket: eth.BlockHeadersPacket{ + nextBlock.Header(), + }, + }, BlockHeaders{}.Code()); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + } else { + msg = sendConn.Read() + switch msg := msg.(type) { + case *GetBlockHeaders: + blockHeaderReq = *msg + default: + return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) + } + if blockHeaderReq.Amount != 1 { + return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount) + } + if blockHeaderReq.Origin.Hash != announcement.Hash { + return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v", + pretty.Sdump(announcement), + pretty.Sdump(blockHeaderReq)) + } + if err := sendConn.Write(&BlockHeaders{nextBlock.Header()}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + } + // wait for block announcement + msg = recvConn.readAndServe(s.chain, timeout) + switch msg := msg.(type) { + case *NewBlockHashes: + hashes := *msg + if len(hashes) != 1 { + return fmt.Errorf("unexpected new block hash announcement: wanted 1 announcement, got %d", len(hashes)) + } + if nextBlock.Hash() != hashes[0].Hash { + return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(), + hashes[0].Hash) + } + case *NewBlock: + // node should only propagate NewBlock without having requested the body if the body is empty + nextBlockBody := nextBlock.Body() + if len(nextBlockBody.Transactions) != 0 || len(nextBlockBody.Uncles) != 0 { + return fmt.Errorf("unexpected non-empty new block propagated: %s", pretty.Sdump(msg)) + } + if msg.Block.Hash() != nextBlock.Hash() { + return fmt.Errorf("mismatched hash of propagated new block: wanted %v, got %v", + nextBlock.Hash(), msg.Block.Hash()) + } + // check to make sure header matches header that was sent to the node + if !reflect.DeepEqual(nextBlock.Header(), msg.Block.Header()) { + return fmt.Errorf("incorrect header received: wanted %v, got %v", nextBlock.Header(), msg.Block.Header()) + } + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + // confirm node imported block + if err := s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("error waiting for node to import new block: %v", err) + } + // update the chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go new file mode 100644 index 
0000000000..95dd90fd3b --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -0,0 +1,675 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "bytes" + "errors" + "fmt" + "math/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/snap" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +func (s *Suite) TestSnapStatus(t *utesting.T) { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } +} + +type accRangeTest struct { + nBytes uint64 + root common.Hash + origin common.Hash + limit common.Hash + + expAccounts int + expFirst common.Hash + expLast common.Hash +} + +// TestSnapGetAccountRange various forms of GetAccountRange requests. +func (s *Suite) TestSnapGetAccountRange(t *utesting.T) { + var ( + root = s.chain.RootAt(999) + ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + zero = common.Hash{} + firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29") + firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b") + secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606") + storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790") + ) + for i, tc := range []accRangeTest{ + // Tests decreasing the number of bytes + {4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")}, + {3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")}, + {2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")}, + {1, root, zero, ffHash, 1, firstKey, firstKey}, + + // Tests variations of the range + // + // [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds + {4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey}, + // [00b0 to 0bf0]: where both are before firstkey. 
Should return firstKey (even though it's out of bounds) + {4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey}, + {4000, root, zero, zero, 1, firstKey, firstKey}, + {4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")}, + {4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")}, + + // Test different root hashes + // + // A stateroot that does not exist + {4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero}, + // The genesis stateroot (we expect it to not be served) + {4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero}, + // A 127 block old stateroot, expected to be served + {4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")}, + // A root which is not actually an account root, but a storage orot + {4000, storageRoot, zero, ffHash, 0, zero, zero}, + + // And some non-sensical requests + // + // range from [0xFF to 0x00], wrong order. Expect not to be serviced + {4000, root, ffHash, zero, 0, zero, zero}, + // range from [firstkey, firstkey-1], wrong order. Expect to get first key. + {4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey}, + // range from [firstkey, 0], wrong order. Expect to get first key. + {4000, root, firstKey, zero, 1, firstKey, firstKey}, + // Max bytes: 0. Expect to deliver one account. + {0, root, zero, ffHash, 1, firstKey, firstKey}, + } { + if err := s.snapGetAccountRange(t, &tc); err != nil { + t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err) + } + } +} + +type stRangesTest struct { + root common.Hash + accounts []common.Hash + origin []byte + limit []byte + nBytes uint64 + + expSlots int +} + +// TestSnapGetStorageRange various forms of GetStorageRanges requests. 
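Note: each row of the account-range table above corresponds to one GetAccountRange query; snapGetAccountRange later in this file fills ID, Root, Origin, Limit and Bytes from the test case. A sketch of that request shape using the public snap packet type (the root here is a dummy value; the tests use s.chain.RootAt(999)):

package main

import (
	"fmt"
	"math/rand"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

func main() {
	// One account-range query in the shape used by the table above.
	req := &snap.GetAccountRangePacket{
		ID:     uint64(rand.Int63()),
		Root:   common.Hash{0x13, 0x37}, // dummy state root
		Origin: common.Hash{},           // zero: start of the account trie
		Limit:  common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
		Bytes:  4000,
	}
	fmt.Printf("ID=%d origin=%x limit=%x bytes=%d\n", req.ID, req.Origin, req.Limit, req.Bytes)
}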
+func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) { + var ( + ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + zero = common.Hash{} + firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606") + ) + for i, tc := range []stRangesTest{ + { + root: s.chain.RootAt(999), + accounts: []common.Hash{secondKey, firstKey}, + origin: zero[:], + limit: ffHash[:], + nBytes: 500, + expSlots: 0, + }, + + /* + Some tests against this account: + { + "balance": "0", + "nonce": 1, + "root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790", + "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "storage": { + "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01", + "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03" + }, + "key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844" + } + */ + { // [:] -> [slot1, slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: zero[:], + limit: ffHash[:], + nBytes: 500, + expSlots: 3, + }, + { // [slot1:] -> [slot1, slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"), + limit: ffHash[:], + nBytes: 500, + expSlots: 3, + }, + { // [slot1+ :] -> [slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"), + limit: ffHash[:], + nBytes: 500, + expSlots: 2, + }, + { // [slot1:slot2] -> [slot1, slot2] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"), + limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"), + nBytes: 500, + expSlots: 2, + }, + { // [slot1+:slot2+] -> [slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"), + nBytes: 500, + expSlots: 2, + }, + } { + if err := s.snapGetStorageRanges(t, &tc); err != nil { + t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v", + i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err) + } + } +} + +type byteCodesTest struct { + nBytes uint64 + hashes []common.Hash + + expHashes int +} + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + // emptyCode is the known hash of the empty EVM bytecode. 
+ emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") +) + +// TestSnapGetByteCodes various forms of GetByteCodes requests. +func (s *Suite) TestSnapGetByteCodes(t *utesting.T) { + // The halfchain import should yield these bytecodes + var hcBytecodes []common.Hash + for _, s := range []string{ + "0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317", + "0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3", + "0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44", + "0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6", + "0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c", + "0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04", + "0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49", + "0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142", + "0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1", + "0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb", + "0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d", + "0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732", + "0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77", + "0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f", + "0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373", + "0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb", + "0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e", + "0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6", + "0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35", + "0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b", + "0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f", + "0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce", + "0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b", + "0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a", + "0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49", + } { + hcBytecodes = append(hcBytecodes, common.HexToHash(s)) + } + + for i, tc := range []byteCodesTest{ + // A few stateroots + { + nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)}, + expHashes: 0, + }, + { + nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)}, + expHashes: 0, + }, + // Empties + { + nBytes: 10000, hashes: []common.Hash{emptyRoot}, + expHashes: 0, + }, + { + nBytes: 10000, hashes: []common.Hash{emptyCode}, + expHashes: 1, + }, + { + nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode}, + expHashes: 3, + }, + // The existing bytecodes + { + nBytes: 10000, hashes: hcBytecodes, + expHashes: len(hcBytecodes), + }, + // The existing, with limited byte arg + { + nBytes: 1, hashes: hcBytecodes, + expHashes: 1, + }, + { + nBytes: 0, hashes: hcBytecodes, + expHashes: 1, + }, + { + nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]}, + expHashes: 4, + }, + } { + if err := s.snapGetByteCodes(t, &tc); err != nil { + t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err) + } + } +} + +type trieNodesTest struct { + root common.Hash + paths []snap.TrieNodePathSet + nBytes uint64 + + expHashes []common.Hash + expReject bool +} + +func decodeNibbles(nibbles []byte, bytes []byte) { + for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, 
ni+2 { + bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1] + } +} + +// hasTerm returns whether a hex key has the terminator flag. +func hasTerm(s []byte) bool { + return len(s) > 0 && s[len(s)-1] == 16 +} + +func keybytesToHex(str []byte) []byte { + l := len(str)*2 + 1 + var nibbles = make([]byte, l) + for i, b := range str { + nibbles[i*2] = b / 16 + nibbles[i*2+1] = b % 16 + } + nibbles[l-1] = 16 + return nibbles +} + +func hexToCompact(hex []byte) []byte { + terminator := byte(0) + if hasTerm(hex) { + terminator = 1 + hex = hex[:len(hex)-1] + } + buf := make([]byte, len(hex)/2+1) + buf[0] = terminator << 5 // the flag byte + if len(hex)&1 == 1 { + buf[0] |= 1 << 4 // odd flag + buf[0] |= hex[0] // first nibble is contained in the first byte + hex = hex[1:] + } + decodeNibbles(hex, buf[1:]) + return buf +} + +// TestSnapTrieNodes various forms of GetTrieNodes requests. +func (s *Suite) TestSnapTrieNodes(t *utesting.T) { + + key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + // helper function to iterate the key, and generate the compact-encoded + // trie paths along the way. + pathTo := func(length int) snap.TrieNodePathSet { + hex := keybytesToHex(key)[:length] + hex[len(hex)-1] = 0 // remove term flag + hKey := hexToCompact(hex) + return snap.TrieNodePathSet{hKey} + } + var accPaths []snap.TrieNodePathSet + for i := 1; i <= 65; i++ { + accPaths = append(accPaths, pathTo(i)) + } + empty := emptyCode + for i, tc := range []trieNodesTest{ + { + root: s.chain.RootAt(999), + paths: nil, + nBytes: 500, + expHashes: nil, + }, + { + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off + snap.TrieNodePathSet{[]byte{0}}, + }, + nBytes: 5000, + expHashes: []common.Hash{}, + expReject: true, + }, + { + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0}}, + snap.TrieNodePathSet{[]byte{1}, []byte{0}}, + }, + nBytes: 5000, + //0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf + expHashes: []common.Hash{s.chain.RootAt(999)}, + }, + { // nonsensically long path + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}}, + }, + nBytes: 5000, + expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")}, + }, + { + root: s.chain.RootAt(0), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0}}, + snap.TrieNodePathSet{[]byte{1}, []byte{0}}, + }, + nBytes: 5000, + expHashes: []common.Hash{}, + }, + { + // The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures. 
+ root: s.chain.RootAt(999), + paths: accPaths, + nBytes: 5000, + expHashes: []common.Hash{ + common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), + common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"), + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty}, + }, + { + // Basically the same as above, with different ordering + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + accPaths[10], accPaths[1], accPaths[0], + }, + nBytes: 5000, + expHashes: []common.Hash{ + empty, + common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"), + common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), + }, + }, + } { + if err := s.snapGetTrieNodes(t, &tc); err != nil { + t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err) + } + } +} + +func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetAccountRange{ + ID: uint64(rand.Int63()), + Root: tc.root, + Origin: tc.origin, + Limit: tc.limit, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("account range request failed: %v", err) + } + var res *snap.AccountRangePacket + if r, ok := resp.(*AccountRange); !ok { + return fmt.Errorf("account range response wrong: %T %v", resp, resp) + } else { + res = (*snap.AccountRangePacket)(r) + } + if exp, got := tc.expAccounts, len(res.Accounts); exp != got { + return fmt.Errorf("expected %d accounts, got %d", exp, got) + } + // Check that the encoding order is correct + for i := 1; i < len(res.Accounts); i++ { + if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 { + return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:]) + } + } + var ( + hashes []common.Hash + accounts [][]byte + proof = res.Proof + ) + hashes, accounts, err = res.Unpack() + if err != nil { + return err + } + if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 { + return nil + } + if len(hashes) > 0 { + if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got { + return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got) + } + if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got { + return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got) + } + } + // Reconstruct a partial trie from the response and verify it + keys := make([][]byte, len(hashes)) + for i, key := range hashes { + keys[i] = common.CopyBytes(key[:]) + } + nodes := make(light.NodeList, len(proof)) + for i, node := range proof { + nodes[i] = node + } + proofdb := nodes.NodeSet() + + var end []byte + if len(keys) > 0 { + end = keys[len(keys)-1] + } + _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, 
proofdb) + return err +} + +func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetStorageRanges{ + ID: uint64(rand.Int63()), + Root: tc.root, + Accounts: tc.accounts, + Origin: tc.origin, + Limit: tc.limit, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("account range request failed: %v", err) + } + var res *snap.StorageRangesPacket + if r, ok := resp.(*StorageRanges); !ok { + return fmt.Errorf("account range response wrong: %T %v", resp, resp) + } else { + res = (*snap.StorageRangesPacket)(r) + } + gotSlots := 0 + // Ensure the ranges are monotonically increasing + for i, slots := range res.Slots { + gotSlots += len(slots) + for j := 1; j < len(slots); j++ { + if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 { + return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:]) + } + } + } + if exp, got := tc.expSlots, gotSlots; exp != got { + return fmt.Errorf("expected %d slots, got %d", exp, got) + } + return nil +} + +func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetByteCodes{ + ID: uint64(rand.Int63()), + Hashes: tc.hashes, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("getBytecodes request failed: %v", err) + } + var res *snap.ByteCodesPacket + if r, ok := resp.(*ByteCodes); !ok { + return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp) + } else { + res = (*snap.ByteCodesPacket)(r) + } + if exp, got := tc.expHashes, len(res.Codes); exp != got { + for i, c := range res.Codes { + fmt.Printf("%d. 
%#x\n", i, c) + } + return fmt.Errorf("expected %d bytecodes, got %d", exp, got) + } + // Cross reference the requested bytecodes with the response to find gaps + // that the serving node is missing + var ( + bytecodes = res.Codes + hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash = make([]byte, 32) + codes = make([][]byte, len(req.Hashes)) + ) + + for i, j := 0, 0; i < len(bytecodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + + for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) { + j++ + } + if j < len(req.Hashes) { + codes[j] = bytecodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + return errors.New("unexpected bytecode") + } + + return nil +} + +func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetTrieNodes{ + ID: uint64(rand.Int63()), + Root: tc.root, + Paths: tc.paths, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + if tc.expReject { + return nil + } + return fmt.Errorf("trienodes request failed: %v", err) + } + var res *snap.TrieNodesPacket + if r, ok := resp.(*TrieNodes); !ok { + return fmt.Errorf("trienodes response wrong: %T %v", resp, resp) + } else { + res = (*snap.TrieNodesPacket)(r) + } + + // Check the correctness + + // Cross reference the requested trienodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + trienodes := res.Nodes + if got, want := len(trienodes), len(tc.expHashes); got != want { + return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want) + } + for i, trienode := range trienodes { + hasher.Reset() + hasher.Write(trienode) + hasher.Read(hash) + if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) { + fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want) + err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want) + } + } + return err +} diff --git a/cmd/devp2p/internal/ethtest/snapTypes.go b/cmd/devp2p/internal/ethtest/snapTypes.go new file mode 100644 index 0000000000..bb8638c3d8 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snapTypes.go @@ -0,0 +1,36 @@ +package ethtest + +import "github.com/ethereum/go-ethereum/eth/protocols/snap" + +// GetAccountRange represents an account range query. 
+type GetAccountRange snap.GetAccountRangePacket + +func (g GetAccountRange) Code() int { return 33 } + +type AccountRange snap.AccountRangePacket + +func (g AccountRange) Code() int { return 34 } + +type GetStorageRanges snap.GetStorageRangesPacket + +func (g GetStorageRanges) Code() int { return 35 } + +type StorageRanges snap.StorageRangesPacket + +func (g StorageRanges) Code() int { return 36 } + +type GetByteCodes snap.GetByteCodesPacket + +func (g GetByteCodes) Code() int { return 37 } + +type ByteCodes snap.ByteCodesPacket + +func (g ByteCodes) Code() int { return 38 } + +type GetTrieNodes snap.GetTrieNodesPacket + +func (g GetTrieNodes) Code() int { return 39 } + +type TrieNodes snap.TrieNodesPacket + +func (g TrieNodes) Code() int { return 40 } diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index abc6bcddce..dee59bc579 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -17,33 +17,16 @@ package ethtest import ( - "fmt" - "net" - "strings" "time" - "github.com/davecgh/go-spew/spew" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/rlpx" - "github.com/stretchr/testify/assert" ) -var pretty = spew.ConfigState{ - Indent: " ", - DisableCapacities: true, - DisablePointerAddresses: true, - SortKeys: true, -} - -var timeout = 20 * time.Second - -// Suite represents a structure used to test the eth -// protocol of a node(s). +// Suite represents a structure used to test a node's conformance +// to the eth protocol. 
type Suite struct { Dest *enode.Node @@ -69,140 +52,166 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e func (s *Suite) AllEthTests() []utesting.Test { return []utesting.Test{ // status - {Name: "TestStatus", Fn: s.TestStatus}, - {Name: "TestStatus_66", Fn: s.TestStatus_66}, + {Name: "TestStatus65", Fn: s.TestStatus65}, + {Name: "TestStatus66", Fn: s.TestStatus66}, // get block headers - {Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders}, - {Name: "TestGetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66}, - {Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66}, - {Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66}, - {Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66}, + {Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, // get block bodies - {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, - {Name: "TestGetBlockBodies_66", Fn: s.TestGetBlockBodies_66}, + {Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, // broadcast - {Name: "TestBroadcast", Fn: s.TestBroadcast}, - {Name: "TestBroadcast_66", Fn: s.TestBroadcast_66}, - {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, - {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, - {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce}, - {Name: "TestOldAnnounce_66", Fn: s.TestOldAnnounce_66}, + {Name: "TestBroadcast65", Fn: s.TestBroadcast65}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, // malicious handshakes + status - {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, - {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, - {Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66}, - {Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus_66}, + {Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65}, + {Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, // test transactions - {Name: "TestTransaction", Fn: s.TestTransaction}, - {Name: "TestTransaction_66", Fn: s.TestTransaction_66}, - {Name: "TestMaliciousTx", Fn: s.TestMaliciousTx}, - {Name: "TestMaliciousTx_66", Fn: s.TestMaliciousTx_66}, - {Name: "TestLargeTxRequest_66", Fn: s.TestLargeTxRequest_66}, - {Name: "TestNewPooledTxs_66", Fn: s.TestNewPooledTxs_66}, + {Name: "TestTransaction65", Fn: s.TestTransaction65}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, } } func (s *Suite) EthTests() []utesting.Test { return []utesting.Test{ - {Name: "TestStatus", Fn: s.TestStatus}, - {Name: 
"TestGetBlockHeaders", Fn: s.TestGetBlockHeaders}, - {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, - {Name: "TestBroadcast", Fn: s.TestBroadcast}, - {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, - {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, - {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, - {Name: "TestTransaction", Fn: s.TestTransaction}, - {Name: "TestMaliciousTx", Fn: s.TestMaliciousTx}, + {Name: "TestStatus65", Fn: s.TestStatus65}, + {Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65}, + {Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65}, + {Name: "TestBroadcast65", Fn: s.TestBroadcast65}, + {Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65}, + {Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65}, + {Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65}, + {Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65}, + {Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65}, + {Name: "TestTransaction65", Fn: s.TestTransaction65}, + {Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65}, } } func (s *Suite) Eth66Tests() []utesting.Test { return []utesting.Test{ // only proceed with eth66 test suite if node supports eth 66 protocol - {Name: "TestStatus_66", Fn: s.TestStatus_66}, - {Name: "TestGetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66}, - {Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66}, - {Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66}, - {Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66}, - {Name: "TestGetBlockBodies_66", Fn: s.TestGetBlockBodies_66}, - {Name: "TestBroadcast_66", Fn: s.TestBroadcast_66}, - {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, - {Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66}, - {Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus_66}, - {Name: "TestTransaction_66", Fn: s.TestTransaction_66}, - {Name: "TestMaliciousTx_66", Fn: s.TestMaliciousTx_66}, - {Name: "TestLargeTxRequest_66", Fn: s.TestLargeTxRequest_66}, - {Name: "TestNewPooledTxs_66", Fn: s.TestNewPooledTxs_66}, - } -} - -// TestStatus attempts to connect to the given node and exchange -// a status message with it, and then check to make sure -// the chain head is correct. 
-func (s *Suite) TestStatus(t *utesting.T) { + {Name: "TestStatus66", Fn: s.TestStatus66}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, + } +} + +func (s *Suite) SnapTests() []utesting.Test { + return []utesting.Test{ + {Name: "TestSnapStatus", Fn: s.TestSnapStatus}, + {Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange}, + {Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes}, + {Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes}, + {Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges}, + } +} + +var ( + eth66 = true // indicates whether suite should negotiate eth66 connection + eth65 = false // indicates whether suite should negotiate eth65 connection or below. +) + +// TestStatus65 attempts to connect to the given node and exchange +// a status message with it. +func (s *Suite) TestStatus65(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() - // get protoHandshake - conn.handshake(t) - // get status - switch msg := conn.statusExchange(t, s.chain, nil).(type) { - case *Status: - t.Logf("got status message: %s", pretty.Sdump(msg)) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } } -// TestMaliciousStatus sends a status package with a large total difficulty. -func (s *Suite) TestMaliciousStatus(t *utesting.T) { - conn, err := s.dial() +// TestStatus66 attempts to connect to the given node and exchange +// a status message with it on the eth66 protocol. 
+func (s *Suite) TestStatus66(t *utesting.T) { + conn, err := s.dial66() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() - // get protoHandshake - conn.handshake(t) - status := &Status{ - ProtocolVersion: uint32(conn.negotiatedProtoVersion), - NetworkID: s.chain.chainConfig.ChainID.Uint64(), - TD: largeNumber(2), - Head: s.chain.blocks[s.chain.Len()-1].Hash(), - Genesis: s.chain.blocks[0].Hash(), - ForkID: s.chain.ForkID(), - } - // get status - switch msg := conn.statusExchange(t, s.chain, status).(type) { - case *Status: - t.Logf("%+v\n", msg) - default: - t.Fatalf("expected status, got: %#v ", msg) - } - // wait for disconnect - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - return - default: - t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } } -// TestGetBlockHeaders tests whether the given node can respond to -// a `GetBlockHeaders` request and that the response is accurate. -func (s *Suite) TestGetBlockHeaders(t *utesting.T) { +// TestGetBlockHeaders65 tests whether the given node can respond to +// a `GetBlockHeaders` request accurately. +func (s *Suite) TestGetBlockHeaders65(t *utesting.T) { conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) } defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("handshake(s) failed: %v", err) + } + // write request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + } + headers, err := conn.headersRequest(req, s.chain, eth65, 0) + if err != nil { + t.Fatalf("GetBlockHeaders request failed: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} - conn.handshake(t) - conn.statusExchange(t, s.chain, nil) - - // get block headers +// TestGetBlockHeaders66 tests whether the given node can respond to +// an eth66 `GetBlockHeaders` request and that the response is accurate. +func (s *Suite) TestGetBlockHeaders66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request req := &GetBlockHeaders{ Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), @@ -211,35 +220,199 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { Skip: 1, Reverse: false, } + headers, err := conn.headersRequest(req, s.chain, eth66, 33) + if err != nil { + t.Fatalf("could not get block headers: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} - if err := conn.Write(req); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestSimultaneousRequests66 sends two simultaneous `GetBlockHeader` requests from +// the same connection with different request IDs and checks to make sure the node +// responds with the correct headers per request. 
+func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
+	// create a connection
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create two requests
+	req1 := &eth.GetBlockHeadersPacket66{
+		RequestId: uint64(111),
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Hash: s.chain.blocks[1].Hash(),
+			},
+			Amount: 2,
+			Skip: 1,
+			Reverse: false,
+		},
+	}
+	req2 := &eth.GetBlockHeadersPacket66{
+		RequestId: uint64(222),
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Hash: s.chain.blocks[1].Hash(),
+			},
+			Amount: 4,
+			Skip: 1,
+			Reverse: false,
+		},
+	}
+	// write the first request
+	if err := conn.Write66(req1, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// write the second request
+	if err := conn.Write66(req2, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
 	}
+	// wait for responses
+	msg := conn.waitForResponse(s.chain, timeout, req1.RequestId)
+	headers1, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	msg = conn.waitForResponse(s.chain, timeout, req2.RequestId)
+	headers2, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	// check received headers for accuracy
+	expected1, err := s.chain.GetHeaders(GetBlockHeaders(*req1.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected headers for request 1: %v", err)
+	}
+	expected2, err := s.chain.GetHeaders(GetBlockHeaders(*req2.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected headers for request 2: %v", err)
+	}
+	if !headersMatch(expected1, headers1) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
+	}
+	if !headersMatch(expected2, headers2) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
+	}
+}
-	switch msg := conn.ReadAndServe(s.chain, timeout).(type) {
-	case *BlockHeaders:
-		headers := *msg
-		for _, header := range headers {
-			num := header.Number.Uint64()
-			t.Logf("received header (%d): %s", num, pretty.Sdump(header.Hash()))
-			assert.Equal(t, s.chain.blocks[int(num)].Header(), header)
-		}
-	default:
-		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+// TestSameRequestID66 sends two requests with the same request ID to a
+// single node.
+func (s *Suite) TestSameRequestID66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create requests
+	reqID := uint64(1234)
+	request1 := &eth.GetBlockHeadersPacket66{
+		RequestId: reqID,
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Number: 1,
+			},
+			Amount: 2,
+		},
+	}
+	request2 := &eth.GetBlockHeadersPacket66{
+		RequestId: reqID,
+		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+			Origin: eth.HashOrNumber{
+				Number: 33,
+			},
+			Amount: 2,
+		},
+	}
+	// write the requests
+	if err = conn.Write66(request1, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	if err = conn.Write66(request2, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// wait for responses
+	msg := conn.waitForResponse(s.chain, timeout, reqID)
+	headers1, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	msg = conn.waitForResponse(s.chain, timeout, reqID)
+	headers2, ok := msg.(BlockHeaders)
+	if !ok {
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+	// check if headers match
+	expected1, err := s.chain.GetHeaders(GetBlockHeaders(*request1.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected block headers: %v", err)
+	}
+	expected2, err := s.chain.GetHeaders(GetBlockHeaders(*request2.GetBlockHeadersPacket))
+	if err != nil {
+		t.Fatalf("failed to get expected block headers: %v", err)
+	}
+	if !headersMatch(expected1, headers1) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
+	}
+	if !headersMatch(expected2, headers2) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
+	}
+}
+
+// TestZeroRequestID66 checks that a message with a request ID of zero is still handled
+// by the node.
+func (s *Suite) TestZeroRequestID66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	req := &GetBlockHeaders{
+		Origin: eth.HashOrNumber{
+			Number: 0,
+		},
+		Amount: 2,
+	}
+	headers, err := conn.headersRequest(req, s.chain, eth66, 0)
+	if err != nil {
+		t.Fatalf("failed to get block headers: %v", err)
+	}
+	expected, err := s.chain.GetHeaders(*req)
+	if err != nil {
+		t.Fatalf("failed to get expected block headers: %v", err)
+	}
+	if !headersMatch(expected, headers) {
+		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
 	}
 }
-// TestGetBlockBodies tests whether the given node can respond to
+// TestGetBlockBodies65 tests whether the given node can respond to
 // a `GetBlockBodies` request and that the response is accurate.
-func (s *Suite) TestGetBlockBodies(t *utesting.T) {
+func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
-		t.Fatalf("could not dial: %v", err)
+		t.Fatalf("dial failed: %v", err)
 	}
 	defer conn.Close()
-
-	conn.handshake(t)
-	conn.statusExchange(t, s.chain, nil)
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
 	// create block bodies request
 	req := &GetBlockBodies{
 		s.chain.blocks[54].Hash(),
@@ -248,126 +421,125 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 	if err := conn.Write(req); err != nil {
 		t.Fatalf("could not write to connection: %v", err)
 	}
-
-	switch msg := conn.ReadAndServe(s.chain, timeout).(type) {
+	// wait for response
+	switch msg := conn.readAndServe(s.chain, timeout).(type) {
 	case *BlockBodies:
 		t.Logf("received %d block bodies", len(*msg))
+		if len(*msg) != len(*req) {
+			t.Fatalf("wrong bodies in response: expected %d bodies, "+
+				"got %d", len(*req), len(*msg))
+		}
 	default:
 		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
 }
-// TestBroadcast tests whether a block announcement is correctly
-// propagated to the given node's peer(s).
-func (s *Suite) TestBroadcast(t *utesting.T) {
-	s.sendNextBlock(t)
+// TestGetBlockBodies66 tests whether the given node can respond to
+// a `GetBlockBodies` request and that the response is accurate over
+// the eth66 protocol.
+func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create block bodies request
+	req := &eth.GetBlockBodiesPacket66{
+		RequestId: uint64(55),
+		GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
+			s.chain.blocks[54].Hash(),
+			s.chain.blocks[75].Hash(),
+		},
+	}
+	if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil {
+		t.Fatalf("could not write to connection: %v", err)
+	}
+	// wait for block bodies response
+	msg := conn.waitForResponse(s.chain, timeout, req.RequestId)
+	blockBodies, ok := msg.(BlockBodies)
+	if !ok {
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+	}
+	t.Logf("received %d block bodies", len(blockBodies))
+	if len(blockBodies) != len(req.GetBlockBodiesPacket) {
+		t.Fatalf("wrong bodies in response: expected %d bodies, "+
+			"got %d", len(req.GetBlockBodiesPacket), len(blockBodies))
+	}
 }
-func (s *Suite) sendNextBlock(t *utesting.T) {
-	sendConn, receiveConn := s.setupConnection(t), s.setupConnection(t)
-	defer sendConn.Close()
-	defer receiveConn.Close()
-
-	// create new block announcement
-	nextBlock := len(s.chain.blocks)
-	blockAnnouncement := &NewBlock{
-		Block: s.fullChain.blocks[nextBlock],
-		TD: s.fullChain.TD(nextBlock + 1),
-	}
-	// send announcement and wait for node to request the header
-	s.testAnnounce(t, sendConn, receiveConn, blockAnnouncement)
-	// wait for client to update its chain
-	if err := receiveConn.waitForBlock(s.fullChain.blocks[nextBlock]); err != nil {
-		t.Fatal(err)
+// TestBroadcast65 tests whether a block announcement is correctly
+// propagated to the given node's peer(s).
+func (s *Suite) TestBroadcast65(t *utesting.T) {
+	if err := s.sendNextBlock(eth65); err != nil {
+		t.Fatalf("block broadcast failed: %v", err)
 	}
-	// update test suite chain
-	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
 }
-// TestMaliciousHandshake tries to send malicious data during the handshake.
-func (s *Suite) TestMaliciousHandshake(t *utesting.T) { - conn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) +// TestBroadcast66 tests whether a block announcement is correctly +// propagated to the given node's peer(s) on the eth66 protocol. +func (s *Suite) TestBroadcast66(t *utesting.T) { + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("block broadcast failed: %v", err) } - defer conn.Close() - // write hello to client - pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] - handshakes := []*Hello{ - { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 64}, - }, - ID: pub0, - }, - { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: append(pub0, byte(0)), - }, +} + +// TestLargeAnnounce65 tests the announcement mechanism with a large block. +func (s *Suite) TestLargeAnnounce65(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: append(pub0, pub0...), + Block: largeBlock(), + TD: s.fullChain.TotalDifficultyAt(nextBlock), }, { - Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: largeBuffer(2), + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), }, { - Version: 5, - Caps: []p2p.Cap{ - {Name: largeString(2), Version: 64}, - }, - ID: largeBuffer(2), + Block: largeBlock(), + TD: largeNumber(2), }, } - for i, handshake := range handshakes { - t.Logf("Testing malicious handshake %v\n", i) - // Init the handshake - if err := conn.Write(handshake); err != nil { - t.Fatalf("could not write to connection: %v", err) + + for i, blockAnnouncement := range blocks { + t.Logf("Testing malicious announcement: %v\n", i) + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) } - // check that the peer disconnected - timeout := 20 * time.Second - // Discard one hello - for i := 0; i < 2; i++ { - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *Disconnect: - case *Error: - case *Hello: - // Hello's are send concurrently, so ignore them - continue - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) - } + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) } - // Dial for the next round - conn, err = s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) + if err = conn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) } + // Invalid announcement, check that peer disconnected + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + conn.Close() + } + // Test the last block as a valid block + if err := s.sendNextBlock(eth65); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) } } -// TestLargeAnnounce tests the announcement mechanism with a large block. -func (s *Suite) TestLargeAnnounce(t *utesting.T) { +// TestLargeAnnounce66 tests the announcement mechanism with a large +// block over the eth66 protocol. 
+func (s *Suite) TestLargeAnnounce66(t *utesting.T) { nextBlock := len(s.chain.blocks) blocks := []*NewBlock{ { Block: largeBlock(), - TD: s.fullChain.TD(nextBlock + 1), + TD: s.fullChain.TotalDifficultyAt(nextBlock), }, { Block: s.fullChain.blocks[nextBlock], @@ -377,174 +549,245 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) { Block: largeBlock(), TD: largeNumber(2), }, - { - Block: s.fullChain.blocks[nextBlock], - TD: s.fullChain.TD(nextBlock + 1), - }, } for i, blockAnnouncement := range blocks[0:3] { t.Logf("Testing malicious announcement: %v\n", i) - sendConn := s.setupConnection(t) - if err := sendConn.Write(blockAnnouncement); err != nil { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err := conn.Write(blockAnnouncement); err != nil { t.Fatalf("could not write to connection: %v", err) } // Invalid announcement, check that peer disconnected - switch msg := sendConn.ReadAndServe(s.chain, time.Second*8).(type) { + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { case *Disconnect: case *Error: break default: t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) } - sendConn.Close() + conn.Close() } // Test the last block as a valid block - s.sendNextBlock(t) + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) + } } -func (s *Suite) TestOldAnnounce(t *utesting.T) { - sendConn, recvConn := s.setupConnection(t), s.setupConnection(t) - defer sendConn.Close() - defer recvConn.Close() - - s.oldAnnounce(t, sendConn, recvConn) +// TestOldAnnounce65 tests the announcement mechanism with an old block. +func (s *Suite) TestOldAnnounce65(t *utesting.T) { + if err := s.oldAnnounce(eth65); err != nil { + t.Fatal(err) + } } -func (s *Suite) oldAnnounce(t *utesting.T, sendConn, receiveConn *Conn) { - oldBlockAnnounce := &NewBlock{ - Block: s.chain.blocks[len(s.chain.blocks)/2], - TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), +// TestOldAnnounce66 tests the announcement mechanism with an old block, +// over the eth66 protocol. +func (s *Suite) TestOldAnnounce66(t *utesting.T) { + if err := s.oldAnnounce(eth66); err != nil { + t.Fatal(err) } +} - if err := sendConn.Write(oldBlockAnnounce); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestBlockHashAnnounce65 sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. 
+func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) { + if err := s.hashAnnounce(eth65); err != nil { + t.Fatalf("block hash announcement failed: %v", err) } +} - switch msg := receiveConn.ReadAndServe(s.chain, time.Second*8).(type) { - case *NewBlock: - block := *msg - if block.Block.Hash() == oldBlockAnnounce.Block.Hash() { - t.Fatalf("unexpected: block propagated: %s", pretty.Sdump(msg)) - } - case *NewBlockHashes: - hashes := *msg - for _, hash := range hashes { - if hash.Hash == oldBlockAnnounce.Block.Hash() { - t.Fatalf("unexpected: block announced: %s", pretty.Sdump(msg)) - } - } - case *Error: - errMsg := *msg - // check to make sure error is timeout (propagation didn't come through == test successful) - if !strings.Contains(errMsg.String(), "timeout") { - t.Fatalf("unexpected error: %v", pretty.Sdump(msg)) - } - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) +// TestBlockHashAnnounce66 sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. +func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) { + if err := s.hashAnnounce(eth66); err != nil { + t.Fatalf("block hash announcement failed: %v", err) } } -func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { - // Announce the block. - if err := sendConn.Write(blockAnnouncement); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestMaliciousHandshake65 tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake65(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth65); err != nil { + t.Fatal(err) } - s.waitAnnounce(t, receiveConn, blockAnnouncement) -} - -func (s *Suite) waitAnnounce(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { - switch msg := conn.ReadAndServe(s.chain, timeout).(type) { - case *NewBlock: - t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) - assert.Equal(t, - blockAnnouncement.Block.Header(), msg.Block.Header(), - "wrong block header in announcement", - ) - assert.Equal(t, - blockAnnouncement.TD, msg.TD, - "wrong TD in announcement", - ) - case *NewBlockHashes: - message := *msg - t.Logf("received NewBlockHashes message: %s", pretty.Sdump(message)) - assert.Equal(t, blockAnnouncement.Block.Hash(), message[0].Hash, - "wrong block hash in announcement", - ) - default: - t.Fatalf("unexpected: %s", pretty.Sdump(msg)) +} + +// TestMaliciousHandshake66 tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake66(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth66); err != nil { + t.Fatal(err) } } -func (s *Suite) setupConnection(t *utesting.T) *Conn { - // create conn - sendConn, err := s.dial() +// TestMaliciousStatus65 sends a status package with a large total difficulty. +func (s *Suite) TestMaliciousStatus65(t *utesting.T) { + conn, err := s.dial() if err != nil { - t.Fatalf("could not dial: %v", err) + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) } - sendConn.handshake(t) - sendConn.statusExchange(t, s.chain, nil) - return sendConn } -// dial attempts to dial the given node and perform a handshake, -// returning the created Conn if successful. -func (s *Suite) dial() (*Conn, error) { - var conn Conn - // dial - fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) +// TestMaliciousStatus66 sends a status package with a large total +// difficulty over the eth66 protocol. 
+func (s *Suite) TestMaliciousStatus66(t *utesting.T) { + conn, err := s.dial66() if err != nil { - return nil, err + t.Fatalf("dial failed: %v", err) } - conn.Conn = rlpx.NewConn(fd, s.Dest.Pubkey()) - // do encHandshake - conn.ourKey, _ = crypto.GenerateKey() - _, err = conn.Handshake(conn.ourKey) - if err != nil { - return nil, err + defer conn.Close() + + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) } - // set default p2p capabilities - conn.caps = []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, +} + +// TestTransaction65 sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction65(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth65); err != nil { + t.Fatal(err) } - conn.ourHighestProtoVersion = 65 - return &conn, nil } -func (s *Suite) TestTransaction(t *utesting.T) { - tests := []*types.Transaction{ - getNextTxFromChain(t, s), - unknownTx(t, s), +// TestTransaction66 sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction66(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth66); err != nil { + t.Fatal(err) } - for i, tx := range tests { - t.Logf("Testing tx propagation: %v\n", i) - sendSuccessfulTx(t, s, tx) +} + +// TestMaliciousTx65 sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx65(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth65); err != nil { + t.Fatal(err) } } -func (s *Suite) TestMaliciousTx(t *utesting.T) { - badTxs := []*types.Transaction{ - getOldTxFromChain(t, s), - invalidNonceTx(t, s), - hugeAmount(t, s), - hugeGasPrice(t, s), - hugeData(t, s), +// TestMaliciousTx66 sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx66(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth66); err != nil { + t.Fatal(err) } - sendConn := s.setupConnection(t) - defer sendConn.Close() - // set up receiving connection before sending txs to make sure - // no announcements are missed - recvConn := s.setupConnection(t) - defer recvConn.Close() +} - for i, tx := range badTxs { - t.Logf("Testing malicious tx propagation: %v\n", i) - if err := sendConn.Write(&Transactions{tx}); err != nil { - t.Fatalf("could not write to connection: %v", err) +// TestLargeTxRequest66 tests whether a node can fulfill a large GetPooledTransactions +// request. 
+func (s *Suite) TestLargeTxRequest66(t *utesting.T) {
+	// send the next block to ensure the node is no longer syncing and
+	// is able to accept txs
+	if err := s.sendNextBlock(eth66); err != nil {
+		t.Fatalf("failed to send next block: %v", err)
+	}
+	// send 2000 transactions to the node
+	hashMap, txs, err := generateTxs(s, 2000)
+	if err != nil {
+		t.Fatalf("failed to generate transactions: %v", err)
+	}
+	if err = sendMultipleSuccessfulTxs(t, s, txs); err != nil {
+		t.Fatalf("failed to send multiple txs: %v", err)
+	}
+	// set up connection to receive to ensure node is peered with the receiving connection
+	// before tx request is sent
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err = conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create and send pooled tx request
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
+	}
+	getTxReq := &eth.GetPooledTransactionsPacket66{
+		RequestId: 1234,
+		GetPooledTransactionsPacket: hashes,
+	}
+	if err = conn.Write66(getTxReq, GetPooledTransactions{}.Code()); err != nil {
+		t.Fatalf("could not write to conn: %v", err)
+	}
+	// check that all received transactions match those that were sent to node
+	switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
+	case PooledTransactions:
+		for _, gotTx := range msg {
+			if _, exists := hashMap[gotTx.Hash()]; !exists {
+				t.Fatalf("unexpected tx received: %v", gotTx.Hash())
+			}
+		}
+	default:
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+}
+
+// TestNewPooledTxs66 tests whether a node will do a GetPooledTransactions
+// request upon receiving a NewPooledTransactionHashes announcement.
+func (s *Suite) TestNewPooledTxs66(t *utesting.T) {
+	// send the next block to ensure the node is no longer syncing and
+	// is able to accept txs
+	if err := s.sendNextBlock(eth66); err != nil {
+		t.Fatalf("failed to send next block: %v", err)
+	}
+	// generate 50 txs
+	hashMap, _, err := generateTxs(s, 50)
+	if err != nil {
+		t.Fatalf("failed to generate transactions: %v", err)
+	}
+
+	// create new pooled tx hashes announcement
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
+	}
+	announce := NewPooledTransactionHashes(hashes)
+
+	// send announcement
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err = conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	if err = conn.Write(announce); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+
+	// wait for GetPooledTxs request
+	for {
+		_, msg := conn.readAndServe66(s.chain, timeout)
+		switch msg := msg.(type) {
+		case GetPooledTransactions:
+			if len(msg) != len(hashes) {
+				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg))
+			}
+			return
+		// ignore propagated txs from previous tests
+		case *NewPooledTransactionHashes:
+			continue
+		// ignore block announcements from previous tests
+		case *NewBlockHashes:
+			continue
+		case *NewBlock:
+			continue
+		default:
+			t.Fatalf("unexpected %s", pretty.Sdump(msg))
+		}
 	}
-	// check to make sure bad txs aren't propagated
-	waitForTxPropagation(t, s, badTxs, recvConn)
 }
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
new file mode 100644
index 0000000000..9bc55bc0ab
--- /dev/null
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethtest
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/ethconfig"
+	"github.com/ethereum/go-ethereum/internal/utesting"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+var (
+	genesisFile   = "./testdata/genesis.json"
+	halfchainFile = "./testdata/halfchain.rlp"
+	fullchainFile = "./testdata/chain.rlp"
+)
+
+func TestEthSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.Eth66Tests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
+func TestSnapSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.SnapTests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
+// runGeth creates and starts a geth node
+func runGeth() (*node.Node, error) {
+	stack, err := node.New(&node.Config{
+		P2P: p2p.Config{
+			ListenAddr:  "127.0.0.1:0",
+			NoDiscovery: true,
+			MaxPeers:    10, // in case a test requires multiple connections, can be changed in the future
+			NoDial:      true,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = setupGeth(stack)
+	if err != nil {
+		stack.Close()
+		return nil, err
+	}
+	if err = stack.Start(); err != nil {
+		stack.Close()
+		return nil, err
+	}
+	return stack, nil
+}
+
+func setupGeth(stack *node.Node) error {
+	chain, err := loadChain(halfchainFile, genesisFile)
+	if err != nil {
+		return err
+	}
+
+	backend, err := eth.New(stack, &ethconfig.Config{
+		Genesis:                 &chain.genesis,
+		NetworkId:               chain.genesis.Config.ChainID.Uint64(), // 19763
+		DatabaseCache:           10,
+		TrieCleanCache:          10,
+		TrieCleanCacheJournal:   "",
+		TrieCleanCacheRejournal: 60 * time.Minute,
+		TrieDirtyCache:          16,
+		TrieTimeout:             60 * time.Minute,
+		SnapshotCache:           10,
+	})
+	if err != nil {
+		return err
+	}
+
+	_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
+	return err
+}
diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go index
a6166bd2e3..d2dbe0a7d6 100644 --- a/cmd/devp2p/internal/ethtest/transaction.go +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -17,6 +17,7 @@ package ethtest import ( + "fmt" "math/big" "strings" "time" @@ -31,58 +32,171 @@ import ( //var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") -func sendSuccessfulTx(t *utesting.T, s *Suite, tx *types.Transaction) { - sendConn := s.setupConnection(t) - defer sendConn.Close() - sendSuccessfulTxWithConn(t, s, tx, sendConn) +func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error { + tests := []*types.Transaction{ + getNextTxFromChain(s), + unknownTx(s), + } + for i, tx := range tests { + if tx == nil { + return fmt.Errorf("could not find tx to send") + } + t.Logf("Testing tx propagation %d: sending tx %v %v %v\n", i, tx.Hash().String(), tx.GasPrice(), tx.Gas()) + // get previous tx if exists for reference in case of old tx propagation + var prevTx *types.Transaction + if i != 0 { + prevTx = tests[i-1] + } + // write tx to connection + if err := sendSuccessfulTx(s, tx, prevTx, isEth66); err != nil { + return fmt.Errorf("send successful tx test failed: %v", err) + } + } + return nil } -func sendSuccessfulTxWithConn(t *utesting.T, s *Suite, tx *types.Transaction, sendConn *Conn) { - t.Logf("sending tx: %v %v %v\n", tx.Hash().String(), tx.GasPrice(), tx.Gas()) +func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction, isEth66 bool) error { + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } // Send the transaction - if err := sendConn.Write(&Transactions{tx}); err != nil { - t.Fatal(err) + if err = sendConn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + // peer receiving connection to node + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) } // update last nonce seen nonce = tx.Nonce() - - recvConn := s.setupConnection(t) // Wait for the transaction announcement - switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { - case *Transactions: - recTxs := *msg - for _, gotTx := range recTxs { - if gotTx.Hash() == tx.Hash() { - // Ok - return + for { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { + case *Transactions: + recTxs := *msg + // if you receive an old tx propagation, read from connection again + if len(recTxs) == 1 && prevTx != nil { + if recTxs[0] == prevTx { + continue + } } - } - t.Fatalf("missing transaction: got %v missing %v", recTxs, tx.Hash()) - case *NewPooledTransactionHashes: - txHashes := *msg - for _, gotHash := range txHashes { - if gotHash == tx.Hash() { - return + for _, gotTx := range recTxs { + if gotTx.Hash() == tx.Hash() { + // Ok + return nil + } } + return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash()) + case *NewPooledTransactionHashes: + txHashes := *msg + // if you receive an old tx propagation, read from connection again + if len(txHashes) == 1 && prevTx != nil { + if txHashes[0] == prevTx.Hash() { + continue + } + } + for _, gotHash := range txHashes { + if gotHash == tx.Hash() { + // Ok + return nil + } + } + return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, 
tx.Hash()) + default: + return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) } - t.Fatalf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) - default: - t.Fatalf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) } } +func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error { + badTxs := []*types.Transaction{ + getOldTxFromChain(s), + invalidNonceTx(s), + hugeAmount(s), + hugeGasPrice(s), + hugeData(s), + } + // setup receiving connection before sending malicious txs + var ( + recvConn *Conn + err error + ) + if isEth66 { + recvConn, err = s.dial66() + } else { + recvConn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer recvConn.Close() + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + for i, tx := range badTxs { + t.Logf("Testing malicious tx propagation: %v\n", i) + if err = sendMaliciousTx(s, tx, isEth66); err != nil { + return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err) + } + } + // check to make sure bad txs aren't propagated + return checkMaliciousTxPropagation(s, badTxs, recvConn) +} + +func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error { + // setup connection + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + } else { + conn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // write malicious tx + if err = conn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + return nil +} + var nonce = uint64(99) -func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*types.Transaction) { +// sendMultipleSuccessfulTxs sends the given transactions to the node and +// expects the node to accept and propagate them. +func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction) error { txMsg := Transactions(txs) t.Logf("sending %d txs\n", len(txs)) - recvConn := s.setupConnection(t) + sendConn, recvConn, err := s.createSendAndRecvConns(true) + if err != nil { + return err + } + defer sendConn.Close() defer recvConn.Close() - + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } // Send the transactions - if err := sendConn.Write(&txMsg); err != nil { - t.Fatal(err) + if err = sendConn.Write(&txMsg); err != nil { + return fmt.Errorf("failed to write message to connection: %v", err) } // update nonce nonce = txs[len(txs)-1].Nonce() @@ -90,7 +204,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t recvHashes := make([]common.Hash, 0) // all txs should be announced within 3 announcements for i := 0; i < 3; i++ { - switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { case *Transactions: for _, tx := range *msg { recvHashes = append(recvHashes, tx.Hash()) @@ -99,7 +213,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t recvHashes = append(recvHashes, *msg...) 
default: if !strings.Contains(pretty.Sdump(msg), "i/o timeout") { - t.Fatalf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) + return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) } } // break once all 2000 txs have been received @@ -112,7 +226,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t continue } else { t.Logf("successfully received all %d txs", len(txs)) - return + return nil } } } @@ -121,13 +235,15 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*t for _, missing := range missingTxs { t.Logf("missing tx: %v", missing.Hash()) } - t.Fatalf("missing %d txs", len(missingTxs)) + return fmt.Errorf("missing %d txs", len(missingTxs)) } + return nil } -func waitForTxPropagation(t *utesting.T, s *Suite, txs []*types.Transaction, recvConn *Conn) { - // Wait for another transaction announcement - switch msg := recvConn.ReadAndServe(s.chain, time.Second*8).(type) { +// checkMaliciousTxPropagation checks whether the given malicious transactions were +// propagated by the node. +func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn) error { + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { case *Transactions: // check to see if any of the failing txs were in the announcement recvTxs := make([]common.Hash, len(*msg)) @@ -136,25 +252,20 @@ func waitForTxPropagation(t *utesting.T, s *Suite, txs []*types.Transaction, rec } badTxs, _ := compareReceivedTxs(recvTxs, txs) if len(badTxs) > 0 { - for _, tx := range badTxs { - t.Logf("received bad tx: %v", tx) - } - t.Fatalf("received %d bad txs", len(badTxs)) + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) } case *NewPooledTransactionHashes: badTxs, _ := compareReceivedTxs(*msg, txs) if len(badTxs) > 0 { - for _, tx := range badTxs { - t.Logf("received bad tx: %v", tx) - } - t.Fatalf("received %d bad txs", len(badTxs)) + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) } case *Error: // Transaction should not be announced -> wait for timeout - return + return nil default: - t.Fatalf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) + return fmt.Errorf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) } + return nil } // compareReceivedTxs compares the received set of txs against the given set of txs, @@ -180,118 +291,129 @@ func compareReceivedTxs(recvTxs []common.Hash, txs []*types.Transaction) (presen return present, missing } -func unknownTx(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func unknownTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func getNextTxFromChain(t *utesting.T, s *Suite) *types.Transaction { +func getNextTxFromChain(s *Suite) *types.Transaction { // Get a new transaction - var tx *types.Transaction for _, blocks := range s.fullChain.blocks[s.chain.Len():] { txs := blocks.Transactions() if txs.Len() != 0 { - tx = txs[0] - break + return txs[0] } } - if tx == nil { - t.Fatal("could not find transaction") - } - return tx + return nil } -func generateTxs(t *utesting.T, s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction) { +func 
generateTxs(s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction, error) { txHashMap := make(map[common.Hash]common.Hash, numTxs) txs := make([]*types.Transaction, numTxs) - nextTx := getNextTxFromChain(t, s) + nextTx := getNextTxFromChain(s) + if nextTx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } gas := nextTx.Gas() nonce = nonce + 1 // generate txs for i := 0; i < numTxs; i++ { - tx := generateTx(t, s.chain.chainConfig, nonce, gas) + tx := generateTx(s.chain.chainConfig, nonce, gas) + if tx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } txHashMap[tx.Hash()] = tx.Hash() txs[i] = tx nonce = nonce + 1 } - return txHashMap, txs + return txHashMap, txs, nil } -func generateTx(t *utesting.T, chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction { +func generateTx(chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction { var to common.Address tx := types.NewTransaction(nonce, to, big.NewInt(1), gas, big.NewInt(1), []byte{}) - return signWithFaucet(t, chainConfig, tx) + return signWithFaucet(chainConfig, tx) } -func getOldTxFromChain(t *utesting.T, s *Suite) *types.Transaction { - var tx *types.Transaction +func getOldTxFromChain(s *Suite) *types.Transaction { for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] { txs := blocks.Transactions() if txs.Len() != 0 { - tx = txs[0] - break + return txs[0] } } - if tx == nil { - t.Fatal("could not find transaction") - } - return tx + return nil } -func invalidNonceTx(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func invalidNonceTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func hugeAmount(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func hugeAmount(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } amount := largeNumber(2) var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func hugeGasPrice(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func hugeGasPrice(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } gasPrice := largeNumber(2) var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data()) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func hugeData(t *utesting.T, s *Suite) *types.Transaction { - tx := getNextTxFromChain(t, s) +func hugeData(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } var to common.Address if tx.To() != nil { to = *tx.To() } txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2)) - return signWithFaucet(t, s.chain.chainConfig, txNew) + return signWithFaucet(s.chain.chainConfig, txNew) } -func signWithFaucet(t *utesting.T, chainConfig *params.ChainConfig, 
tx *types.Transaction) *types.Transaction { +func signWithFaucet(chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction { signer := types.LatestSigner(chainConfig) signedTx, err := types.SignTx(tx, signer, faucetKey) if err != nil { - t.Fatalf("could not sign tx: %v\n", err) + return nil } return signedTx } diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 50a69b9418..09bb218d51 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -19,13 +19,9 @@ package ethtest import ( "crypto/ecdsa" "fmt" - "reflect" "time" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/rlp" @@ -131,12 +127,15 @@ func (pt PooledTransactions) Code() int { return 26 } // Conn represents an individual connection with a peer type Conn struct { *rlpx.Conn - ourKey *ecdsa.PrivateKey - negotiatedProtoVersion uint - ourHighestProtoVersion uint - caps []p2p.Cap + ourKey *ecdsa.PrivateKey + negotiatedProtoVersion uint + negotiatedSnapProtoVersion uint + ourHighestProtoVersion uint + ourHighestSnapProtoVersion uint + caps []p2p.Cap } +// Read reads an eth packet from the connection. func (c *Conn) Read() Message { code, rawData, _, err := c.Conn.Read() if err != nil { @@ -185,175 +184,138 @@ func (c *Conn) Read() Message { return msg } -// ReadAndServe serves GetBlockHeaders requests while waiting -// on another message from the node. -func (c *Conn) ReadAndServe(chain *Chain, timeout time.Duration) Message { - start := time.Now() - for time.Since(start) < timeout { - c.SetReadDeadline(time.Now().Add(5 * time.Second)) - switch msg := c.Read().(type) { - case *Ping: - c.Write(&Pong{}) - case *GetBlockHeaders: - req := *msg - headers, err := chain.GetHeaders(req) - if err != nil { - return errorf("could not get headers for inbound header request: %v", err) - } - - if err := c.Write(headers); err != nil { - return errorf("could not write to connection: %v", err) - } - default: - return msg - } - } - return errorf("no message received within %v", timeout) -} - -func (c *Conn) Write(msg Message) error { - // check if message is eth protocol message - var ( - payload []byte - err error - ) - payload, err = rlp.EncodeToBytes(msg) +// Read66 reads an eth66 packet from the connection. +func (c *Conn) Read66() (uint64, Message) { + code, rawData, _, err := c.Conn.Read() if err != nil { - return err + return 0, errorf("could not read from connection: %v", err) } - _, err = c.Conn.Write(uint64(msg.Code()), payload) - return err -} -// handshake checks to make sure a `HELLO` is received. 
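The refactored helpers above all follow one pattern: take a template transaction from the test chain, tweak a single field (nonce, value, gas price, payload), and re-sign it with the faucet key via `types.LatestSigner` and `types.SignTx`. Below is a minimal standalone sketch of that pattern using go-ethereum's public packages; the recipient address and the `params.AllEthashProtocolChanges` config are placeholders, not the suite's real test chain.

```go
// Sketch of the unknownTx/signWithFaucet pattern: bump the nonce of a template
// tx and re-sign it with the faucet key. Recipient and chain config are dummies.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Same faucet key as used by the suite.
	faucetKey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

	// A template tx standing in for getNextTxFromChain(s).
	to := common.HexToAddress("0x1111111111111111111111111111111111111111")
	tmpl := types.NewTransaction(3, to, big.NewInt(1), 21000, big.NewInt(1), nil)

	// unknownTx: same fields, nonce+1, signed by the faucet.
	unknown := types.NewTransaction(tmpl.Nonce()+1, to, tmpl.Value(), tmpl.Gas(), tmpl.GasPrice(), tmpl.Data())
	signer := types.LatestSigner(params.AllEthashProtocolChanges)
	signed, err := types.SignTx(unknown, signer, faucetKey)
	if err != nil {
		fmt.Println("could not sign tx:", err) // the helpers above return nil instead of failing the test
		return
	}
	fmt.Println("signed tx:", signed.Hash())
}
```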
-func (c *Conn) handshake(t *utesting.T) Message { - defer c.SetDeadline(time.Time{}) - c.SetDeadline(time.Now().Add(10 * time.Second)) - - // write hello to client - pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] - ourHandshake := &Hello{ - Version: 5, - Caps: c.caps, - ID: pub0, - } - if err := c.Write(ourHandshake); err != nil { - t.Fatalf("could not write to connection: %v", err) - } - // read hello from client - switch msg := c.Read().(type) { - case *Hello: - // set snappy if version is at least 5 - if msg.Version >= 5 { - c.SetSnappy(true) + var msg Message + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + ethMsg := new(eth.GetBlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) + case (BlockHeaders{}).Code(): + ethMsg := new(eth.BlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) + case (GetBlockBodies{}).Code(): + ethMsg := new(eth.GetBlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) + case (BlockBodies{}).Code(): + ethMsg := new(eth.BlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } - c.negotiateEthProtocol(msg.Caps) - if c.negotiatedProtoVersion == 0 { - t.Fatalf("unexpected eth protocol version") + return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + case (GetPooledTransactions{}.Code()): + ethMsg := new(eth.GetPooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } - return msg + return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) + case (PooledTransactions{}.Code()): + ethMsg := new(eth.PooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) default: - t.Fatalf("bad handshake: %#v", msg) - return nil + msg = errorf("invalid message code: %d", code) } -} -// negotiateEthProtocol sets the Conn's eth protocol version -// to highest advertised capability from peer -func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { - var highestEthVersion uint - for _, capability := range caps { - if capability.Name != "eth" { - continue - } - if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { - highestEthVersion = capability.Version + if msg != nil { + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) } + return 0, msg } - 
c.negotiatedProtoVersion = highestEthVersion + return 0, errorf("invalid message: %s", string(rawData)) } -// statusExchange performs a `Status` message exchange with the given -// node. -func (c *Conn) statusExchange(t *utesting.T, chain *Chain, status *Status) Message { - defer c.SetDeadline(time.Time{}) - c.SetDeadline(time.Now().Add(20 * time.Second)) - - // read status message from client - var message Message -loop: - for { - switch msg := c.Read().(type) { - case *Status: - if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { - t.Fatalf("wrong head block in status, want: %#x (block %d) have %#x", - want, chain.blocks[chain.Len()-1].NumberU64(), have) - } - if have, want := msg.TD.Cmp(chain.TD(chain.Len())), 0; have != want { - t.Fatalf("wrong TD in status: have %v want %v", have, want) - } - if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { - t.Fatalf("wrong fork ID in status: have %v, want %v", have, want) - } - message = msg - break loop - case *Disconnect: - t.Fatalf("disconnect received: %v", msg.Reason) - case *Ping: - c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error - // (PINGs should not be a response upon fresh connection) - default: - t.Fatalf("bad status message: %s", pretty.Sdump(msg)) - } - } - // make sure eth protocol version is set for negotiation - if c.negotiatedProtoVersion == 0 { - t.Fatalf("eth protocol version must be set in Conn") - } - if status == nil { - // write status message to client - status = &Status{ - ProtocolVersion: uint32(c.negotiatedProtoVersion), - NetworkID: chain.chainConfig.ChainID.Uint64(), - TD: chain.TD(chain.Len()), - Head: chain.blocks[chain.Len()-1].Hash(), - Genesis: chain.blocks[0].Hash(), - ForkID: chain.ForkID(), - } +// Write writes a eth packet to the connection. +func (c *Conn) Write(msg Message) error { + payload, err := rlp.EncodeToBytes(msg) + if err != nil { + return err } + _, err = c.Conn.Write(uint64(msg.Code()), payload) + return err +} - if err := c.Write(status); err != nil { - t.Fatalf("could not write to connection: %v", err) +// Write66 writes an eth66 packet to the connection. +func (c *Conn) Write66(req eth.Packet, code int) error { + payload, err := rlp.EncodeToBytes(req) + if err != nil { + return err } - - return message + _, err = c.Conn.Write(uint64(code), payload) + return err } -// waitForBlock waits for confirmation from the client that it has -// imported the given block. -func (c *Conn) waitForBlock(block *types.Block) error { - defer c.SetReadDeadline(time.Time{}) - - c.SetReadDeadline(time.Now().Add(20 * time.Second)) - // note: if the node has not yet imported the block, it will respond - // to the GetBlockHeaders request with an empty BlockHeaders response, - // so the GetBlockHeaders request must be sent again until the BlockHeaders - // response contains the desired header. - for { - req := &GetBlockHeaders{Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1} - if err := c.Write(req); err != nil { - return err +// ReadSnap reads a snap/1 response with the given id from the connection. 
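`Read66` and `Write66` only differ from their eth/65 counterparts in the framing: an eth/66 message is the old payload wrapped together with a `RequestId`. The sketch below round-trips a `GetBlockHeaders` request through RLP to make that envelope visible; it uses the public `eth` and `rlp` packages already imported by this file, with made-up request values.

```go
// Round-trip an eth/66 GetBlockHeaders request through RLP, mirroring what
// Write66 sends and Read66 decodes.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	req := &eth.GetBlockHeadersPacket66{
		RequestId: 1234,
		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
			Origin: eth.HashOrNumber{Number: 1}, // request headers starting at block 1
			Amount: 2,
		},
	}
	payload, err := rlp.EncodeToBytes(req) // Write66 sends this under the GetBlockHeaders code
	if err != nil {
		panic(err)
	}

	var got eth.GetBlockHeadersPacket66
	if err := rlp.DecodeBytes(payload, &got); err != nil { // Read66 performs the inverse
		panic(err)
	}
	fmt.Printf("request id %d, origin block %d, amount %d\n",
		got.RequestId, got.GetBlockHeadersPacket.Origin.Number, got.Amount)
}
```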
+func (c *Conn) ReadSnap(id uint64) (Message, error) { + respId := id + 1 + start := time.Now() + for respId != id && time.Since(start) < timeout { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return nil, fmt.Errorf("could not read from connection: %v", err) } - switch msg := c.Read().(type) { - case *BlockHeaders: - for _, header := range *msg { - if header.Number.Uint64() == block.NumberU64() { - return nil - } - } - time.Sleep(100 * time.Millisecond) + var snpMsg interface{} + switch int(code) { + case (GetAccountRange{}).Code(): + snpMsg = new(GetAccountRange) + case (AccountRange{}).Code(): + snpMsg = new(AccountRange) + case (GetStorageRanges{}).Code(): + snpMsg = new(GetStorageRanges) + case (StorageRanges{}).Code(): + snpMsg = new(StorageRanges) + case (GetByteCodes{}).Code(): + snpMsg = new(GetByteCodes) + case (ByteCodes{}).Code(): + snpMsg = new(ByteCodes) + case (GetTrieNodes{}).Code(): + snpMsg = new(GetTrieNodes) + case (TrieNodes{}).Code(): + snpMsg = new(TrieNodes) default: - return fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) + //return nil, fmt.Errorf("invalid message code: %d", code) + continue } + if err := rlp.DecodeBytes(rawData, snpMsg); err != nil { + return nil, fmt.Errorf("could not rlp decode message: %v", err) + } + return snpMsg.(Message), nil + } + return nil, fmt.Errorf("request timed out") } diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go index 140b96bfa5..5f340ed94c 100644 --- a/cmd/devp2p/internal/v4test/discv4tests.go +++ b/cmd/devp2p/internal/v4test/discv4tests.go @@ -21,7 +21,6 @@ import ( "crypto/rand" "fmt" "net" - "reflect" "time" "github.com/ethereum/go-ethereum/crypto" @@ -80,25 +79,57 @@ func BasicPing(t *utesting.T) { To: te.remoteEndpoint(), Expiration: futureExpiration(), }) - - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } -// checkPong verifies that reply is a valid PONG matching the given ping hash. +// checkPingPong verifies that the remote side sends both a PONG with the +// correct hash, and a PING. +// The two packets do not have to be in any particular order. +func (te *testenv) checkPingPong(pingHash []byte) error { + var ( + pings int + pongs int + ) + for i := 0; i < 2; i++ { + reply, _, err := te.read(te.l1) + if err != nil { + return err + } + switch reply.Kind() { + case v4wire.PongPacket: + if err := te.checkPong(reply, pingHash); err != nil { + return err + } + pongs++ + case v4wire.PingPacket: + pings++ + default: + return fmt.Errorf("expected PING or PONG, got %v %v", reply.Name(), reply) + } + } + if pongs == 1 && pings == 1 { + return nil + } + return fmt.Errorf("expected 1 PING (got %d) and 1 PONG (got %d)", pings, pongs) +} + +// checkPong verifies that reply is a valid PONG matching the given ping hash, +// and a PING. The two packets do not have to be in any particular order. 
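The new `checkPingPong` logic accepts the PONG (carrying the ping hash) and the remote's own PING in either order. The sketch below isolates that counting logic against a hypothetical `readPacket` callback, which stands in for the test env's `te.read` and is not part of go-ethereum; only the `v4wire` packet types and kind constants are real.

```go
// Order-insensitive PING/PONG check, assuming readPacket yields decoded
// discovery packets from the remote node.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
)

// checkPingPong expects exactly one PONG matching pingHash and one PING,
// in either order, within the next two packets.
func checkPingPong(readPacket func() (v4wire.Packet, error), pingHash []byte) error {
	var pings, pongs int
	for i := 0; i < 2; i++ {
		reply, err := readPacket()
		if err != nil {
			return err
		}
		switch reply.Kind() {
		case v4wire.PongPacket:
			pong := reply.(*v4wire.Pong)
			if !bytes.Equal(pong.ReplyTok, pingHash) {
				return fmt.Errorf("PONG reply token mismatch: got %x, want %x", pong.ReplyTok, pingHash)
			}
			pongs++
		case v4wire.PingPacket:
			pings++
		default:
			return fmt.Errorf("expected PING or PONG, got %v", reply.Name())
		}
	}
	if pings == 1 && pongs == 1 {
		return nil
	}
	return fmt.Errorf("expected 1 PING (got %d) and 1 PONG (got %d)", pings, pongs)
}

func main() {
	fmt.Println("wire checkPingPong to a real UDP listener to exercise it")
}
```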
func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error { - if reply == nil || reply.Kind() != v4wire.PongPacket { - return fmt.Errorf("expected PONG reply, got %v", reply) + if reply == nil { + return fmt.Errorf("expected PONG reply, got nil") + } + if reply.Kind() != v4wire.PongPacket { + return fmt.Errorf("expected PONG reply, got %v %v", reply.Name(), reply) } pong := reply.(*v4wire.Pong) if !bytes.Equal(pong.ReplyTok, pingHash) { return fmt.Errorf("PONG reply token mismatch: got %x, want %x", pong.ReplyTok, pingHash) } - wantEndpoint := te.localEndpoint(te.l1) - if !reflect.DeepEqual(pong.To, wantEndpoint) { - return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, wantEndpoint) + if want := te.localEndpoint(te.l1); !want.IP.Equal(pong.To.IP) || want.UDP != pong.To.UDP { + return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, want) } if v4wire.Expired(pong.Expiration) { return fmt.Errorf("PONG is expired (%v)", pong.Expiration) @@ -118,9 +149,7 @@ func PingWrongTo(t *utesting.T) { To: wrongEndpoint, Expiration: futureExpiration(), }) - - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -138,8 +167,7 @@ func PingWrongFrom(t *utesting.T) { Expiration: futureExpiration(), }) - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -160,8 +188,7 @@ func PingExtraData(t *utesting.T) { JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, }) - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -182,8 +209,7 @@ func PingExtraDataWrongFrom(t *utesting.T) { JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, } pingHash := te.send(te.l1, &req) - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if err := te.checkPingPong(pingHash); err != nil { t.Fatal(err) } } @@ -203,7 +229,7 @@ func PingPastExpiration(t *utesting.T) { reply, _, _ := te.read(te.l1) if reply != nil { - t.Fatal("Expected no reply, got", reply) + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) } } @@ -221,7 +247,7 @@ func WrongPacketType(t *utesting.T) { reply, _, _ := te.read(te.l1) if reply != nil { - t.Fatal("Expected no reply, got", reply) + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) } } @@ -239,9 +265,9 @@ func BondThenPingWithWrongFrom(t *utesting.T) { To: te.remoteEndpoint(), Expiration: futureExpiration(), }) - - reply, _, _ := te.read(te.l1) - if err := te.checkPong(reply, pingHash); err != nil { + if reply, _, err := te.read(te.l1); err != nil { + t.Fatal(err) + } else if err := te.checkPong(reply, pingHash); err != nil { t.Fatal(err) } } @@ -256,9 +282,16 @@ func FindnodeWithoutEndpointProof(t *utesting.T) { rand.Read(req.Target[:]) te.send(te.l1, &req) - reply, _, _ := te.read(te.l1) - if reply != nil { - t.Fatal("Expected no response, got", reply) + for { + reply, _, _ := te.read(te.l1) + if reply == nil { + // No response, all good + break + } + if reply.Kind() == v4wire.PingPacket { + continue // A ping is ok, just ignore it + } + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) } } @@ -278,7 +311,7 @@ func BasicFindnode(t *utesting.T) { t.Fatal("read find nodes", err) } if reply.Kind() != v4wire.NeighborsPacket { - t.Fatal("Expected neighbors, got", reply.Name()) + 
t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply) } } @@ -315,7 +348,7 @@ func UnsolicitedNeighbors(t *utesting.T) { t.Fatal("read find nodes", err) } if reply.Kind() != v4wire.NeighborsPacket { - t.Fatal("Expected neighbors, got", reply.Name()) + t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply) } nodes := reply.(*v4wire.Neighbors).Nodes if contains(nodes, encFakeKey) { diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index 848288c9cf..d65d6314c8 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -235,6 +235,8 @@ func ethFilter(args []string) (nodeFilter, error) { filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) case "ropsten": filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash) + case "sepolia": + filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash) default: return nil, fmt.Errorf("unknown network %q", args[0]) } diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go index 24a16f0b3c..6557a239da 100644 --- a/cmd/devp2p/rlpxcmd.go +++ b/cmd/devp2p/rlpxcmd.go @@ -36,6 +36,7 @@ var ( Subcommands: []cli.Command{ rlpxPingCommand, rlpxEthTestCommand, + rlpxSnapTestCommand, }, } rlpxPingCommand = cli.Command{ @@ -53,6 +54,16 @@ var ( testTAPFlag, }, } + rlpxSnapTestCommand = cli.Command{ + Name: "snap-test", + Usage: "Runs tests against a node", + ArgsUsage: " ", + Action: rlpxSnapTest, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + }, + } ) func rlpxPing(ctx *cli.Context) error { @@ -106,3 +117,15 @@ func rlpxEthTest(ctx *cli.Context) error { } return runTests(ctx, suite.AllEthTests()) } + +// rlpxSnapTest runs the snap protocol test suite. +func rlpxSnapTest(ctx *cli.Context) error { + if ctx.NArg() < 3 { + exit("missing path to chain.rlp as command-line argument") + } + suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + if err != nil { + exit(err) + } + return runTests(ctx, suite.SnapTests()) +} diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go index f2986e8ee9..70baae92f4 100644 --- a/cmd/ethkey/utils.go +++ b/cmd/ethkey/utils.go @@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string { // signHash is a helper function that calculates a hash for the given message // that can be safely used to calculate a signature from. // -// The hash is calulcated as +// The hash is calculated as // keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. diff --git a/cmd/evm/README.md b/cmd/evm/README.md index cdff41f904..1a029ab709 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -4,15 +4,15 @@ The `evm t8n` tool is a stateless state transition utility. It is a utility which can 1. Take a prestate, including - - Accounts, - - Block context information, - - Previous blockshashes (*optional) +- Accounts, +- Block context information, +- Previous blockshashes (*optional) 2. Apply a set of transactions, 3. Apply a mining-reward (*optional), 4. 
And generate a post-state, including - - State root, transaction root, receipt root, - - Information about rejected transactions, - - Optionally: a full or partial post-state dump +- State root, transaction root, receipt root, +- Information about rejected transactions, +- Optionally: a full or partial post-state dump ## Specification @@ -37,6 +37,8 @@ Command line params that has to be supported are --output.result result Determines where to put the result (stateroot, txroot etc) of the post-state. `stdout` - into the stdout output `stderr` - into the stderr output + --output.body value If set, the RLP of the transactions (block body) will be written to this file. + --input.txs stdin stdin or file name of where to find the transactions to apply. If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions.The '.rlp' format is identical to the output.body format. (default: "txs.json") --state.fork value Name of ruleset to use. --state.chainid value ChainID to use (default: 1) --state.reward value Mining reward. Set to -1 to disable (default: 0) @@ -110,7 +112,10 @@ Two resulting files: } ], "rejected": [ - 1 + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } ] } ``` @@ -156,7 +161,10 @@ Output: } ], "rejected": [ - 1 + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } ] } } @@ -168,9 +176,9 @@ Mining rewards and ommer rewards might need to be added. This is how those are a - `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`. - For each ommer (mined by `0xbb`), with blocknumber `N-delta` - - (where `delta` is the difference between the current block and the ommer) - - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` - - The account `0xaa` (block miner) is awarded `block_reward / 32` + - (where `delta` is the difference between the current block and the ommer) + - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` + - The account `0xaa` (block miner) is awarded `block_reward / 32` To make `state_t8n` apply these, the following inputs are required: @@ -200,7 +208,7 @@ Example: ] } ``` -When applying this, using a reward of `0x08` +When applying this, using a reward of `0x80` Output: ```json { @@ -220,7 +228,7 @@ Output: ### Future EIPS It is also possible to experiment with future eips that are not yet defined in a hard fork. -Example, putting EIP-1344 into Frontier: +Example, putting EIP-1344 into Frontier: ``` ./evm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=/testdata/1/env.json ``` @@ -229,41 +237,102 @@ Example, putting EIP-1344 into Frontier: The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`. 
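As a quick sanity check of the reward rules described above, the sketch below does the integer arithmetic for a block reward of `0x80` with ommers at deltas 1 and 2; the deltas are illustrative, not tied to a specific testdata file.

```go
// Worked example of the ommer-reward formula: ommer miner gets
// (8-delta)/8 * block_reward, block miner gets block_reward/32 per ommer.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	blockReward := big.NewInt(0x80) // 128 wei, the reward used in the example above
	deltas := []int64{1, 2}         // ommer depth: current block number minus ommer number

	minerTotal := new(big.Int).Set(blockReward) // the block miner always gets the full block reward
	for _, delta := range deltas {
		// Ommer miner: (8 - delta) / 8 * block_reward
		ommerReward := new(big.Int).Mul(big.NewInt(8-delta), blockReward)
		ommerReward.Div(ommerReward, big.NewInt(8))
		fmt.Printf("ommer at delta %d: ommer miner gets %v wei\n", delta, ommerReward)

		// Block miner: an extra block_reward / 32 per included ommer
		minerTotal.Add(minerTotal, new(big.Int).Div(blockReward, big.NewInt(32)))
	}
	fmt.Printf("block miner total: %v wei\n", minerTotal) // 128 + 2*4 = 136 (0x88)
}
```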
If a required blockhash is not provided, the exit code should be `4`: -Example where blockhashes are provided: +Example where blockhashes are provided: ``` -./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace +./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace +INFO [07-27|11:53:40.960] Trie dumping started root=b7341d..857ea1 +INFO [07-27|11:53:40.960] Trie dumping complete accounts=3 elapsed="103.298µs" +INFO [07-27|11:53:40.960] Wrote file file=alloc.json +INFO [07-27|11:53:40.960] Wrote file file=result.json + ``` + ``` cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 ``` ``` -{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""} -{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""} -{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""} -{"output":"","gasUsed":"0x17","time":142709} +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":156276} ``` In this example, the caller has not provided the required blockhash: ``` ./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace -``` -``` ERROR(4): getHash(3) invoked, blockhash for that block not provided ``` Error code: 4 + ### Chaining Another thing that can be done, is to chain invocations: ``` ./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json -INFO [01-21|22:41:22.963] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" -INFO [01-21|22:41:22.966] rejected tx index=0 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" -INFO [01-21|22:41:22.967] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.049] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.050] Trie dumping started root=84208a..ae4e13 +INFO 
[07-27|11:53:41.050] Trie dumping complete accounts=3 elapsed="59.412µs" +INFO [07-27|11:53:41.050] Wrote file file=result.json +INFO [07-27|11:53:41.051] rejected tx index=0 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.051] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.052] Trie dumping started root=84208a..ae4e13 +INFO [07-27|11:53:41.052] Trie dumping complete accounts=3 elapsed="45.734µs" +INFO [07-27|11:53:41.052] Wrote file file=alloc.json +INFO [07-27|11:53:41.052] Wrote file file=result.json ``` -What happened here, is that we first applied two identical transactions, so the second one was rejected. +What happened here, is that we first applied two identical transactions, so the second one was rejected. Then, taking the poststate alloc as the input for the next state, we tried again to include the same two transactions: this time, both failed due to too low nonce. In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the actual blocknumber (exposed to the EVM) would not increase. + +### Transactions in RLP form + +It is possible to provide already-signed transactions as input to, using an `input.txs` which ends with the `rlp` suffix. +The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible +to use the evm to go from `json` input to `rlp` input. + +The following command takes **json** the transactions in `./testdata/13/txs.json` and signs them. 
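Before looking at the command output, a note on the format involved: `output.body` (and an `.rlp` `--input.txs` file) is a hex string containing an RLP list of signed transactions. The sketch below builds one signed legacy transaction, encodes it that way, and decodes it back, which is the same decoding `t8n` applies to an `.rlp` input; the key and recipient here are made up.

```go
// Encode a signed tx into the output.body format (hex string of an RLP tx list)
// and decode it back, mirroring t8n's handling of '.rlp' input.txs files.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	key, _ := crypto.GenerateKey()
	to := common.HexToAddress("0x1111111111111111111111111111111111111111")
	tx := types.NewTransaction(0, to, big.NewInt(0), 21000, big.NewInt(1), nil)
	signed, err := types.SignTx(tx, types.HomesteadSigner{}, key)
	if err != nil {
		panic(err)
	}

	// Encode as an RLP list of transactions, hex-encoded like output.body.
	raw, err := rlp.EncodeToBytes(types.Transactions{signed})
	if err != nil {
		panic(err)
	}
	body := hexutil.Encode(raw)
	fmt.Println("output.body-style string:", body)

	// Decode it back, as t8n does for an '.rlp' input.txs file.
	var txs []*types.Transaction
	if err := rlp.DecodeBytes(common.FromHex(body), &txs); err != nil {
		panic(err)
	}
	fmt.Println("decoded", len(txs), "tx(s), first hash:", txs[0].Hash())
}
```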
After execution, they are output to `signed_txs.rlp`.: +``` +./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp +INFO [07-27|11:53:41.124] Trie dumping started root=e4b924..6aef61 +INFO [07-27|11:53:41.124] Trie dumping complete accounts=3 elapsed="94.284µs" +INFO [07-27|11:53:41.125] Wrote file file=alloc.json +INFO [07-27|11:53:41.125] Wrote file file=alloc_jsontx.json +INFO [07-27|11:53:41.125] Wrote file file=signed_txs.rlp + +``` + +The `output.body` is the rlp-list of transactions, encoded in hex and placed in a string a'la `json` encoding rules: +``` +cat signed_txs.rlp +"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +``` + +We can use `rlpdump` to check what the contents are: +``` +rlpdump -hex $(cat signed_txs.rlp | jq -r ) +[ + 02f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904, + 02f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9, +] +``` +Now, we can now use those (or any other already signed transactions), as input, like so: +``` +./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json +INFO [07-27|11:53:41.253] Trie dumping started root=e4b924..6aef61 +INFO [07-27|11:53:41.253] Trie dumping complete accounts=3 elapsed="128.445µs" +INFO [07-27|11:53:41.253] Wrote file file=alloc.json +INFO [07-27|11:53:41.255] Wrote file file=alloc_rlptx.json + +``` + +You might have noticed that the results from these two invocations were stored in two separate files. +And we can now finally check that they match. +``` +cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot +"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61" +"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61" +``` diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go new file mode 100644 index 0000000000..d4edd33bde --- /dev/null +++ b/cmd/evm/internal/t8ntool/block.go @@ -0,0 +1,380 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package t8ntool + +import ( + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/clique" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "gopkg.in/urfave/cli.v1" +) + +//go:generate gencodec -type header -field-override headerMarshaling -out gen_header.go +type header struct { + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *big.Int `json:"difficulty"` + Number *big.Int `json:"number" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed"` + Time uint64 `json:"timestamp" gencodec:"required"` + Extra []byte `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` +} + +type headerMarshaling struct { + Difficulty *math.HexOrDecimal256 + Number *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Time math.HexOrDecimal64 + Extra hexutil.Bytes + BaseFee *math.HexOrDecimal256 +} + +type bbInput struct { + Header *header `json:"header,omitempty"` + OmmersRlp []string `json:"ommers,omitempty"` + TxRlp string `json:"txs,omitempty"` + Clique *cliqueInput `json:"clique,omitempty"` + + Ethash bool `json:"-"` + EthashDir string `json:"-"` + PowMode ethash.Mode `json:"-"` + Txs []*types.Transaction `json:"-"` + Ommers []*types.Header `json:"-"` +} + +type cliqueInput struct { + Key *ecdsa.PrivateKey + Voted *common.Address + Authorize *bool + Vanity common.Hash +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (c *cliqueInput) UnmarshalJSON(input []byte) error { + var x struct { + Key *common.Hash `json:"secretKey"` + Voted *common.Address `json:"voted"` + Authorize *bool `json:"authorize"` + Vanity common.Hash `json:"vanity"` + } + if err := json.Unmarshal(input, &x); err != nil { + return err + } + if x.Key == nil { + return errors.New("missing required field 'secretKey' for cliqueInput") + } + if ecdsaKey, err := crypto.ToECDSA(x.Key[:]); err != nil { + return err + } else { + c.Key = ecdsaKey + } + c.Voted = x.Voted + c.Authorize = x.Authorize + c.Vanity = x.Vanity + return nil +} + +// ToBlock converts i into a *types.Block +func (i *bbInput) ToBlock() *types.Block { + header := &types.Header{ + ParentHash: i.Header.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: common.Address{}, + Root: i.Header.Root, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + Bloom: i.Header.Bloom, + Difficulty: common.Big0, + Number: i.Header.Number, + GasLimit: i.Header.GasLimit, + GasUsed: i.Header.GasUsed, + Time: i.Header.Time, + Extra: i.Header.Extra, + MixDigest: i.Header.MixDigest, + BaseFee: i.Header.BaseFee, + } + + // Fill optional values. 
+ if i.Header.OmmerHash != nil { + header.UncleHash = *i.Header.OmmerHash + } else if len(i.Ommers) != 0 { + // Calculate the ommer hash if none is provided and there are ommers to hash + header.UncleHash = types.CalcUncleHash(i.Ommers) + } + if i.Header.Coinbase != nil { + header.Coinbase = *i.Header.Coinbase + } + if i.Header.TxHash != nil { + header.TxHash = *i.Header.TxHash + } + if i.Header.ReceiptHash != nil { + header.ReceiptHash = *i.Header.ReceiptHash + } + if i.Header.Nonce != nil { + header.Nonce = *i.Header.Nonce + } + if header.Difficulty != nil { + header.Difficulty = i.Header.Difficulty + } + return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers) +} + +// SealBlock seals the given block using the configured engine. +func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) { + switch { + case i.Ethash: + return i.sealEthash(block) + case i.Clique != nil: + return i.sealClique(block) + default: + return block, nil + } +} + +// sealEthash seals the given block using ethash. +func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) { + if i.Header.Nonce != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with ethash will overwrite provided nonce")) + } + ethashConfig := ethash.Config{ + PowMode: i.PowMode, + DatasetDir: i.EthashDir, + CacheDir: i.EthashDir, + DatasetsInMem: 1, + DatasetsOnDisk: 2, + CachesInMem: 2, + CachesOnDisk: 3, + } + engine := ethash.New(ethashConfig, nil, true) + defer engine.Close() + // Use a buffered chan for results. + // If the testmode is used, the sealer will return quickly, and complain + // "Sealing result is not read by miner" if it cannot write the result. + results := make(chan *types.Block, 1) + if err := engine.Seal(nil, block, results, nil); err != nil { + panic(fmt.Sprintf("failed to seal block: %v", err)) + } + found := <-results + return block.WithSeal(found.Header()), nil +} + +// sealClique seals the given block using clique. +func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) { + // If any clique value overwrites an explicit header value, fail + // to avoid silently building a block with unexpected values. + if i.Header.Extra != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique will overwrite provided extra data")) + } + header := block.Header() + if i.Clique.Voted != nil { + if i.Header.Coinbase != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided coinbase")) + } + header.Coinbase = *i.Clique.Voted + } + if i.Clique.Authorize != nil { + if i.Header.Nonce != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided nonce")) + } + if *i.Clique.Authorize { + header.Nonce = [8]byte{} + } else { + header.Nonce = [8]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + } + } + // Extra is fixed 32 byte vanity and 65 byte signature + header.Extra = make([]byte, 32+65) + copy(header.Extra[0:32], i.Clique.Vanity.Bytes()[:]) + + // Sign the seal hash and fill in the rest of the extra data + h := clique.SealHash(header) + sighash, err := crypto.Sign(h[:], i.Clique.Key) + if err != nil { + return nil, err + } + copy(header.Extra[32:], sighash) + block = block.WithSeal(header) + return block, nil +} + +// BuildBlock constructs a block from the given inputs. 
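`sealClique` above encodes the clique seal as a fixed layout: `extraData` is a 32-byte vanity followed by a 65-byte secp256k1 signature over `clique.SealHash(header)`, with the nonce and coinbase reserved for authorization votes. The sketch below reproduces just the signing step on a dummy header and throwaway key, using the same public `clique` and `crypto` calls.

```go
// Standalone sketch of the clique seal layout: 32-byte vanity || 65-byte
// signature over clique.SealHash(header). Header fields and key are dummies.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/clique"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()

	header := &types.Header{
		Number:     big.NewInt(1),
		GasLimit:   8_000_000,
		Time:       1234567890,
		Difficulty: big.NewInt(2), // clique "in-turn" difficulty
		Extra:      make([]byte, 32+65),
	}
	// The vanity would normally come from the clique input's `vanity` field.
	copy(header.Extra[:32], []byte("t8n block builder sketch"))

	// Sign the seal hash (which excludes the signature bytes) and append it.
	sealHash := clique.SealHash(header)
	sig, err := crypto.Sign(sealHash[:], key)
	if err != nil {
		panic(err)
	}
	copy(header.Extra[32:], sig)

	fmt.Printf("sealed header hash: %v (extra: %d bytes)\n", header.Hash(), len(header.Extra))
}
```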
+func BuildBlock(ctx *cli.Context) error { + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + + baseDir, err := createBasedir(ctx) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) + } + inputData, err := readInput(ctx) + if err != nil { + return err + } + block := inputData.ToBlock() + block, err = inputData.SealBlock(block) + if err != nil { + return err + } + return dispatchBlock(ctx, baseDir, block) +} + +func readInput(ctx *cli.Context) (*bbInput, error) { + var ( + headerStr = ctx.String(InputHeaderFlag.Name) + ommersStr = ctx.String(InputOmmersFlag.Name) + txsStr = ctx.String(InputTxsRlpFlag.Name) + cliqueStr = ctx.String(SealCliqueFlag.Name) + ethashOn = ctx.Bool(SealEthashFlag.Name) + ethashDir = ctx.String(SealEthashDirFlag.Name) + ethashMode = ctx.String(SealEthashModeFlag.Name) + inputData = &bbInput{} + ) + if ethashOn && cliqueStr != "" { + return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen")) + } + if ethashOn { + inputData.Ethash = ethashOn + inputData.EthashDir = ethashDir + switch ethashMode { + case "normal": + inputData.PowMode = ethash.ModeNormal + case "test": + inputData.PowMode = ethash.ModeTest + case "fake": + inputData.PowMode = ethash.ModeFake + default: + return nil, NewError(ErrorConfig, fmt.Errorf("unknown pow mode: %s, supported modes: test, fake, normal", ethashMode)) + } + } + if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(inputData); err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + } + if cliqueStr != stdinSelector && cliqueStr != "" { + var clique cliqueInput + if err := readFile(cliqueStr, "clique", &clique); err != nil { + return nil, err + } + inputData.Clique = &clique + } + if headerStr != stdinSelector { + var env header + if err := readFile(headerStr, "header", &env); err != nil { + return nil, err + } + inputData.Header = &env + } + if ommersStr != stdinSelector && ommersStr != "" { + var ommers []string + if err := readFile(ommersStr, "ommers", &ommers); err != nil { + return nil, err + } + inputData.OmmersRlp = ommers + } + if txsStr != stdinSelector { + var txs string + if err := readFile(txsStr, "txs", &txs); err != nil { + return nil, err + } + inputData.TxRlp = txs + } + // Deserialize rlp txs and ommers + var ( + ommers = []*types.Header{} + txs = []*types.Transaction{} + ) + if inputData.TxRlp != "" { + if err := rlp.DecodeBytes(common.FromHex(inputData.TxRlp), &txs); err != nil { + return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode transaction from rlp data: %v", err)) + } + inputData.Txs = txs + } + for _, str := range inputData.OmmersRlp { + type extblock struct { + Header *types.Header + Txs []*types.Transaction + Ommers []*types.Header + } + var ommer *extblock + if err := rlp.DecodeBytes(common.FromHex(str), &ommer); err != nil { + return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode ommer from rlp data: %v", err)) + } + ommers = append(ommers, ommer.Header) + } + inputData.Ommers = ommers + + return inputData, nil +} + +// dispatchOutput writes the output data to either stderr or stdout, or to the specified +// files +func dispatchBlock(ctx *cli.Context, 
baseDir string, block *types.Block) error { + raw, _ := rlp.EncodeToBytes(block) + + type blockInfo struct { + Rlp hexutil.Bytes `json:"rlp"` + Hash common.Hash `json:"hash"` + } + var enc blockInfo + enc.Rlp = raw + enc.Hash = block.Hash() + + b, err := json.MarshalIndent(enc, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + switch dest := ctx.String(OutputBlockFlag.Name); dest { + case "stdout": + os.Stdout.Write(b) + os.Stdout.WriteString("\n") + case "stderr": + os.Stderr.Write(b) + os.Stderr.WriteString("\n") + default: + if err := saveFile(baseDir, dest, enc); err != nil { + return err + } + } + return nil +} diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index aadebdd439..9eaead3878 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -46,13 +47,15 @@ type Prestate struct { // ExecutionResult contains the execution status after running a state test, any // error that might have occurred and a dump of the final state if requested. type ExecutionResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected []int `json:"rejected,omitempty"` + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected []*rejectedTx `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` } type ommer struct { @@ -62,21 +65,35 @@ type ommer struct { //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty"` + Random *big.Int `json:"currentRandom"` + ParentDifficulty *big.Int `json:"parentDifficulty"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } type stEnvMarshaling struct { - 
Coinbase common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + ParentDifficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ParentTimestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 +} + +type rejectedTx struct { + Index int `json:"index"` + Err string `json:"error"` } // Apply applies a set of transactions to a pre-state @@ -103,7 +120,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number)) gaspool = new(core.GasPool) blockHash = common.Hash{0x13, 0x37} - rejectedTxs []int + rejectedTxs []*rejectedTx includedTxs types.Transactions gasUsed = uint64(0) receipts = make(types.Receipts, 0) @@ -120,6 +137,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, GasLimit: pre.Env.GasLimit, GetHash: getHash, } + // If currentBaseFee is defined, add it to the vmContext. + if pre.Env.BaseFee != nil { + vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee) + } + // If random is defined, add it to the vmContext. + if pre.Env.Random != nil { + rnd := common.BigToHash(pre.Env.Random) + vmContext.Random = &rnd + } // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // done in StateProcessor.Process(block, ...), right before transactions are applied. if chainConfig.DAOForkSupport && @@ -129,10 +155,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } for i, tx := range txs { - msg, err := tx.AsMessage(signer) + msg, err := tx.AsMessage(signer, pre.Env.BaseFee) if err != nil { - log.Info("rejected tx", "index", i, "hash", tx.Hash(), "error", err) - rejectedTxs = append(rejectedTxs, i) + log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) continue } tracer, err := getTracerFn(txIndex, tx.Hash()) @@ -141,7 +167,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } vmConfig.Tracer = tracer vmConfig.Debug = (tracer != nil) - statedb.Prepare(tx.Hash(), blockHash, txIndex) + statedb.Prepare(tx.Hash(), txIndex) txContext := core.NewEVMTxContext(msg) snapshot := statedb.Snapshot() evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) @@ -151,7 +177,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, if err != nil { statedb.RevertToSnapshot(snapshot) log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err) - rejectedTxs = append(rejectedTxs, i) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) continue } includedTxs = append(includedTxs, tx) @@ -186,7 +212,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } // Set the receipt logs and create the bloom filter. 
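The new `currentBaseFee` and `currentRandom` env fields are threaded straight into the EVM's block context in `Apply`. Below is a minimal sketch of that plumbing, assuming the `vm.BlockContext` type of this tree (which carries the `BaseFee` and `Random` fields used in the hunk above); the values stand in for `pre.Env.BaseFee` and `pre.Env.Random` and would come from `env.json` in a real run.

```go
// Copy env-style base fee and random values onto an EVM block context,
// mirroring the Apply changes above.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// Stand-ins for pre.Env.BaseFee and pre.Env.Random.
	envBaseFee := big.NewInt(1_000_000_000)
	envRandom := big.NewInt(0xdeadbeef)

	var blockCtx vm.BlockContext
	if envBaseFee != nil {
		blockCtx.BaseFee = new(big.Int).Set(envBaseFee)
	}
	if envRandom != nil {
		rnd := common.BigToHash(envRandom)
		blockCtx.Random = &rnd
	}
	fmt.Printf("base fee: %v, random: %v\n", blockCtx.BaseFee, blockCtx.Random)
}
```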
- receipt.Logs = statedb.GetLogs(tx.Hash()) + receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) // These three are non-consensus fields: //receipt.BlockHash @@ -238,6 +264,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, LogsHash: rlpHash(statedb.Logs()), Receipts: receipts, Rejected: rejectedTxs, + Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty), + GasUsed: (math.HexOrDecimal64)(gasUsed), } return statedb, execRs, nil } @@ -267,3 +295,23 @@ func rlpHash(x interface{}) (h common.Hash) { hw.Sum(h[:0]) return h } + +// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case +// the caller does not provide an explicit difficulty, but instead provides only +// parent timestamp + difficulty. +// Note: this method only works for ethash engine. +func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64, + parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int { + uncleHash := parentUncleHash + if uncleHash == (common.Hash{}) { + uncleHash = types.EmptyUncleHash + } + parent := &types.Header{ + ParentHash: common.Hash{}, + UncleHash: uncleHash, + Difficulty: parentDifficulty, + Number: new(big.Int).SetUint64(number - 1), + Time: parentTime, + } + return ethash.CalcDifficulty(config, currentTime, parent) +} diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index a3ba9acc2a..b6054ea562 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -32,7 +32,11 @@ var ( } TraceDisableMemoryFlag = cli.BoolTFlag{ Name: "trace.nomemory", - Usage: "Disable full memory dump in traces", + Usage: "Disable full memory dump in traces (deprecated)", + } + TraceEnableMemoryFlag = cli.BoolFlag{ + Name: "trace.memory", + Usage: "Enable full memory dump in traces", } TraceDisableStackFlag = cli.BoolFlag{ Name: "trace.nostack", @@ -40,7 +44,11 @@ var ( } TraceDisableReturnDataFlag = cli.BoolTFlag{ Name: "trace.noreturndata", - Usage: "Disable return data output in traces", + Usage: "Disable return data output in traces (deprecated)", + } + TraceEnableReturnDataFlag = cli.BoolFlag{ + Name: "trace.returndata", + Usage: "Enable return data output in traces", } OutputBasedir = cli.StringFlag{ Name: "output.basedir", @@ -68,6 +76,14 @@ var ( "\t - into the file ", Value: "result.json", } + OutputBlockFlag = cli.StringFlag{ + Name: "output.block", + Usage: "Determines where to put the `block` after building.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "block.json", + } InputAllocFlag = cli.StringFlag{ Name: "input.alloc", Usage: "`stdin` or file name of where to find the prestate alloc to use.", @@ -79,10 +95,43 @@ var ( Value: "env.json", } InputTxsFlag = cli.StringFlag{ - Name: "input.txs", - Usage: "`stdin` or file name of where to find the transactions to apply.", + Name: "input.txs", + Usage: "`stdin` or file name of where to find the transactions to apply. " + + "If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." 
+ + "The '.rlp' format is identical to the output.body format.", Value: "txs.json", } + InputHeaderFlag = cli.StringFlag{ + Name: "input.header", + Usage: "`stdin` or file name of where to find the block header to use.", + Value: "header.json", + } + InputOmmersFlag = cli.StringFlag{ + Name: "input.ommers", + Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.", + } + InputTxsRlpFlag = cli.StringFlag{ + Name: "input.txs", + Usage: "`stdin` or file name of where to find the transactions list in RLP form.", + Value: "txs.rlp", + } + SealCliqueFlag = cli.StringFlag{ + Name: "seal.clique", + Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.", + } + SealEthashFlag = cli.BoolFlag{ + Name: "seal.ethash", + Usage: "Seal block with ethash.", + } + SealEthashDirFlag = cli.StringFlag{ + Name: "seal.ethash.dir", + Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.", + } + SealEthashModeFlag = cli.StringFlag{ + Name: "seal.ethash.mode", + Usage: "Defines the type and amount of PoW verification an ethash engine makes.", + Value: "normal", + } RewardFlag = cli.Int64Flag{ Name: "state.reward", Usage: "Mining reward. Set to -1 to disable", diff --git a/cmd/evm/internal/t8ntool/gen_header.go b/cmd/evm/internal/t8ntool/gen_header.go new file mode 100644 index 0000000000..196e49dd71 --- /dev/null +++ b/cmd/evm/internal/t8ntool/gen_header.go @@ -0,0 +1,135 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package t8ntool + +import ( + "encoding/json" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/types" +) + +var _ = (*headerMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (h header) MarshalJSON() ([]byte, error) { + type header struct { + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + } + var enc header + enc.ParentHash = h.ParentHash + enc.OmmerHash = h.OmmerHash + enc.Coinbase = h.Coinbase + enc.Root = h.Root + enc.TxHash = h.TxHash + enc.ReceiptHash = h.ReceiptHash + enc.Bloom = h.Bloom + enc.Difficulty = (*math.HexOrDecimal256)(h.Difficulty) + enc.Number = (*math.HexOrDecimal256)(h.Number) + enc.GasLimit = math.HexOrDecimal64(h.GasLimit) + enc.GasUsed = math.HexOrDecimal64(h.GasUsed) + enc.Time = math.HexOrDecimal64(h.Time) + enc.Extra = h.Extra + enc.MixDigest = h.MixDigest + enc.Nonce = h.Nonce + enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (h *header) UnmarshalJSON(input []byte) error { + type header struct { + ParentHash *common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom *types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + } + var dec header + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.ParentHash != nil { + h.ParentHash = *dec.ParentHash + } + if dec.OmmerHash != nil { + h.OmmerHash = dec.OmmerHash + } + if dec.Coinbase != nil { + h.Coinbase = dec.Coinbase + } + if dec.Root == nil { + return errors.New("missing required field 'stateRoot' for header") + } + h.Root = *dec.Root + if dec.TxHash != nil { + h.TxHash = dec.TxHash + } + if dec.ReceiptHash != nil { + h.ReceiptHash = dec.ReceiptHash + } + if dec.Bloom != nil { + h.Bloom = *dec.Bloom + } + if dec.Difficulty != nil { + h.Difficulty = (*big.Int)(dec.Difficulty) + } + if dec.Number == nil { + return errors.New("missing required field 'number' for header") + } + h.Number = (*big.Int)(dec.Number) + if dec.GasLimit == nil { + return errors.New("missing required field 'gasLimit' for header") + } + h.GasLimit = uint64(*dec.GasLimit) + if dec.GasUsed != nil { + h.GasUsed = uint64(*dec.GasUsed) + } + if dec.Time == nil { + return errors.New("missing required field 'timestamp' for header") + } + h.Time = uint64(*dec.Time) + if dec.Extra != nil { + h.Extra = *dec.Extra + } + if dec.MixDigest != nil { + h.MixDigest = *dec.MixDigest + } + if dec.Nonce != nil { + h.Nonce = dec.Nonce + } + if dec.BaseFee != nil { + h.BaseFee = (*big.Int)(dec.BaseFee) + } + return nil +} diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index ab5951534e..a6d774cdab 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -16,35 +16,50 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) + enc.Random = (*math.HexOrDecimal256)(s.Random) + enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.Number = math.HexOrDecimal64(s.Number) enc.Timestamp = math.HexOrDecimal64(s.Timestamp) + enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp) enc.BlockHashes = s.BlockHashes enc.Ommers = s.Ommers + enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) + enc.ParentUncleHash = s.ParentUncleHash return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *common.Hash `json:"parentUncleHash"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -54,10 +69,15 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentCoinbase' for stEnv") } s.Coinbase = common.Address(*dec.Coinbase) - if dec.Difficulty == nil { - return errors.New("missing required field 'currentDifficulty' for stEnv") + if dec.Difficulty != nil { + s.Difficulty = (*big.Int)(dec.Difficulty) + } + if dec.Random != nil { + s.Random = (*big.Int)(dec.Random) + } + if dec.ParentDifficulty != nil { + s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) } - s.Difficulty = (*big.Int)(dec.Difficulty) if dec.GasLimit == nil { return errors.New("missing required field 'currentGasLimit' for stEnv") } @@ -70,11 +90,20 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentTimestamp' for stEnv") } s.Timestamp = uint64(*dec.Timestamp) + if dec.ParentTimestamp != nil { + s.ParentTimestamp = uint64(*dec.ParentTimestamp) + } if dec.BlockHashes != nil { s.BlockHashes = dec.BlockHashes } if dec.Ommers != nil { s.Ommers = dec.Ommers } + if dec.BaseFee != nil { + s.BaseFee = (*big.Int)(dec.BaseFee) + } + if dec.ParentUncleHash != nil { + s.ParentUncleHash = *dec.ParentUncleHash + } return nil } diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go new file mode 100644 index 0000000000..6f1c964ada --- /dev/null +++ b/cmd/evm/internal/t8ntool/transaction.go @@ -0,0 +1,179 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package t8ntool + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" +) + +type result struct { + Error error + Address common.Address + Hash common.Hash + IntrinsicGas uint64 +} + +// MarshalJSON marshals as JSON with a hash. +func (r *result) MarshalJSON() ([]byte, error) { + type xx struct { + Error string `json:"error,omitempty"` + Address *common.Address `json:"address,omitempty"` + Hash *common.Hash `json:"hash,omitempty"` + IntrinsicGas hexutil.Uint64 `json:"intrinsicGas,omitempty"` + } + var out xx + if r.Error != nil { + out.Error = r.Error.Error() + } + if r.Address != (common.Address{}) { + out.Address = &r.Address + } + if r.Hash != (common.Hash{}) { + out.Hash = &r.Hash + } + out.IntrinsicGas = hexutil.Uint64(r.IntrinsicGas) + return json.Marshal(out) +} + +func Transaction(ctx *cli.Context) error { + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + + var ( + err error + ) + // We need to load the transactions. May be either in stdin input or in files. + // Check if anything needs to be read from stdin + var ( + txStr = ctx.String(InputTxsFlag.Name) + inputData = &input{} + chainConfig *params.ChainConfig + ) + // Construct the chainconfig + if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { + return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err)) + } else { + chainConfig = cConf + } + // Set the chain id + chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) + var body hexutil.Bytes + if txStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(inputData); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + // Decode the body of already signed transactions + body = common.FromHex(inputData.TxRlp) + } else { + // Read input from file + inFile, err := os.Open(txStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if strings.HasSuffix(txStr, ".rlp") { + if err := decoder.Decode(&body); err != nil { + return err + } + } else { + return NewError(ErrorIO, errors.New("only rlp supported")) + } + } + signer := types.MakeSigner(chainConfig, new(big.Int)) + // We now have the transactions in 'body', which is supposed to be an + // rlp list of transactions + it, err := rlp.NewListIterator([]byte(body)) + if err != nil { + return err + } + var results []result + for it.Next() { + if err := it.Err(); err != nil { + return NewError(ErrorIO, err) + } + var tx types.Transaction + err := rlp.DecodeBytes(it.Value(), &tx) + if err != nil { + results = append(results, result{Error: err}) + continue + } + r := result{Hash: tx.Hash()} + if sender, err := types.Sender(signer, &tx); err != nil { + r.Error = err + results = append(results, r) + continue + 
} else { + r.Address = sender + } + // Check intrinsic gas + if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, + chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil { + r.Error = err + results = append(results, r) + continue + } else { + r.IntrinsicGas = gas + if tx.Gas() < gas { + r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas) + results = append(results, r) + continue + } + } + // Validate <256bit fields + switch { + case tx.Nonce()+1 < tx.Nonce(): + r.Error = errors.New("nonce exceeds 2^64-1") + case tx.Value().BitLen() > 256: + r.Error = errors.New("value exceeds 256 bits") + case tx.GasPrice().BitLen() > 256: + r.Error = errors.New("gasPrice exceeds 256 bits") + case tx.GasTipCap().BitLen() > 256: + r.Error = errors.New("maxPriorityFeePerGas exceeds 256 bits") + case tx.GasFeeCap().BitLen() > 256: + r.Error = errors.New("maxFeePerGas exceeds 256 bits") + case tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0: + r.Error = errors.New("maxFeePerGas < maxPriorityFeePerGas") + case new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256: + r.Error = errors.New("gas * gasPrice exceeds 256 bits") + case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256: + r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits") + } + results = append(results, r) + } + out, err := json.MarshalIndent(results, "", " ") + fmt.Println(string(out)) + return err +} diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index f5c0dd42f0..dc70e380fb 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -19,11 +19,13 @@ package t8ntool import ( "crypto/ecdsa" "encoding/json" + "errors" "fmt" "io/ioutil" "math/big" "os" "path" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -32,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -41,11 +44,12 @@ import ( const ( ErrorEVM = 2 - ErrorVMConfig = 3 + ErrorConfig = 3 ErrorMissingBlockhash = 4 ErrorJson = 10 ErrorIO = 11 + ErrorRlp = 12 stdinSelector = "stdin" ) @@ -63,17 +67,23 @@ func (n *NumberedError) Error() string { return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) } -func (n *NumberedError) Code() int { +func (n *NumberedError) ExitCode() int { return n.errorCode } +// compile-time conformance test +var ( + _ cli.ExitCoder = (*NumberedError)(nil) +) + type input struct { Alloc core.GenesisAlloc `json:"alloc,omitempty"` Env *stEnv `json:"env,omitempty"` Txs []*txWithKey `json:"txs,omitempty"` + TxRlp string `json:"txsRlp,omitempty"` } -func Main(ctx *cli.Context) error { +func Transition(ctx *cli.Context) error { // Configure the go-ethereum logger glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) @@ -82,26 +92,32 @@ func Main(ctx *cli.Context) error { var ( err error tracer vm.EVMLogger - baseDir = "" + baseDir string ) var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) - // If user specified a basedir, make sure it exists - if ctx.IsSet(OutputBasedir.Name) { - if base := 
ctx.String(OutputBasedir.Name); len(base) > 0 { - err := os.MkdirAll(base, 0755) // //rw-r--r-- - if err != nil { - return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) - } - baseDir = base - } + baseDir, err = createBasedir(ctx) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) } if ctx.Bool(TraceFlag.Name) { + if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) { + return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name)) + } + if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) { + return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name)) + } + if ctx.IsSet(TraceDisableMemoryFlag.Name) { + log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name)) + } + if ctx.IsSet(TraceDisableReturnDataFlag.Name) { + log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name)) + } // Configure the EVM logger - logConfig := &vm.LogConfig{ + logConfig := &logger.Config{ DisableStack: ctx.Bool(TraceDisableStackFlag.Name), - EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name), - EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name), + EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name), + EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name), Debug: true, } var prevFile *os.File @@ -120,7 +136,7 @@ func Main(ctx *cli.Context) error { return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) } prevFile = traceFile - return vm.NewJSONLogger(logConfig, traceFile), nil + return logger.NewJSONLogger(logConfig, traceFile), nil } } else { getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) { @@ -142,32 +158,22 @@ func Main(ctx *cli.Context) error { // Figure out the prestate alloc if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { decoder := json.NewDecoder(os.Stdin) - decoder.Decode(inputData) + if err := decoder.Decode(inputData); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } } if allocStr != stdinSelector { - inFile, err := os.Open(allocStr) - if err != nil { - return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err)) - } - defer inFile.Close() - decoder := json.NewDecoder(inFile) - if err := decoder.Decode(&inputData.Alloc); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling alloc-file: %v", err)) + if err := readFile(allocStr, "alloc", &inputData.Alloc); err != nil { + return err } } prestate.Pre = inputData.Alloc // Set the block environment if envStr != stdinSelector { - inFile, err := os.Open(envStr) - if err != nil { - return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err)) - } - defer inFile.Close() - decoder := json.NewDecoder(inFile) var env stEnv - if err := decoder.Decode(&env); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling env-file: %v", err)) + if err := readFile(envStr, "env", &env); err != nil { + return err } inputData.Env = &env } @@ -180,7 +186,7 @@ func Main(ctx *cli.Context) error { // Construct the chainconfig var chainConfig 
*params.ChainConfig if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { - return NewError(ErrorVMConfig, fmt.Errorf("Failed constructing chain configuration: %v", err)) + return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err)) } else { chainConfig = cConf vmConfig.ExtraEips = extraEips @@ -196,58 +202,118 @@ func Main(ctx *cli.Context) error { } defer inFile.Close() decoder := json.NewDecoder(inFile) - if err := decoder.Decode(&txsWithKeys); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err)) + if strings.HasSuffix(txStr, ".rlp") { + var body hexutil.Bytes + if err := decoder.Decode(&body); err != nil { + return err + } + var txs types.Transactions + if err := rlp.DecodeBytes(body, &txs); err != nil { + return err + } + for _, tx := range txs { + txsWithKeys = append(txsWithKeys, &txWithKey{ + key: nil, + tx: tx, + }) + } + } else { + if err := decoder.Decode(&txsWithKeys); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) + } } } else { - txsWithKeys = inputData.Txs + if len(inputData.TxRlp) > 0 { + // Decode the body of already signed transactions + body := common.FromHex(inputData.TxRlp) + var txs types.Transactions + if err := rlp.DecodeBytes(body, &txs); err != nil { + return err + } + for _, tx := range txs { + txsWithKeys = append(txsWithKeys, &txWithKey{ + key: nil, + tx: tx, + }) + } + } else { + // JSON encoded transactions + txsWithKeys = inputData.Txs + } } // We may have to sign the transactions. signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number))) if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed signing transactions: %v", err)) + return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) + } + // Sanity check, to not `panic` in state_transition + if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { + if prestate.Env.BaseFee == nil { + return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) + } + } + // Sanity check, to not `panic` in state_transition + if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { + return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules")) + } + if env := prestate.Env; env.Difficulty == nil { + // If difficulty was not provided by caller, we need to calculate it. 
+ switch { + case env.ParentDifficulty == nil: + return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) + case env.Number == 0: + return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0")) + case env.Timestamp <= env.ParentTimestamp: + return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", + env.Timestamp, env.ParentTimestamp)) + } + prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, + env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) } - - // Iterate over all the tests, run them and aggregate the results - // Run the test and aggregate the result - state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) if err != nil { return err } body, _ := rlp.EncodeToBytes(txs) // Dump the excution result collector := make(Alloc) - state.DumpToCollector(collector, false, false, false, nil, -1) + s.DumpToCollector(collector, nil) return dispatchOutput(ctx, baseDir, result, collector, body) - } // txWithKey is a helper-struct, to allow us to use the types.Transaction along with // a `secretKey`-field, for input type txWithKey struct { - key *ecdsa.PrivateKey - tx *types.Transaction + key *ecdsa.PrivateKey + tx *types.Transaction + protected bool } func (t *txWithKey) UnmarshalJSON(input []byte) error { - // Read the secretKey, if present - type sKey struct { - Key *common.Hash `json:"secretKey"` + // Read the metadata, if present + type txMetadata struct { + Key *common.Hash `json:"secretKey"` + Protected *bool `json:"protected"` } - var key sKey - if err := json.Unmarshal(input, &key); err != nil { + var data txMetadata + if err := json.Unmarshal(input, &data); err != nil { return err } - if key.Key != nil { - k := key.Key.Hex()[2:] + if data.Key != nil { + k := data.Key.Hex()[2:] if ecdsaKey, err := crypto.HexToECDSA(k); err != nil { return err } else { t.key = ecdsaKey } } + if data.Protected != nil { + t.protected = *data.Protected + } else { + t.protected = true + } // Now, read the transaction itself var tx types.Transaction if err := json.Unmarshal(input, &tx); err != nil { @@ -276,9 +342,17 @@ func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Tran v, r, s := tx.RawSignatureValues() if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 { // This transaction needs to be signed - signed, err := types.SignTx(tx, signer, key) + var ( + signed *types.Transaction + err error + ) + if txWithKey.protected { + signed, err = types.SignTx(tx, signer, key) + } else { + signed, err = types.SignTx(tx, types.FrontierSigner{}, key) + } if err != nil { - return nil, NewError(ErrorJson, fmt.Errorf("Tx %d: failed to sign tx: %v", i, err)) + return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) } signedTxs = append(signedTxs, signed) } else { @@ -303,7 +377,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { } } genesisAccount := core.GenesisAccount{ - Code: common.FromHex(dumpAccount.Code), + Code: dumpAccount.Code, Storage: storage, Balance: balance, Nonce: dumpAccount.Nonce, @@ -355,18 +429,20 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a return err } if len(stdOutObject) > 0 { - b, err := 
json.MarshalIndent(stdOutObject, "", " ") + b, err := json.MarshalIndent(stdOutObject, "", " ") if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } os.Stdout.Write(b) + os.Stdout.WriteString("\n") } if len(stdErrObject) > 0 { - b, err := json.MarshalIndent(stdErrObject, "", " ") + b, err := json.MarshalIndent(stdErrObject, "", " ") if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } os.Stderr.Write(b) + os.Stderr.WriteString("\n") } return nil } diff --git a/cmd/evm/internal/t8ntool/utils.go b/cmd/evm/internal/t8ntool/utils.go new file mode 100644 index 0000000000..1c54f09bf4 --- /dev/null +++ b/cmd/evm/internal/t8ntool/utils.go @@ -0,0 +1,54 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package t8ntool + +import ( + "encoding/json" + "fmt" + "os" + + "gopkg.in/urfave/cli.v1" +) + +// readFile reads the json-data in the provided path and marshals into dest. +func readFile(path, desc string, dest interface{}) error { + inFile, err := os.Open(path) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading %s file: %v", desc, err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if err := decoder.Decode(dest); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling %s file: %v", desc, err)) + } + return nil +} + +// createBasedir makes sure the basedir exists, if user specified one. 
+func createBasedir(ctx *cli.Context) (string, error) { + baseDir := "" + if ctx.IsSet(OutputBasedir.Name) { + if base := ctx.String(OutputBasedir.Name); len(base) > 0 { + err := os.MkdirAll(base, 0755) // //rw-r--r-- + if err != nil { + return "", err + } + baseDir = base + } + } + return baseDir, nil +} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index d78970e7dc..2f404d48e9 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -129,23 +129,20 @@ var ( Name: "noreturndata", Usage: "enable return data output", } - EVMInterpreterFlag = cli.StringFlag{ - Name: "vm.evm", - Usage: "External EVM configuration (default = built-in interpreter)", - Value: "", - } ) var stateTransitionCommand = cli.Command{ Name: "transition", Aliases: []string{"t8n"}, Usage: "executes a full state transition", - Action: t8ntool.Main, + Action: t8ntool.Transition, Flags: []cli.Flag{ t8ntool.TraceFlag, t8ntool.TraceDisableMemoryFlag, + t8ntool.TraceEnableMemoryFlag, t8ntool.TraceDisableStackFlag, t8ntool.TraceDisableReturnDataFlag, + t8ntool.TraceEnableReturnDataFlag, t8ntool.OutputBasedir, t8ntool.OutputAllocFlag, t8ntool.OutputResultFlag, @@ -159,6 +156,37 @@ var stateTransitionCommand = cli.Command{ t8ntool.VerbosityFlag, }, } +var transactionCommand = cli.Command{ + Name: "transaction", + Aliases: []string{"t9n"}, + Usage: "performs transaction validation", + Action: t8ntool.Transaction, + Flags: []cli.Flag{ + t8ntool.InputTxsFlag, + t8ntool.ChainIDFlag, + t8ntool.ForknameFlag, + t8ntool.VerbosityFlag, + }, +} + +var blockBuilderCommand = cli.Command{ + Name: "block-builder", + Aliases: []string{"b11r"}, + Usage: "builds a block", + Action: t8ntool.BuildBlock, + Flags: []cli.Flag{ + t8ntool.OutputBasedir, + t8ntool.OutputBlockFlag, + t8ntool.InputHeaderFlag, + t8ntool.InputOmmersFlag, + t8ntool.InputTxsRlpFlag, + t8ntool.SealCliqueFlag, + t8ntool.SealEthashFlag, + t8ntool.SealEthashDirFlag, + t8ntool.SealEthashModeFlag, + t8ntool.VerbosityFlag, + }, +} func init() { app.Flags = []cli.Flag{ @@ -185,7 +213,6 @@ func init() { DisableStackFlag, DisableStorageFlag, DisableReturnDataFlag, - EVMInterpreterFlag, } app.Commands = []cli.Command{ compileCommand, @@ -193,6 +220,8 @@ func init() { runCommand, stateTestCommand, stateTransitionCommand, + transactionCommand, + blockBuilderCommand, } cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } @@ -201,7 +230,7 @@ func main() { if err := app.Run(os.Args); err != nil { code := 1 if ec, ok := err.(*t8ntool.NumberedError); ok { - code = ec.Code() + code = ec.ExitCode() } fmt.Fprintln(os.Stderr, err) os.Exit(code) diff --git a/cmd/evm/poststate.json b/cmd/evm/poststate.json deleted file mode 100644 index 9ee17f18d1..0000000000 --- a/cmd/evm/poststate.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "root": "f4157bb27bcb1d1a63001434a249a80948f2e9fe1f53d551244c1dae826b5b23", - "accounts": { - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "4276951709", - "nonce": 1, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "6916764286133345652", - "nonce": 172, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "42500", - "nonce": 0, - "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - 
"codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - } - } -} \ No newline at end of file diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index fd18270182..6f2d97320d 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm/runtime" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "gopkg.in/urfave/cli.v1" @@ -107,7 +108,7 @@ func runCmd(ctx *cli.Context) error { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) log.Root().SetHandler(glogger) - logconfig := &vm.LogConfig{ + logconfig := &logger.Config{ EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), @@ -117,7 +118,7 @@ func runCmd(ctx *cli.Context) error { var ( tracer vm.EVMLogger - debugLogger *vm.StructLogger + debugLogger *logger.StructLogger statedb *state.StateDB chainConfig *params.ChainConfig sender = common.BytesToAddress([]byte("sender")) @@ -125,12 +126,12 @@ func runCmd(ctx *cli.Context) error { genesisConfig *core.Genesis ) if ctx.GlobalBool(MachineFlag.Name) { - tracer = vm.NewJSONLogger(logconfig, os.Stdout) + tracer = logger.NewJSONLogger(logconfig, os.Stdout) } else if ctx.GlobalBool(DebugFlag.Name) { - debugLogger = vm.NewStructLogger(logconfig) + debugLogger = logger.NewStructLogger(logconfig) tracer = debugLogger } else { - debugLogger = vm.NewStructLogger(logconfig) + debugLogger = logger.NewStructLogger(logconfig) } if ctx.GlobalString(GenesisFlag.Name) != "" { gen := readGenesis(ctx.GlobalString(GenesisFlag.Name)) @@ -211,9 +212,8 @@ func runCmd(ctx *cli.Context) error { Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), EVMConfig: vm.Config{ - Tracer: tracer, - Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), - EVMInterpreter: ctx.GlobalString(EVMInterpreterFlag.Name), + Tracer: tracer, + Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), }, } @@ -272,7 +272,7 @@ func runCmd(ctx *cli.Context) error { statedb.AccountsIntermediateRoot() statedb.Commit(nil) statedb.IntermediateRoot(true) - fmt.Println(string(statedb.Dump(false, false, true))) + fmt.Println(string(statedb.Dump(nil))) } if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" { @@ -291,10 +291,10 @@ func runCmd(ctx *cli.Context) error { if ctx.GlobalBool(DebugFlag.Name) { if debugLogger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") - vm.WriteTrace(os.Stderr, debugLogger.StructLogs()) + logger.WriteTrace(os.Stderr, debugLogger.StructLogs()) } fmt.Fprintln(os.Stderr, "#### LOGS ####") - vm.WriteLogs(os.Stderr, statedb.Logs()) + logger.WriteLogs(os.Stderr, statedb.Logs()) } if bench || ctx.GlobalBool(StatDumpFlag.Name) { diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index c373772fbe..6ffc2f51ca 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" @@ -58,7 +59,7 @@ func stateTestCmd(ctx 
*cli.Context) error { log.Root().SetHandler(glogger) // Configure the EVM logger - config := &vm.LogConfig{ + config := &logger.Config{ EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), @@ -66,18 +67,18 @@ func stateTestCmd(ctx *cli.Context) error { } var ( tracer vm.EVMLogger - debugger *vm.StructLogger + debugger *logger.StructLogger ) switch { case ctx.GlobalBool(MachineFlag.Name): - tracer = vm.NewJSONLogger(config, os.Stderr) + tracer = logger.NewJSONLogger(config, os.Stderr) case ctx.GlobalBool(DebugFlag.Name): - debugger = vm.NewStructLogger(config) + debugger = logger.NewStructLogger(config) tracer = debugger default: - debugger = vm.NewStructLogger(config) + debugger = logger.NewStructLogger(config) } // Load the test content from the input file src, err := ioutil.ReadFile(ctx.Args().First()) @@ -98,17 +99,17 @@ func stateTestCmd(ctx *cli.Context) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - _, state, err := test.Run(st, cfg, false) + _, s, err := test.Run(st, cfg, false) // print state root for evmlab tracing - if ctx.GlobalBool(MachineFlag.Name) && state != nil { - root := state.IntermediateRoot(false) + if ctx.GlobalBool(MachineFlag.Name) && s != nil { + root := s.IntermediateRoot(false) fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", root) } if err != nil { // Test failed, mark as so and dump any state to aid debugging result.Pass, result.Error = false, err.Error() - if ctx.GlobalBool(DumpFlag.Name) && state != nil { - dump := state.RawDump(false, false, true) + if ctx.GlobalBool(DumpFlag.Name) && s != nil { + dump := s.RawDump(nil) result.State = &dump } } @@ -119,7 +120,7 @@ func stateTestCmd(ctx *cli.Context) error { if ctx.GlobalBool(DebugFlag.Name) { if debugger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") - vm.WriteTrace(os.Stderr, debugger.StructLogs()) + logger.WriteTrace(os.Stderr, debugger.StructLogs()) } } } diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go new file mode 100644 index 0000000000..3f0bd3185f --- /dev/null +++ b/cmd/evm/t8n_test.go @@ -0,0 +1,477 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" + "github.com/ethereum/go-ethereum/internal/cmdtest" +) + +func TestMain(m *testing.M) { + // Run the app if we've been exec'd as "ethkey-test" in runEthkey. 
+ reexec.Register("evm-test", func() { + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + }) + // check if we have been reexec'd + if reexec.Init() { + return + } + os.Exit(m.Run()) +} + +type testT8n struct { + *cmdtest.TestCmd +} + +type t8nInput struct { + inAlloc string + inTxs string + inEnv string + stFork string + stReward string +} + +func (args *t8nInput) get(base string) []string { + var out []string + if opt := args.inAlloc; opt != "" { + out = append(out, "--input.alloc") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inTxs; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inEnv; opt != "" { + out = append(out, "--input.env") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.stFork; opt != "" { + out = append(out, "--state.fork", opt) + } + if opt := args.stReward; opt != "" { + out = append(out, "--state.reward", opt) + } + return out +} + +type t8nOutput struct { + alloc bool + result bool + body bool +} + +func (args *t8nOutput) get() (out []string) { + if args.body { + out = append(out, "--output.body", "stdout") + } else { + out = append(out, "--output.body", "") // empty means ignore + } + if args.result { + out = append(out, "--output.result", "stdout") + } else { + out = append(out, "--output.result", "") + } + if args.alloc { + out = append(out, "--output.alloc", "stdout") + } else { + out = append(out, "--output.alloc", "") + } + return out +} + +func TestT8n(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input t8nInput + output t8nOutput + expExitCode int + expOut string + }{ + { // Test exit (3) on bad config + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Frontier+1346", "", + }, + output: t8nOutput{alloc: true, result: true}, + expExitCode: 3, + }, + { + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Byzantium", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // blockhash test + base: "./testdata/3", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // missing blockhash test + base: "./testdata/4", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", "", + }, + output: t8nOutput{alloc: true, result: true}, + expExitCode: 4, + }, + { // Uncle test + base: "./testdata/5", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Byzantium", "0x80", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // Sign json transactions + base: "./testdata/13", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{body: true}, + expOut: "exp.json", + }, + { // Already signed transactions + base: "./testdata/13", + input: t8nInput{ + "alloc.json", "signed_txs.rlp", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp2.json", + }, + { // Difficulty calculation - no uncles + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp.json", + }, + { // Difficulty calculation - with uncles + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.uncles.json", "London", "", + }, + output: 
t8nOutput{result: true}, + expOut: "exp2.json", + }, + { // Difficulty calculation - with ommers + Berlin + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.uncles.json", "Berlin", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_berlin.json", + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_london.json", + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "ArrowGlacier", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_arrowglacier.json", + }, + { // Sign unprotected (pre-EIP155) transaction + base: "./testdata/23", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", "", + }, + output: t8nOutput{result: true}, + expOut: "exp.json", + }, + } { + + args := []string{"t8n"} + args = append(args, tc.output.get()...) + args = append(args, tc.input.get(tc.base)...) + var qArgs []string // quoted args for debugging purposes + for _, arg := range args { + if len(arg) == 0 { + qArgs = append(qArgs, `""`) + } else { + qArgs = append(qArgs, arg) + } + } + tt.Logf("args: %v\n", strings.Join(qArgs, " ")) + tt.Run("evm-test", args...) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +type t9nInput struct { + inTxs string + stFork string +} + +func (args *t9nInput) get(base string) []string { + var out []string + if opt := args.inTxs; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.stFork; opt != "" { + out = append(out, "--state.fork", opt) + } + return out +} + +func TestT9n(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input t9nInput + expExitCode int + expOut string + }{ + { // London txs on homestead + base: "./testdata/15", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "Homestead", + }, + expOut: "exp.json", + }, + { // London txs on London + base: "./testdata/15", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "London", + }, + expOut: "exp2.json", + }, + { // An RLP list (a blockheader really) + base: "./testdata/15", + input: t9nInput{ + inTxs: "blockheader.rlp", + stFork: "London", + }, + expOut: "exp3.json", + }, + { // Transactions with too low gas + base: "./testdata/16", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "London", + }, + expOut: "exp.json", + }, + { // Transactions with value exceeding 256 bits + base: "./testdata/17", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "London", + }, + expOut: "exp.json", + }, + { // Invalid RLP + base: "./testdata/18", + input: t9nInput{ + inTxs: "invalid.rlp", + stFork: "London", + }, + expExitCode: t8ntool.ErrorIO, + }, + } { + + args := []string{"t9n"} + args = append(args, 
tc.input.get(tc.base)...) + + tt.Run("evm-test", args...) + tt.Logf("args:\n go run . %v\n", strings.Join(args, " ")) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Logf(string(have)) + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +type b11rInput struct { + inEnv string + inOmmersRlp string + inTxsRlp string + inClique string + ethash bool + ethashMode string + ethashDir string +} + +func (args *b11rInput) get(base string) []string { + var out []string + if opt := args.inEnv; opt != "" { + out = append(out, "--input.header") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inOmmersRlp; opt != "" { + out = append(out, "--input.ommers") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inTxsRlp; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inClique; opt != "" { + out = append(out, "--seal.clique") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if args.ethash { + out = append(out, "--seal.ethash") + } + if opt := args.ethashMode; opt != "" { + out = append(out, "--seal.ethash.mode") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.ethashDir; opt != "" { + out = append(out, "--seal.ethash.dir") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + out = append(out, "--output.block") + out = append(out, "stdout") + return out +} + +func TestB11r(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input b11rInput + expExitCode int + expOut string + }{ + { // unsealed block + base: "./testdata/20", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + }, + expOut: "exp.json", + }, + { // ethash test seal + base: "./testdata/21", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + }, + expOut: "exp.json", + }, + { // clique test seal + base: "./testdata/21", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + inClique: "clique.json", + }, + expOut: "exp-clique.json", + }, + { // block with ommers + base: "./testdata/22", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + }, + expOut: "exp.json", + }, + } { + + args := []string{"b11r"} + args = append(args, tc.input.get(tc.base)...) + + tt.Run("evm-test", args...) + tt.Logf("args:\n go run . 
%v\n", strings.Join(args, " ")) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Logf(string(have)) + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +// cmpJson compares the JSON in two byte slices. +func cmpJson(a, b []byte) (bool, error) { + var j, j2 interface{} + if err := json.Unmarshal(a, &j); err != nil { + return false, err + } + if err := json.Unmarshal(b, &j2); err != nil { + return false, err + } + return reflect.DeepEqual(j2, j), nil +} diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json new file mode 100644 index 0000000000..7d3805012d --- /dev/null +++ b/cmd/evm/testdata/1/exp.json @@ -0,0 +1,44 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xa410" + } + }, + "result": { + "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "rejected": [ + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x5208" + } +} diff --git 
a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json new file mode 100644 index 0000000000..6e98e7513c --- /dev/null +++ b/cmd/evm/testdata/10/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json new file mode 100644 index 0000000000..3a82d46a77 --- /dev/null +++ b/cmd/evm/testdata/10/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md new file mode 100644 index 0000000000..c34be80bb7 --- /dev/null +++ b/cmd/evm/testdata/10/readme.md @@ -0,0 +1,79 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. + +``` +[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 +INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" +``` +Output: +```json +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x10000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x2" + } + ], + "rejected": [ + 3 + ] + } +} +``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json new file mode 100644 index 0000000000..f7c9baa26d --- /dev/null +++ b/cmd/evm/testdata/10/txs.json @@ -0,0 +1,70 @@ +[ + { + "input" : "0x", + "gas" : "0x10000001", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", + "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x2", + 
"to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", + "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x3", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", + "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x4", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", + "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json new file mode 100644 index 0000000000..86938230fa --- /dev/null +++ b/cmd/evm/testdata/11/alloc.json @@ -0,0 +1,25 @@ +{ + "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x61ffff5060046000f3", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x00", + "code" : "0x6001600055", + "nonce" : "0x00", + "storage" : { + } + } +} + diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json new file mode 100644 index 0000000000..37dedf0947 --- /dev/null +++ b/cmd/evm/testdata/11/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "blockHashes" : { + "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" + } +} + diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md new file mode 100644 index 0000000000..d499f8e99f --- /dev/null +++ b/cmd/evm/testdata/11/readme.md @@ -0,0 +1,13 @@ +## Test missing basefee + +In this test, the `currentBaseFee` is missing from the env portion. +On a live blockchain, the basefee is present in the header, and verified as part of header validation. + +In `evm t8n`, we don't have blocks, so it needs to be added in the `env`instead. + +When it's missing, an error is expected. 
+ +``` +dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null +ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section +``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json new file mode 100644 index 0000000000..c54b0a1f5b --- /dev/null +++ b/cmd/evm/testdata/11/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x38600060013960015160005560006000f3", + "gas" : "0x61a80", + "gasPrice" : "0x1", + "nonce" : "0x0", + "value" : "0x186a0", + "v" : "0x1c", + "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", + "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] + diff --git a/cmd/evm/testdata/12/alloc.json b/cmd/evm/testdata/12/alloc.json new file mode 100644 index 0000000000..3ed96894fb --- /dev/null +++ b/cmd/evm/testdata/12/alloc.json @@ -0,0 +1,11 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "84000000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + } +} + diff --git a/cmd/evm/testdata/12/env.json b/cmd/evm/testdata/12/env.json new file mode 100644 index 0000000000..8ae5465369 --- /dev/null +++ b/cmd/evm/testdata/12/env.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "currentBaseFee" : "0x20" +} + diff --git a/cmd/evm/testdata/12/readme.md b/cmd/evm/testdata/12/readme.md new file mode 100644 index 0000000000..b0177ecc24 --- /dev/null +++ b/cmd/evm/testdata/12/readme.md @@ -0,0 +1,40 @@ +## Test 1559 balance + gasCap + +This test contains an EIP-1559 consensus issue which happened on Ropsten, where +`geth` did not properly account for the value transfer while doing the check on `max_fee_per_gas * gas_limit`. 
+ +Before the issue was fixed, this invocation allowed the transaction to pass into a block: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +``` + +With the fix applied, the result is: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" +INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5 +INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs" +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ] + } +} +``` + +The transaction is rejected. 
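(For orientation only, not part of the patched files: a minimal stand-alone Go sketch of the post-fix check that the readme above describes. It is not the actual `core/state_transition.go` code; the function name is made up and the hard-coded numbers are simply the ones from `testdata/12`.)

```go
package main

import (
	"fmt"
	"math/big"
)

// checkUpfrontCost sketches the corrected EIP-1559 buy-gas rule: the sender
// must be able to cover maxFeePerGas * gasLimit *plus* the transferred value,
// not only the gas portion.
func checkUpfrontCost(balance, maxFeePerGas *big.Int, gasLimit uint64, value *big.Int) error {
	want := new(big.Int).Mul(maxFeePerGas, new(big.Int).SetUint64(gasLimit))
	want.Add(want, value)
	if balance.Cmp(want) < 0 {
		return fmt.Errorf("insufficient funds for gas * price + value: have %v want %v", balance, want)
	}
	return nil
}

func main() {
	// Numbers from testdata/12: balance 84,000,000 wei, gas 0x5208 (21000),
	// maxFeePerGas 0xfa0 (4000), value 0x20 (32).
	// 21000 * 4000 = 84,000,000, so the extra 32 wei of value tips the check over,
	// matching the "have 84000000 want 84000032" rejection above.
	fmt.Println(checkUpfrontCost(big.NewInt(84000000), big.NewInt(4000), 21000, big.NewInt(32)))
}
```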
\ No newline at end of file diff --git a/cmd/evm/testdata/12/txs.json b/cmd/evm/testdata/12/txs.json new file mode 100644 index 0000000000..cd683f271c --- /dev/null +++ b/cmd/evm/testdata/12/txs.json @@ -0,0 +1,20 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x20", + "accessList" : [ + ] + } +] + diff --git a/cmd/evm/testdata/13/alloc.json b/cmd/evm/testdata/13/alloc.json new file mode 100644 index 0000000000..6e98e7513c --- /dev/null +++ b/cmd/evm/testdata/13/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/13/env.json b/cmd/evm/testdata/13/env.json new file mode 100644 index 0000000000..3a82d46a77 --- /dev/null +++ b/cmd/evm/testdata/13/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/13/exp.json b/cmd/evm/testdata/13/exp.json new file mode 100644 index 0000000000..2b049dfb29 --- /dev/null +++ b/cmd/evm/testdata/13/exp.json @@ -0,0 +1,3 @@ +{ + "body": "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +} diff --git a/cmd/evm/testdata/13/exp2.json b/cmd/evm/testdata/13/exp2.json new file mode 100644 index 0000000000..ba8c9f865b --- /dev/null +++ b/cmd/evm/testdata/13/exp2.json @@ -0,0 +1,39 @@ +{ + "result": { + "stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61", + "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d", + "receiptsRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x84d0", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x84d0", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x109a0", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x84d0", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x109a0" + } +} diff --git a/cmd/evm/testdata/13/readme.md b/cmd/evm/testdata/13/readme.md new file mode 100644 index 0000000000..64f52fc9a9 --- /dev/null +++ b/cmd/evm/testdata/13/readme.md @@ -0,0 +1,4 @@ +## Input transactions in RLP form + +This testdata folder is used to examplify how transaction input can be provided in rlp form. +Please see the README in `evm` folder for how this is performed. 
\ No newline at end of file diff --git a/cmd/evm/testdata/13/signed_txs.rlp b/cmd/evm/testdata/13/signed_txs.rlp new file mode 100644 index 0000000000..9d1157ea45 --- /dev/null +++ b/cmd/evm/testdata/13/signed_txs.rlp @@ -0,0 +1 @@ +"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" \ No newline at end of file diff --git a/cmd/evm/testdata/13/txs.json b/cmd/evm/testdata/13/txs.json new file mode 100644 index 0000000000..c45ef1e13d --- /dev/null +++ b/cmd/evm/testdata/13/txs.json @@ -0,0 +1,34 @@ +[ + { + "input" : "0x", + "gas" : "0x84d0", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [] + }, + { + "input" : "0x", + "gas" : "0x84d0", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/14/alloc.json b/cmd/evm/testdata/14/alloc.json new file mode 100644 index 0000000000..cef1a25ff0 --- /dev/null +++ b/cmd/evm/testdata/14/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/14/env.json b/cmd/evm/testdata/14/env.json new file mode 100644 index 0000000000..0bf1c5cf48 --- /dev/null +++ b/cmd/evm/testdata/14/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "12800000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/14/env.uncles.json b/cmd/evm/testdata/14/env.uncles.json new file mode 100644 index 0000000000..83811b95ec --- /dev/null +++ b/cmd/evm/testdata/14/env.uncles.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "12800000", + "currentTimestamp": "100035", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000", + "parentUncleHash" : "0x000000000000000000000000000000000000000000000000000000000000beef" +} diff --git a/cmd/evm/testdata/14/exp.json b/cmd/evm/testdata/14/exp.json new file mode 100644 index 0000000000..9bf5635f5b --- /dev/null +++ b/cmd/evm/testdata/14/exp.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000020000000", + "receipts": [], + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/14/exp2.json b/cmd/evm/testdata/14/exp2.json new file mode 100644 index 0000000000..9c9025381f --- /dev/null +++ b/cmd/evm/testdata/14/exp2.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff8020000000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/14/exp_berlin.json b/cmd/evm/testdata/14/exp_berlin.json new file mode 100644 index 0000000000..c2bf953119 --- /dev/null +++ b/cmd/evm/testdata/14/exp_berlin.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff9000000000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/14/readme.md b/cmd/evm/testdata/14/readme.md new file mode 100644 index 0000000000..9d0dc9569c --- /dev/null +++ b/cmd/evm/testdata/14/readme.md @@ -0,0 +1,41 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller. 
+ +Calculating it (with an empty set of txs) using `London` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=London +INFO [08-30|20:43:09.352] Trie dumping started root=6f0588..7f4bdc +INFO [08-30|20:43:09.352] Trie dumping complete accounts=2 elapsed="82.533µs" +INFO [08-30|20:43:09.352] Wrote file file=alloc.json +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x2000020000000" + } +} +``` +Same thing, but this time providing a non-empty (and non-`emptyKeccak`) unclehash, which leads to a slightly different result: +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.uncles.json --output.result=stdout --state.fork=London +INFO [08-30|20:44:33.102] Trie dumping started root=6f0588..7f4bdc +INFO [08-30|20:44:33.102] Trie dumping complete accounts=2 elapsed="72.91µs" +INFO [08-30|20:44:33.102] Wrote file file=alloc.json +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff8020000000" + } +} +``` + diff --git a/cmd/evm/testdata/14/txs.json b/cmd/evm/testdata/14/txs.json new file mode 100644 index 0000000000..fe51488c70 --- /dev/null +++ b/cmd/evm/testdata/14/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/15/blockheader.rlp b/cmd/evm/testdata/15/blockheader.rlp new file mode 100644 index 0000000000..1124e8e2da --- /dev/null +++ b/cmd/evm/testdata/15/blockheader.rlp @@ -0,0 +1 @@ 
+"0xf901f0a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b0101020383010203a00000000000000000000000000000000000000000000000000000000000000000880000000000000000" \ No newline at end of file diff --git a/cmd/evm/testdata/15/exp.json b/cmd/evm/testdata/15/exp.json new file mode 100644 index 0000000000..1893fdfc08 --- /dev/null +++ b/cmd/evm/testdata/15/exp.json @@ -0,0 +1,10 @@ +[ + { + "error": "transaction type not supported", + "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476" + }, + { + "error": "transaction type not supported", + "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/15/exp2.json b/cmd/evm/testdata/15/exp2.json new file mode 100644 index 0000000000..dd5e8a358c --- /dev/null +++ b/cmd/evm/testdata/15/exp2.json @@ -0,0 +1,12 @@ +[ + { + "address": "0xd02d72e067e77158444ef2020ff2d325f929b363", + "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", + "intrinsicGas": "0x5208" + }, + { + "address": "0xd02d72e067e77158444ef2020ff2d325f929b363", + "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", + "intrinsicGas": "0x5208" + } +] diff --git a/cmd/evm/testdata/15/exp3.json b/cmd/evm/testdata/15/exp3.json new file mode 100644 index 0000000000..6c46d267cf --- /dev/null +++ b/cmd/evm/testdata/15/exp3.json @@ -0,0 +1,47 @@ +[ + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected input list for types.AccessListTx" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + } +] diff --git a/cmd/evm/testdata/15/signed_txs.rlp b/cmd/evm/testdata/15/signed_txs.rlp new file mode 100644 index 0000000000..9d1157ea45 --- /dev/null +++ b/cmd/evm/testdata/15/signed_txs.rlp @@ -0,0 +1 @@ 
+"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" \ No newline at end of file diff --git a/cmd/evm/testdata/15/signed_txs.rlp.json b/cmd/evm/testdata/15/signed_txs.rlp.json new file mode 100644 index 0000000000..187f40f24a --- /dev/null +++ b/cmd/evm/testdata/15/signed_txs.rlp.json @@ -0,0 +1,4 @@ +{ + "txsRlp" : "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +} + diff --git a/cmd/evm/testdata/16/exp.json b/cmd/evm/testdata/16/exp.json new file mode 100644 index 0000000000..137ade6513 --- /dev/null +++ b/cmd/evm/testdata/16/exp.json @@ -0,0 +1,13 @@ +[ + { + "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6", + "intrinsicGas": "0x5208" + }, + { + "error": "intrinsic gas too low: have 82, want 21000", + "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b", + "intrinsicGas": "0x5208" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/16/signed_txs.rlp b/cmd/evm/testdata/16/signed_txs.rlp new file mode 100644 index 0000000000..952ced2130 --- /dev/null +++ b/cmd/evm/testdata/16/signed_txs.rlp @@ -0,0 +1 @@ +"0xf8cab86401f8610180018252089411111111111111111111111111111111111111112080c001a0937f65ef1deece46c473b99962678fb7c38425cf303d1e8fa9717eb4b9d012b5a01940c5a5647c4940217ffde1051a5fd92ec8551e275c1787f81f50a2ad84de43b86201f85f018001529411111111111111111111111111111111111111112080c001a0241c3aec732205542a87fef8c76346741e85480bce5a42d05a9a73dac892f84ca04f52e2dfce57f3a02ed10e085e1a154edf38a726da34127c85fc53b4921759c8" \ No newline at end of file diff --git a/cmd/evm/testdata/16/unsigned_txs.json b/cmd/evm/testdata/16/unsigned_txs.json new file mode 100644 index 0000000000..f619589406 --- /dev/null +++ b/cmd/evm/testdata/16/unsigned_txs.json @@ -0,0 +1,34 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x1", + "gasPrice": "0x1", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x52", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x1", + "gasPrice": "0x1", + "accessList" : [ + ] + } +] diff --git a/cmd/evm/testdata/17/exp.json b/cmd/evm/testdata/17/exp.json new file mode 100644 index 0000000000..485906041b --- /dev/null +++ b/cmd/evm/testdata/17/exp.json @@ -0,0 +1,22 @@ + [ + { + "error": "value 
exceeds 256 bits", + "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82", + "intrinsicGas": "0x5208" + }, + { + "error": "gasPrice exceeds 256 bits", + "address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964", + "hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5", + "intrinsicGas": "0x5208" + }, + { + "error": "invalid transaction v, r, s values", + "hash": "0xf06691c2a803ab7f3c81d06a0c0a896f80f311105c599fc59a9fdbc669356d35" + }, + { + "error": "invalid transaction v, r, s values", + "hash": "0x84703b697ad5b0db25e4f1f98fb6b1adce85b9edb2232eeba9cedd8c6601694b" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/17/rlpdata.txt b/cmd/evm/testdata/17/rlpdata.txt new file mode 100644 index 0000000000..874461fd76 --- /dev/null +++ b/cmd/evm/testdata/17/rlpdata.txt @@ -0,0 +1,46 @@ +[ + [ + "", + "d", + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 010000000000000000000000000000000000000000000000000000000000000001, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28, + ], + [ + "", + 010000000000000000000000000000000000000000000000000000000000000001, + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 11, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28, + ], + [ + "", + 11, + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 11, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daa, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28, + ], + [ + "", + 11, + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 11, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb, + ], +] diff --git a/cmd/evm/testdata/17/signed_txs.rlp b/cmd/evm/testdata/17/signed_txs.rlp new file mode 100644 index 0000000000..0e351fb03c --- /dev/null +++ b/cmd/evm/testdata/17/signed_txs.rlp @@ -0,0 +1 @@ +"0xf901c8f880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f88080a101000000000000000000000000000000000000000000000000000000000000000182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba1c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daaa06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da16180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb" \ No newline at end of file diff --git a/cmd/evm/testdata/18/README.md b/cmd/evm/testdata/18/README.md new file mode 100644 index 0000000000..360a9bba01 --- /dev/null +++ b/cmd/evm/testdata/18/README.md @@ -0,0 +1,9 @@ +# Invalid rlp + +This folder contains a sample of invalid RLP, and it's expected +that the t9n handles this properly: + +``` +$ go run . 
t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London +ERROR(11): rlp: value size exceeds available input length +``` \ No newline at end of file diff --git a/cmd/evm/testdata/18/invalid.rlp b/cmd/evm/testdata/18/invalid.rlp new file mode 100644 index 0000000000..7ff2824caf --- /dev/null +++ b/cmd/evm/testdata/18/invalid.rlp @@ -0,0 +1 @@ +"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3" \ No newline at end of file diff --git a/cmd/evm/testdata/19/alloc.json b/cmd/evm/testdata/19/alloc.json new file mode 100644 index 0000000000..cef1a25ff0 --- /dev/null +++ b/cmd/evm/testdata/19/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/env.json b/cmd/evm/testdata/19/env.json new file mode 100644 index 0000000000..0c64392aff --- /dev/null +++ b/cmd/evm/testdata/19/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "13000000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/19/exp_arrowglacier.json b/cmd/evm/testdata/19/exp_arrowglacier.json new file mode 100644 index 0000000000..9cf56ffafc --- /dev/null +++ b/cmd/evm/testdata/19/exp_arrowglacier.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000000200000", + "receipts": [], + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/exp_london.json b/cmd/evm/testdata/19/exp_london.json new file mode 100644 index 0000000000..a06bc8ca69 --- /dev/null +++ b/cmd/evm/testdata/19/exp_london.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000080000000", + "receipts": [], + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/readme.md b/cmd/evm/testdata/19/readme.md new file mode 100644 index 0000000000..5fae183f48 --- /dev/null +++ b/cmd/evm/testdata/19/readme.md @@ -0,0 +1,9 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, +this time on `ArrowGlacier` (Eip 4345). + +Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier +``` \ No newline at end of file diff --git a/cmd/evm/testdata/19/txs.json b/cmd/evm/testdata/19/txs.json new file mode 100644 index 0000000000..fe51488c70 --- /dev/null +++ b/cmd/evm/testdata/19/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/20/exp.json b/cmd/evm/testdata/20/exp.json new file mode 100644 index 0000000000..7bec6cefd6 --- /dev/null +++ b/cmd/evm/testdata/20/exp.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf902d9f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8f8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600c0", + "hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899" +} diff --git a/cmd/evm/testdata/20/header.json b/cmd/evm/testdata/20/header.json new file mode 100644 index 0000000000..fb9b7fc563 --- /dev/null +++ b/cmd/evm/testdata/20/header.json @@ -0,0 +1,14 @@ +{ + "parentHash": 
"0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e", + "miner": "0xe997a23b159e2e2a5ce72333262972374b15425c", + "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x1000", + "number": "0xc3be", + "gasLimit": "0x50785", + "gasUsed": "0x0", + "timestamp": "0x55c5277e", + "extraData": "0x476574682f76312e302e312f6c696e75782f676f312e342e32", + "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf", + "nonce": "0x97435673d874f7c8" +} diff --git a/cmd/evm/testdata/20/ommers.json b/cmd/evm/testdata/20/ommers.json new file mode 100644 index 0000000000..fe51488c70 --- /dev/null +++ b/cmd/evm/testdata/20/ommers.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/20/readme.md b/cmd/evm/testdata/20/readme.md new file mode 100644 index 0000000000..2c448a96e6 --- /dev/null +++ b/cmd/evm/testdata/20/readme.md @@ -0,0 +1,11 @@ +# Block building + +This test shows how `b11r` can be used to assemble an unsealed block. + +```console +$ go run . b11r --input.header=testdata/20/header.json --input.txs=testdata/20/txs.rlp --input.ommers=testdata/20/ommers.json --output.block=stdout +{ + "rlp": "0xf90216f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8c0c0", + "hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899" +} +``` diff --git a/cmd/evm/testdata/20/txs.rlp b/cmd/evm/testdata/20/txs.rlp new file mode 100644 index 0000000000..3599ff0654 --- /dev/null +++ b/cmd/evm/testdata/20/txs.rlp @@ -0,0 +1 @@ +"0xf8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600" \ No newline at end of file diff --git a/cmd/evm/testdata/21/clique.json 
b/cmd/evm/testdata/21/clique.json new file mode 100644 index 0000000000..84fa259a0d --- /dev/null +++ b/cmd/evm/testdata/21/clique.json @@ -0,0 +1,6 @@ +{ + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "voted": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "authorize": false, + "vanity": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +} diff --git a/cmd/evm/testdata/21/exp-clique.json b/cmd/evm/testdata/21/exp-clique.json new file mode 100644 index 0000000000..c990ba8aa6 --- /dev/null +++ b/cmd/evm/testdata/21/exp-clique.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0", + "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7" +} diff --git a/cmd/evm/testdata/21/exp.json b/cmd/evm/testdata/21/exp.json new file mode 100644 index 0000000000..b3e5e7a831 --- /dev/null +++ b/cmd/evm/testdata/21/exp.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0", + "hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb" +} diff --git a/cmd/evm/testdata/21/header.json b/cmd/evm/testdata/21/header.json new file mode 100644 index 0000000000..62abe3cc2c --- /dev/null +++ b/cmd/evm/testdata/21/header.json @@ -0,0 +1,11 @@ +{ + "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e", + "stateRoot": 
"0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x1000", + "number": "0xc3be", + "gasLimit": "0x50785", + "gasUsed": "0x0", + "timestamp": "0x55c5277e", + "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf" +} diff --git a/cmd/evm/testdata/21/ommers.json b/cmd/evm/testdata/21/ommers.json new file mode 100644 index 0000000000..fe51488c70 --- /dev/null +++ b/cmd/evm/testdata/21/ommers.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/21/readme.md b/cmd/evm/testdata/21/readme.md new file mode 100644 index 0000000000..b70f106ffc --- /dev/null +++ b/cmd/evm/testdata/21/readme.md @@ -0,0 +1,23 @@ +# Sealed block building + +This test shows how `b11r` can be used to assemble a sealed block. + +## Ethash + +```console +$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.ethash --seal.ethash.mode=test --output.block=stdout +{ + "rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0", + "hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb" +} +``` + +## Clique + +```console +$ go run . 
b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.clique=testdata/21/clique.json --output.block=stdout +{ + "rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0", + "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7" +} +``` diff --git a/cmd/evm/testdata/21/txs.rlp b/cmd/evm/testdata/21/txs.rlp new file mode 100644 index 0000000000..e815397b33 --- /dev/null +++ b/cmd/evm/testdata/21/txs.rlp @@ -0,0 +1 @@ +"c0" diff --git a/cmd/evm/testdata/22/exp-clique.json b/cmd/evm/testdata/22/exp-clique.json new file mode 100644 index 0000000000..c990ba8aa6 --- /dev/null +++ b/cmd/evm/testdata/22/exp-clique.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0", + "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7" +} diff --git a/cmd/evm/testdata/22/exp.json b/cmd/evm/testdata/22/exp.json new file mode 100644 index 0000000000..14fd81997d --- /dev/null +++ b/cmd/evm/testdata/22/exp.json @@ -0,0 +1,4 @@ +{ + "rlp": 
"0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000", + "hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755" +} diff --git a/cmd/evm/testdata/22/header.json b/cmd/evm/testdata/22/header.json new file mode 100644 index 0000000000..62abe3cc2c --- /dev/null +++ b/cmd/evm/testdata/22/header.json @@ -0,0 +1,11 @@ +{ + "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e", + "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x1000", + "number": "0xc3be", + "gasLimit": "0x50785", + "gasUsed": "0x0", + "timestamp": "0x55c5277e", + "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf" +} diff --git a/cmd/evm/testdata/22/ommers.json b/cmd/evm/testdata/22/ommers.json new file mode 100644 index 0000000000..997015b3ce --- /dev/null +++ b/cmd/evm/testdata/22/ommers.json @@ -0,0 +1 @@ +["0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0","0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0"] diff --git a/cmd/evm/testdata/22/readme.md b/cmd/evm/testdata/22/readme.md new file mode 100644 index 0000000000..2cac8a2434 --- /dev/null +++ b/cmd/evm/testdata/22/readme.md @@ -0,0 +1,11 @@ +# Building blocks with ommers + +This test shows how `b11r` can chain together ommer assembles into a canonical block. + +```console +$ echo "{ \"ommers\": [`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`,`go run . 
b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`]}" | go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --input.ommers=stdin --output.block=stdout +{ + "rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000", + "hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755" +} +``` diff --git a/cmd/evm/testdata/22/txs.rlp b/cmd/evm/testdata/22/txs.rlp new file mode 100644 index 0000000000..e815397b33 --- /dev/null +++ 
b/cmd/evm/testdata/22/txs.rlp @@ -0,0 +1 @@ +"c0" diff --git a/cmd/evm/testdata/23/alloc.json b/cmd/evm/testdata/23/alloc.json new file mode 100644 index 0000000000..239b3553f9 --- /dev/null +++ b/cmd/evm/testdata/23/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x6001", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} diff --git a/cmd/evm/testdata/23/env.json b/cmd/evm/testdata/23/env.json new file mode 100644 index 0000000000..1b46321512 --- /dev/null +++ b/cmd/evm/testdata/23/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x05", + "currentTimestamp" : "0x03e8" +} diff --git a/cmd/evm/testdata/23/exp.json b/cmd/evm/testdata/23/exp.json new file mode 100644 index 0000000000..e51f37d9c7 --- /dev/null +++ b/cmd/evm/testdata/23/exp.json @@ -0,0 +1,25 @@ +{ + "result": { + "stateRoot": "0x65334305e4accfa18352deb24f007b837b5036425b0712cf0e65a43bfa95154d", + "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", + "receiptsRoot": "0xf951f9396af203499cc7d379715a9110323de73967c5700e2f424725446a3c76", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x520b", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x520b", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x520b" + } +} diff --git a/cmd/evm/testdata/23/readme.md b/cmd/evm/testdata/23/readme.md new file mode 100644 index 0000000000..85fe8db66c --- /dev/null +++ b/cmd/evm/testdata/23/readme.md @@ -0,0 +1 @@ +These files examplify how to sign a transaction using the pre-EIP155 scheme. 
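As a companion to the pre-EIP-155 note above, here is a minimal, illustrative Go sketch (not part of this change; it only assumes go-ethereum's `core/types` and `crypto` packages and the key/fields from the accompanying `testdata/23/txs.json`). `HomesteadSigner` produces an "unprotected" signature whose `v` is 27/28 and encodes no chain ID, which appears to be what `"protected": false` requests from `t8n`:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Key and fields mirror testdata/23/txs.json.
	key, err := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	if err != nil {
		panic(err)
	}
	to := common.HexToAddress("0x095e7baea6a6c7c4c2dfeb977efac326af552d87")
	tx := types.NewTx(&types.LegacyTx{
		Nonce:    0,
		GasPrice: big.NewInt(1),
		Gas:      0x5f5e100,
		To:       &to,
		Value:    big.NewInt(0x186a0),
	})

	// HomesteadSigner signs without EIP-155 replay protection:
	// v ends up as 27 or 28 and does not commit to a chain ID.
	signed, err := types.SignTx(tx, types.HomesteadSigner{}, key)
	if err != nil {
		panic(err)
	}
	v, _, _ := signed.RawSignatureValues()
	fmt.Println("hash:", signed.Hash(), "v:", v)
}
```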
diff --git a/cmd/evm/testdata/23/txs.json b/cmd/evm/testdata/23/txs.json new file mode 100644 index 0000000000..22f3840f84 --- /dev/null +++ b/cmd/evm/testdata/23/txs.json @@ -0,0 +1,15 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "protected": false + } +] diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json new file mode 100644 index 0000000000..71b3d2f559 --- /dev/null +++ b/cmd/evm/testdata/3/exp.json @@ -0,0 +1,38 @@ +{ + "alloc": { + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { + "code": "0x600140", + "balance": "0xde0b6b3a76586a0" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x521f" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xde0b6b3a7622741", + "nonce": "0x1" + } + }, + "result": { + "stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1", + "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", + "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x521f", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x521f", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x521f" + } +} diff --git a/cmd/evm/testdata/5/exp.json b/cmd/evm/testdata/5/exp.json new file mode 100644 index 0000000000..7d715672c5 --- /dev/null +++ b/cmd/evm/testdata/5/exp.json @@ -0,0 +1,23 @@ +{ + "alloc": { + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { + "balance": "0x88" + }, + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { + "balance": "0x70" + }, + "0xcccccccccccccccccccccccccccccccccccccccc": { + "balance": "0x60" + } + }, + "result": { + "stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json new file mode 100644 index 0000000000..c14e38e845 --- /dev/null +++ b/cmd/evm/testdata/9/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x58585454", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000", + "nonce": "0x00" + } +} diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json new file mode 100644 index 0000000000..ec5164b995 --- /dev/null +++ b/cmd/evm/testdata/9/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasTarget": "0x1000000000", + "currentBaseFee": "0x3B9ACA00", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md new file mode 100644 index 0000000000..88f0f12aaa --- /dev/null +++ b/cmd/evm/testdata/9/readme.md @@ -0,0 +1,75 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are two transactions, each invokes the contract above. + +1. EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` +2. 
Legacy transaction + +## Execution + +Running it yields: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa +ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +We can also get the post-alloc: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0xbfc02677a000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff104fcfea7800", + "nonce": "0x2" + } + } +} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported +``` + +It fails because `evm t8n` cannot sign the transactions with the given signer. We can bypass that, however, +by feeding it presigned transactions, located in `txs_signed.json`. + +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json +INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" +INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [05-07|12:28:42.073] Wrote file file=alloc.json +INFO [05-07|12:28:42.073] Wrote file file=result.json +``` + +Transaction `0` is not applicable under Berlin rules, and therefore transaction `1` has the wrong nonce, so both are rejected. 
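For reference, a minimal Go sketch of the first (EIP-1559) transaction from `testdata/9/txs.json` is shown below. This is an illustration only, assuming go-ethereum's `core/types` and `crypto` packages; it is not how `t8n` wires transactions internally:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Fields mirror the first entry of testdata/9/txs.json.
	key, err := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	if err != nil {
		panic(err)
	}
	to := common.HexToAddress("0x000000000000000000000000000000000000aaaa")

	tx := types.NewTx(&types.DynamicFeeTx{
		ChainID:   big.NewInt(1),
		Nonce:     0,
		GasTipCap: big.NewInt(2),           // maxPriorityFeePerGas
		GasFeeCap: big.NewInt(0x12A05F200), // maxFeePerGas
		Gas:       0x4ef00,
		To:        &to,
		Value:     big.NewInt(0),
		AccessList: types.AccessList{{
			Address:     to,
			StorageKeys: []common.Hash{{}}, // pre-warm the 0x00..00 slot
		}},
	})

	// A London-era signer is required; a pre-London signer cannot sign a
	// type-2 transaction, which mirrors why the Berlin run above rejects it.
	signed, err := types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1)), key)
	if err != nil {
		panic(err)
	}
	fmt.Println("type:", signed.Type(), "hash:", signed.Hash())
}
```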
+ diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json new file mode 100644 index 0000000000..740abce079 --- /dev/null +++ b/cmd/evm/testdata/9/txs.json @@ -0,0 +1,37 @@ +[ + { + "gas": "0x4ef00", + "maxPriorityFeePerGas": "0x2", + "maxFeePerGas": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "type" : "0x2", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh index 34c9249855..250238d169 100644 --- a/cmd/evm/transition-test.sh +++ b/cmd/evm/transition-test.sh @@ -11,6 +11,8 @@ function showjson(){ function demo(){ echo "$ticks" echo "$1" + $1 + echo "" echo "$ticks" echo "" } @@ -152,9 +154,7 @@ echo "" echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`." echo "If a required blockhash is not provided, the exit code should be \`4\`:" echo "Example where blockhashes are provided: " -cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" -tick && echo $cmd && tick -$cmd 2>&1 >/dev/null +demo "./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" tick && echo $cmd && tick echo "$ticks" @@ -164,13 +164,11 @@ echo "" echo "In this example, the caller has not provided the required blockhash:" cmd="./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace" -tick && echo $cmd && tick -tick -$cmd +tick && echo $cmd && $cmd errc=$? tick echo "Error code: $errc" - +echo "" echo "### Chaining" echo "" @@ -189,3 +187,28 @@ echo "" echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the" echo "actual blocknumber (exposed to the EVM) would not increase." echo "" + +echo "### Transactions in RLP form" +echo "" +echo "It is possible to provide already-signed transactions as input to, using an \`input.txs\` which ends with the \`rlp\` suffix." +echo "The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible" +echo "to use the evm to go from \`json\` input to \`rlp\` input." +echo "" +echo "The following command takes **json** the transactions in \`./testdata/13/txs.json\` and signs them. 
After execution, they are output to \`signed_txs.rlp\`.:" +demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp" +echo "The \`output.body\` is the rlp-list of transactions, encoded in hex and placed in a string a'la \`json\` encoding rules:" +demo "cat signed_txs.rlp" +echo "We can use \`rlpdump\` to check what the contents are: " +echo "$ticks" +echo "rlpdump -hex \$(cat signed_txs.rlp | jq -r )" +rlpdump -hex $(cat signed_txs.rlp | jq -r ) +echo "$ticks" +echo "Now, we can now use those (or any other already signed transactions), as input, like so: " +demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json" + +echo "You might have noticed that the results from these two invocations were stored in two separate files. " +echo "And we can now finally check that they match." +echo "$ticks" +echo "cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot" +cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot +echo "$ticks" diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 500a1e920f..5475399359 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -802,7 +802,7 @@ func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, c return "", "", "", common.Address{}, errors.New("No Binance Smart Chain address found to fund") } var avatar string - if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { + if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 { avatar = parts[1] } return username + "@twitter", username, avatar, address, nil @@ -928,7 +928,7 @@ func authFacebook(url string) (string, string, common.Address, error) { return "", "", common.Address{}, errors.New("No Binance Smart Chain address found to fund") } var avatar string - if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { + if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 { avatar = parts[1] } return username + "@facebook", avatar, address, nil diff --git a/cmd/faucet/faucet_test.go b/cmd/faucet/faucet_test.go new file mode 100644 index 0000000000..58a1f22b54 --- /dev/null +++ b/cmd/faucet/faucet_test.go @@ -0,0 +1,45 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +func TestFacebook(t *testing.T) { + // TODO: Remove facebook auth or implement facebook api, which seems to require an API key + t.Skipf("The facebook access is flaky, needs to be reimplemented or removed") + for _, tt := range []struct { + url string + want common.Address + }{ + { + "https://www.facebook.com/fooz.gazonk/posts/2837228539847129", + common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"), + }, + } { + _, _, gotAddress, err := authFacebook(tt.url) + if err != nil { + t.Fatal(err) + } + if gotAddress != tt.want { + t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want) + } + } +} diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index 6d45c88763..e33b9eb0fb 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -268,11 +268,16 @@ func accountCreate(ctx *cli.Context) error { } } utils.SetNodeConfig(ctx, &cfg.Node) - scryptN, scryptP, keydir, err := cfg.Node.AccountConfig() - + keydir, err := cfg.Node.KeyDirConfig() if err != nil { utils.Fatalf("Failed to read configuration: %v", err) } + scryptN := keystore.StandardScryptN + scryptP := keystore.StandardScryptP + if cfg.Node.UseLightweightKDF { + scryptN = keystore.LightScryptN + scryptP = keystore.LightScryptP + } password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go index 9455eeda36..3a71b85716 100644 --- a/cmd/geth/accountcmd_test.go +++ b/cmd/geth/accountcmd_test.go @@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) { if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil { t.Error(err) } - geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile) + geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile) defer geth.ExpectExit() geth.Expect(expected) } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index a30efbbdea..911e802586 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -18,6 +18,7 @@ package main import ( "encoding/json" + "errors" "fmt" "net" "os" @@ -30,10 +31,13 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" @@ -83,9 +87,9 @@ It expects the genesis file as argument.`, Flags: []cli.Flag{ utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -109,11 +113,15 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to utils.MetricsHTTPFlag, utils.MetricsPortFlag, utils.MetricsEnableInfluxDBFlag, + utils.MetricsEnableInfluxDBV2Flag, utils.MetricsInfluxDBEndpointFlag, utils.MetricsInfluxDBDatabaseFlag, utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBTagsFlag, + utils.MetricsInfluxDBTokenFlag, + 
utils.MetricsInfluxDBBucketFlag, + utils.MetricsInfluxDBOrganizationFlag, utils.TxLookupLimitFlag, }, Category: "BLOCKCHAIN COMMANDS", @@ -154,7 +162,9 @@ be gzipped.`, }, Category: "BLOCKCHAIN COMMANDS", Description: ` - The import-preimages command imports hash preimages from an RLP encoded stream.`, +The import-preimages command imports hash preimages from an RLP encoded stream. +It's deprecated, please use "geth db import" instead. +`, } exportPreimagesCommand = cli.Command{ Action: utils.MigrateFlags(exportPreimages), @@ -168,26 +178,29 @@ be gzipped.`, }, Category: "BLOCKCHAIN COMMANDS", Description: ` -The export-preimages command export hash preimages to an RLP encoded stream`, +The export-preimages command exports hash preimages to an RLP encoded stream. +It's deprecated, please use "geth db export" instead. +`, } dumpCommand = cli.Command{ Action: utils.MigrateFlags(dump), Name: "dump", Usage: "Dump a specific block from storage", - ArgsUsage: "[ | ]...", + ArgsUsage: "[? | ]", Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, - utils.SyncModeFlag, utils.IterativeOutputFlag, utils.ExcludeCodeFlag, utils.ExcludeStorageFlag, utils.IncludeIncompletesFlag, + utils.StartKeyFlag, + utils.DumpLimitFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` -The arguments are interpreted as block numbers or hashes. -Use "ethereum dump 0" to dump the genesis block.`, +This command dumps out the state for a given block (or latest, if none provided). +`, } ) @@ -473,7 +486,6 @@ func exportPreimages(ctx *cli.Context) error { if len(ctx.Args()) < 1 { utils.Fatalf("This command requires an argument.") } - stack, _ := makeConfigNode(ctx) defer stack.Close() @@ -487,47 +499,85 @@ func exportPreimages(ctx *cli.Context) error { return nil } -func dump(ctx *cli.Context) error { - stack, _ := makeConfigNode(ctx) - defer stack.Close() - +func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { db := utils.MakeChainDatabase(ctx, stack, true, false) - for _, arg := range ctx.Args() { - var header *types.Header + var header *types.Header + if ctx.NArg() > 1 { + return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg()) + } + if ctx.NArg() == 1 { + arg := ctx.Args().First() if hashish(arg) { hash := common.HexToHash(arg) - number := rawdb.ReadHeaderNumber(db, hash) - if number != nil { + if number := rawdb.ReadHeaderNumber(db, hash); number != nil { header = rawdb.ReadHeader(db, hash, *number) + } else { + return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash) } } else { - number, _ := strconv.Atoi(arg) - hash := rawdb.ReadCanonicalHash(db, uint64(number)) - if hash != (common.Hash{}) { - header = rawdb.ReadHeader(db, hash, uint64(number)) - } - } - if header == nil { - fmt.Println("{}") - utils.Fatalf("block not found") - } else { - state, err := state.New(header.Root, state.NewDatabase(db), nil) + number, err := strconv.Atoi(arg) if err != nil { - utils.Fatalf("could not create new state: %v", err) + return nil, nil, common.Hash{}, err } - excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name) - excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) - includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) - if ctx.Bool(utils.IterativeOutputFlag.Name) { - state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) + if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) { + header = rawdb.ReadHeader(db, hash, 
uint64(number)) } else { - if includeMissing { - fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" + - " otherwise the accounts will overwrite each other in the resulting mapping.") - } - fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) + return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number) } } + } else { + // Use latest + header = rawdb.ReadHeadHeader(db) + } + if header == nil { + return nil, nil, common.Hash{}, errors.New("no head block found") + } + startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name)) + var start common.Hash + switch len(startArg) { + case 0: // common.Hash + case 32: + start = common.BytesToHash(startArg) + case 20: + start = crypto.Keccak256Hash(startArg) + log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex()) + default: + return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg) + } + var conf = &state.DumpConfig{ + SkipCode: ctx.Bool(utils.ExcludeCodeFlag.Name), + SkipStorage: ctx.Bool(utils.ExcludeStorageFlag.Name), + OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name), + Start: start.Bytes(), + Max: ctx.Uint64(utils.DumpLimitFlag.Name), + } + log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(), + "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage, + "start", hexutil.Encode(conf.Start), "limit", conf.Max) + return conf, db, header.Root, nil +} + +func dump(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + conf, db, root, err := parseDumpConfig(ctx, stack) + if err != nil { + return err + } + state, err := state.New(root, state.NewDatabase(db), nil) + if err != nil { + return err + } + if ctx.Bool(utils.IterativeOutputFlag.Name) { + state.IterativeDump(conf, json.NewEncoder(os.Stdout)) + } else { + if conf.OnlyWithAddresses { + fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+ + " otherwise the accounts will overwrite each other in the resulting mapping.") + return fmt.Errorf("incompatible options") + } + fmt.Println(string(state.Dump(conf))) } return nil } diff --git a/cmd/geth/config.go b/cmd/geth/config.go index c867877ee6..00f2cebe34 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -27,10 +27,14 @@ import ( "gopkg.in/urfave/cli.v1" + "github.com/ethereum/go-ethereum/accounts/external" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/accounts/scwallet" + "github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" @@ -63,7 +67,12 @@ var tomlSettings = toml.Config{ return field }, MissingField: func(rt reflect.Type, field string) error { - link := "" + id := fmt.Sprintf("%s.%s", rt.String(), field) + if deprecated(id) { + log.Warn("Config field is deprecated and won't have an effect", "name", id) + return nil + } + var link string if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" { link = fmt.Sprintf(", see https://godoc.org/%s#%s for available fields", 
rt.PkgPath(), rt.Name()) } @@ -129,6 +138,11 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { if err != nil { utils.Fatalf("Failed to create the protocol stack: %v", err) } + // Node doesn't by default populate account manager backends + if err := setAccountManagerBackends(stack); err != nil { + utils.Fatalf("Failed to set account manager backends: %v", err) + } + utils.SetEthConfig(ctx, stack, &cfg.Eth) if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) @@ -144,17 +158,13 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { if ctx.GlobalIsSet(utils.OverrideBerlinFlag.Name) { cfg.Eth.OverrideBerlin = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideBerlinFlag.Name)) } - backend, eth := utils.RegisterEthService(stack, &cfg.Eth) - - // Configure catalyst. - if ctx.GlobalBool(utils.CatalystFlag.Name) { - if eth == nil { - utils.Fatalf("Catalyst does not work in light client mode.") - } - if err := catalyst.Register(stack, eth); err != nil { - utils.Fatalf("%v", err) - } + if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) { + cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name)) + } + if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) { + cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name)) } + backend, _ := utils.RegisterEthService(stack, &cfg.Eth) // Configure GraphQL if requested if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { @@ -227,4 +237,86 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) { cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name) } + if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBV2Flag.Name) { + cfg.Metrics.EnableInfluxDBV2 = ctx.GlobalBool(utils.MetricsEnableInfluxDBV2Flag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBTokenFlag.Name) { + cfg.Metrics.InfluxDBToken = ctx.GlobalString(utils.MetricsInfluxDBTokenFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBBucketFlag.Name) { + cfg.Metrics.InfluxDBBucket = ctx.GlobalString(utils.MetricsInfluxDBBucketFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBOrganizationFlag.Name) { + cfg.Metrics.InfluxDBOrganization = ctx.GlobalString(utils.MetricsInfluxDBOrganizationFlag.Name) + } +} + +func deprecated(field string) bool { + switch field { + case "ethconfig.Config.EVMInterpreter": + return true + case "ethconfig.Config.EWASMInterpreter": + return true + default: + return false + } +} + +func setAccountManagerBackends(stack *node.Node) error { + conf := stack.Config() + am := stack.AccountManager() + keydir := stack.KeyStoreDir() + scryptN := keystore.StandardScryptN + scryptP := keystore.StandardScryptP + if conf.UseLightweightKDF { + scryptN = keystore.LightScryptN + scryptP = keystore.LightScryptP + } + + // Assemble the supported backends + if len(conf.ExternalSigner) > 0 { + log.Info("Using external signer", "url", conf.ExternalSigner) + if extapi, err := external.NewExternalBackend(conf.ExternalSigner); err == nil { + am.AddBackend(extapi) + return nil + } else { + return fmt.Errorf("error connecting to external signer: %v", err) + } + } + + // For now, we're using EITHER external signer OR local signers. 
+ // If/when we implement some form of lockfile for USB and keystore wallets, + // we can have both, but it's very confusing for the user to see the same + // accounts in both externally and locally, plus very racey. + am.AddBackend(keystore.NewKeyStore(keydir, scryptN, scryptP)) + if conf.USB { + // Start a USB hub for Ledger hardware wallets + if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil { + log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err)) + } else { + am.AddBackend(ledgerhub) + } + // Start a USB hub for Trezor hardware wallets (HID version) + if trezorhub, err := usbwallet.NewTrezorHubWithHID(); err != nil { + log.Warn(fmt.Sprintf("Failed to start HID Trezor hub, disabling: %v", err)) + } else { + am.AddBackend(trezorhub) + } + // Start a USB hub for Trezor hardware wallets (WebUSB version) + if trezorhub, err := usbwallet.NewTrezorHubWithWebUSB(); err != nil { + log.Warn(fmt.Sprintf("Failed to start WebUSB Trezor hub, disabling: %v", err)) + } else { + am.AddBackend(trezorhub) + } + } + if len(conf.SmartCardDaemonPath) > 0 { + // Start a smart card hub + if schub, err := scwallet.NewHub(conf.SmartCardDaemonPath, scwallet.Scheme, keydir); err != nil { + log.Warn(fmt.Sprintf("Failed to start smart card hub, disabling: %v", err)) + } else { + am.AddBackend(schub) + } + } + + return nil } diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 9d8794eb15..7a0135b9a1 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -77,13 +77,13 @@ func localConsole(ctx *cli.Context) error { // Create and start the node based on the CLI flags prepare(ctx) stack, backend := makeFullNode(ctx) - startNode(ctx, stack, backend) + startNode(ctx, stack, backend, true) defer stack.Close() - // Attach to the newly started node and start the JavaScript console + // Attach to the newly started node and create the JavaScript console. client, err := stack.Attach() if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) + return fmt.Errorf("Failed to attach to the inproc geth: %v", err) } config := console.Config{ DataDir: utils.MakeDataDir(ctx), @@ -91,29 +91,34 @@ func localConsole(ctx *cli.Context) error { Client: client, Preload: utils.MakeConsolePreloads(ctx), } - console, err := console.New(config) if err != nil { - utils.Fatalf("Failed to start the JavaScript console: %v", err) + return fmt.Errorf("Failed to start the JavaScript console: %v", err) } defer console.Stop(false) - // If only a short execution was requested, evaluate and return + // If only a short execution was requested, evaluate and return. if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { console.Evaluate(script) return nil } - // Otherwise print the welcome screen and enter interactive mode + + // Track node shutdown and stop the console when it goes down. + // This happens when SIGTERM is sent to the process. + go func() { + stack.Wait() + console.StopInteractive() + }() + + // Print the welcome screen and enter interactive mode. console.Welcome() console.Interactive() - return nil } // remoteConsole will connect to a remote geth instance, attaching a JavaScript // console to it. 
func remoteConsole(ctx *cli.Context) error { - // Attach to a remotely running geth instance and start the JavaScript console endpoint := ctx.Args().First() if endpoint == "" { path := node.DefaultDataDir() @@ -134,8 +139,8 @@ func remoteConsole(ctx *cli.Context) error { path = filepath.Join(path, "rinkeby") } else if ctx.GlobalBool(utils.GoerliFlag.Name) { path = filepath.Join(path, "goerli") - } else if ctx.GlobalBool(utils.YoloV3Flag.Name) { - path = filepath.Join(path, "yolo-v3") + } else if ctx.GlobalBool(utils.SepoliaFlag.Name) { + path = filepath.Join(path, "sepolia") } } endpoint = fmt.Sprintf("%s/geth.ipc", path) @@ -150,7 +155,6 @@ func remoteConsole(ctx *cli.Context) error { Client: client, Preload: utils.MakeConsolePreloads(ctx), } - console, err := console.New(config) if err != nil { utils.Fatalf("Failed to start the JavaScript console: %v", err) @@ -165,13 +169,12 @@ func remoteConsole(ctx *cli.Context) error { // Otherwise print the welcome screen and enter interactive mode console.Welcome() console.Interactive() - return nil } // dialRPC returns a RPC client which connects to the given endpoint. // The check for empty endpoint implements the defaulting logic -// for "geth attach" and "geth monitor" with no argument. +// for "geth attach" with no argument. func dialRPC(endpoint string) (*rpc.Client, error) { if endpoint == "" { endpoint = node.DefaultIPCEndpoint(clientIdentifier) @@ -189,13 +192,13 @@ func dialRPC(endpoint string) (*rpc.Client, error) { func ephemeralConsole(ctx *cli.Context) error { // Create and start the node based on the CLI flags stack, backend := makeFullNode(ctx) - startNode(ctx, stack, backend) + startNode(ctx, stack, backend, false) defer stack.Close() // Attach to the newly started node and start the JavaScript console client, err := stack.Attach() if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) + return fmt.Errorf("Failed to attach to the inproc geth: %v", err) } config := console.Config{ DataDir: utils.MakeDataDir(ctx), @@ -206,22 +209,24 @@ func ephemeralConsole(ctx *cli.Context) error { console, err := console.New(config) if err != nil { - utils.Fatalf("Failed to start the JavaScript console: %v", err) + return fmt.Errorf("Failed to start the JavaScript console: %v", err) } defer console.Stop(false) - // Evaluate each of the specified JavaScript files + // Interrupt the JS interpreter when node is stopped. + go func() { + stack.Wait() + console.Stop(false) + }() + + // Evaluate each of the specified JavaScript files. for _, file := range ctx.Args() { if err = console.Execute(file); err != nil { - utils.Fatalf("Failed to execute %s: %v", file, err) + return fmt.Errorf("Failed to execute %s: %v", file, err) } } - go func() { - stack.Wait() - console.Stop(false) - }() + // The main script is now done, but keep running timers/callbacks. 
console.Stop(true) - return nil } diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index c3f41b187c..845ede2f9c 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -75,7 +75,7 @@ at block: 0 ({{niltime}}) datadir: {{.Datadir}} modules: {{apis}} -To exit, press ctrl-d +To exit, press ctrl-d or type exit > {{.InputLine "exit"}} `) geth.ExpectExit() @@ -149,7 +149,7 @@ at block: 0 ({{niltime}}){{if ipc}} datadir: {{datadir}}{{end}} modules: {{apis}} -To exit, press ctrl-d +To exit, press ctrl-d or type exit > {{.InputLine "exit" }} `) attach.ExpectExit() diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 2ac8b803cb..c871fcfee1 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -17,11 +17,16 @@ package main import ( + "bytes" + "errors" "fmt" "os" + "os/signal" "path/filepath" "sort" "strconv" + "strings" + "syscall" "time" "github.com/ethereum/go-ethereum/cmd/utils" @@ -29,9 +34,11 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" + "github.com/olekukonko/tablewriter" "gopkg.in/urfave/cli.v1" ) @@ -62,6 +69,9 @@ Remove blockchain and state databases`, dbPutCmd, dbGetSlotsCmd, dbDumpFreezerIndex, + dbImportCmd, + dbExportCmd, + dbMetadataCmd, ancientInspectCmd, }, } @@ -71,12 +81,13 @@ Remove blockchain and state databases`, ArgsUsage: " ", Flags: []cli.Flag{ utils.DataDirFlag, + utils.AncientFlag, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Usage: "Inspect the storage size for each type of data in the database", Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, @@ -90,9 +101,9 @@ Remove blockchain and state databases`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, } dbCompactCmd = cli.Command{ @@ -104,9 +115,9 @@ Remove blockchain and state databases`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, utils.CacheFlag, utils.CacheDatabaseFlag, }, @@ -124,9 +135,9 @@ corruption if it is aborted during execution'!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Description: "This command looks up the specified database key from the database.", } @@ -140,9 +151,9 @@ corruption if it is aborted during execution'!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Description: `This command deletes the specified database key from the database. WARNING: This is a low-level operation which may cause database corruption!`, @@ -157,9 +168,9 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Description: `This command sets a given database key to the given value. 
WARNING: This is a low-level operation which may cause database corruption!`, @@ -174,9 +185,9 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Description: "This command looks up the specified database key from the database.", } @@ -190,12 +201,57 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, }, Description: "This command displays information about the freezer index.", } + dbImportCmd = cli.Command{ + Action: utils.MigrateFlags(importLDBdata), + Name: "import", + Usage: "Imports leveldb-data from an exported RLP dump.", + ArgsUsage: " has .gz suffix, gzip compression will be used.", + ArgsUsage: " ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.", + } + dbMetadataCmd = cli.Command{ + Action: utils.MigrateFlags(showMetaData), + Name: "metadata", + Usage: "Shows metadata about the chain status.", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.SepoliaFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: "Shows metadata about the chain status.", + } ancientInspectCmd = cli.Command{ Action: utils.MigrateFlags(ancientInspect), Name: "inspect-reserved-oldest-blocks", @@ -363,14 +419,15 @@ func dbGet(ctx *cli.Context) error { db := utils.MakeChainDatabase(ctx, stack, true, false) defer db.Close() - key, err := hexutil.Decode(ctx.Args().Get(0)) + key, err := parseHexOrString(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) return err } + data, err := db.Get(key) if err != nil { - log.Info("Get operation failed", "error", err) + log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err) return err } fmt.Printf("key %#x: %#x\n", key, data) @@ -388,7 +445,7 @@ func dbDelete(ctx *cli.Context) error { db := utils.MakeChainDatabase(ctx, stack, false, false) defer db.Close() - key, err := hexutil.Decode(ctx.Args().Get(0)) + key, err := parseHexOrString(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) return err @@ -398,7 +455,7 @@ func dbDelete(ctx *cli.Context) error { fmt.Printf("Previous value: %#x\n", data) } if err = db.Delete(key); err != nil { - log.Info("Delete operation returned an error", "error", err) + log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err) return err } return nil @@ -421,7 +478,7 @@ func dbPut(ctx *cli.Context) error { data []byte err error ) - key, err = hexutil.Decode(ctx.Args().Get(0)) + key, err = parseHexOrString(ctx.Args().Get(0)) if err != nil { log.Info("Could not decode the key", "error", err) return err @@ -520,10 +577,196 @@ func freezerInspect(ctx *cli.Context) error { defer stack.Close() path := filepath.Join(stack.ResolvePath("chaindata"), "ancient") log.Info("Opening freezer", "location", path, "name", kind) - if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil { + if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil { return err } else { f.DumpIndex(start, 
end) } return nil } + +// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes +func parseHexOrString(str string) ([]byte, error) { + b, err := hexutil.Decode(str) + if errors.Is(err, hexutil.ErrMissingPrefix) { + return []byte(str), nil + } + return b, err +} + +func importLDBdata(ctx *cli.Context) error { + start := 0 + switch ctx.NArg() { + case 1: + break + case 2: + s, err := strconv.Atoi(ctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("second arg must be an integer: %v", err) + } + start = s + default: + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + var ( + fName = ctx.Args().Get(0) + stack, _ = makeConfigNode(ctx) + interrupt = make(chan os.Signal, 1) + stop = make(chan struct{}) + ) + defer stack.Close() + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(interrupt) + defer close(interrupt) + go func() { + if _, ok := <-interrupt; ok { + log.Info("Interrupted during ldb import, stopping at next batch") + } + close(stop) + }() + db := utils.MakeChainDatabase(ctx, stack, false, false) + return utils.ImportLDBData(db, fName, int64(start), stop) +} + +type preimageIterator struct { + iter ethdb.Iterator +} + +func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) { + for iter.iter.Next() { + key := iter.iter.Key() + if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) { + return utils.OpBatchAdd, key, iter.iter.Value(), true + } + } + return 0, nil, nil, false +} + +func (iter *preimageIterator) Release() { + iter.iter.Release() +} + +type snapshotIterator struct { + init bool + account ethdb.Iterator + storage ethdb.Iterator +} + +func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) { + if !iter.init { + iter.init = true + return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true + } + for iter.account.Next() { + key := iter.account.Key() + if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) { + return utils.OpBatchAdd, key, iter.account.Value(), true + } + } + for iter.storage.Next() { + key := iter.storage.Key() + if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) { + return utils.OpBatchAdd, key, iter.storage.Value(), true + } + } + return 0, nil, nil, false +} + +func (iter *snapshotIterator) Release() { + iter.account.Release() + iter.storage.Release() +} + +// chainExporters defines the export scheme for all exportable chain data. +var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{ + "preimage": func(db ethdb.Database) utils.ChainDataIterator { + iter := db.NewIterator(rawdb.PreimagePrefix, nil) + return &preimageIterator{iter: iter} + }, + "snapshot": func(db ethdb.Database) utils.ChainDataIterator { + account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) + storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil) + return &snapshotIterator{account: account, storage: storage} + }, +} + +func exportChaindata(ctx *cli.Context) error { + if ctx.NArg() < 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + // Parse the required chain data type, make sure it's supported. 
+ kind := ctx.Args().Get(0) + kind = strings.ToLower(strings.Trim(kind, " ")) + exporter, ok := chainExporters[kind] + if !ok { + var kinds []string + for kind := range chainExporters { + kinds = append(kinds, kind) + } + return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", ")) + } + var ( + stack, _ = makeConfigNode(ctx) + interrupt = make(chan os.Signal, 1) + stop = make(chan struct{}) + ) + defer stack.Close() + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(interrupt) + defer close(interrupt) + go func() { + if _, ok := <-interrupt; ok { + log.Info("Interrupted during db export, stopping at next batch") + } + close(stop) + }() + db := utils.MakeChainDatabase(ctx, stack, true, false) + return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop) +} + +func showMetaData(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack, true, false) + ancients, err := db.Ancients() + if err != nil { + fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err) + } + pp := func(val *uint64) string { + if val == nil { + return "" + } + return fmt.Sprintf("%d (0x%x)", *val, *val) + } + data := [][]string{ + {"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))}, + {"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))}, + {"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))}, + {"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}} + if b := rawdb.ReadHeadBlock(db); b != nil { + data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())}) + data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())}) + data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())}) + } + if h := rawdb.ReadHeadHeader(db); h != nil { + data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())}) + data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)}) + data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)}) + } + data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)}, + {"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))}, + {"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))}, + {"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))}, + {"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))}, + {"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))}, + {"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))}, + {"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))}, + {"txIndexTail", pp(rawdb.ReadTxIndexTail(db))}, + {"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))}, + }...) 
+ table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Field", "Value"}) + table.AppendBulk(data) + table.Render() + return nil +} diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index cbc1b38374..0563ef3c42 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -84,7 +84,7 @@ func TestCustomGenesis(t *testing.T) { runGeth(t, "--datadir", datadir, "init", json).WaitExit() // Query the custom genesis block - geth := runGeth(t, "--networkid", "1337", "--syncmode=full", + geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--cache", "16", "--datadir", datadir, "--maxpeers", "0", "--port", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--exec", tt.query, "console") diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go index 053ce96aa3..151c12c68c 100644 --- a/cmd/geth/les_test.go +++ b/cmd/geth/les_test.go @@ -137,14 +137,18 @@ func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { name: name, geth: runGeth(t, args...), } - // wait before we can attach to it. TODO: probe for it properly - time.Sleep(1 * time.Second) - var err error ipcpath := ipcEndpoint(ipcName, g.geth.Datadir) - if g.rpc, err = rpc.Dial(ipcpath); err != nil { - t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err) + // We can't know exactly how long geth will take to start, so we try 10 + // times over a 5 second period. + var err error + for i := 0; i < 10; i++ { + time.Sleep(500 * time.Millisecond) + if g.rpc, err = rpc.Dial(ipcpath); err == nil { + return g + } } - return g + t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err) + return nil } func initGeth(t *testing.T) string { diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e0f29bce77..9ed921c24f 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -71,12 +71,15 @@ var ( utils.NoUSBFlag, utils.DirectBroadcastFlag, utils.DisableSnapProtocolFlag, + utils.DisableDiffProtocolFlag, + utils.EnableTrustProtocolFlag, utils.DiffSyncFlag, utils.PipeCommitFlag, utils.RangeLimitFlag, utils.USBFlag, utils.SmartCardDaemonPathFlag, - utils.OverrideBerlinFlag, + utils.OverrideArrowGlacierFlag, + utils.OverrideTerminalTotalDifficulty, utils.EthashCacheDirFlag, utils.EthashCachesInMemoryFlag, utils.EthashCachesOnDiskFlag, @@ -98,6 +101,7 @@ var ( utils.TxPoolLifetimeFlag, utils.TxPoolReannounceTimeFlag, utils.SyncModeFlag, + utils.TriesVerifyModeFlag, utils.ExitWhenSyncedFlag, utils.GCModeFlag, utils.SnapshotFlag, @@ -125,20 +129,21 @@ var ( utils.CachePreimagesFlag, utils.PersistDiffFlag, utils.DiffBlockFlag, + utils.PruneAncientDataFlag, utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, utils.MiningEnabledFlag, utils.MinerThreadsFlag, utils.MinerNotifyFlag, - utils.MinerGasTargetFlag, + utils.LegacyMinerGasTargetFlag, utils.MinerGasLimitFlag, utils.MinerGasPriceFlag, utils.MinerEtherbaseFlag, utils.MinerExtraDataFlag, utils.MinerRecommitIntervalFlag, - utils.MinerDelayLeftoverFlag, utils.MinerNoVerfiyFlag, + utils.MinerDelayLeftoverFlag, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV5Flag, @@ -149,10 +154,11 @@ var ( utils.MainnetFlag, utils.DeveloperFlag, utils.DeveloperPeriodFlag, + utils.DeveloperGasLimitFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV3Flag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, @@ -161,11 +167,9 @@ var ( utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, - utils.EWASMInterpreterFlag, - utils.EVMInterpreterFlag, + 
utils.GpoIgnoreGasPriceFlag, utils.MinerNotifyFullFlag, configFileFlag, - utils.CatalystFlag, utils.BlockAmountReserved, utils.CheckSnapshotWithMPT, } @@ -176,12 +180,6 @@ var ( utils.HTTPPortFlag, utils.HTTPCORSDomainFlag, utils.HTTPVirtualHostsFlag, - utils.LegacyRPCEnabledFlag, - utils.LegacyRPCListenAddrFlag, - utils.LegacyRPCPortFlag, - utils.LegacyRPCCORSDomainFlag, - utils.LegacyRPCVirtualHostsFlag, - utils.LegacyRPCApiFlag, utils.GraphQLEnabledFlag, utils.GraphQLCORSDomainFlag, utils.GraphQLVirtualHostsFlag, @@ -197,6 +195,7 @@ var ( utils.IPCPathFlag, utils.InsecureUnlockAllowedFlag, utils.RPCGlobalGasCapFlag, + utils.RPCGlobalEVMTimeoutFlag, utils.RPCGlobalTxFeeCapFlag, utils.AllowUnprotectedTxs, } @@ -212,6 +211,10 @@ var ( utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBTagsFlag, + utils.MetricsEnableInfluxDBV2Flag, + utils.MetricsInfluxDBTokenFlag, + utils.MetricsInfluxDBBucketFlag, + utils.MetricsInfluxDBOrganizationFlag, } ) @@ -219,7 +222,7 @@ func init() { // Initialize the CLI app and start Geth app.Action = geth app.HideVersion = true // we have a command to print the version - app.Copyright = "Copyright 2013-2020 The go-ethereum Authors and BSC Authors" + app.Copyright = "Copyright 2013-2022 The go-ethereum Authors and BSC Authors" app.Commands = []cli.Command{ // See chaincmd.go: initCommand, @@ -286,15 +289,15 @@ func prepare(ctx *cli.Context) { case ctx.GlobalIsSet(utils.RopstenFlag.Name): log.Info("Starting Geth on Ropsten testnet...") + case ctx.GlobalIsSet(utils.SepoliaFlag.Name): + log.Info("Starting Geth on Sepolia testnet...") + case ctx.GlobalIsSet(utils.RinkebyFlag.Name): log.Info("Starting Geth on Rinkeby testnet...") case ctx.GlobalIsSet(utils.GoerliFlag.Name): log.Info("Starting Geth on Görli testnet...") - case ctx.GlobalIsSet(utils.YoloV3Flag.Name): - log.Info("Starting Geth on YOLOv3 testnet...") - case ctx.GlobalIsSet(utils.DeveloperFlag.Name): log.Info("Starting Geth in ephemeral dev mode...") @@ -304,7 +307,11 @@ func prepare(ctx *cli.Context) { // If we're a full node on mainnet without --cache specified, bump default cache allowance if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { // Make sure we're not on any supported preconfigured testnet either - if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { + if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && + !ctx.GlobalIsSet(utils.SepoliaFlag.Name) && + !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && + !ctx.GlobalIsSet(utils.GoerliFlag.Name) && + !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { // Nope, we're really on mainnet. Bump that cache up! log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096) ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096)) @@ -335,7 +342,7 @@ func geth(ctx *cli.Context) error { stack, backend := makeFullNode(ctx) defer stack.Close() - startNode(ctx, stack, backend) + startNode(ctx, stack, backend, false) stack.Wait() return nil } @@ -343,11 +350,11 @@ func geth(ctx *cli.Context) error { // startNode boots up the system node and all registered protocols, after which // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the // miner. 
-func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { +func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) { debug.Memsize.Add("node", stack) // Start up the node itself - utils.StartNode(ctx, stack) + utils.StartNode(ctx, stack, isConsole) // Unlock any account specifically requested unlockAccounts(ctx, stack) @@ -428,7 +435,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { } ethBackend, ok := backend.(*eth.EthAPIBackend) if !ok { - utils.Fatalf("Ethereum service not running: %v", err) + utils.Fatalf("Ethereum service not running") } // Set the gas price to the limits from the CLI and start mining gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go index 7d78f444fa..78b1d343b5 100644 --- a/cmd/geth/pruneblock_test.go +++ b/cmd/geth/pruneblock_test.go @@ -93,7 +93,7 @@ func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64 t.Fatalf("Failed to back up block: %v", err) } - dbBack, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false) + dbBack, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false, false) if err != nil { t.Fatalf("failed to create database with ancient backend") } @@ -139,7 +139,7 @@ func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64 func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemain uint64) (ethdb.Database, []*types.Block, []*types.Block, []types.Receipts, []*big.Int, uint64, *core.BlockChain) { //create a database with ancient freezer - db, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false) + db, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false, false) if err != nil { t.Fatalf("failed to create database with ancient backend") } diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index c00334ee9e..dd5d6e34fc 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -18,6 +18,7 @@ package main import ( "bytes" + "encoding/json" "errors" "fmt" "os" @@ -29,13 +30,17 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state/pruner" "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" @@ -66,6 +71,7 @@ var ( utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, utils.CacheTrieJournalFlag, @@ -121,6 +127,7 @@ so it's very necessary to do block data prune, this feature will handle it. 
utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -129,6 +136,32 @@ geth snapshot verify-state will traverse the whole accounts and storages set based on the specified snapshot and recalculate the root hash of state for verification. In other words, this command does the snapshot to trie conversion. +`, + }, + { + Name: "insecure-prune-all", + Usage: "Prune all trie state data except genesis block, it will break storage for fullnode, only suitable for fast node " + + "who do not need trie storage at all", + ArgsUsage: "", + Action: utils.MigrateFlags(pruneAllState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +will prune all historical trie state data except genesis block. +All trie nodes will be deleted from the database. + +It expects the genesis file as argument. + +WARNING: It's necessary to delete the trie clean cache after the pruning. +If you specify another directory for the trie clean cache via "--cache.trie.journal" +during the use of Geth, please also specify it here for correct deletion. Otherwise +the trie clean cache with default directory will be deleted. `, }, { @@ -141,6 +174,7 @@ In other words, this command does the snapshot to trie conversion. utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -163,6 +197,7 @@ It's also usable without snapshot enabled. utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -174,6 +209,33 @@ verification. The default checking target is the HEAD state. It's basically iden to traverse-state, but the check granularity is smaller. It's also usable without snapshot enabled. +`, + }, + { + Name: "dump", + Usage: "Dump a specific block from storage (same as 'geth dump' but using snapshots)", + ArgsUsage: "[? <blockHash> | <blockNum>]", + Action: utils.MigrateFlags(dumpState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.AncientFlag, + utils.RopstenFlag, + utils.SepoliaFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.ExcludeCodeFlag, + utils.ExcludeStorageFlag, + utils.StartKeyFlag, + utils.DumpLimitFlag, + utils.TriesInMemoryFlag, + }, + Description: ` +This command is semantically equivalent to 'geth dump', but uses the snapshots +as the backend data source, making this command a lot faster. + +The argument is interpreted as block number or hash. If none is provided, the latest +block is used. `, }, }, @@ -195,7 +257,7 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) { } headHeader := headBlock.Header() //Make sure the MPT and snapshot matches before pruning, otherwise the node can not start.
- snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, TriesInMemory, headBlock.Root(), false, false, false) + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, TriesInMemory, headBlock.Root(), false, false, false, false) if err != nil { log.Error("snaptree error", "err", err) return nil, err // The relevant snapshot(s) might not exist @@ -268,18 +330,48 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) { } func pruneBlock(ctx *cli.Context) error { - stack, config := makeConfigNode(ctx) + var ( + stack *node.Node + config gethConfig + chaindb ethdb.Database + err error + + oldAncientPath string + newAncientPath string + blockAmountReserved uint64 + blockpruner *pruner.BlockPruner + ) + + stack, config = makeConfigNode(ctx) defer stack.Close() - blockAmountReserved := ctx.GlobalUint64(utils.BlockAmountReserved.Name) - chaindb, err := accessDb(ctx, stack) + blockAmountReserved = ctx.GlobalUint64(utils.BlockAmountReserved.Name) + chaindb, err = accessDb(ctx, stack) if err != nil { return err } - var newAncientPath string - oldAncientPath := ctx.GlobalString(utils.AncientFlag.Name) - if !filepath.IsAbs(oldAncientPath) { - // force absolute paths, which often fail due to the splicing of relative paths - return errors.New("datadir.ancient not abs path") + + // Most of the problems reported by users when first using the prune-block + // tool are due to incorrect directory settings.Here, the default directory + // and relative directory are canceled, and the user is forced to formulate + // an absolute path to guide users to run the prune-block command correctly. + if !ctx.GlobalIsSet(utils.DataDirFlag.Name) { + return errors.New("datadir must be set") + } else { + datadir := ctx.GlobalString(utils.DataDirFlag.Name) + if !filepath.IsAbs(datadir) { + // force absolute paths, which often fail due to the splicing of relative paths + return errors.New("datadir not abs path") + } + } + + if !ctx.GlobalIsSet(utils.AncientFlag.Name) { + return errors.New("datadir.ancient must be set") + } else { + oldAncientPath = ctx.GlobalString(utils.AncientFlag.Name) + if !filepath.IsAbs(oldAncientPath) { + // force absolute paths, which often fail due to the splicing of relative paths + return errors.New("datadir.ancient not abs path") + } } path, _ := filepath.Split(oldAncientPath) @@ -288,7 +380,7 @@ func pruneBlock(ctx *cli.Context) error { } newAncientPath = filepath.Join(path, "ancient_back") - blockpruner := pruner.NewBlockPruner(chaindb, stack, oldAncientPath, newAncientPath, blockAmountReserved) + blockpruner = pruner.NewBlockPruner(chaindb, stack, oldAncientPath, newAncientPath, blockAmountReserved) lock, exist, err := fileutil.Flock(filepath.Join(oldAncientPath, "PRUNEFLOCK")) if err != nil { @@ -363,6 +455,48 @@ func pruneState(ctx *cli.Context) error { return nil } +func pruneAllState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + genesisPath := ctx.Args().First() + if len(genesisPath) == 0 { + utils.Fatalf("Must supply path to genesis JSON file") + } + file, err := os.Open(genesisPath) + if err != nil { + utils.Fatalf("Failed to read genesis file: %v", err) + } + defer file.Close() + + g := new(core.Genesis) + if err := json.NewDecoder(file).Decode(g); err != nil { + cfg := gethConfig{ + Eth: ethconfig.Defaults, + Node: defaultNodeConfig(), + Metrics: metrics.DefaultConfig, + } + + // Load config file. 
+ if err := loadConfig(genesisPath, &cfg); err != nil { + utils.Fatalf("%v", err) + } + g = cfg.Eth.Genesis + } + + chaindb := utils.MakeChainDatabase(ctx, stack, false, false) + pruner, err := pruner.NewAllPruner(chaindb) + if err != nil { + log.Error("Failed to open snapshot tree", "err", err) + return err + } + if err = pruner.PruneAll(g); err != nil { + log.Error("Failed to prune state", "err", err) + return err + } + return nil +} + func verifyState(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() @@ -373,7 +507,7 @@ func verifyState(ctx *cli.Context) error { log.Error("Failed to load head block") return errors.New("no head block") } - snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, 128, headBlock.Root(), false, false, false) + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, 128, headBlock.Root(), false, false, false, false) if err != nil { log.Error("Failed to open snapshot tree", "err", err) return err @@ -391,7 +525,7 @@ func verifyState(ctx *cli.Context) error { } } if err := snaptree.Verify(root); err != nil { - log.Error("Failed to verfiy state", "root", root, "err", err) + log.Error("Failed to verify state", "root", root, "err", err) return err } log.Info("Verified the state", "root", root) @@ -446,7 +580,7 @@ func traverseState(ctx *cli.Context) error { accIter := trie.NewIterator(t.NodeIterator(nil)) for accIter.Next() { accounts += 1 - var acc state.Account + var acc types.StateAccount if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil { log.Error("Invalid account encountered during traversal", "err", err) return err @@ -552,7 +686,7 @@ func traverseRawState(ctx *cli.Context) error { // dig into the storage trie further. if accIter.Leaf() { accounts += 1 - var acc state.Account + var acc types.StateAccount if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { log.Error("Invalid account encountered during traversal", "err", err) return errors.New("invalid account") @@ -571,8 +705,7 @@ func traverseRawState(ctx *cli.Context) error { // Check the present for non-empty hash node(embedded node doesn't // have their own hash). 
if node != (common.Hash{}) { - blob := rawdb.ReadTrieNode(chaindb, node) - if len(blob) == 0 { + if !rawdb.HasTrieNode(chaindb, node) { log.Error("Missing trie node(storage)", "hash", node) return errors.New("missing storage") } @@ -616,3 +749,74 @@ func parseRoot(input string) (common.Hash, error) { } return h, nil } + +func dumpState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + conf, db, root, err := parseDumpConfig(ctx, stack) + if err != nil { + return err + } + triesInMemory := ctx.GlobalUint64(utils.TriesInMemoryFlag.Name) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), int(triesInMemory), 256, root, false, false, false, false) + if err != nil { + return err + } + accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start)) + if err != nil { + return err + } + defer accIt.Release() + + log.Info("Snapshot dumping started", "root", root) + var ( + start = time.Now() + logged = time.Now() + accounts uint64 + ) + enc := json.NewEncoder(os.Stdout) + enc.Encode(struct { + Root common.Hash `json:"root"` + }{root}) + for accIt.Next() { + account, err := snapshot.FullAccount(accIt.Account()) + if err != nil { + return err + } + da := &state.DumpAccount{ + Balance: account.Balance.String(), + Nonce: account.Nonce, + Root: account.Root, + CodeHash: account.CodeHash, + SecureKey: accIt.Hash().Bytes(), + } + if !conf.SkipCode && !bytes.Equal(account.CodeHash, emptyCode) { + da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash)) + } + if !conf.SkipStorage { + da.Storage = make(map[common.Hash]string) + + stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + return err + } + for stIt.Next() { + da.Storage[stIt.Hash()] = common.Bytes2Hex(stIt.Slot()) + } + } + enc.Encode(da) + accounts++ + if time.Since(logged) > 8*time.Second { + log.Info("Snapshot dumping in progress", "at", accIt.Hash(), "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + if conf.Max > 0 && accounts >= conf.Max { + break + } + } + log.Info("Snapshot dumping complete", "accounts", accounts, + "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} diff --git a/cmd/geth/testdata/vcheck/vulnerabilities.json b/cmd/geth/testdata/vcheck/vulnerabilities.json index 36509f95a9..92de0c9ccc 100644 --- a/cmd/geth/testdata/vcheck/vulnerabilities.json +++ b/cmd/geth/testdata/vcheck/vulnerabilities.json @@ -52,13 +52,16 @@ "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" }, { - "name": "GethCrash", + "name": "Geth DoS via MULMOD", "uid": "GETH-2020-04", "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", - "description": "Full details to be disclosed at a later date", + "description": "Affected versions suffer from a vulnerability which can be exploited through the `MULMOD` operation, by specifying a modulo of `0`: `mulmod(a,b,0)`, causing a `panic` in the underlying library. \nThe crash was in the `uint256` library, where a buffer [underflowed](https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L442).\n\n\tif `d == 0`, `dLen` remains `0`\n\nand https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L451 will try to access index `[-1]`.\n\nThe `uint256` library was first merged in this [commit](https://github.com/ethereum/go-ethereum/commit/cf6674539c589f80031f3371a71c6a80addbe454), on 2020-06-08. 
\nExploiting this vulnerabilty would cause all vulnerable nodes to drop off the network. \n\nThe issue was brought to our attention through a [bug report](https://github.com/ethereum/go-ethereum/issues/21367), showing a `panic` occurring on sync from genesis on the Ropsten network.\n \nIt was estimated that the least obvious way to fix this would be to merge the fix into `uint256`, make a new release of that library and then update the geth-dependency.\n", "links": [ "https://blog.ethereum.org/2020/11/12/geth_security_release/", - "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m" + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m", + "https://github.com/holiman/uint256/releases/tag/v1.1.1", + "https://github.com/holiman/uint256/pull/80", + "https://github.com/ethereum/go-ethereum/pull/21368" ], "introduced": "v1.9.16", "fixed": "v1.9.18", @@ -66,5 +69,51 @@ "severity": "Critical", "CVE": "CVE-2020-26242", "check": "Geth\\/v1\\.9.(16|17).*$" + }, + { + "name": "LES Server DoS via GetProofsV2", + "uid": "GETH-2020-05", + "summary": "A DoS vulnerability can make a LES server crash.", + "description": "A DoS vulnerability can make a LES server crash via malicious GetProofsV2 request from a connected LES client.\n\nThe vulnerability was patched in #21896.\n\nThis vulnerability only concern users explicitly running geth as a light server", + "links": [ + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-r33q-22hv-j29q", + "https://github.com/ethereum/go-ethereum/pull/21896" + ], + "introduced": "v1.8.0", + "fixed": "v1.9.25", + "published": "2020-12-10", + "severity": "Medium", + "CVE": "CVE-2020-26264", + "check": "(Geth\\/v1\\.8\\.*)|(Geth\\/v1\\.9\\.\\d-.*)|(Geth\\/v1\\.9\\.1\\d-.*)|(Geth\\/v1\\.9\\.(20|21|22|23|24)-.*)$" + }, + { + "name": "SELFDESTRUCT-recreate consensus flaw", + "uid": "GETH-2020-06", + "introduced": "v1.9.4", + "fixed": "v1.9.20", + "summary": "A consensus-vulnerability in Geth could cause a chain split, where vulnerable versions refuse to accept the canonical chain.", + "description": "A flaw was repoted at 2020-08-11 by John Youngseok Yang (Software Platform Lab), where a particular sequence of transactions could cause a consensus failure.\n\n- Tx 1:\n - `sender` invokes `caller`.\n - `caller` invokes `0xaa`. `0xaa` has 3 wei, does a self-destruct-to-self\n - `caller` does a `1 wei` -call to `0xaa`, who thereby has 1 wei (the code in `0xaa` still executed, since the tx is still ongoing, but doesn't redo the selfdestruct, it takes a different path if callvalue is non-zero)\n\n-Tx 2:\n - `sender` does a 5-wei call to 0xaa. No exec (since no code). \n\nIn geth, the result would be that `0xaa` had `6 wei`, whereas OE reported (correctly) `5` wei. Furthermore, in geth, if the second tx was not executed, the `0xaa` would be destructed, resulting in `0 wei`. Thus obviously wrong. \n\nIt was determined that the root cause was this [commit](https://github.com/ethereum/go-ethereum/commit/223b950944f494a5b4e0957fd9f92c48b09037ad) from [this PR](https://github.com/ethereum/go-ethereum/pull/19953). The semantics of `createObject` was subtly changd, into returning a non-nil object (with `deleted=true`) where it previously did not if the account had been destructed. 
This return value caused the new object to inherit the old `balance`.\n", + "links": [ + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4" + ], + "published": "2020-12-10", + "severity": "High", + "CVE": "CVE-2020-26265", + "check": "(Geth\\/v1\\.9\\.(4|5|6|7|8|9)-.*)|(Geth\\/v1\\.9\\.1\\d-.*)$" + }, + { + "name": "Not ready for London upgrade", + "uid": "GETH-2021-01", + "summary": "The client is not ready for the 'London' technical upgrade, and will deviate from the canonical chain when the London upgrade occurs (at block '12965000' around August 4, 2021.", + "description": "At (or around) August 4, Ethereum will undergo a technical upgrade called 'London'. Clients not upgraded will fail to progress on the canonical chain.", + "links": [ + "https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md", + "https://notes.ethereum.org/@timbeiko/ropsten-postmortem" + ], + "introduced": "v1.10.1", + "fixed": "v1.10.6", + "published": "2020-12-10", + "severity": "High", + "check": "(Geth\\/v1\\.10\\.(1|2|3|4|5)-.*)$" } ] diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index a1a0a7d0b4..43e27c5558 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -41,15 +41,18 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.NoUSBFlag, utils.DirectBroadcastFlag, utils.DisableSnapProtocolFlag, + utils.DisableDiffProtocolFlag, + utils.EnableTrustProtocolFlag, utils.RangeLimitFlag, utils.SmartCardDaemonPathFlag, utils.NetworkIdFlag, utils.MainnetFlag, utils.GoerliFlag, utils.RinkebyFlag, - utils.YoloV3Flag, utils.RopstenFlag, + utils.SepoliaFlag, utils.SyncModeFlag, + utils.TriesVerifyModeFlag, utils.ExitWhenSyncedFlag, utils.GCModeFlag, utils.TxLookupLimitFlag, @@ -81,6 +84,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Flags: []cli.Flag{ utils.DeveloperFlag, utils.DeveloperPeriodFlag, + utils.DeveloperGasLimitFlag, }, }, { @@ -157,6 +161,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.GraphQLCORSDomainFlag, utils.GraphQLVirtualHostsFlag, utils.RPCGlobalGasCapFlag, + utils.RPCGlobalEVMTimeoutFlag, utils.RPCGlobalTxFeeCapFlag, utils.AllowUnprotectedTxs, utils.JSpathFlag, @@ -188,7 +193,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MinerNotifyFlag, utils.MinerNotifyFullFlag, utils.MinerGasPriceFlag, - utils.MinerGasTargetFlag, utils.MinerGasLimitFlag, utils.MinerEtherbaseFlag, utils.MinerExtraDataFlag, @@ -203,14 +207,13 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, + utils.GpoIgnoreGasPriceFlag, }, }, { Name: "VIRTUAL MACHINE", Flags: []cli.Flag{ utils.VMEnableDebugFlag, - utils.EVMInterpreterFlag, - utils.EWASMInterpreterFlag, }, }, { @@ -228,12 +231,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Name: "ALIASED (deprecated)", Flags: []cli.Flag{ utils.NoUSBFlag, - utils.LegacyRPCEnabledFlag, - utils.LegacyRPCListenAddrFlag, - utils.LegacyRPCPortFlag, - utils.LegacyRPCCORSDomainFlag, - utils.LegacyRPCVirtualHostsFlag, - utils.LegacyRPCApiFlag, }, }, { @@ -242,7 +239,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.SnapshotFlag, utils.BloomFilterSizeFlag, cli.HelpFlag, - utils.CatalystFlag, }, }, } diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go index 0f056d1967..4be32d5e4f 100644 --- a/cmd/geth/version_check_test.go +++ b/cmd/geth/version_check_test.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" "testing" + + "github.com/jedisct1/go-minisign" ) func TestVerification(t *testing.T) { @@ -128,3 +130,39 @@ func 
TestMatching(t *testing.T) { } } } + +func TestGethPubKeysParseable(t *testing.T) { + for _, pubkey := range gethPubKeys { + _, err := minisign.NewPublicKey(pubkey) + if err != nil { + t.Errorf("Should be parseable") + } + } +} + +func TestKeyID(t *testing.T) { + type args struct { + id [8]byte + } + tests := []struct { + name string + args args + want string + }{ + {"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"}, + {"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"}, + {"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := keyID(tt.args.id); got != tt.want { + t.Errorf("keyID() = %v, want %v", got, tt.want) + } + }) + } +} + +func extractKeyId(pubkey string) [8]byte { + p, _ := minisign.NewPublicKey(pubkey) + return p.KeyId +} diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go index a76ee19a06..35cfada66f 100644 --- a/cmd/puppeth/module_dashboard.go +++ b/cmd/puppeth/module_dashboard.go @@ -80,12 +80,10 @@ var dashboardContent = `