diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7ac7915f225c..e62a85d1ed59 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,23 +1,23 @@ # Lines starting with '#' are comments. # Each line is a file pattern followed by one or more owners. -accounts/usbwallet @karalabe -accounts/scwallet @gballet -accounts/abi @gballet -cmd/clef @holiman -cmd/puppeth @karalabe -consensus @karalabe -core/ @karalabe @holiman @rjl493456442 -dashboard/ @kurkomisi -eth/ @karalabe @holiman @rjl493456442 -graphql/ @gballet -les/ @zsfelfoldi @rjl493456442 -light/ @zsfelfoldi @rjl493456442 -mobile/ @karalabe @ligi -p2p/ @fjl @zsfelfoldi -rpc/ @fjl @holiman -p2p/simulations @zelig @janos @justelad -p2p/protocols @zelig @janos @justelad -p2p/testing @zelig @janos @justelad -signer/ @holiman -whisper/ @gballet @gluk256 +# accounts/usbwallet @karalabe +# accounts/scwallet @gballet +# accounts/abi @gballet @MariusVanDerWijden +# cmd/clef @holiman +# cmd/puppeth @karalabe +# consensus @karalabe +# core/ @karalabe @holiman @rjl493456442 +# eth/ @karalabe @holiman @rjl493456442 +# eth/catalyst/ @gballet +# graphql/ @gballet +# les/ @zsfelfoldi @rjl493456442 +# light/ @zsfelfoldi @rjl493456442 +# mobile/ @karalabe @ligi +# node/ @fjl @renaynay +# p2p/ @fjl @zsfelfoldi +# rpc/ @fjl @holiman +# p2p/simulations @fjl +# p2p/protocols @fjl +# p2p/testing @fjl +# signer/ @holiman diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index f87996cdcb94..a08542df2555 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -30,11 +30,11 @@ Please make sure your contributions adhere to our coding guidelines: Before you submit a feature request, please check and make sure that it isn't possible through some other means. The JavaScript-enabled console is a powerful feature in the right hands. Please check our -[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info +[Geth documentation page](https://geth.ethereum.org/docs/) for more info and help. ## Configuration, dependencies, and tests -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) for more details on configuring your environment, managing project dependencies and testing procedures. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE/bug.md similarity index 52% rename from .github/ISSUE_TEMPLATE.md rename to .github/ISSUE_TEMPLATE/bug.md index 4e638166ff71..2aa2c48a600b 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,8 +1,10 @@ -Hi there, - -Please note that this is an issue tracker reserved for bug reports and feature requests. - -For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com. +--- +name: Report a bug +about: Something with go-ethereum is not working as expected +title: '' +labels: 'type:bug' +assignees: '' +--- #### System information @@ -24,3 +26,5 @@ Commit hash : (if `develop`) ```` [backtrace] ```` + +When submitting logs: please submit them as text and not screenshots. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md new file mode 100644 index 000000000000..aacd885f9e5e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -0,0 +1,17 @@ +--- +name: Request a feature +about: Report a missing feature - e.g. 
as a step before submitting a PR +title: '' +labels: 'type:feature' +assignees: '' +--- + +# Rationale + +Why should this feature exist? +What are the use-cases? + +# Implementation + +Do you have ideas regarding the implementation of this feature? +Are you willing to implement this feature? \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 000000000000..8f460ab558ec --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,9 @@ +--- +name: Ask a question +about: Something is unclear +title: '' +labels: 'type:docs' +assignees: '' +--- + +This should only be used in very rare cases e.g. if you are not 100% sure if something is a bug or asking a question that leads to improving the documentation. For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com. diff --git a/.gitmodules b/.gitmodules index aad487369507..57093244b9c7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,14 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests + shallow = true +[submodule "evm-benchmarks"] + path = tests/evm-benchmarks + url = https://github.com/ipsilon/evm-benchmarks + shallow = true [submodule "rocksdb"] path = rocksdb url = https://github.com/facebook/rocksdb +[submodule "etcd"] + path = etcd + url = https://github.com/metadium/etcd diff --git a/.golangci.yml b/.golangci.yml index 24d00da6ec54..4950b98c21ba 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,7 @@ # This file configures github.com/golangci/golangci-lint. run: - timeout: 2m + timeout: 20m tests: true # default is true. Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ diff --git a/.travis.yml b/.travis.yml index 416a83018d78..197d56748fc2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,12 +2,21 @@ language: go go_import_path: github.com/ethereum/go-ethereum sudo: false jobs: + allow_failures: + - stage: build + os: osx + go: 1.17.x + env: + - azure-osx + - azure-ios + - cocoapods-ios + include: - # This builder only tests code linters on latest version of Go + # This builder only tests code linters on latest version of Go - stage: lint os: linux - dist: xenial - go: 1.13.x + dist: bionic + go: 1.17.x env: - lint git: @@ -15,71 +24,51 @@ jobs: script: - go run build/ci.go lint + # These builders create the Docker sub-images for multi-arch push and each + # will attempt to push the multi-arch image if they are the last builder - stage: build - os: linux - dist: xenial - go: 1.11.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: linux - dist: xenial - go: 1.12.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - # These are the latest Go versions. 
- - stage: build + if: type = push os: linux arch: amd64 - dist: xenial - go: 1.13.x + dist: bionic + go: 1.17.x + env: + - docker + services: + - docker + git: + submodules: false # avoid cloning ethereum/tests + before_install: + - export DOCKER_CLI_EXPERIMENTAL=enabled script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES + - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go - stage: build - if: type = pull_request + if: type = push os: linux arch: arm64 - dist: xenial - go: 1.13.x - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: osx - osx_image: xcode11.3 - go: 1.13.x + dist: bionic + go: 1.17.x + env: + - docker + services: + - docker + git: + submodules: false # avoid cloning ethereum/tests + before_install: + - export DOCKER_CLI_EXPERIMENTAL=enabled script: - - echo "Increase the maximum number of open file descriptors on macOS" - - NOFILE=20480 - - sudo sysctl -w kern.maxfiles=$NOFILE - - sudo sysctl -w kern.maxfilesperproc=$NOFILE - - sudo launchctl limit maxfiles $NOFILE $NOFILE - - sudo launchctl limit maxfiles - - ulimit -S -n $NOFILE - - ulimit -n - - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES + - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go # This builder does the Ubuntu PPA upload - stage: build if: type = push os: linux - dist: xenial - go: 1.13.x + dist: bionic + go: 1.17.x env: - ubuntu-ppa + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests addons: @@ -93,17 +82,18 @@ jobs: - python-paramiko script: - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - - go run build/ci.go debsrc -goversion 1.13.6 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " + - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " # This builder does the Linux Azure uploads - stage: build if: type = push os: linux - dist: xenial + dist: bionic sudo: required - go: 1.13.x + go: 1.17.x env: - azure-linux + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests addons: @@ -112,105 +102,77 @@ jobs: - gcc-multilib script: # Build for the primary platforms that Trusty can manage - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch 386 - - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo + - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch 386 + - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Switch over GCC to cross compilation (breaks 386, hence why do it here only) - sudo -E apt-get -yq --no-install-suggests 
--no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - sudo ln -s /usr/include/asm-generic /usr/include/asm - - GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc - - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc - - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - # This builder does the Linux Azure MIPS xgo uploads - - stage: build - if: type = push - os: linux - dist: xenial - services: - - docker - go: 1.13.x - env: - - azure-linux-mips - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc + - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc + - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc + - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc + - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Android Maven and Azure uploads - stage: build if: type = push os: linux - dist: xenial + dist: bionic addons: apt: packages: - - oracle-java8-installer - - oracle-java8-set-default - language: android - android: - components: - - platform-tools - - tools - - android-15 - - android-19 - - 
android-24 + - openjdk-8-jdk env: - azure-android - maven-android + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz | tar -xz + # Install Android and it's dependencies manually, Travis is stale + - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 + - curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip + - unzip -q android.zip -d $HOME/sdk && rm android.zip + - mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools + - export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin + - export ANDROID_HOME=$HOME/sdk + + - yes | sdkmanager --licenses >/dev/null + - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle" + + # Install Go to allow building with + - curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go script: # Build the Android archive and upload it to Maven Central and Azure - - curl https://dl.google.com/android/repository/android-ndk-r19b-linux-x86_64.zip -o android-ndk-r19b.zip - - unzip -q android-ndk-r19b.zip && rm android-ndk-r19b.zip - - mv android-ndk-r19b $ANDROID_HOME/ndk-bundle - - mkdir -p $GOPATH/src/github.com/ethereum - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum - - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds + - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads - stage: build if: type = push os: osx - go: 1.13.x + go: 1.17.x env: - azure-osx - azure-ios - cocoapods-ios + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go install -dlgo + - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Build the iOS framework and upload it to CocoaPods and Azure - gem uninstall cocoapods -a -x @@ -225,17 +187,61 @@ jobs: # Workaround for https://github.com/golang/go/issues/23749 - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc' - - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds + - go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds + + # These builders run the tests + - stage: build + os: linux + arch: amd64 + dist: bionic + go: 1.17.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES + + - stage: build + if: type = pull_request + os: linux + arch: arm64 + dist: bionic + go: 1.17.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES + + - stage: build + os: linux + dist: bionic + go: 1.16.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -coverage $TEST_PACKAGES # This builder does the Azure archive purges to avoid accumulating junk - stage: build if: type = cron os: linux - dist: xenial - go: 1.13.x + dist: bionic + go: 1.17.x env: - azure-purge + - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: - go run build/ci.go purge -store 
gethstore/builds -days 14 + + # This builder executes race tests + - stage: build + if: type = cron + os: linux + dist: bionic + go: 1.17.x + env: + - GO111MODULE=on + script: + - go run build/ci.go test -race -coverage $TEST_PACKAGES + diff --git a/COPYING b/COPYING index 8d66e8772370..f288702d2fa1 100644 --- a/COPYING +++ b/COPYING @@ -1,7 +1,7 @@ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2014 The go-ethereum Authors. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -616,4 +616,59 @@ above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. \ No newline at end of file +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
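[Editorial note, not part of the patch] The Travis builders above all drive the build through the repository's `build/ci.go` tool rather than the Makefile. As a rough local sketch — assuming a checkout of this branch with Go 1.17+ on PATH, run from the repository root — the same entry points used by the CI stages can be exercised outside Travis:

```shell
# Illustrative only; these mirror the CI stage scripts shown above.
go run build/ci.go lint                                   # lint stage
go run build/ci.go install                                # build the tools (the azure builders add -dlgo)
go run build/ci.go test -coverage $TEST_PACKAGES          # test builders ($TEST_PACKAGES is set by CI; may be left unset locally)
go run build/ci.go test -race -coverage $TEST_PACKAGES    # cron-only race builder
```

The `docker`, `archive`, `aar` and `xcode` subcommands invoked by the push-only builders additionally need registry/signing credentials from the CI environment, so they are omitted here.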
diff --git a/Dockerfile b/Dockerfile index 114e7620581e..7badbc1320a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,15 @@ +# Support setting various labels on the final image +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + # Build Geth in a stock Go builder container -FROM golang:1.13-alpine as builder +FROM golang:1.17-alpine as builder -RUN apk add --no-cache make gcc musl-dev linux-headers git +RUN apk add --no-cache gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make geth +RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth # Pull Geth into a second stage deploy alpine container FROM alpine:latest @@ -12,5 +17,12 @@ FROM alpine:latest RUN apk add --no-cache ca-certificates COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ -EXPOSE 8545 8546 8547 30303 30303/udp +EXPOSE 8545 8546 30303 30303/udp ENTRYPOINT ["geth"] + +# Add some metadata labels to help programatic image consumption +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + +LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM" diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 2f661ba01c6f..3ae5377e4f36 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,10 +1,15 @@ +# Support setting various labels on the final image +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + # Build Geth in a stock Go builder container -FROM golang:1.13-alpine as builder +FROM golang:1.17-alpine as builder -RUN apk add --no-cache make gcc musl-dev linux-headers git +RUN apk add --no-cache gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make all +RUN cd /go-ethereum && go run build/ci.go install # Pull all binaries into a second stage deploy alpine container FROM alpine:latest @@ -12,4 +17,11 @@ FROM alpine:latest RUN apk add --no-cache ca-certificates COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/ -EXPOSE 8545 8546 8547 30303 30303/udp +EXPOSE 8545 8546 30303 30303/udp + +# Add some metadata labels to help programatic image consumption +ARG COMMIT="" +ARG VERSION="" +ARG BUILDNUM="" + +LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM" diff --git a/Dockerfile.metadium b/Dockerfile.metadium new file mode 100644 index 000000000000..e1a50d272c38 --- /dev/null +++ b/Dockerfile.metadium @@ -0,0 +1,22 @@ +# builder image + +FROM ubuntu:bionic as base + +SHELL ["/bin/bash", "-c"] + +RUN apt-get update -q -y && apt-get upgrade -q -y +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends build-essential ca-certificates curl libjemalloc-dev liblz4-dev libsnappy-dev libzstd-dev libudev-dev git + +# golang +RUN curl -sL -o /tmp/go.tar.gz https://dl.google.com/go/$(curl -sL https://golang.org/VERSION?m=text).linux-amd64.tar.gz && \ + pushd /usr/local/ && \ + tar xfz /tmp/go.tar.gz && \ + cd /usr/local/bin/ && \ + ln -sf ../go/bin/* . && \ + popd && \ + rm /tmp/go.tar.gz + +RUN apt autoremove && apt autoclean + +# EOF diff --git a/Makefile b/Makefile index 17168f1cb99b..6415aee0e6dc 100644 --- a/Makefile +++ b/Makefile @@ -2,11 +2,7 @@ # with Go source code. If you know what GOPATH is then you probably # don't need to bother with make. 
-.PHONY: geth android ios geth-cross evm all test clean rocksdb -.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le -.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 -.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64 -.PHONY: geth-windows geth-windows-386 geth-windows-amd64 +.PHONY: geth android ios evm all test clean rocksdb etcd .PHONY: gmet-linux GOBIN = ./build/bin @@ -33,7 +29,7 @@ ROCKSDB_DIR=$(shell pwd)/rocksdb ROCKSDB_TAG=-tags rocksdb endif -metadium: gmet logrot +metadium: etcd gmet logrot @[ -d build/conf ] || mkdir -p build/conf @cp -p metadium/scripts/gmet.sh metadium/scripts/solc.sh build/bin/ @cp -p metadium/scripts/config.json.example \ @@ -44,7 +40,7 @@ metadium: gmet logrot @(cd build; tar cfz metadium.tar.gz bin conf) @echo "Done building build/metadium.tar.gz" -gmet: rocksdb metadium/governance_abi.go +gmet: etcd rocksdb metadium/governance_abi.go ifeq ($(USE_ROCKSDB), NO) $(GORUN) build/ci.go install $(ROCKSDB_TAG) ./cmd/gmet else @@ -63,8 +59,14 @@ geth: @echo "Done building." @echo "Run \"$(GOBIN)/geth\" to launch geth." -dbbench: +dbbench: rocksdb +ifeq ($(USE_ROCKSDB), NO) $(GORUN) build/ci.go install $(ROCKSDB_TAG) ./cmd/dbbench +else + CGO_CFLAGS=-I$(ROCKSDB_DIR)/include \ + CGO_LDFLAGS="-L$(ROCKSDB_DIR) -lrocksdb -lm -lstdc++ $(shell awk '/PLATFORM_LDFLAGS/ {sub("PLATFORM_LDFLAGS=", ""); print} /JEMALLOC=1/ {print "-ljemalloc"}' < $(ROCKSDB_DIR)/make_config.mk)" \ + $(GORUN) build/ci.go install $(ROCKSDB_TAG) ./cmd/dbbench +endif all: $(GORUN) build/ci.go install @@ -73,6 +75,8 @@ android: $(GORUN) build/ci.go aar --local @echo "Done building." @echo "Import \"$(GOBIN)/geth.aar\" to use the library." + @echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs" + @echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc" ios: $(GORUN) build/ci.go xcode --local @@ -82,7 +86,7 @@ ios: test: all $(GORUN) build/ci.go test -lint: ## Run linters. +lint: metadium/governance_abi.go ## Run linters. $(GORUN) build/ci.go lint clean: @@ -98,141 +102,39 @@ clean: # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. 
devtools: - env GOBIN= go get -u golang.org/x/tools/cmd/stringer - env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata - env GOBIN= go get -u github.com/fjl/gencodec - env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go + env GOBIN= go install golang.org/x/tools/cmd/stringer@latest + env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest + env GOBIN= go install github.com/fjl/gencodec@latest + env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest env GOBIN= go install ./cmd/abigen - @type "npm" 2> /dev/null || echo 'Please install node.js and npm' @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' -# Cross Compilation Targets (xgo) - -geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios - @echo "Full cross compilation done:" - @ls -ld $(GOBIN)/geth-* - -geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le - @echo "Linux cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* - -geth-linux-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth - @echo "Linux 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep 386 - -geth-linux-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth - @echo "Linux amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep amd64 - -geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 - @echo "Linux ARM cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm - -geth-linux-arm-5: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth - @echo "Linux ARMv5 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-5 - -geth-linux-arm-6: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth - @echo "Linux ARMv6 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-6 - -geth-linux-arm-7: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth - @echo "Linux ARMv7 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-7 - -geth-linux-arm64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth - @echo "Linux ARM64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm64 - -geth-linux-mips: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips - -geth-linux-mipsle: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPSle cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mipsle - -geth-linux-mips64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64 - -geth-linux-mips64le: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64le cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64le - -geth-darwin: geth-darwin-386 geth-darwin-amd64 - @echo "Darwin cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* - -geth-darwin-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth - @echo "Darwin 386 cross compilation done:" - @ls -ld 
$(GOBIN)/geth-darwin-* | grep 386 - -geth-darwin-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth - @echo "Darwin amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep amd64 - -geth-windows: geth-windows-386 geth-windows-amd64 - @echo "Windows cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* - -geth-windows-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth - @echo "Windows 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep 386 - -geth-windows-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth - @echo "Windows amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep amd64 - -gmet-linux: -ifeq ($(shell uname), Linux) - @docker --version > /dev/null 2>&1; \ - if [ ! $$? = 0 ]; then \ - echo "Docker not found."; \ - else \ - docker run -e HOME=/tmp --rm \ - -v /etc/passwd:/etc/passwd:ro \ - -v /etc/group:/etc/group:ro \ - -v ~/src:/home/$${USER}/src \ - -v $(shell pwd):/data -u $$(id -u):$$(id -g) \ - -w /data metadium/bobthe:latest \ - make USE_ROCKSDB=$(USE_ROCKSDB); \ - fi -else +gmet-linux: etcd @docker --version > /dev/null 2>&1; \ if [ ! $$? = 0 ]; then \ echo "Docker not found."; \ else \ + docker build -t meta/builder:local \ + -f Dockerfile.metadium . && \ docker run -e HOME=/tmp --rm -v $(shell pwd):/data \ - -w /data metadium/bobthe:latest \ + -w /data meta/builder:local \ make USE_ROCKSDB=$(USE_ROCKSDB); \ fi -endif ifneq ($(USE_ROCKSDB), YES) rocksdb: else rocksdb: - @[ ! -e rocksdb/.git ] && git submodule init rocksdb; \ - git submodule update rocksdb && \ + @[ ! -e rocksdb/.git ] && git submodule update --init rocksdb; \ cd $(ROCKSDB_DIR) && make -j8 static_lib; endif +etcd: + @if [ ! -e etcd/.git ]; then \ + git submodule update --init etcd; \ + fi + AWK_CODE=' \ BEGIN { print "package metadium"; bin = 0; name = ""; abi = ""; } \ /^{/ { bin = 1; abi = ""; name = ""; } \ @@ -253,7 +155,7 @@ metadium/admin_abi.go: metadium/contracts/MetadiumAdmin-template.sol build/bin/s rm -f /tmp/junk.$$$$; AWK_CODE_2=' \ -BEGIN { print "package metadium"; } \ +BEGIN { print "package metadium\n"; } \ /^var Registry_contract/ { \ sub("^var[^(]*\\(","",$$0); sub("\\);$$","",$$0); \ n = "Registry"; \ diff --git a/README.md b/README.md index 35b18420b9da..57e10e7dbc27 100644 --- a/README.md +++ b/README.md @@ -174,9 +174,9 @@ Official Golang implementation of the Ethereum protocol. [![API Reference]( https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://godoc.org/github.com/ethereum/go-ethereum) +)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) [![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) -[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) +[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum) [![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) Automated builds are available for stable releases and the unstable master branch. Binary @@ -184,9 +184,9 @@ archives are published at https://geth.ethereum.org/downloads/. 
## Building the source -For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki. +For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth). -Building `geth` requires both a Go (version 1.10 or later) and a C compiler. You can install +Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install them using your favourite package manager. Once the dependencies are installed, run ```shell @@ -206,18 +206,19 @@ directory. | Command | Description | | :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | +| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | +| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | +| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | +| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. 
Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | -| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). | -| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | +| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | +| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | ## Running `geth` Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), +[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)), but we've enumerated a few common parameter combos to get you up to speed quickly on how you can run your own `geth` instance. @@ -226,23 +227,24 @@ on how you can run your own `geth` instance. By far the most common scenario is people wanting to simply interact with the Ethereum network: create accounts; transfer funds; deploy and interact with contracts. For this particular use-case the user doesn't care about years-old historical data, so we can -fast-sync quickly to the current state of the network. To do so: +sync quickly to the current state of the network. To do so: ```shell $ geth console ``` This command will: - * Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), + * Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag), causing it to download more data in exchange for avoiding processing the entire history of the Ethereum network, which is very CPU intensive. 
- * Start up `geth`'s built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), - (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) - as well as `geth`'s own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). + * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), + (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md) + (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), + as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). This tool is optional and if you leave it out you can always attach to an already running `geth` instance with `geth attach`. -### A Full node on the Ethereum test network +### A Full node on the Görli test network Transitioning towards developers, if you'd like to play around with creating Ethereum contracts, you almost certainly would like to do that without any real money involved until @@ -251,23 +253,24 @@ network, you want to join the **test** network with your node, which is fully eq the main network, but with play-Ether only. ```shell -$ geth --testnet console +$ geth --goerli console ``` The `console` subcommand has the exact same meaning as above and they are equally -useful on the testnet too. Please see above for their explanations if you've skipped here. +useful on the testnet too. Please, see above for their explanations if you've skipped here. -Specifying the `--testnet` flag, however, will reconfigure your `geth` instance a bit: +Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: + * Instead of connecting the main Ethereum network, the client will connect to the Görli + test network, which uses different P2P bootnodes, different network IDs and genesis + states. * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` - will nest itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on + will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on Linux). Note, on OSX and Linux this also means that attaching to a running testnet node requires the use of a custom endpoint since `geth attach` will try to attach to a - production node endpoint by default. E.g. - `geth attach /testnet/geth.ipc`. Windows users are not affected by + production node endpoint by default, e.g., + `geth attach /goerli/geth.ipc`. Windows users are not affected by this. - * Instead of connecting the main Ethereum network, the client will connect to the test - network, which uses different P2P bootnodes, different network IDs and genesis states. *Note: Although there are some internal protective measures to prevent transactions from crossing over between the main network and test network, you should make sure to always @@ -277,17 +280,26 @@ accounts available between them.* ### Full node on the Rinkeby test network -The above test network is a cross-client one based on the ethash proof-of-work consensus -algorithm. As such, it has certain extra overhead and is more susceptible to reorganization -attacks due to the network's low difficulty/security. 
Go Ethereum also supports connecting -to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io) -(operated by members of the community). This network is lighter, more secure, but is only -supported by go-ethereum. +Go Ethereum also supports connecting to the older proof-of-authority based test network +called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community. ```shell $ geth --rinkeby console ``` +### Full node on the Ropsten test network + +In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The +Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such, +it has certain extra overhead and is more susceptible to reorganization attacks due to the +network's low difficulty/security. + +```shell +$ geth --ropsten console +``` + +*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.* + ### Configuration As an alternative to passing the numerous flags to the `geth` binary, you can also pass a @@ -317,21 +329,21 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ ethereum/client-go ``` -This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the +This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image. -Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers -and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not +Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers +and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not accessible from the outside. ### Programmatically interfacing `geth` nodes As a developer, sooner rather than later you'll want to start interacting with `geth` and the Ethereum network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) -and [`geth` specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). +this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) +and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based platforms, and named pipes on Windows). @@ -342,16 +354,16 @@ you'd expect. 
HTTP based JSON-RPC API options: - * `--rpc` Enable the HTTP-RPC server - * `--rpcaddr` HTTP-RPC server listening interface (default: `localhost`) - * `--rpcport` HTTP-RPC server listening port (default: `8545`) - * `--rpcapi` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) - * `--rpccorsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) + * `--http` Enable the HTTP-RPC server + * `--http.addr` HTTP-RPC server listening interface (default: `localhost`) + * `--http.port` HTTP-RPC server listening port (default: `8545`) + * `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) + * `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) * `--ws` Enable the WS-RPC server - * `--wsaddr` WS-RPC server listening interface (default: `localhost`) - * `--wsport` WS-RPC server listening port (default: `8546`) - * `--wsapi` API's offered over the WS-RPC interface (default: `eth,net,web3`) - * `--wsorigins` Origins from which to accept websockets requests + * `--ws.addr` WS-RPC server listening interface (default: `localhost`) + * `--ws.port` WS-RPC server listening port (default: `8546`) + * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) + * `--ws.origins` Origins from which to accept websockets requests * `--ipcdisable` Disable the IPC-RPC server * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) @@ -387,7 +399,10 @@ aware of and agree upon. This consists of a small JSON file (e.g. call it `genes "eip158Block": 0, "byzantiumBlock": 0, "constantinopleBlock": 0, - "petersburgBlock": 0 + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0 }, "alloc": {}, "coinbase": "0x0000000000000000000000000000000000000000", @@ -436,7 +451,7 @@ $ bootnode --genkey=boot.key $ bootnode --nodekey=boot.key ``` -With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format) +With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format) that other nodes can use to connect to it and exchange peer information. Make sure to replace the displayed IP address information (most probably `[::]`) with your externally accessible IP to get the actual `enode` URL. @@ -473,13 +488,13 @@ ones either). To start a `geth` instance for mining, run it with all your usual by: ```shell -$ geth --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000 +$ geth --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000 ``` Which will start mining blocks and transactions on a single CPU thread, crediting all -proceedings to the account specified by `--etherbase`. You can further tune the mining -by changing the default gas limit blocks converge to (`--targetgaslimit`) and the price -transactions are accepted at (`--gasprice`). +proceedings to the account specified by `--miner.etherbase`. You can further tune the mining +by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price +transactions are accepted at (`--miner.gasprice`). ## Contribution @@ -488,7 +503,7 @@ from anyone on the internet, and are grateful for even the smallest of fixes! 
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request for the maintainers to review and merge into the main code base. If you wish to submit -more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) +more complex changes though, please check up with the core devs first on [our Discord Server](https://discord.gg/invite/nthXNEv) to ensure those changes are in line with the general philosophy of the project and/or get some early feedback which can make both your efforts much lighter as well as our review and merge procedures quick and simple. @@ -503,7 +518,7 @@ Please make sure your contributions adhere to our coding guidelines: * Commit messages should be prefixed with the package(s) they modify. * E.g. "eth, rpc: make trace configs optional" -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) for more details on configuring your environment, managing project dependencies, and testing procedures. diff --git a/SECURITY.md b/SECURITY.md index bc54ede42fac..41b900d5e984 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,119 +2,174 @@ ## Supported Versions -Please see Releases. We recommend to use the most recent released version. +Please see [Releases](https://github.com/ethereum/go-ethereum/releases). We recommend using the [most recently released version](https://github.com/ethereum/go-ethereum/releases/latest). ## Audit reports Audit reports are published in the `docs` folder: https://github.com/ethereum/go-ethereum/tree/master/docs/audits - | Scope | Date | Report Link | | ------- | ------- | ----------- | | `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) | | `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) | - - +| `Discv5` | 20191015 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2019-10-15_Discv5_audit_LeastAuthority.pdf) | +| `Discv5` | 20200124 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2020-01-24_DiscV5_audit_Cure53.pdf) | ## Reporting a Vulnerability **Please do not file a public ticket** mentioning the vulnerability. -To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. +To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities. + +Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number. The following key may be used to communicate sensitive information to developers. 
Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A` - ``` -----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY -neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9 -L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi -m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b -fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd -EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7 -M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv -QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H -h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c -2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ -EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB -tDlFdGhlcmV1bSBGb3VuZGF0aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0 -aGVyZXVtLm9yZz6JAj4EEwECACgCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA -BQJaCWH6BQkFo2BYAAoJEOiNMzT6X2oK+DEP/3H6dxkm0hvHZKoHLVuuxcu3EHYo -k5sd3MMWPrZSN8qzZnY7ayEDMxnarWOizc+2jfOxfJlzX/g8lR1/fsHdWPFPhPoV -Qk8ygrHn1H8U8+rpw/U03BqmqHpYCDzJ+CIis9UWROniqXw1nuqu/FtWOsdWxNKh -jUo6k/0EsaXsxRPzgJv7fEUcVcQ7as/C3x9sy3muc2gvgA4/BKoGPb1/U0GuA8lV -fDIDshAggmnSUAg+TuYSAAdoFQ1sKwFMPigcLJF2eyKuK3iUyixJrec/c4LSf3wA -cGghbeuqI8INP0Y2zvXDQN2cByxsFAuoZG+m0cyKGaDH2MVUvOKKYqn/03qvrf15 -AWAsW0l0yQwOTCo3FbsNzemClm5Bj/xH0E4XuwXwChcMCMOWJrFoxyvCEI+keoQc -c08/a8/MtS7vBAABXwOziSmm6CNqmzpWrh/fDrjlJlba9U3MxzvqU3IFlTdMratv -6V+SgX+L25lCzW4NxxUavoB8fAlvo8lxpHKo24FP+RcLQ8XqkU3RiUsgRjQRFOqQ -TaJcsp8mimmiYyf24mNu6b48pi+a5c/eQR9w59emeEUZqsJU+nqv8BWIIp7o4Agh -NYnKjkhPlY5e1fLVfAHIADZFynWwRPkPMJSrBiP5EtcOFxQGHGjRxU/KjXkvE0hV -xYb1PB8pWMTu/beeiQI+BBMBAgAoBQJYJd7YAhsDBQkB4TOABgsJCAcDAgYVCAIJ -CgsEFgIDAQIeAQIXgAAKCRDojTM0+l9qCplDD/9IZ2i+m1cnqQKtiyHbyFGx32oL -fzqPylX2bOG5DPsSTorSUdJMGVfT04oVxXc4S/2DVnNvi7RAbSiLapCWSplgtBOj -j1xlblOoXxT3m7s1XHGCX5tENxI9fVSSPVKJn+fQaWpPB2MhBA+1lUI6GJ+11T7K -J8LrP/fiw1/nOb7rW61HW44Gtyox23sA/d1+DsFVaF8hxJlNj5coPKr8xWzQ8pQl -juzdjHDukjevuw4rRmRq9vozvj9keEU9XJ5dldyEVXFmdDk7KT0p0Rla9nxYhzf/ -r/Bv8Bzy0HCWRb2D31BjXXGG05oVnYmNGxGFxYja4MwgrMmne3ilEVjfUJsapsqi -w41BAyQgIdfREulYN7ahsF5PrjVAqBd9IGtE8ULelF2SQxEBQBngEkP0ahP6tRAL -i7/CBjPKOyKijtqVny7qrGOnU2ygcA88/WDibexDhrjz0Gx8WmErU7rIWZiZ5u4Y -vJYVRo0+6rBCXRPeSJfiP5h1p17Anr2l42boAYslfcrzquB8MHtrNcyn650OLtHG -nbxgIdniKrpuzGN6Opw+O2id2JhD1/1p4SOemwAmthplr1MIyOHNP3q93rEj2J7h -5zPS/AJuKkMDFUpslPNLQjCOwPXtdzL7/kUZGBSyez1T3TaW1uY6l9XaJJRaSn+v -1zPgfp4GJ3lPs4AlAbQ0RXRoZXJldW0gRm91bmRhdGlvbiBCdWcgQm91bnR5IDxi -b3VudHlAZXRoZXJldW0ub3JnPokCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC -AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagoENg/+LnSaVeMxiGVtcjWl -b7Xd73yrEy4uxiESS1AalW9mMf7oZzfI05f7QIQlaLAkNac74vZDJbPKjtb7tpMO -RFhRZMCveq6CPKU6pd1SI8IUVUKwpEe6AJP3lHdVP57dquieFE2HlYKm6uHbCGWU -0cjyTA+uu2KbgCHGmofsPY/xOcZLGEHTHqa5w60JJAQm+BSDKnw8wTyrxGvA3EK/ -ePSvOZMYa+iw6vYuZeBIMbdiXR/A2keBi3GuvqB8tDMj7P22TrH5mVDm3zNqGYD6 -amDPeiWp4cztY3aZyLcgYotqXPpDceZzDn+HopBPzAb/llCdE7bVswKRhphVMw4b -bhL0R/TQY7Sf6TK2LKSBrjv0DWOSijikE71SJcBnJvHU7EpKrQQ0lMGclm3ynyji -Nf0YTPXQt4I+fwTmOew2GFeK3UytNWbWI7oXX7Nm4bj9bhf3IJ0kmZb/Gs73+xII -e7Rz52Mby436tWyQIQiF9ITYNGvNf53TwBBZMn0pKPiTyr3Ur7FHEotkEOFNh1// -4zQY10XxuBdLrYGyZ4V8xHJM+oKre8Eg2R9qHXVbjvErHE+7CvgnV7YUip0criPr -BlKRvuoJaSliH2JFhSjWVrkPmFGrWN0BAx10yIqMnEplfKeHf4P9Elek3oInS8WP -G1zJG6s/t5+hQK0X37+TB+6rd3GJAj4EEwECACgFAlgl4TsCGwMFCQHhM4AGCwkI -BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOiNMzT6X2oKzf8P/iIKd77WHTbp4pMN 
-8h52HyZJtDJmjA1DPZrbGl1TesW/Z9uTd12txlgqZnbG2GfN9+LSP6EOPzR6v2xC -OVhR+RdWhZDJJuQCVS7lJIqQrZgmeTZG0TyQPZdLjVFBOrrhVwYX+HXbu429IzHr -URf5InyR1QgqOXyElDYS6e28HFqvaoA0DWTWDDqOLPVl+U5fuceIE2XXdv3AGLeP -Yf8J5MPobjPiZtBqI6S6iENY2Yn35qLX+axeC/iYSCHVtFuCCIdb/QYR1ZZV8Ps/ -aI9DwC7LU+YfPw7iqCIoqxSeA3o1PORkdSigEg3jtfRv5UqVo9a0oBb9jdoADsat -F/gW0E7mto3XGOiaR0eB9SSdsM3x7Bz4A0HIGNaxpZo1RWqlO91leP4c13Px7ISv -5OGXfLg+M8qb+qxbGd1HpitGi9s1y1aVfEj1kOtZ0tN8eu+Upg5WKwPNBDX3ar7J -9NCULgVSL+E79FG+zXw62gxiQrLfKzm4wU/9L5wVkwQnm29hLJ0tokrSBZFnc/1l -7OC+GM63tYicKkY4rqmoWUeYx7IwFH9mtDtvR1RxO85RbQhZizwpZpdpRkH0DqZu -ZJRmRa5r7rPqmfa7d+VIFhz2Xs8pJMLVqxTsLKcLglmjw7aOrYG0SWeH7YraXWGD -N3SlvSBiVwcK7QUKzLLvpadLwxfsuQINBFgl3tgBEACbgq6HTN5gEBi0lkD/MafI -nmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4hYontkMaKRlCg2Rvgjvk3Zve0 -PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT19BdeAQRFvcfd+8w8 -f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj26bf+2+1 -DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6 -D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66i -PsR99MQ7FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A -4tGkHl08KZ2N9o6GrfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8gr -eW8xB4zuf9Mkuou+RHNmo8PebHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0 -VRxdPImKun+4LOXbfOxArOSkY6i35+gsgkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9 -IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/bM1ACUtipMiIVeUs2uFiRjpz -A1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJaCWIIBQkFo2BYAAoJ -EOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg3IHMGxDM -b/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8 -KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0I -Q1UKKXvzZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0 -K9lneidcqtBDvlggJTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0T -NOOE8fXlvu8iuIAMBSDL9ep6sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd -5MTi0MDRNTij431kn8T/D0LCgmoUmYYMBgbwFhXr67axPZlKjrqR0z3F/Elv0ZPP -cVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1qScl9HiMxjt/H6aPastH63/7w -cN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4/Lih6Z1TlwcFVap+ -cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1pM6AOQPpZ -85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4 -=r6KK ------END PGP PUBLIC KEY BLOCK----- +Version: SKS 1.1.6 +Comment: Hostname: pgp.mit.edu + +mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1 +82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM +qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q +lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac +48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y +PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho +yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F +nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c +2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB +8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG +b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa +FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b +mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28 +rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB +Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV +Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD +CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt 
+09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX +QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt +Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp +XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo +AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2 +D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl +s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI +ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85 +kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j +dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu +O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01 +ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky +fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc +T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S +V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL +CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf +Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm +5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp +7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm +otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS +DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF +aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr +A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz +/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF +rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt +BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt +lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0 +GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj +GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB +6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E +qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA +zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA +nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP +5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN +WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2 +8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+ +N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8 +c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT +wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl +D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem +J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf +Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw +NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL +QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V +Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye +uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7 +TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc +Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC 
+AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ +diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC +sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF +E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA +B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU +C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7 +BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1 +TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ +SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg +CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8 +HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B +AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR +0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19 +VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O +wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0 +OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR +WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1 +EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG +jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M +Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC +MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/ +AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh +ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX +FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm +/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+ +Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6 +DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT +1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655 +9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU ++2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1 +qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4 +OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN +BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h +YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1 +9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj +26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6 +D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7 +FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G +rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe +bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs +gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/ +bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa +CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg +3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8 +KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz +ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg +JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6 +sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM 
+BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q +Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4 +/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p +M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl +BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q +1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs +6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l +LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3 +yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd +cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle +5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX +GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1 +rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR +XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa +QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc +mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB +8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy +I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro +yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh +ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz +i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP ++m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB +402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur +epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx +PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano= +=arte +-----END PGP PUBLIC KEY BLOCK------ ``` diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index fdb4c48b3931..cd2f4d7978bd 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -19,10 +19,12 @@ package abi import ( "bytes" "encoding/json" + "errors" "fmt" "io" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ) // The ABI holds information about a contract's context and available @@ -32,6 +34,13 @@ type ABI struct { Constructor Method Methods map[string]Method Events map[string]Event + Errors map[string]Error + + // Additional "special" functions introduced in solidity v0.6.0. + // It's separated from the original default fallback. Each contract + // can only define one fallback and receive function. + Fallback Method // Note it's also used to represent legacy fallback before v0.6.0 + Receive Method } // JSON returns a parsed ABI interface and error if it failed. 
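The hunk above extends the ABI struct with non-method entries: an `Errors` map plus dedicated `Fallback` and `Receive` fields. As a minimal sketch (not part of the patch, with a made-up JSON definition), this is how a parsed ABI exposes those new entries:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Hypothetical ABI containing a payable receive function and a custom error.
	const def = `[
		{"type":"receive","stateMutability":"payable"},
		{"type":"error","name":"MyError","inputs":[{"name":"code","type":"uint256"}]}
	]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	// Receive and Fallback are dedicated fields now, not entries in Methods.
	fmt.Println("has receive:", parsed.HasReceive())
	// Custom Solidity errors get their own lookup map, keyed by name.
	fmt.Println("error signature:", parsed.Errors["MyError"].Sig)
}
```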
@@ -42,7 +51,6 @@ func JSON(reader io.Reader) (ABI, error) { if err := dec.Decode(&abi); err != nil { return ABI{}, err } - return abi, nil } @@ -70,106 +78,131 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) { return nil, err } // Pack up the method ID too if not a constructor and return - return append(method.ID(), arguments...), nil + return append(method.ID, arguments...), nil } -// Unpack output in v according to the abi specification -func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) { +func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event + var args Arguments if method, ok := abi.Methods[name]; ok { if len(data)%32 != 0 { - return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data) + return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data) } - return method.Outputs.Unpack(v, data) + args = method.Outputs } if event, ok := abi.Events[name]; ok { - return event.Inputs.Unpack(v, data) + args = event.Inputs + } + if args == nil { + return nil, errors.New("abi: could not locate named method or event") } - return fmt.Errorf("abi: could not locate named method or event") + return args, nil } -// UnpackIntoMap unpacks a log into the provided map[string]interface{} -func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) { - // since there can't be naming collisions with contracts and events, - // we need to decide whether we're calling a method or an event - if method, ok := abi.Methods[name]; ok { - if len(data)%32 != 0 { - return fmt.Errorf("abi: improperly formatted output") - } - return method.Outputs.UnpackIntoMap(v, data) +// Unpack unpacks the output according to the abi specification. +func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) { + args, err := abi.getArguments(name, data) + if err != nil { + return nil, err } - if event, ok := abi.Events[name]; ok { - return event.Inputs.UnpackIntoMap(v, data) + return args.Unpack(data) +} + +// UnpackIntoInterface unpacks the output in v according to the abi specification. +// It performs an additional copy. Please only use, if you want to unpack into a +// structure that does not strictly conform to the abi structure (e.g. has additional arguments) +func (abi ABI) UnpackIntoInterface(v interface{}, name string, data []byte) error { + args, err := abi.getArguments(name, data) + if err != nil { + return err + } + unpacked, err := args.Unpack(data) + if err != nil { + return err } - return fmt.Errorf("abi: could not locate named method or event") + return args.Copy(v, unpacked) } -// UnmarshalJSON implements json.Unmarshaler interface +// UnpackIntoMap unpacks a log into the provided map[string]interface{}. +func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) { + args, err := abi.getArguments(name, data) + if err != nil { + return err + } + return args.UnpackIntoMap(v, data) +} + +// UnmarshalJSON implements json.Unmarshaler interface. func (abi *ABI) UnmarshalJSON(data []byte) error { var fields []struct { - Type string - Name string - Constant bool + Type string + Name string + Inputs []Argument + Outputs []Argument + + // Status indicator which can be: "pure", "view", + // "nonpayable" or "payable". 
StateMutability string - Anonymous bool - Inputs []Argument - Outputs []Argument + + // Deprecated Status indicators, but removed in v0.6.0. + Constant bool // True if function is either pure or view + Payable bool // True if function is payable + + // Event relevant indicator represents the event is + // declared as anonymous. + Anonymous bool } if err := json.Unmarshal(data, &fields); err != nil { return err } abi.Methods = make(map[string]Method) abi.Events = make(map[string]Event) + abi.Errors = make(map[string]Error) for _, field := range fields { switch field.Type { case "constructor": - abi.Constructor = Method{ - Inputs: field.Inputs, + abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil) + case "function": + name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok }) + abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs) + case "fallback": + // New introduced function type in v0.6.0, check more detail + // here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function + if abi.HasFallback() { + return errors.New("only single fallback is allowed") } - // empty defaults to function according to the abi spec - case "function", "": - name := field.Name - _, ok := abi.Methods[name] - for idx := 0; ok; idx++ { - name = fmt.Sprintf("%s%d", field.Name, idx) - _, ok = abi.Methods[name] + abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil) + case "receive": + // New introduced function type in v0.6.0, check more detail + // here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function + if abi.HasReceive() { + return errors.New("only single receive is allowed") } - isConst := field.Constant || field.StateMutability == "pure" || field.StateMutability == "view" - abi.Methods[name] = Method{ - Name: name, - RawName: field.Name, - Const: isConst, - Inputs: field.Inputs, - Outputs: field.Outputs, + if field.StateMutability != "payable" { + return errors.New("the statemutability of receive can only be payable") } + abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil) case "event": - name := field.Name - _, ok := abi.Events[name] - for idx := 0; ok; idx++ { - name = fmt.Sprintf("%s%d", field.Name, idx) - _, ok = abi.Events[name] - } - abi.Events[name] = Event{ - Name: name, - RawName: field.Name, - Anonymous: field.Anonymous, - Inputs: field.Inputs, - } + name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok }) + abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs) + case "error": + abi.Errors[field.Name] = NewError(field.Name, field.Inputs) + default: + return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name) } } - return nil } -// MethodById looks up a method by the 4-byte id -// returns nil if none found +// MethodById looks up a method by the 4-byte id, +// returns nil if none found. 
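To show the reworked unpacking API from the hunk above in use, here is a small sketch (not part of the patch; the ABI definition and the 32-byte return word are invented): `Unpack` now returns the decoded values as a slice, while `UnpackIntoInterface` keeps the old copy-into-struct behaviour under a new name.

```go
package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	const def = `[{"type":"function","name":"get","outputs":[{"name":"value","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	// Fake return data: the value 42 left-padded to one 32-byte word.
	ret := common.LeftPadBytes(big.NewInt(42).Bytes(), 32)

	// New style: Unpack returns the decoded values directly.
	values, err := parsed.Unpack("get", ret)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(values[0].(*big.Int)) // 42

	// Struct-based decoding lives on as UnpackIntoInterface.
	var out struct{ Value *big.Int }
	if err := parsed.UnpackIntoInterface(&out, "get", ret); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Value) // 42
}
```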
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { if len(sigdata) < 4 { return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata)) } for _, method := range abi.Methods { - if bytes.Equal(method.ID(), sigdata[:4]) { + if bytes.Equal(method.ID, sigdata[:4]) { return &method, nil } } @@ -180,9 +213,58 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { // ABI and returns nil if none found. func (abi *ABI) EventByID(topic common.Hash) (*Event, error) { for _, event := range abi.Events { - if bytes.Equal(event.ID().Bytes(), topic.Bytes()) { + if bytes.Equal(event.ID.Bytes(), topic.Bytes()) { return &event, nil } } return nil, fmt.Errorf("no event with id: %#x", topic.Hex()) } + +// HasFallback returns an indicator whether a fallback function is included. +func (abi *ABI) HasFallback() bool { + return abi.Fallback.Type == Fallback +} + +// HasReceive returns an indicator whether a receive function is included. +func (abi *ABI) HasReceive() bool { + return abi.Receive.Type == Receive +} + +// revertSelector is a special function selector for revert reason unpacking. +var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4] + +// UnpackRevert resolves the abi-encoded revert reason. According to the solidity +// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert, +// the provided revert reason is abi-encoded as if it were a call to a function +// `Error(string)`. So it's a special tool for it. +func UnpackRevert(data []byte) (string, error) { + if len(data) < 4 { + return "", errors.New("invalid data for unpacking") + } + if !bytes.Equal(data[:4], revertSelector) { + return "", errors.New("invalid data for unpacking") + } + typ, _ := NewType("string", "", nil) + unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) + if err != nil { + return "", err + } + return unpacked[0].(string), nil +} + +// overloadedName returns the next available name for a given thing. +// Needed since solidity allows for overloading. +// +// e.g. if the abi contains Methods send, send1 +// overloadedName would return send2 for input send. +// +// overloadedName works for methods, events and errors. 
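The new `UnpackRevert` helper above decodes the ABI-encoded `Error(string)` payload a reverting call returns. A short sketch (not part of the patch), reusing the revert payload from the test added later in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// 0x08c379a0 is the Error(string) selector, followed by the ABI-encoded
	// string "revert reason" (offset word, length word, padded string data).
	payload := common.Hex2Bytes("08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000")
	reason, err := abi.UnpackRevert(payload)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reason) // "revert reason"
}
```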
+func overloadedName(rawName string, isAvail func(string) bool) string { + name := rawName + ok := isAvail(name) + for idx := 0; ok; idx++ { + name = fmt.Sprintf("%s%d", rawName, idx) + ok = isAvail(name) + } + return name +} diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index 61ab70eb83ad..cc8dfc61c389 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -19,6 +19,7 @@ package abi import ( "bytes" "encoding/hex" + "errors" "fmt" "math/big" "reflect" @@ -26,57 +27,108 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" ) const jsondata = ` [ - { "type" : "function", "name" : "balance", "constant" : true }, - { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] } + { "type" : "function", "name" : ""}, + { "type" : "function", "name" : "balance", "stateMutability" : "view" }, + { "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }, + { "type" : "function", "name" : "test", "inputs" : [ { "name" : "number", "type" : "uint32" } ] }, + { "type" : "function", "name" : "string", "inputs" : [ { "name" : "inputs", "type" : "string" } ] }, + { "type" : "function", "name" : "bool", "inputs" : [ { "name" : "inputs", "type" : "bool" } ] }, + { "type" : "function", "name" : "address", "inputs" : [ { "name" : "inputs", "type" : "address" } ] }, + { "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] }, + { "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] }, + { "type" : "function", "name" : "int8", "inputs" : [ { "name" : "inputs", "type" : "int8" } ] }, + { "type" : "function", "name" : "bytes32", "inputs" : [ { "name" : "inputs", "type" : "bytes32" } ] }, + { "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] }, + { "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] }, + { "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }, + { "type" : "function", "name" : "slice256", "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] }, + { "type" : "function", "name" : "sliceAddress", "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] }, + { "type" : "function", "name" : "sliceMultiAddress", "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }, + { "type" : "function", "name" : "nestedArray", "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] }, + { "type" : "function", "name" : "nestedArray2", "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] }, + { "type" : "function", "name" : "nestedSlice", "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }, + { "type" : "function", "name" : "receive", "inputs" : [ { "name" : "memo", "type" : "bytes" }], "outputs" : [], "payable" : true, "stateMutability" : "payable" }, + { "type" : "function", "name" : "fixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, + { "type" : "function", "name" : "fixedArrBytes", "stateMutability" : "view", "inputs" : [ { "name" : "bytes", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, + { "type" : 
"function", "name" : "mixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" } ] }, + { "type" : "function", "name" : "doubleFixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }, + { "type" : "function", "name" : "multipleMixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }, + { "type" : "function", "name" : "overloadedNames", "stateMutability" : "view", "inputs": [ { "components": [ { "internalType": "uint256", "name": "_f", "type": "uint256" }, { "internalType": "uint256", "name": "__f", "type": "uint256"}, { "internalType": "uint256", "name": "f", "type": "uint256"}],"internalType": "struct Overloader.F", "name": "f","type": "tuple"}]} ]` -const jsondata2 = ` -[ - { "type" : "function", "name" : "balance", "constant" : true }, - { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }, - { "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] }, - { "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] }, - { "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] }, - { "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] }, - { "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] }, - { "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] }, - { "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] }, - { "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] }, - { "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }, - { "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] }, - { "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] }, - { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }, - { "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] }, - { "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] }, - { "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] } -]` +var ( + Uint256, _ = NewType("uint256", "", nil) + Uint32, _ = NewType("uint32", "", nil) + Uint16, _ = NewType("uint16", "", nil) + String, _ = NewType("string", "", nil) + Bool, _ = NewType("bool", "", nil) + Bytes, _ = 
NewType("bytes", "", nil) + Bytes32, _ = NewType("bytes32", "", nil) + Address, _ = NewType("address", "", nil) + Uint64Arr, _ = NewType("uint64[]", "", nil) + AddressArr, _ = NewType("address[]", "", nil) + Int8, _ = NewType("int8", "", nil) + // Special types for testing + Uint32Arr2, _ = NewType("uint32[2]", "", nil) + Uint64Arr2, _ = NewType("uint64[2]", "", nil) + Uint256Arr, _ = NewType("uint256[]", "", nil) + Uint256Arr2, _ = NewType("uint256[2]", "", nil) + Uint256Arr3, _ = NewType("uint256[3]", "", nil) + Uint256ArrNested, _ = NewType("uint256[2][2]", "", nil) + Uint8ArrNested, _ = NewType("uint8[][2]", "", nil) + Uint8SliceNested, _ = NewType("uint8[][]", "", nil) + TupleF, _ = NewType("tuple", "struct Overloader.F", []ArgumentMarshaling{ + {Name: "_f", Type: "uint256"}, + {Name: "__f", Type: "uint256"}, + {Name: "f", Type: "uint256"}}) +) + +var methods = map[string]Method{ + "": NewMethod("", "", Function, "", false, false, nil, nil), + "balance": NewMethod("balance", "balance", Function, "view", false, false, nil, nil), + "send": NewMethod("send", "send", Function, "", false, false, []Argument{{"amount", Uint256, false}}, nil), + "test": NewMethod("test", "test", Function, "", false, false, []Argument{{"number", Uint32, false}}, nil), + "string": NewMethod("string", "string", Function, "", false, false, []Argument{{"inputs", String, false}}, nil), + "bool": NewMethod("bool", "bool", Function, "", false, false, []Argument{{"inputs", Bool, false}}, nil), + "address": NewMethod("address", "address", Function, "", false, false, []Argument{{"inputs", Address, false}}, nil), + "uint64[]": NewMethod("uint64[]", "uint64[]", Function, "", false, false, []Argument{{"inputs", Uint64Arr, false}}, nil), + "uint64[2]": NewMethod("uint64[2]", "uint64[2]", Function, "", false, false, []Argument{{"inputs", Uint64Arr2, false}}, nil), + "int8": NewMethod("int8", "int8", Function, "", false, false, []Argument{{"inputs", Int8, false}}, nil), + "bytes32": NewMethod("bytes32", "bytes32", Function, "", false, false, []Argument{{"inputs", Bytes32, false}}, nil), + "foo": NewMethod("foo", "foo", Function, "", false, false, []Argument{{"inputs", Uint32, false}}, nil), + "bar": NewMethod("bar", "bar", Function, "", false, false, []Argument{{"inputs", Uint32, false}, {"string", Uint16, false}}, nil), + "slice": NewMethod("slice", "slice", Function, "", false, false, []Argument{{"inputs", Uint32Arr2, false}}, nil), + "slice256": NewMethod("slice256", "slice256", Function, "", false, false, []Argument{{"inputs", Uint256Arr2, false}}, nil), + "sliceAddress": NewMethod("sliceAddress", "sliceAddress", Function, "", false, false, []Argument{{"inputs", AddressArr, false}}, nil), + "sliceMultiAddress": NewMethod("sliceMultiAddress", "sliceMultiAddress", Function, "", false, false, []Argument{{"a", AddressArr, false}, {"b", AddressArr, false}}, nil), + "nestedArray": NewMethod("nestedArray", "nestedArray", Function, "", false, false, []Argument{{"a", Uint256ArrNested, false}, {"b", AddressArr, false}}, nil), + "nestedArray2": NewMethod("nestedArray2", "nestedArray2", Function, "", false, false, []Argument{{"a", Uint8ArrNested, false}}, nil), + "nestedSlice": NewMethod("nestedSlice", "nestedSlice", Function, "", false, false, []Argument{{"a", Uint8SliceNested, false}}, nil), + "receive": NewMethod("receive", "receive", Function, "payable", false, true, []Argument{{"memo", Bytes, false}}, []Argument{}), + "fixedArrStr": NewMethod("fixedArrStr", "fixedArrStr", Function, "view", false, false, []Argument{{"str", 
String, false}, {"fixedArr", Uint256Arr2, false}}, nil), + "fixedArrBytes": NewMethod("fixedArrBytes", "fixedArrBytes", Function, "view", false, false, []Argument{{"bytes", Bytes, false}, {"fixedArr", Uint256Arr2, false}}, nil), + "mixedArrStr": NewMethod("mixedArrStr", "mixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}}, nil), + "doubleFixedArrStr": NewMethod("doubleFixedArrStr", "doubleFixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"fixedArr2", Uint256Arr3, false}}, nil), + "multipleMixedArrStr": NewMethod("multipleMixedArrStr", "multipleMixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}, {"fixedArr2", Uint256Arr3, false}}, nil), + "overloadedNames": NewMethod("overloadedNames", "overloadedNames", Function, "view", false, false, []Argument{{"f", TupleF, false}}, nil), +} func TestReader(t *testing.T) { - Uint256, _ := NewType("uint256", "", nil) - exp := ABI{ - Methods: map[string]Method{ - "balance": { - "balance", "balance", true, nil, nil, - }, - "send": { - "send", "send", false, []Argument{ - {"amount", Uint256, false}, - }, nil, - }, - }, + abi := ABI{ + Methods: methods, } - abi, err := JSON(strings.NewReader(jsondata)) + exp, err := JSON(strings.NewReader(jsondata)) if err != nil { - t.Error(err) + t.Fatal(err) } - // deep equal fails for some reason for name, expM := range exp.Methods { gotM, exist := abi.Methods[name] if !exist { @@ -98,8 +150,55 @@ func TestReader(t *testing.T) { } } +func TestInvalidABI(t *testing.T) { + json := `[{ "type" : "function", "name" : "", "constant" : fals }]` + _, err := JSON(strings.NewReader(json)) + if err == nil { + t.Fatal("invalid json should produce error") + } + json2 := `[{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "typ" : "uint256" } ] }]` + _, err = JSON(strings.NewReader(json2)) + if err == nil { + t.Fatal("invalid json should produce error") + } +} + +// TestConstructor tests a constructor function. 
+// The test is based on the following contract: +// contract TestConstructor { +// constructor(uint256 a, uint256 b) public{} +// } +func TestConstructor(t *testing.T) { + json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]` + method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil) + // Test from JSON + abi, err := JSON(strings.NewReader(json)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(abi.Constructor, method) { + t.Error("Missing expected constructor") + } + // Test pack/unpack + packed, err := abi.Pack("", big.NewInt(1), big.NewInt(2)) + if err != nil { + t.Error(err) + } + unpacked, err := abi.Constructor.Inputs.Unpack(packed) + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(unpacked[0], big.NewInt(1)) { + t.Error("Unable to pack/unpack from constructor") + } + if !reflect.DeepEqual(unpacked[1], big.NewInt(2)) { + t.Error("Unable to pack/unpack from constructor") + } +} + func TestTestNumbers(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } @@ -135,60 +234,22 @@ func TestTestNumbers(t *testing.T) { } } -func TestTestString(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) - if err != nil { - t.Fatal(err) - } - - if _, err := abi.Pack("string", "hello world"); err != nil { - t.Error(err) - } -} - -func TestTestBool(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) - if err != nil { - t.Fatal(err) - } - - if _, err := abi.Pack("bool", true); err != nil { - t.Error(err) - } -} - -func TestTestSlice(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) - if err != nil { - t.Fatal(err) - } - slice := make([]uint64, 2) - if _, err := abi.Pack("uint64[2]", slice); err != nil { - t.Error(err) - } - if _, err := abi.Pack("uint64[]", slice); err != nil { - t.Error(err) - } -} - func TestMethodSignature(t *testing.T) { - String, _ := NewType("string", "", nil) - m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil} + m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil) exp := "foo(string,string)" - if m.Sig() != exp { - t.Error("signature mismatch", exp, "!=", m.Sig()) + if m.Sig != exp { + t.Error("signature mismatch", exp, "!=", m.Sig) } idexp := crypto.Keccak256([]byte(exp))[:4] - if !bytes.Equal(m.ID(), idexp) { - t.Errorf("expected ids to match %x != %x", m.ID(), idexp) + if !bytes.Equal(m.ID, idexp) { + t.Errorf("expected ids to match %x != %x", m.ID, idexp) } - uintt, _ := NewType("uint256", "", nil) - m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil} + m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", Uint256, false}}, nil) exp = "foo(uint256)" - if m.Sig() != exp { - t.Error("signature mismatch", exp, "!=", m.Sig()) + if m.Sig != exp { + t.Error("signature mismatch", exp, "!=", m.Sig) } // Method with tuple arguments @@ -204,10 +265,10 @@ func TestMethodSignature(t *testing.T) { {Name: "y", Type: "int256"}, }}, }) - m = Method{"foo", "foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil} + m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"s", s, false}, {"bar", String, false}}, nil) exp = 
"foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)" - if m.Sig() != exp { - t.Error("signature mismatch", exp, "!=", m.Sig()) + if m.Sig != exp { + t.Error("signature mismatch", exp, "!=", m.Sig) } } @@ -219,12 +280,12 @@ func TestOverloadedMethodSignature(t *testing.T) { } check := func(name string, expect string, method bool) { if method { - if abi.Methods[name].Sig() != expect { - t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig()) + if abi.Methods[name].Sig != expect { + t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig) } } else { - if abi.Events[name].Sig() != expect { - t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig()) + if abi.Events[name].Sig != expect { + t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig) } } } @@ -234,8 +295,22 @@ func TestOverloadedMethodSignature(t *testing.T) { check("bar0", "bar(uint256,uint256)", false) } +func TestCustomErrors(t *testing.T) { + json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]` + abi, err := JSON(strings.NewReader(json)) + if err != nil { + t.Fatal(err) + } + check := func(name string, expect string) { + if abi.Errors[name].Sig != expect { + t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig) + } + } + check("MyError", "MyError(uint256)") +} + func TestMultiPack(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } @@ -400,15 +475,7 @@ func TestInputVariableInputLength(t *testing.T) { } func TestInputFixedArrayAndVariableInputLength(t *testing.T) { - const definition = `[ - { "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, - { "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] }, - { "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] }, - { "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] }, - { "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] } - ]` - - abi, err := JSON(strings.NewReader(definition)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Error(err) } @@ -555,7 +622,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) { strvalue = common.RightPadBytes([]byte(strin), 32) fixedarrin1value1 = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32) fixedarrin1value2 = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32) - dynarroffset = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32))) + dynarroffset = math.U256Bytes(big.NewInt(int64(256 + ((len(strin)/32)+1)*32))) dynarrlength = make([]byte, 32) 
dynarrlength[31] = byte(len(dynarrin)) dynarrinvalue1 = common.LeftPadBytes(dynarrin[0].Bytes(), 32) @@ -582,7 +649,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) { } func TestDefaultFunctionParsing(t *testing.T) { - const definition = `[{ "name" : "balance" }]` + const definition = `[{ "name" : "balance", "type" : "function" }]` abi, err := JSON(strings.NewReader(definition)) if err != nil { @@ -602,8 +669,6 @@ func TestBareEvents(t *testing.T) { { "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] } ]` - arg0, _ := NewType("uint256", "", nil) - arg1, _ := NewType("address", "", nil) tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}}) expectedEvents := map[string]struct { @@ -613,12 +678,12 @@ func TestBareEvents(t *testing.T) { "balance": {false, nil}, "anon": {true, nil}, "args": {false, []Argument{ - {Name: "arg0", Type: arg0, Indexed: false}, - {Name: "arg1", Type: arg1, Indexed: true}, + {Name: "arg0", Type: Uint256, Indexed: false}, + {Name: "arg1", Type: Address, Indexed: true}, }}, "tuple": {false, []Argument{ {Name: "t", Type: tuple, Indexed: false}, - {Name: "arg1", Type: arg1, Indexed: true}, + {Name: "arg1", Type: Address, Indexed: true}, }}, } @@ -692,7 +757,7 @@ func TestUnpackEvent(t *testing.T) { } var ev ReceivedEvent - err = abi.Unpack(&ev, "received", data) + err = abi.UnpackIntoInterface(&ev, "received", data) if err != nil { t.Error(err) } @@ -701,7 +766,7 @@ func TestUnpackEvent(t *testing.T) { Sender common.Address } var receivedAddrEv ReceivedAddrEvent - err = abi.Unpack(&receivedAddrEv, "receivedAddr", data) + err = abi.UnpackIntoInterface(&receivedAddrEv, "receivedAddr", data) if err != nil { t.Error(err) } @@ -891,45 +956,25 @@ func TestUnpackIntoMapNamingConflict(t *testing.T) { } func TestABI_MethodById(t *testing.T) { - const abiJSON = `[ - {"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"}, - {"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]}, - {"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]}, - {"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]}, - {"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]}, - {"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]}, - {"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]}, - {"type":"function","name":"balance","constant":true}, - {"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]}, - {"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]}, - 
{"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]}, - {"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]}, - {"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]}, - {"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]}, - {"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]}, - {"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]}, - {"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]}, - {"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]}, - {"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]}, - {"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]}, - {"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]} - ] -` - abi, err := JSON(strings.NewReader(abiJSON)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } for name, m := range abi.Methods { a := fmt.Sprintf("%v", m) - m2, err := abi.MethodById(m.ID()) + m2, err := abi.MethodById(m.ID) if err != nil { t.Fatalf("Failed to look up ABI method: %v", err) } b := fmt.Sprintf("%v", m2) if a != b { - t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID()) + t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID) } } + // test unsuccessful lookups + if _, err = abi.MethodById(crypto.Keccak256()); err == nil { + t.Error("Expected error: no method with this id") + } // Also test empty if _, err := abi.MethodById([]byte{0x00}); err == nil { t.Errorf("Expected error, too short to decode data") @@ -995,8 +1040,8 @@ func TestABI_EventById(t *testing.T) { t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum) } - if event.ID() != topicID { - t.Errorf("Event id %s does not match topic %s, test #%d", event.ID().Hex(), topicID.Hex(), testnum) + if event.ID != topicID { + t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum) } unknowntopicID := crypto.Keccak256Hash([]byte("unknownEvent")) @@ -1010,8 +1055,10 @@ func TestABI_EventById(t *testing.T) { } } -func TestDuplicateMethodNames(t *testing.T) { - abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` +// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name +// conflict and that the second transfer method will be renamed transfer1. 
+func TestDoubleDuplicateMethodNames(t *testing.T) { + abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` contractAbi, err := JSON(strings.NewReader(abiJSON)) if err != nil { t.Fatal(err) @@ -1030,24 +1077,86 @@ func TestDuplicateMethodNames(t *testing.T) { } } -// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name -// conflict and that the second transfer method will be renamed transfer1. -func TestDoubleDuplicateMethodNames(t *testing.T) { - abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]` +// TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name +// conflict and that the second send event will be renamed send1. +// The test runs the abi of the following contract. 
+// contract DuplicateEvent { +// event send(uint256 a); +// event send0(); +// event send(); +// } +func TestDoubleDuplicateEventNames(t *testing.T) { + abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]` contractAbi, err := JSON(strings.NewReader(abiJSON)) if err != nil { t.Fatal(err) } - if _, ok := contractAbi.Methods["transfer"]; !ok { - t.Fatalf("Could not find original method") + if _, ok := contractAbi.Events["send"]; !ok { + t.Fatalf("Could not find original event") } - if _, ok := contractAbi.Methods["transfer0"]; !ok { - t.Fatalf("Could not find duplicate method") + if _, ok := contractAbi.Events["send0"]; !ok { + t.Fatalf("Could not find duplicate event") } - if _, ok := contractAbi.Methods["transfer1"]; !ok { - t.Fatalf("Could not find duplicate method") + if _, ok := contractAbi.Events["send1"]; !ok { + t.Fatalf("Could not find duplicate event") } - if _, ok := contractAbi.Methods["transfer2"]; ok { - t.Fatalf("Should not have found extra method") + if _, ok := contractAbi.Events["send2"]; ok { + t.Fatalf("Should not have found extra event") + } +} + +// TestUnnamedEventParam checks that an event with unnamed parameters is +// correctly handled. +// The test runs the abi of the following contract. +// contract TestEvent { +// event send(uint256, uint256); +// } +func TestUnnamedEventParam(t *testing.T) { + abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]` + contractAbi, err := JSON(strings.NewReader(abiJSON)) + if err != nil { + t.Fatal(err) + } + + event, ok := contractAbi.Events["send"] + if !ok { + t.Fatalf("Could not find event") + } + if event.Inputs[0].Name != "arg0" { + t.Fatalf("Could not find input") + } + if event.Inputs[1].Name != "arg1" { + t.Fatalf("Could not find input") + } +} + +func TestUnpackRevert(t *testing.T) { + t.Parallel() + + var cases = []struct { + input string + expect string + expectErr error + }{ + {"", "", errors.New("invalid data for unpacking")}, + {"08c379a1", "", errors.New("invalid data for unpacking")}, + {"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil}, + } + for index, c := range cases { + t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) { + got, err := UnpackRevert(common.Hex2Bytes(c.input)) + if c.expectErr != nil { + if err == nil { + t.Fatalf("Expected non-nil error") + } + if err.Error() != c.expectErr.Error() { + t.Fatalf("Expected error mismatch, want %v, got %v", c.expectErr, err) + } + return + } + if c.expect != got { + t.Fatalf("Output mismatch, want %v, got %v", c.expect, got) + } + }) } } diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go index f8ec11b9fa89..e6c117fe5f0d 100644 --- a/accounts/abi/argument.go +++ b/accounts/abi/argument.go @@ -41,7 +41,7 @@ type ArgumentMarshaling struct { Indexed bool } -// UnmarshalJSON implements json.Unmarshaler interface +// UnmarshalJSON implements json.Unmarshaler interface. 
func (argument *Argument) UnmarshalJSON(data []byte) error { var arg ArgumentMarshaling err := json.Unmarshal(data, &arg) @@ -59,19 +59,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error { return nil } -// LengthNonIndexed returns the number of arguments when not counting 'indexed' ones. Only events -// can ever have 'indexed' arguments, it should always be false on arguments for method input/output -func (arguments Arguments) LengthNonIndexed() int { - out := 0 - for _, arg := range arguments { - if !arg.Indexed { - out++ - } - } - return out -} - -// NonIndexed returns the arguments with indexed arguments filtered out +// NonIndexed returns the arguments with indexed arguments filtered out. func (arguments Arguments) NonIndexed() Arguments { var ret []Argument for _, arg := range arguments { @@ -82,216 +70,121 @@ func (arguments Arguments) NonIndexed() Arguments { return ret } -// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[] +// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]. func (arguments Arguments) isTuple() bool { return len(arguments) > 1 } -// Unpack performs the operation hexdata -> Go format -func (arguments Arguments) Unpack(v interface{}, data []byte) error { +// Unpack performs the operation hexdata -> Go format. +func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { if len(data) == 0 { if len(arguments) != 0 { - return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") - } else { - return nil // Nothing to unmarshal, return + return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") } + return make([]interface{}, 0), nil } - // make sure the passed value is arguments pointer - if reflect.Ptr != reflect.ValueOf(v).Kind() { - return fmt.Errorf("abi: Unpack(non-pointer %T)", v) - } - marshalledValues, err := arguments.UnpackValues(data) - if err != nil { - return err - } - if arguments.isTuple() { - return arguments.unpackTuple(v, marshalledValues) - } - return arguments.unpackAtomic(v, marshalledValues[0]) + return arguments.UnpackValues(data) } -// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value +// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value. func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error { + // Make sure map is not nil + if v == nil { + return fmt.Errorf("abi: cannot unpack into a nil map") + } if len(data) == 0 { if len(arguments) != 0 { return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") - } else { - return nil // Nothing to unmarshal, return } + return nil // Nothing to unmarshal, return } marshalledValues, err := arguments.UnpackValues(data) if err != nil { return err } - return arguments.unpackIntoMap(v, marshalledValues) -} - -// unpack sets the unmarshalled value to go format. -// Note the dst here must be settable. 
-func unpack(t *Type, dst interface{}, src interface{}) error { - var ( - dstVal = reflect.ValueOf(dst).Elem() - srcVal = reflect.ValueOf(src) - ) - tuple, typ := false, t - for { - if typ.T == SliceTy || typ.T == ArrayTy { - typ = typ.Elem - continue - } - tuple = typ.T == TupleTy - break - } - if !tuple { - return set(dstVal, srcVal) - } - - // Dereferences interface or pointer wrapper - dstVal = indirectInterfaceOrPtr(dstVal) - - switch t.T { - case TupleTy: - if dstVal.Kind() != reflect.Struct { - return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind()) - } - fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal) - if err != nil { - return err - } - for i, elem := range t.TupleElems { - fname := fieldmap[t.TupleRawNames[i]] - field := dstVal.FieldByName(fname) - if !field.IsValid() { - return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i]) - } - if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil { - return err - } - } - return nil - case SliceTy: - if dstVal.Kind() != reflect.Slice { - return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind()) - } - slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len()) - for i := 0; i < slice.Len(); i++ { - if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil { - return err - } - } - dstVal.Set(slice) - case ArrayTy: - if dstVal.Kind() != reflect.Array { - return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind()) - } - array := reflect.New(dstVal.Type()).Elem() - for i := 0; i < array.Len(); i++ { - if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil { - return err - } - } - dstVal.Set(array) + for i, arg := range arguments.NonIndexed() { + v[arg.Name] = marshalledValues[i] } return nil } -// unpackIntoMap unpacks marshalledValues into the provided map[string]interface{} -func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledValues []interface{}) error { - // Make sure map is not nil - if v == nil { - return fmt.Errorf("abi: cannot unpack into a nil map") +// Copy performs the operation go format -> provided struct. 
+func (arguments Arguments) Copy(v interface{}, values []interface{}) error { + // make sure the passed value is arguments pointer + if reflect.Ptr != reflect.ValueOf(v).Kind() { + return fmt.Errorf("abi: Unpack(non-pointer %T)", v) } - - for i, arg := range arguments.NonIndexed() { - v[arg.Name] = marshalledValues[i] + if len(values) == 0 { + if len(arguments) != 0 { + return fmt.Errorf("abi: attempting to copy no values while %d arguments are expected", len(arguments)) + } + return nil // Nothing to copy, return } - return nil + if arguments.isTuple() { + return arguments.copyTuple(v, values) + } + return arguments.copyAtomic(v, values[0]) } // unpackAtomic unpacks ( hexdata -> go ) a single value -func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error { - if arguments.LengthNonIndexed() == 0 { - return nil - } - argument := arguments.NonIndexed()[0] - elem := reflect.ValueOf(v).Elem() +func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error { + dst := reflect.ValueOf(v).Elem() + src := reflect.ValueOf(marshalledValues) - if elem.Kind() == reflect.Struct && argument.Type.T != TupleTy { - fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem) - if err != nil { - return err - } - field := elem.FieldByName(fieldmap[argument.Name]) - if !field.IsValid() { - return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name) - } - return unpack(&argument.Type, field.Addr().Interface(), marshalledValues) + if dst.Kind() == reflect.Struct { + return set(dst.Field(0), src) } - return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues) + return set(dst, src) } -// unpackTuple unpacks ( hexdata -> go ) a batch of values. -func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error { - var ( - value = reflect.ValueOf(v).Elem() - typ = value.Type() - kind = value.Kind() - ) - if err := requireUnpackKind(value, typ, kind, arguments); err != nil { - return err - } +// copyTuple copies a batch of values from marshalledValues to v. 
+func (arguments Arguments) copyTuple(v interface{}, marshalledValues []interface{}) error { + value := reflect.ValueOf(v).Elem() + nonIndexedArgs := arguments.NonIndexed() - // If the interface is a struct, get of abi->struct_field mapping - var abi2struct map[string]string - if kind == reflect.Struct { - var ( - argNames []string - err error - ) - for _, arg := range arguments.NonIndexed() { - argNames = append(argNames, arg.Name) + switch value.Kind() { + case reflect.Struct: + argNames := make([]string, len(nonIndexedArgs)) + for i, arg := range nonIndexedArgs { + argNames[i] = arg.Name } - abi2struct, err = mapArgNamesToStructFields(argNames, value) + var err error + abi2struct, err := mapArgNamesToStructFields(argNames, value) if err != nil { return err } - } - for i, arg := range arguments.NonIndexed() { - switch kind { - case reflect.Struct: + for i, arg := range nonIndexedArgs { field := value.FieldByName(abi2struct[arg.Name]) if !field.IsValid() { return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name) } - if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil { + if err := set(field, reflect.ValueOf(marshalledValues[i])); err != nil { return err } - case reflect.Slice, reflect.Array: - if value.Len() < i { - return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len()) - } - v := value.Index(i) - if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil { - return err - } - if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil { + } + case reflect.Slice, reflect.Array: + if value.Len() < len(marshalledValues) { + return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len()) + } + for i := range nonIndexedArgs { + if err := set(value.Index(i), reflect.ValueOf(marshalledValues[i])); err != nil { return err } - default: - return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", typ) } + default: + return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", value.Type()) } return nil - } // UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification, // without supplying a struct to unpack into. Instead, this method returns a list containing the // values. An atomic argument will be a list with one element. func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) { - retval := make([]interface{}, 0, arguments.LengthNonIndexed()) + nonIndexedArgs := arguments.NonIndexed() + retval := make([]interface{}, 0, len(nonIndexedArgs)) virtualArgs := 0 - for index, arg := range arguments.NonIndexed() { + for index, arg := range nonIndexedArgs { marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data) if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) { // If we have a static array, like [3]uint256, these are coded as @@ -318,18 +211,18 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) { return retval, nil } -// PackValues performs the operation Go format -> Hexdata -// It is the semantic opposite of UnpackValues +// PackValues performs the operation Go format -> Hexdata. +// It is the semantic opposite of UnpackValues. func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) { return arguments.Pack(args...) } -// Pack performs the operation Go format -> Hexdata +// Pack performs the operation Go format -> Hexdata. 
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) { // Make sure arguments match up and pack them abiArgs := arguments if len(args) != len(abiArgs) { - return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs)) + return nil, fmt.Errorf("argument count mismatch: got %d for %d", len(args), len(abiArgs)) } // variable input is the output appended at the end of packed // output. This is used for strings and bytes types input. diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index e51f0bd8ead5..a4307a952910 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -17,10 +17,12 @@ package bind import ( + "context" "crypto/ecdsa" "errors" "io" "io/ioutil" + "math/big" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/external" @@ -28,11 +30,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) +// ErrNoChainID is returned whenever the user failed to specify a chain id. +var ErrNoChainID = errors.New("no chain id specified") + +// ErrNotAuthorized is returned when an account is not properly unlocked. +var ErrNotAuthorized = errors.New("not authorized to sign this account") + // NewTransactor is a utility method to easily create a transaction signer from // an encrypted json key stream and the associated passphrase. +// +// Deprecated: Use NewTransactorWithChainID instead. func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { + log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID") json, err := ioutil.ReadAll(keyin) if err != nil { return nil, err @@ -45,13 +57,17 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { } // NewKeyStoreTransactor is a utility method to easily create a transaction signer from -// an decrypted key from a keystore +// an decrypted key from a keystore. +// +// Deprecated: Use NewKeyStoreTransactorWithChainID instead. func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) { + log.Warn("WARNING: NewKeyStoreTransactor has been deprecated in favour of NewTransactorWithChainID") + signer := types.HomesteadSigner{} return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) if err != nil { @@ -59,18 +75,23 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account } return tx.WithSignature(signer, signature) }, + Context: context.Background(), }, nil } // NewKeyedTransactor is a utility method to easily create a transaction signer // from a single private key. +// +// Deprecated: Use NewKeyedTransactorWithChainID instead. 
func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { + log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID") keyAddr := crypto.PubkeyToAddress(key.PublicKey) + signer := types.HomesteadSigner{} return &TransactOpts{ From: keyAddr, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != keyAddr { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) if err != nil { @@ -78,7 +99,69 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { } return tx.WithSignature(signer, signature) }, + Context: context.Background(), + } +} + +// NewTransactorWithChainID is a utility method to easily create a transaction signer from +// an encrypted json key stream and the associated passphrase. +func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) { + json, err := ioutil.ReadAll(keyin) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(json, passphrase) + if err != nil { + return nil, err + } + return NewKeyedTransactorWithChainID(key.PrivateKey, chainID) +} + +// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from +// an decrypted key from a keystore. +func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) { + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.LatestSignerForChainID(chainID) + return &TransactOpts{ + From: account.Address, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != account.Address { + return nil, ErrNotAuthorized + } + signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + Context: context.Background(), + }, nil +} + +// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer +// from a single private key. 
+func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { + keyAddr := crypto.PubkeyToAddress(key.PublicKey) + if chainID == nil { + return nil, ErrNoChainID } + signer := types.LatestSignerForChainID(chainID) + return &TransactOpts{ + From: keyAddr, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != keyAddr { + return nil, ErrNotAuthorized + } + signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + Context: context.Background(), + }, nil } // NewClefTransactor is a utility method to easily create a transaction signer @@ -86,11 +169,12 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account) *TransactOpts { return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, transaction *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, transaction *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id }, + Context: context.Background(), } } diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index ca60cc1b4320..c16990f395c4 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -32,22 +32,23 @@ var ( // have any code associated with it (i.e. suicided). ErrNoCode = errors.New("no contract code at given address") - // This error is raised when attempting to perform a pending state action + // ErrNoPendingState is raised when attempting to perform a pending state action // on a backend that doesn't implement PendingContractCaller. ErrNoPendingState = errors.New("backend does not support pending state") - // This error is returned by WaitDeployed if contract creation leaves an - // empty contract behind. + // ErrNoCodeAfterDeploy is returned by WaitDeployed if contract creation leaves + // an empty contract behind. ErrNoCodeAfterDeploy = errors.New("no contract code after deployment") ) -// ContractCaller defines the methods needed to allow operating with contract on a read +// ContractCaller defines the methods needed to allow operating with a contract on a read // only basis. type ContractCaller interface { // CodeAt returns the code of the given account. This is needed to differentiate // between contract internal errors and the local chain being out of sync. CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) - // ContractCall executes an Ethereum contract call with the specified data as the + + // CallContract executes an Ethereum contract call with the specified data as the // input. CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) } @@ -58,28 +59,41 @@ type ContractCaller interface { type PendingContractCaller interface { // PendingCodeAt returns the code of the given account in the pending state. PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) + // PendingCallContract executes an Ethereum contract call against the pending state. 
PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) } -// ContractTransactor defines the methods needed to allow operating with contract -// on a write only basis. Beside the transacting method, the remainder are helpers +// ContractTransactor defines the methods needed to allow operating with a contract +// on a write only basis. Besides the transacting method, the remainder are helpers // used when the user does not provide some needed values, but rather leaves it up // to the transactor to decide. type ContractTransactor interface { + // HeaderByNumber returns a block header from the current canonical chain. If + // number is nil, the latest known header is returned. + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + // PendingCodeAt returns the code of the given account in the pending state. PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) + // PendingNonceAt retrieves the current pending nonce associated with an account. PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) + // SuggestGasPrice retrieves the currently suggested gas price to allow a timely // execution of a transaction. SuggestGasPrice(ctx context.Context) (*big.Int, error) + + // SuggestGasTipCap retrieves the currently suggested 1559 priority fee to allow + // a timely execution of a transaction. + SuggestGasTipCap(ctx context.Context) (*big.Int, error) + // EstimateGas tries to estimate the gas needed to execute a specific // transaction based on the current pending state of the backend blockchain. // There is no guarantee that this is the true gas limit requirement as other // transactions may be added or removed by miners, but it should provide a basis // for setting a reasonable default. EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) + // SendTransaction injects the transaction into the pending pool for execution. SendTransaction(ctx context.Context, tx *types.Transaction) error } diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 86ccc80d9f0c..ac696f446be6 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -25,8 +25,10 @@ import ( "time" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -38,22 +40,22 @@ import ( "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) -// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend. +// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend. 
var _ bind.ContractBackend = (*SimulatedBackend)(nil) var ( errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block") errBlockDoesNotExist = errors.New("block does not exist in blockchain") errTransactionDoesNotExist = errors.New("transaction does not exist") - errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction") ) // SimulatedBackend implements bind.ContractBackend, simulating a blockchain in -// the background. Its main purpose is to allow easily testing contract bindings. +// the background. Its main purpose is to allow for easy testing of contract bindings. // Simulated backend implements the following interfaces: // ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor, // DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender @@ -63,7 +65,7 @@ type SimulatedBackend struct { mu sync.Mutex pendingBlock *types.Block // Currently pending block that will be imported on request - pendingState *state.StateDB // Currently pending state that will be the active on on request + pendingState *state.StateDB // Currently pending state that will be the active on request events *filters.EventSystem // Event system for filtering log events live @@ -72,10 +74,11 @@ type SimulatedBackend struct { // NewSimulatedBackendWithDatabase creates a new binding backend based on the given database // and uses a simulated blockchain for testing purposes. +// A simulated backend always uses chainID 1337. func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) - blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil) + blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil) backend := &SimulatedBackend{ database: database, @@ -83,12 +86,13 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis config: genesis.Config, events: filters.NewEventSystem(&filterBackend{database, blockchain}, false), } - backend.rollback() + backend.rollback(blockchain.CurrentBlock()) return backend } // NewSimulatedBackend creates a new binding backend using a simulated blockchain // for testing purposes. +// A simulated backend always uses chainID 1337. func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit) } @@ -108,7 +112,9 @@ func (b *SimulatedBackend) Commit() { if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil { panic(err) // This cannot happen unless the simulator is wrong, fail in that case } - b.rollback() + // Using the last inserted block here makes it possible to build on a side + // chain after a fork. + b.rollback(b.pendingBlock) } // Rollback aborts all pending transactions, reverting to the last committed state. 
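Editor's note, not part of the patch: since the hunks above document that the simulated backend always runs with chain ID 1337 and that Commit mines the pending block, a minimal usage sketch may help readers of this diff. It assumes the go-ethereum import paths used elsewhere in the patch, a throwaway key funded in genesis, and arbitrary transfer and gas-limit values.

```go
package simtest

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

// runSimulatedTransfer funds a throwaway account in genesis, signs a plain
// value transfer with the chain-ID-aware transactor and mines it via Commit.
func runSimulatedTransfer() error {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}},
		10000000, // block gas limit
	)
	defer sim.Close()

	// The simulated backend always uses chain ID 1337, so the signer must be
	// built with the matching chain ID.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		return err
	}

	// Price the transaction above the pending block's base fee.
	head, err := sim.HeaderByNumber(context.Background(), nil)
	if err != nil {
		return err
	}
	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))

	tx := types.NewTransaction(0, addr, big.NewInt(1000), params.TxGas, gasPrice, nil)
	signed, err := auth.Signer(auth.From, tx)
	if err != nil {
		return err
	}
	if err := sim.SendTransaction(context.Background(), signed); err != nil {
		return err
	}
	sim.Commit() // mine the pending block

	bal, _ := sim.BalanceAt(context.Background(), addr, nil)
	fmt.Println("balance after commit:", bal)
	return nil
}
```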
@@ -116,15 +122,53 @@ func (b *SimulatedBackend) Rollback() { b.mu.Lock() defer b.mu.Unlock() - b.rollback() + b.rollback(b.blockchain.CurrentBlock()) } -func (b *SimulatedBackend) rollback() { - blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) - statedb, _ := b.blockchain.State() +func (b *SimulatedBackend) rollback(parent *types.Block) { + blocks, _ := core.GenerateChain(b.config, parent, ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database()) + b.pendingState, _ = state.New(b.pendingBlock.Root(), b.blockchain.StateCache(), nil) +} + +// Fork creates a side-chain that can be used to simulate reorgs. +// +// This function should be called with the ancestor block where the new side +// chain should be started. Transactions (old and new) can then be applied on +// top and Commit-ed. +// +// Note, the side-chain will only become canonical (and trigger the events) when +// it becomes longer. Until then CallContract will still operate on the current +// canonical chain. +// +// There is a % chance that the side chain becomes canonical at the same length +// to simulate live network behavior. +func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error { + b.mu.Lock() + defer b.mu.Unlock() + + if len(b.pendingBlock.Transactions()) != 0 { + return errors.New("pending block dirty") + } + block, err := b.blockByHash(ctx, parent) + if err != nil { + return err + } + b.rollback(block) + return nil +} + +// stateByBlockNumber retrieves a state by a given blocknumber. +func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) { + if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 { + return b.blockchain.State() + } + block, err := b.blockByNumber(ctx, blockNumber) + if err != nil { + return nil, err + } + return b.blockchain.StateAt(block.Root()) } // CodeAt returns the code associated with a certain account in the blockchain. @@ -132,11 +176,12 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return nil, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return nil, err } - statedb, _ := b.blockchain.State() - return statedb.GetCode(contract), nil + + return stateDB.GetCode(contract), nil } // BalanceAt returns the wei balance of a certain account in the blockchain. @@ -144,11 +189,12 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return nil, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return nil, err } - statedb, _ := b.blockchain.State() - return statedb.GetBalance(contract), nil + + return stateDB.GetBalance(contract), nil } // NonceAt returns the nonce of a certain account in the blockchain. 
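Editor's note, not part of the patch: the Fork helper added above exists for reorg tests. The sketch below (reusing a *backends.SimulatedBackend built as in the previous example, on an otherwise idle chain) illustrates the call pattern its doc comment describes: mine on one chain, rewind the pending state to a remembered ancestor, then mine a longer side chain so it becomes canonical.

```go
package simtest

import (
	"context"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
)

// simulateReorg mines one block on the current chain, rewinds the pending
// state to the remembered ancestor via Fork, and then mines a longer side
// chain so that it becomes the canonical one.
func simulateReorg(sim *backends.SimulatedBackend) (common.Hash, error) {
	ctx := context.Background()
	parent := sim.Blockchain().CurrentBlock().Hash() // common ancestor to fork from

	sim.Commit() // block N+1 on the original chain

	if err := sim.Fork(ctx, parent); err != nil { // pending block must be empty here
		return common.Hash{}, err
	}
	sim.Commit() // side-chain block N+1, not canonical yet
	sim.Commit() // side-chain block N+2, the longer chain wins

	return sim.Blockchain().CurrentBlock().Hash(), nil
}
```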
@@ -156,11 +202,12 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return 0, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return 0, err } - statedb, _ := b.blockchain.State() - return statedb.GetNonce(contract), nil + + return stateDB.GetNonce(contract), nil } // StorageAt returns the value of key in the storage of an account in the blockchain. @@ -168,11 +215,12 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { - return nil, errBlockNumberUnsupported + stateDB, err := b.stateByBlockNumber(ctx, blockNumber) + if err != nil { + return nil, err } - statedb, _ := b.blockchain.State() - val := statedb.GetState(contract, key) + + val := stateDB.GetState(contract, key) return val[:], nil } @@ -182,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common defer b.mu.Unlock() receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config) + if receipt == nil { + return nil, ethereum.NotFound + } return receipt, nil } @@ -204,11 +255,16 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common. return nil, false, ethereum.NotFound } -// BlockByHash retrieves a block based on the block hash +// BlockByHash retrieves a block based on the block hash. func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { b.mu.Lock() defer b.mu.Unlock() + return b.blockByHash(ctx, hash) +} + +// blockByHash retrieves a block based on the block hash without Locking. +func (b *SimulatedBackend) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { if hash == b.pendingBlock.Hash() { return b.pendingBlock, nil } @@ -227,6 +283,12 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) ( b.mu.Lock() defer b.mu.Unlock() + return b.blockByNumber(ctx, number) +} + +// blockByNumber retrieves a block from the database by number, caching it +// (associated with its hash) if found without Lock. +func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 { return b.blockchain.CurrentBlock(), nil } @@ -269,7 +331,7 @@ func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) ( return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil } -// TransactionCount returns the number of transactions in a given block +// TransactionCount returns the number of transactions in a given block. func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { b.mu.Lock() defer b.mu.Unlock() @@ -286,7 +348,7 @@ func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash commo return uint(block.Transactions().Len()), nil } -// TransactionInBlock returns the transaction for a specific block at a specific index +// TransactionInBlock returns the transaction for a specific block at a specific index. 
func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { b.mu.Lock() defer b.mu.Unlock() @@ -321,6 +383,36 @@ func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Ad return b.pendingState.GetCode(contract), nil } +func newRevertError(result *core.ExecutionResult) *revertError { + reason, errUnpack := abi.UnpackRevert(result.Revert()) + err := errors.New("execution reverted") + if errUnpack == nil { + err = fmt.Errorf("execution reverted: %v", reason) + } + return &revertError{ + error: err, + reason: hexutil.Encode(result.Revert()), + } +} + +// revertError is an API error that encompasses an EVM revert with JSON error +// code and a binary data blob. +type revertError struct { + error + reason string // revert reason hex encoded +} + +// ErrorCode returns the JSON error code for a revert. +// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal +func (e *revertError) ErrorCode() int { + return 3 +} + +// ErrorData returns the hex encoded revert reason. +func (e *revertError) ErrorData() interface{} { + return e.reason +} + // CallContract executes a contract call. func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { b.mu.Lock() @@ -329,12 +421,19 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { return nil, errBlockNumberUnsupported } - state, err := b.blockchain.State() + stateDB, err := b.blockchain.State() + if err != nil { + return nil, err + } + res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), stateDB) if err != nil { return nil, err } - rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state) - return rval, err + // If the result contains a revert reason, try to unpack and return it. + if len(res.Revert()) > 0 { + return nil, newRevertError(res) + } + return res.Return(), res.Err } // PendingCallContract executes a contract call on the pending state. @@ -343,8 +442,15 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu defer b.mu.Unlock() defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot()) - rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) - return rval, err + res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) + if err != nil { + return nil, err + } + // If the result contains a revert reason, try to unpack and return it. + if len(res.Revert()) > 0 { + return nil, newRevertError(res) + } + return res.Return(), res.Err } // PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving @@ -359,6 +465,18 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated // chain doesn't have miners, we just return a gas price of 1 for any call. func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if b.pendingBlock.Header().BaseFee != nil { + return b.pendingBlock.Header().BaseFee, nil + } + return big.NewInt(1), nil +} + +// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated +// chain doesn't have miners, we just return a gas tip of 1 for any call. 
+func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { return big.NewInt(1), nil } @@ -379,25 +497,68 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } else { hi = b.pendingBlock.GasLimit() } + // Normalize the max fee per gas the call is willing to spend. + var feeCap *big.Int + if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { + return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } else if call.GasPrice != nil { + feeCap = call.GasPrice + } else if call.GasFeeCap != nil { + feeCap = call.GasFeeCap + } else { + feeCap = common.Big0 + } + // Recap the highest gas allowance with account's balance. + if feeCap.BitLen() != 0 { + balance := b.pendingState.GetBalance(call.From) // from can't be nil + available := new(big.Int).Set(balance) + if call.Value != nil { + if call.Value.Cmp(available) >= 0 { + return 0, errors.New("insufficient funds for transfer") + } + available.Sub(available, call.Value) + } + allowance := new(big.Int).Div(available, feeCap) + if allowance.IsUint64() && hi > allowance.Uint64() { + transfer := call.Value + if transfer == nil { + transfer = new(big.Int) + } + log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, + "sent", transfer, "feecap", feeCap, "fundable", allowance) + hi = allowance.Uint64() + } + } cap = hi // Create a helper to check if a gas allowance results in an executable transaction - executable := func(gas uint64) bool { + executable := func(gas uint64) (bool, *core.ExecutionResult, error) { call.Gas = gas snapshot := b.pendingState.Snapshot() - _, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) + res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) b.pendingState.RevertToSnapshot(snapshot) - if err != nil || failed { - return false + if err != nil { + if errors.Is(err, core.ErrIntrinsicGas) { + return true, nil, nil // Special case, raise gas limit + } + return true, nil, err // Bail out } - return true + return res.Failed(), res, nil } // Execute the binary search and hone in on an executable gas limit for lo+1 < hi { mid := (hi + lo) / 2 - if !executable(mid) { + failed, _, err := executable(mid) + + // If the error is not nil(consensus error), it means the provided message + // call or transaction will never be accepted no matter how much gas it is + // assigned. Return the error directly, don't struggle any more + if err != nil { + return 0, err + } + if failed { lo = mid } else { hi = mid @@ -405,8 +566,19 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs } // Reject the transaction as invalid if it still fails at the highest allowance if hi == cap { - if !executable(hi) { - return 0, errGasEstimationFailed + failed, result, err := executable(hi) + if err != nil { + return 0, err + } + if failed { + if result != nil && result.Err != vm.ErrOutOfGas { + if len(result.Revert()) > 0 { + return 0, newRevertError(result) + } + return 0, result.Err + } + // Otherwise, the specified gas cap is too low + return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap) } } return hi, nil @@ -414,11 +586,39 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs // callContract implements common code between normal and pending contract calls. // state is modified during execution, make sure to copy it if necessary. 
-func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) {
-	// Ensure message is initialized properly.
-	if call.GasPrice == nil {
-		call.GasPrice = big.NewInt(1)
+func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) {
+	// Gas prices post 1559 need to be initialized
+	if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
+		return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
+	}
+	head := b.blockchain.CurrentHeader()
+	if !b.blockchain.Config().IsLondon(head.Number) {
+		// If there's no basefee, then it must be a non-1559 execution
+		if call.GasPrice == nil {
+			call.GasPrice = new(big.Int)
+		}
+		call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
+	} else {
+		// A basefee is provided, necessitating 1559-type execution
+		if call.GasPrice != nil {
+			// User specified the legacy gas field, convert to 1559 gas typing
+			call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
+		} else {
+			// User specified 1559 gas fields (or none), use those
+			if call.GasFeeCap == nil {
+				call.GasFeeCap = new(big.Int)
+			}
+			if call.GasTipCap == nil {
+				call.GasTipCap = new(big.Int)
+			}
+			// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
+			call.GasPrice = new(big.Int)
+			if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
+				call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
+			}
+		}
 	}
+	// Ensure message is initialized properly.
 	if call.Gas == 0 {
 		call.Gas = 50000000
 	}
@@ -426,46 +626,52 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 		call.Value = new(big.Int)
 	}
 	// Set infinite balance to the fake caller account.
-	from := statedb.GetOrNewStateObject(call.From)
+	from := stateDB.GetOrNewStateObject(call.From)
 	from.SetBalance(math.MaxBig256)
 	// Execute the call.
-	msg := callmsg{call}
+	msg := callMsg{call}
 
-	evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil)
+	txContext := core.NewEVMTxContext(msg)
+	evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil)
 	// Create a new environment which holds all relevant information
 	// about the transaction and calling mechanisms.
-	vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
-	gaspool := new(core.GasPool).AddGas(math.MaxUint64)
+	vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
+	gasPool := new(core.GasPool).AddGas(math.MaxUint64)
 
-	ret, usedGas, _, failed, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
-	return ret, usedGas, failed, err
+	return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb()
 }
 
 // SendTransaction updates the pending block to include the given transaction.
-// It panics if the transaction is invalid.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { b.mu.Lock() defer b.mu.Unlock() - sender, err := types.Sender(types.NewEIP155Signer(b.config.ChainID), tx) + // Get the last block + block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash()) if err != nil { - panic(fmt.Errorf("invalid transaction: %v", err)) + return fmt.Errorf("could not fetch parent") + } + // Check transaction validity + signer := types.MakeSigner(b.blockchain.Config(), block.Number()) + sender, err := types.Sender(signer, tx) + if err != nil { + return fmt.Errorf("invalid transaction: %v", err) } nonce := b.pendingState.GetNonce(sender) if tx.Nonce() != nonce { - panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)) + return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce) } - - blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { + // Include tx in chain + blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { for _, tx := range b.pendingBlock.Transactions() { block.AddTxWithChain(b.blockchain, tx) } block.AddTxWithChain(b.blockchain, tx) }) - statedb, _ := b.blockchain.State() + stateDB, _ := b.blockchain.State() b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database()) + b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) return nil } @@ -479,7 +685,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter // Block filter requested, construct a single-shot filter filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics) } else { - // Initialize unset filter boundaried to run from genesis to chain head + // Initialize unset filter boundaries to run from genesis to chain head from := int64(0) if query.FromBlock != nil { from = query.FromBlock.Int64() @@ -497,8 +703,8 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter return nil, err } res := make([]types.Log, len(logs)) - for i, log := range logs { - res[i] = *log + for i, nLog := range logs { + res[i] = *nLog } return res, nil } @@ -519,9 +725,9 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere for { select { case logs := <-sink: - for _, log := range logs { + for _, nlog := range logs { select { - case ch <- *log: + case ch <- *nlog: case err := <-sub.Err(): return err case <-quit: @@ -537,7 +743,7 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere }), nil } -// SubscribeNewHead returns an event subscription for a new header +// SubscribeNewHead returns an event subscription for a new header. func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { // subscribe to a new head sink := make(chan *types.Header) @@ -565,20 +771,22 @@ func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *type } // AdjustTime adds a time shift to the simulated clock. +// It can only be called on empty blocks. 
 func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
+	if len(b.pendingBlock.Transactions()) != 0 {
+		return errors.New("could not adjust time on non-empty block")
+	}
+
 	blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
-		for _, tx := range b.pendingBlock.Transactions() {
-			block.AddTx(tx)
-		}
 		block.OffsetTime(int64(adjustment.Seconds()))
 	})
-	statedb, _ := b.blockchain.State()
+	stateDB, _ := b.blockchain.State()
 	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
 
 	return nil
 }
@@ -588,19 +796,22 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain {
 	return b.blockchain
 }
 
-// callmsg implements core.Message to allow passing it as a transaction simulator.
-type callmsg struct {
+// callMsg implements core.Message to allow passing it as a transaction simulator.
+type callMsg struct {
 	ethereum.CallMsg
 }
 
-func (m callmsg) From() common.Address { return m.CallMsg.From }
-func (m callmsg) Nonce() uint64 { return 0 }
-func (m callmsg) CheckNonce() bool { return false }
-func (m callmsg) To() *common.Address { return m.CallMsg.To }
-func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
-func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
-func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
-func (m callmsg) Data() []byte { return m.CallMsg.Data }
+func (m callMsg) From() common.Address { return m.CallMsg.From }
+func (m callMsg) Nonce() uint64 { return 0 }
+func (m callMsg) IsFake() bool { return true }
+func (m callMsg) To() *common.Address { return m.CallMsg.To }
+func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
+func (m callMsg) GasFeeCap() *big.Int { return m.CallMsg.GasFeeCap }
+func (m callMsg) GasTipCap() *big.Int { return m.CallMsg.GasTipCap }
+func (m callMsg) Gas() uint64 { return m.CallMsg.Gas }
+func (m callMsg) Value() *big.Int { return m.CallMsg.Value }
+func (m callMsg) Data() []byte { return m.CallMsg.Data }
+func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
 
 // filterBackend implements filters.Backend to support filtering for logs without
 // taking bloom-bits acceleration structures into account.
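Editor's note, not part of the patch: before the test file below, it may help to show how the revert plumbing added earlier in simulated.go surfaces to callers. CallContract and EstimateGas now wrap failed executions in an error whose text carries the ABI-decoded reason and whose ErrorData method exposes the raw return data. The sketch assumes a deployed contract address and calldata supplied by the caller, and recovers the data through a structural interface assertion rather than the unexported revertError type.

```go
package simtest

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
)

// callWithRevertReason performs a read-only contract call against the latest
// block and, if the call reverts, reports both the decoded reason (embedded in
// the error text) and the hex-encoded revert data exposed via ErrorData.
func callWithRevertReason(sim *backends.SimulatedBackend, from, contract common.Address, input []byte) ([]byte, error) {
	ret, err := sim.CallContract(context.Background(), ethereum.CallMsg{
		From:  from,
		To:    &contract,
		Value: big.NewInt(0),
		Data:  input,
	}, nil)
	if err == nil {
		return ret, nil
	}
	// The simulated backend wraps reverts in an error that also carries the
	// raw return data; recover it without referencing the unexported type.
	if de, ok := err.(interface{ ErrorData() interface{} }); ok {
		fmt.Printf("call reverted: %v (data: %v)\n", err, de.ErrorData())
	}
	return nil, err
}
```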
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 455d89c1e3be..8a0cbe335778 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -19,7 +19,10 @@ package backends import ( "bytes" "context" + "errors" "math/big" + "math/rand" + "reflect" "strings" "testing" "time" @@ -37,7 +40,7 @@ import ( func TestSimulatedBackend(t *testing.T) { var gasLimit uint64 = 8000029 key, _ := crypto.GenerateKey() // nolint: gosec - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) genAlloc := make(core.GenesisAlloc) genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)} @@ -56,9 +59,12 @@ func TestSimulatedBackend(t *testing.T) { } // generate a transaction and confirm you can retrieve it + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + code := `6060604052600a8060106000396000f360606040526008565b00` var gas uint64 = 3000000 - tx := types.NewContractCreation(0, big.NewInt(0), gas, big.NewInt(1), common.FromHex(code)) + tx := types.NewContractCreation(0, big.NewInt(0), gas, gasPrice, common.FromHex(code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, key) err = sim.SendTransaction(context.Background(), tx) @@ -105,14 +111,18 @@ const deployedCode = `60806040526004361061003b576000357c010000000000000000000000 // expected return value contains "hello world" var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} -func TestNewSimulatedBackend(t *testing.T) { - testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - expectedBal := big.NewInt(10000000000) - sim := NewSimulatedBackend( +func simTestBackend(testAddr common.Address) *SimulatedBackend { + return NewSimulatedBackend( core.GenesisAlloc{ - testAddr: {Balance: expectedBal}, + testAddr: {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) +} + +func TestNewSimulatedBackend(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + expectedBal := big.NewInt(10000000000000000) + sim := simTestBackend(testAddr) defer sim.Close() if sim.config != params.AllEthashProtocolChanges { @@ -123,22 +133,21 @@ func TestNewSimulatedBackend(t *testing.T) { t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config) } - statedb, _ := sim.blockchain.State() - bal := statedb.GetBalance(testAddr) + stateDB, _ := sim.blockchain.State() + bal := stateDB.GetBalance(testAddr) if bal.Cmp(expectedBal) != 0 { t.Errorf("expected balance for test address not received. 
expected: %v actual: %v", expectedBal, bal) } } -func TestSimulatedBackend_AdjustTime(t *testing.T) { +func TestAdjustTime(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) defer sim.Close() prevTime := sim.pendingBlock.Time() - err := sim.AdjustTime(time.Second) - if err != nil { + if err := sim.AdjustTime(time.Second); err != nil { t.Error(err) } newTime := sim.pendingBlock.Time() @@ -148,14 +157,52 @@ func TestSimulatedBackend_AdjustTime(t *testing.T) { } } -func TestSimulatedBackend_BalanceAt(t *testing.T) { +func TestNewAdjustTimeFail(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - expectedBal := big.NewInt(10000000000) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: expectedBal}, - }, 10000000, - ) + sim := simTestBackend(testAddr) + + // Create tx and send + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx) + // AdjustTime should fail on non-empty block + if err := sim.AdjustTime(time.Second); err == nil { + t.Error("Expected adjust time to error on non-empty block") + } + sim.Commit() + + prevTime := sim.pendingBlock.Time() + if err := sim.AdjustTime(time.Minute); err != nil { + t.Error(err) + } + newTime := sim.pendingBlock.Time() + if newTime-prevTime != uint64(time.Minute.Seconds()) { + t.Errorf("adjusted time not equal to a minute. prev: %v, new: %v", prevTime, newTime) + } + // Put a transaction after adjusting time + tx2 := types.NewTransaction(1, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) + signedTx2, err := types.SignTx(tx2, types.HomesteadSigner{}, testKey) + if err != nil { + t.Errorf("could not sign tx: %v", err) + } + sim.SendTransaction(context.Background(), signedTx2) + sim.Commit() + newTime = sim.pendingBlock.Time() + if newTime-prevTime >= uint64(time.Minute.Seconds()) { + t.Errorf("time adjusted, but shouldn't be: prev: %v, new: %v", prevTime, newTime) + } +} + +func TestBalanceAt(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + expectedBal := big.NewInt(10000000000000000) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -169,7 +216,7 @@ func TestSimulatedBackend_BalanceAt(t *testing.T) { } } -func TestSimulatedBackend_BlockByHash(t *testing.T) { +func TestBlockByHash(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -190,7 +237,7 @@ func TestSimulatedBackend_BlockByHash(t *testing.T) { } } -func TestSimulatedBackend_BlockByNumber(t *testing.T) { +func TestBlockByNumber(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, ) @@ -225,14 +272,10 @@ func TestSimulatedBackend_BlockByNumber(t *testing.T) { } } -func TestSimulatedBackend_NonceAt(t *testing.T) { +func TestNonceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -246,7 +289,10 @@ func TestSimulatedBackend_NonceAt(t *testing.T) { } // create a signed transaction to send - tx := types.NewTransaction(nonce, testAddr, 
big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -267,21 +313,30 @@ func TestSimulatedBackend_NonceAt(t *testing.T) { if newNonce != nonce+uint64(1) { t.Errorf("received incorrect nonce. expected 1, got %v", nonce) } + // create some more blocks + sim.Commit() + // Check that we can get data for an older block/state + newNonce, err = sim.NonceAt(bgCtx, testAddr, big.NewInt(1)) + if err != nil { + t.Fatalf("could not get nonce for test addr: %v", err) + } + if newNonce != nonce+uint64(1) { + t.Fatalf("received incorrect nonce. expected 1, got %v", nonce) + } } -func TestSimulatedBackend_SendTransaction(t *testing.T) { +func TestSendTransaction(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -304,19 +359,22 @@ func TestSimulatedBackend_SendTransaction(t *testing.T) { } } -func TestSimulatedBackend_TransactionByHash(t *testing.T) { +func TestTransactionByHash(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := NewSimulatedBackend( core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, + testAddr: {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer sim.Close() bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -355,37 +413,218 @@ func TestSimulatedBackend_TransactionByHash(t *testing.T) { } } -func TestSimulatedBackend_EstimateGas(t *testing.T) { - sim := NewSimulatedBackend( - core.GenesisAlloc{}, 10000000, - ) +func TestEstimateGas(t *testing.T) { + /* + pragma solidity ^0.6.4; + contract GasEstimation { + function PureRevert() public { revert(); } + function Revert() public { revert("revert reason");} + function OOG() public { for (uint i = 0; ; i++) {}} + function Assert() public { assert(false);} + function Valid() public {} + }*/ + const contractAbi = 
"[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033" + + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + + sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000) defer sim.Close() - bgCtx := context.Background() - testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - gas, err := sim.EstimateGas(bgCtx, ethereum.CallMsg{ - From: testAddr, - To: &testAddr, - Value: big.NewInt(1000), - Data: []byte{}, - }) - if err != nil { - t.Errorf("could not estimate gas: %v", err) + parsed, _ := abi.JSON(strings.NewReader(contractAbi)) + contractAddr, _, _, _ := bind.DeployContract(opts, parsed, common.FromHex(contractBin), sim) + sim.Commit() + + var cases = []struct { + name string + message ethereum.CallMsg + expect uint64 + expectError error + expectData interface{} + }{ + {"plain transfer(valid)", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: big.NewInt(1), + Data: nil, + }, params.TxGas, nil, nil}, + + {"plain transfer(invalid)", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: big.NewInt(1), + Data: nil, + }, 0, errors.New("execution reverted"), nil}, + + {"Revert", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("d8b98391"), + }, 0, errors.New("execution reverted: revert reason"), "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000"}, + + {"PureRevert", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 0, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("aa8b1d30"), + }, 0, errors.New("execution reverted"), nil}, + + {"OOG", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 100000, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("50f6fe34"), + }, 0, errors.New("gas required exceeds allowance (100000)"), nil}, + + {"Assert", ethereum.CallMsg{ + From: addr, + To: 
&contractAddr, + Gas: 100000, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("b9b046f9"), + }, 0, errors.New("invalid opcode: INVALID"), nil}, + + {"Valid", ethereum.CallMsg{ + From: addr, + To: &contractAddr, + Gas: 100000, + GasPrice: big.NewInt(0), + Value: nil, + Data: common.Hex2Bytes("e09fface"), + }, 21275, nil, nil}, + } + for _, c := range cases { + got, err := sim.EstimateGas(context.Background(), c.message) + if c.expectError != nil { + if err == nil { + t.Fatalf("Expect error, got nil") + } + if c.expectError.Error() != err.Error() { + t.Fatalf("Expect error, want %v, got %v", c.expectError, err) + } + if c.expectData != nil { + if err, ok := err.(*revertError); !ok { + t.Fatalf("Expect revert error, got %T", err) + } else if !reflect.DeepEqual(err.ErrorData(), c.expectData) { + t.Fatalf("Error data mismatch, want %v, got %v", c.expectData, err.ErrorData()) + } + } + continue + } + if got != c.expect { + t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got) + } } +} - if gas != params.TxGas { - t.Errorf("expected 21000 gas cost for a transaction got %v", gas) +func TestEstimateGasWithPrice(t *testing.T) { + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + + sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether*2 + 2e17)}}, 10000000) + defer sim.Close() + + recipient := common.HexToAddress("deadbeef") + var cases = []struct { + name string + message ethereum.CallMsg + expect uint64 + expectError error + }{ + {"EstimateWithoutPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(0), + Value: big.NewInt(100000000000), + Data: nil, + }, 21000, nil}, + + {"EstimateWithPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(100000000000), + Value: big.NewInt(100000000000), + Data: nil, + }, 21000, nil}, + + {"EstimateWithVeryHighPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(1e14), // gascost = 2.1ether + Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether + Data: nil, + }, 21000, nil}, + + {"EstimateWithSuperhighPrice", ethereum.CallMsg{ + From: addr, + To: &recipient, + Gas: 0, + GasPrice: big.NewInt(2e14), // gascost = 4.2ether + Value: big.NewInt(100000000000), + Data: nil, + }, 21000, errors.New("gas required exceeds allowance (10999)")}, // 10999=(2.2ether-1000wei)/(2e14) + + {"EstimateEIP1559WithHighFees", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether + GasTipCap: big.NewInt(1), + Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether + Data: nil, + }, params.TxGas, nil}, + + {"EstimateEIP1559WithSuperHighFees", ethereum.CallMsg{ + From: addr, + To: &addr, + Gas: 0, + GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether + GasTipCap: big.NewInt(1), + Value: big.NewInt(1e17 + 1), // the remaining balance for fee is 2.1ether + Data: nil, + }, params.TxGas, errors.New("gas required exceeds allowance (20999)")}, // 20999=(2.2ether-0.1ether-1wei)/(1e14) + } + for i, c := range cases { + got, err := sim.EstimateGas(context.Background(), c.message) + if c.expectError != nil { + if err == nil { + t.Fatalf("test %d: expect error, got nil", i) + } + if c.expectError.Error() != err.Error() { + t.Fatalf("test %d: expect error, want %v, got %v", i, c.expectError, err) + } + continue + } + if c.expectError == nil && err != nil { + t.Fatalf("test %d: didn't expect error, got %v", i, err) 
+ } + if got != c.expect { + t.Fatalf("test %d: gas estimation mismatch, want %d, got %d", i, c.expect, got) + } } } -func TestSimulatedBackend_HeaderByHash(t *testing.T) { +func TestHeaderByHash(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -403,14 +642,10 @@ func TestSimulatedBackend_HeaderByHash(t *testing.T) { } } -func TestSimulatedBackend_HeaderByNumber(t *testing.T) { +func TestHeaderByNumber(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -454,14 +689,10 @@ func TestSimulatedBackend_HeaderByNumber(t *testing.T) { } } -func TestSimulatedBackend_TransactionCount(t *testing.T) { +func TestTransactionCount(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() currentBlock, err := sim.BlockByNumber(bgCtx, nil) @@ -477,9 +708,11 @@ func TestSimulatedBackend_TransactionCount(t *testing.T) { if count != 0 { t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count) } - // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -508,14 +741,10 @@ func TestSimulatedBackend_TransactionCount(t *testing.T) { } } -func TestSimulatedBackend_TransactionInBlock(t *testing.T) { +func TestTransactionInBlock(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -536,9 +765,11 @@ func TestSimulatedBackend_TransactionInBlock(t *testing.T) { if pendingNonce != uint64(0) { t.Errorf("expected pending nonce of 0 got %v", pendingNonce) } - // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -575,14 +806,10 @@ func TestSimulatedBackend_TransactionInBlock(t *testing.T) { } } -func TestSimulatedBackend_PendingNonceAt(t *testing.T) { +func TestPendingNonceAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: 
big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -597,7 +824,10 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -620,7 +850,7 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } // make a new transaction with a nonce of 1 - tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err = types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -641,19 +871,18 @@ func TestSimulatedBackend_PendingNonceAt(t *testing.T) { } } -func TestSimulatedBackend_TransactionReceipt(t *testing.T) { +func TestTransactionReceipt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() // create a signed transaction to send - tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil) + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey) if err != nil { t.Errorf("could not sign tx: %v", err) @@ -676,7 +905,7 @@ func TestSimulatedBackend_TransactionReceipt(t *testing.T) { } } -func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { +func TestSuggestGasPrice(t *testing.T) { sim := NewSimulatedBackend( core.GenesisAlloc{}, 10000000, @@ -687,19 +916,14 @@ func TestSimulatedBackend_SuggestGasPrice(t *testing.T) { if err != nil { t.Errorf("could not get gas price: %v", err) } - if gasPrice.Uint64() != uint64(1) { - t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64()) + if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() { + t.Errorf("gas price was not expected value of %v. 
actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64()) } } -func TestSimulatedBackend_PendingCodeAt(t *testing.T) { +func TestPendingCodeAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, - 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() code, err := sim.CodeAt(bgCtx, testAddr, nil) @@ -714,7 +938,7 @@ func TestSimulatedBackend_PendingCodeAt(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - auth := bind.NewKeyedTransactor(testKey) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) @@ -733,14 +957,9 @@ func TestSimulatedBackend_PendingCodeAt(t *testing.T) { } } -func TestSimulatedBackend_CodeAt(t *testing.T) { +func TestCodeAt(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, - 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() code, err := sim.CodeAt(bgCtx, testAddr, nil) @@ -755,7 +974,7 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - auth := bind.NewKeyedTransactor(testKey) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) @@ -777,14 +996,9 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: // receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} -func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { +func TestPendingAndCallContract(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000)}, - }, - 10000000, - ) + sim := simTestBackend(testAddr) defer sim.Close() bgCtx := context.Background() @@ -792,7 +1006,7 @@ func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - contractAuth := bind.NewKeyedTransactor(testKey) + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v", err) @@ -800,7 +1014,7 @@ func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { input, err := parsed.Pack("receive", []byte("X")) if err != nil { - t.Errorf("could pack receive function on contract: %v", err) + t.Errorf("could not pack receive function on contract: %v", err) } // make sure you can call the contract in pending state @@ -840,3 +1054,285 @@ func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res)) } } + +// This test is based on the following contract: +/* +contract Reverter { + function revertString() public pure{ + require(false, "some error"); + } + function revertNoString() public pure { + require(false, ""); + } + function revertASM() public pure { + assembly { + revert(0x0, 0x0) + } + } + function noRevert() public pure { + assembly { + // Assembles something that looks like require(false, "some error") but is not reverted + mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) + mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) + mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) + mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) + return(0x0, 0x64) + } + } +}*/ +func TestCallContractRevert(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + bgCtx := context.Background() + + reverterABI := `[{"inputs": [],"name": "noRevert","outputs": [],"stateMutability": "pure","type": 
"function"},{"inputs": [],"name": "revertASM","outputs": [],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "revertNoString","outputs": [],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "revertString","outputs": [],"stateMutability": "pure","type": "function"}]` + reverterBin := "608060405234801561001057600080fd5b506101d3806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80634b409e01146100515780639b340e361461005b5780639bd6103714610065578063b7246fc11461006f575b600080fd5b610059610079565b005b6100636100ca565b005b61006d6100cf565b005b610077610145565b005b60006100c8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526000815260200160200191505060405180910390fd5b565b600080fd5b6000610143576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600a8152602001807f736f6d65206572726f720000000000000000000000000000000000000000000081525060200191505060405180910390fd5b565b7f08c379a0000000000000000000000000000000000000000000000000000000006000526020600452600a6024527f736f6d65206572726f720000000000000000000000000000000000000000000060445260646000f3fea2646970667358221220cdd8af0609ec4996b7360c7c780bad5c735740c64b1fffc3445aa12d37f07cb164736f6c63430006070033" + + parsed, err := abi.JSON(strings.NewReader(reverterABI)) + if err != nil { + t.Errorf("could not get code at test addr: %v", err) + } + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(reverterBin), sim) + if err != nil { + t.Errorf("could not deploy contract: %v", err) + } + + inputs := make(map[string]interface{}, 3) + inputs["revertASM"] = nil + inputs["revertNoString"] = "" + inputs["revertString"] = "some error" + + call := make([]func([]byte) ([]byte, error), 2) + call[0] = func(input []byte) ([]byte, error) { + return sim.PendingCallContract(bgCtx, ethereum.CallMsg{ + From: testAddr, + To: &addr, + Data: input, + }) + } + call[1] = func(input []byte) ([]byte, error) { + return sim.CallContract(bgCtx, ethereum.CallMsg{ + From: testAddr, + To: &addr, + Data: input, + }, nil) + } + + // Run pending calls then commit + for _, cl := range call { + for key, val := range inputs { + input, err := parsed.Pack(key) + if err != nil { + t.Errorf("could not pack %v function on contract: %v", key, err) + } + + res, err := cl(input) + if err == nil { + t.Errorf("call to %v was not reverted", key) + } + if res != nil { + t.Errorf("result from %v was not nil: %v", key, res) + } + if val != nil { + rerr, ok := err.(*revertError) + if !ok { + t.Errorf("expect revert error") + } + if rerr.Error() != "execution reverted: "+val.(string) { + t.Errorf("error was malformed: got %v want %v", rerr.Error(), val) + } + } else { + // revert(0x0,0x0) + if err.Error() != "execution reverted" { + t.Errorf("error was malformed: got %v want %v", err, "execution reverted") + } + } + } + input, err := parsed.Pack("noRevert") + if err != nil { + t.Errorf("could not pack noRevert function on contract: %v", err) + } + res, err := cl(input) + if err != nil { + t.Error("call to noRevert was reverted") + } + if res == nil { + t.Errorf("result from noRevert was nil") + } + sim.Commit() + } +} + +// TestFork check that the chain length after a reorg is correct. +// Steps: +// 1. Save the current block which will serve as parent for the fork. +// 2. Mine n blocks with n ∈ [0, 20]. +// 3. 
Assert that the chain length is n. +// 4. Fork by using the parent block as ancestor. +// 5. Mine n+1 blocks which should trigger a reorg. +// 6. Assert that the chain length is n+1. +// Since Commit() was called 2n+1 times in total, +// having a chain length of just n+1 means that a reorg occurred. +func TestFork(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parent := sim.blockchain.CurrentBlock() + // 2. + n := int(rand.Int31n(21)) + for i := 0; i < n; i++ { + sim.Commit() + } + // 3. + if sim.blockchain.CurrentBlock().NumberU64() != uint64(n) { + t.Error("wrong chain length") + } + // 4. + sim.Fork(context.Background(), parent.Hash()) + // 5. + for i := 0; i < n+1; i++ { + sim.Commit() + } + // 6. + if sim.blockchain.CurrentBlock().NumberU64() != uint64(n+1) { + t.Error("wrong chain length") + } +} + +/* +Example contract to test event emission: + +pragma solidity >=0.7.0 <0.9.0; +contract Callable { + event Called(); + function Call() public { emit Called(); } +} +*/ +const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +const callableBin = "6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806334e2292114602d575b600080fd5b60336035565b005b7f81fab7a4a0aa961db47eefc81f143a5220e8c8495260dd65b1356f1d19d3c7b860405160405180910390a156fea2646970667358221220029436d24f3ac598ceca41d4d712e13ced6d70727f4cdc580667de66d2f51d8b64736f6c63430008010033" + +// TestForkLogsReborn check that the simulated reorgs +// correctly remove and reborn logs. +// Steps: +// 1. Deploy the Callable contract. +// 2. Set up an event subscription. +// 3. Save the current block which will serve as parent for the fork. +// 4. Send a transaction. +// 5. Check that the event was included. +// 6. Fork by using the parent block as ancestor. +// 7. Mine two blocks to trigger a reorg. +// 8. Check that the event was removed. +// 9. Re-send the transaction and mine a block. +// 10. Check that the event was reborn. +func TestForkLogsReborn(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parsed, _ := abi.JSON(strings.NewReader(callableAbi)) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) + _, _, contract, err := bind.DeployContract(auth, parsed, common.FromHex(callableBin), sim) + if err != nil { + t.Errorf("deploying contract: %v", err) + } + sim.Commit() + // 2. + logs, sub, err := contract.WatchLogs(nil, "Called") + if err != nil { + t.Errorf("watching logs: %v", err) + } + defer sub.Unsubscribe() + // 3. + parent := sim.blockchain.CurrentBlock() + // 4. + tx, err := contract.Transact(auth, "Call") + if err != nil { + t.Errorf("transacting: %v", err) + } + sim.Commit() + // 5. + log := <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if log.Removed { + t.Error("Event should be included") + } + // 6. + if err := sim.Fork(context.Background(), parent.Hash()); err != nil { + t.Errorf("forking: %v", err) + } + // 7. + sim.Commit() + sim.Commit() + // 8. + log = <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if !log.Removed { + t.Error("Event should be removed") + } + // 9. 
+ if err := sim.SendTransaction(context.Background(), tx); err != nil { + t.Errorf("sending transaction: %v", err) + } + sim.Commit() + // 10. + log = <-logs + if log.TxHash != tx.Hash() { + t.Error("wrong event tx hash") + } + if log.Removed { + t.Error("Event should be included") + } +} + +// TestForkResendTx checks that re-sending a TX after a fork +// is possible and does not cause a "nonce mismatch" panic. +// Steps: +// 1. Save the current block which will serve as parent for the fork. +// 2. Send a transaction. +// 3. Check that the TX is included in block 1. +// 4. Fork by using the parent block as ancestor. +// 5. Mine a block, Re-send the transaction and mine another one. +// 6. Check that the TX is now included in block 2. +func TestForkResendTx(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + // 1. + parent := sim.blockchain.CurrentBlock() + // 2. + head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + _tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) + tx, _ := types.SignTx(_tx, types.HomesteadSigner{}, testKey) + sim.SendTransaction(context.Background(), tx) + sim.Commit() + // 3. + receipt, _ := sim.TransactionReceipt(context.Background(), tx.Hash()) + if h := receipt.BlockNumber.Uint64(); h != 1 { + t.Errorf("TX included in wrong block: %d", h) + } + // 4. + if err := sim.Fork(context.Background(), parent.Hash()); err != nil { + t.Errorf("forking: %v", err) + } + // 5. + sim.Commit() + if err := sim.SendTransaction(context.Background(), tx); err != nil { + t.Errorf("sending transaction: %v", err) + } + sim.Commit() + // 6. + receipt, _ = sim.TransactionReceipt(context.Background(), tx.Hash()) + if h := receipt.BlockNumber.Uint64(); h != 2 { + t.Errorf("TX included in wrong block: %d", h) + } +} diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 499b4bda07d4..f4e5a2a900c6 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -21,6 +21,8 @@ import ( "errors" "fmt" "math/big" + "strings" + "sync" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -32,7 +34,7 @@ import ( // SignerFn is a signer function callback when a contract requires a method to // sign the transaction before submission. -type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error) +type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) // CallOpts is the collection of options to fine tune a contract call request. 
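As an aside on the SignerFn change in the base.go hunk above: the callback no longer receives a types.Signer, so choosing one is now the callback's job. A hedged sketch of what a caller-supplied implementation could look like, roughly what NewKeyedTransactorWithChainID (used throughout the updated tests) provides; the use of types.LatestSignerForChainID here is an assumption about the helper available at this point in the codebase:

```go
// Illustrative only: a SignerFn under the new two-argument signature.
package main

import (
	"crypto/ecdsa"
	"errors"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

// keyedOpts builds TransactOpts whose Signer closes over a private key and a
// chain-specific signer, since the bound contract no longer supplies one.
func keyedOpts(key *ecdsa.PrivateKey, chainID *big.Int) *bind.TransactOpts {
	keyAddr := crypto.PubkeyToAddress(key.PublicKey)
	signer := types.LatestSignerForChainID(chainID)
	return &bind.TransactOpts{
		From: keyAddr,
		Signer: func(from common.Address, tx *types.Transaction) (*types.Transaction, error) {
			if from != keyAddr {
				return nil, errors.New("not authorized to sign this account")
			}
			return types.SignTx(tx, signer, key)
		},
	}
}

func main() { _ = keyedOpts }
```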
type CallOpts struct { @@ -49,11 +51,15 @@ type TransactOpts struct { Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state) Signer SignerFn // Method to use for signing the transaction (mandatory) - Value *big.Int // Funds to transfer along along the transaction (nil = 0 = no funds) - GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) - GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) + Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds) + GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle) + GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle) + GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle) + GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate) Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) + + NoSend bool // Do all transact steps but do not send the transaction } // FilterOpts is the collection of options to fine tune filtering for events @@ -72,6 +78,29 @@ type WatchOpts struct { Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) } +// MetaData collects all metadata for a bound contract. +type MetaData struct { + mu sync.Mutex + Sigs map[string]string + Bin string + ABI string + ab *abi.ABI +} + +func (m *MetaData) GetAbi() (*abi.ABI, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.ab != nil { + return m.ab, nil + } + if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil { + return nil, err + } else { + m.ab = &parsed + } + return m.ab, nil +} + // BoundContract is the base wrapper object that reflects a contract on the // Ethereum network. It contains a collection of methods that are used by the // higher level contract bindings to operate. @@ -117,11 +146,14 @@ func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend Co // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. -func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error { +func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method string, params ...interface{}) error { // Don't crash on a lazy user if opts == nil { opts = new(CallOpts) } + if results == nil { + results = new([]interface{}) + } // Pack the input, call and unpack the results input, err := c.abi.Pack(method, params...) if err != nil { @@ -149,7 +181,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, } } else { output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber) - if err == nil && len(output) == 0 { + if err != nil { + return err + } + if len(output) == 0 { // Make sure we have a contract to operate on, and bail out otherwise. 
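As an aside on the Call signature change above, from a single result interface{} to results *[]interface{}: when the supplied slice is empty, the bound contract fills it with the generically unpacked return values. A hedged usage sketch; the "count" method and its single *big.Int return are illustrative, not taken from the patch:

```go
// Illustrative only: reading one return value through the new Call signature.
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// readCount calls a hypothetical "count" view method returning one uint256.
func readCount(contract *bind.BoundContract) (*big.Int, error) {
	var out []interface{}
	if err := contract.Call(&bind.CallOpts{}, &out, "count"); err != nil {
		return nil, err
	}
	return out[0].(*big.Int), nil
}

func main() {}
```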
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil { return err @@ -158,10 +193,14 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, } } } - if err != nil { + + if len(*results) == 0 { + res, err := c.abi.Unpack(method, output) + *results = res return err } - return c.abi.Unpack(result, method, output) + res := *results + return c.abi.UnpackIntoInterface(res[0], method, output) } // Transact invokes the (paid) contract method with params as input values. @@ -171,73 +210,189 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in if err != nil { return nil, err } + // todo(rjl493456442) check the method is payable or not, + // reject invalid transaction at the first place return c.transact(opts, &c.address, input) } +// RawTransact initiates a transaction with the given raw calldata as the input. +// It's usually used to initiate transactions for invoking **Fallback** function. +func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) { + // todo(rjl493456442) check the method is payable or not, + // reject invalid transaction at the first place + return c.transact(opts, &c.address, calldata) +} + // Transfer initiates a plain transaction to move funds to the contract, calling // its default method if one is available. func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) { + // todo(rjl493456442) check the payable fallback or receive is defined + // or not, reject invalid transaction at the first place return c.transact(opts, &c.address, nil) } -// transact executes an actual transaction invocation, first deriving any missing -// authorization fields, and then scheduling the transaction for execution. 
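As an aside on RawTransact, introduced above for driving a contract's fallback with pre-packed calldata: combined with the new NoSend option it yields a signed transaction without broadcasting it, leaving submission to the caller. A hedged sketch of the combination:

```go
// Illustrative only: sign a raw-calldata transaction without broadcasting it.
package main

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
)

// signFallbackCall drives the contract's fallback with raw calldata; with
// NoSend set, transact() returns the signed tx instead of submitting it.
func signFallbackCall(c *bind.BoundContract, opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
	opts.NoSend = true
	return c.RawTransact(opts, calldata)
}

func main() {}
```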
-func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { - var err error - - // Ensure a valid value field and resolve the account nonce +func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Address, input []byte, head *types.Header) (*types.Transaction, error) { + // Normalize value value := opts.Value if value == nil { value = new(big.Int) } - var nonce uint64 - if opts.Nonce == nil { - nonce, err = c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From) + // Estimate TipCap + gasTipCap := opts.GasTipCap + if gasTipCap == nil { + tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context)) if err != nil { - return nil, fmt.Errorf("failed to retrieve account nonce: %v", err) + return nil, err } - } else { - nonce = opts.Nonce.Uint64() + gasTipCap = tip + } + // Estimate FeeCap + gasFeeCap := opts.GasFeeCap + if gasFeeCap == nil { + gasFeeCap = new(big.Int).Add( + gasTipCap, + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + } + if gasFeeCap.Cmp(gasTipCap) < 0 { + return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap) + } + // Estimate GasLimit + gasLimit := opts.GasLimit + if opts.GasLimit == 0 { + var err error + gasLimit, err = c.estimateGasLimit(opts, contract, input, nil, gasTipCap, gasFeeCap, value) + if err != nil { + return nil, err + } + } + // create the transaction + nonce, err := c.getNonce(opts) + if err != nil { + return nil, err + } + baseTx := &types.DynamicFeeTx{ + To: contract, + Nonce: nonce, + GasFeeCap: gasFeeCap, + GasTipCap: gasTipCap, + Gas: gasLimit, + Value: value, + Data: input, + } + return types.NewTx(baseTx), nil +} + +func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { + if opts.GasFeeCap != nil || opts.GasTipCap != nil { + return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet") } - // Figure out the gas allowance and gas price values + // Normalize value + value := opts.Value + if value == nil { + value = new(big.Int) + } + // Estimate GasPrice gasPrice := opts.GasPrice if gasPrice == nil { - gasPrice, err = c.transactor.SuggestGasPrice(ensureContext(opts.Context)) + price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context)) if err != nil { - return nil, fmt.Errorf("failed to suggest gas price: %v", err) + return nil, err } + gasPrice = price } + // Estimate GasLimit gasLimit := opts.GasLimit - if gasLimit == 0 { - // Gas estimation cannot succeed without code for method invocations - if contract != nil { - if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil { - return nil, err - } else if len(code) == 0 { - return nil, ErrNoCode - } - } - // If the contract surely has code (or code is not needed), estimate the transaction - msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input} - gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg) + if opts.GasLimit == 0 { + var err error + gasLimit, err = c.estimateGasLimit(opts, contract, input, gasPrice, nil, nil, value) if err != nil { - return nil, fmt.Errorf("failed to estimate gas needed: %v", err) + return nil, err + } + } + // create the transaction + nonce, err := c.getNonce(opts) + if err != nil { + return nil, err + } + baseTx := &types.LegacyTx{ + To: contract, + Nonce: nonce, + GasPrice: gasPrice, + Gas: 
gasLimit, + Value: value, + Data: input, + } + return types.NewTx(baseTx), nil +} + +func (c *BoundContract) estimateGasLimit(opts *TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) { + if contract != nil { + // Gas estimation cannot succeed without code for method invocations. + if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil { + return 0, err + } else if len(code) == 0 { + return 0, ErrNoCode } } - // Create the transaction, sign it and schedule it for execution - var rawTx *types.Transaction - if contract == nil { - rawTx = types.NewContractCreation(nonce, value, gasLimit, gasPrice, input) + msg := ethereum.CallMsg{ + From: opts.From, + To: contract, + GasPrice: gasPrice, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Value: value, + Data: input, + } + return c.transactor.EstimateGas(ensureContext(opts.Context), msg) +} + +func (c *BoundContract) getNonce(opts *TransactOpts) (uint64, error) { + if opts.Nonce == nil { + return c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From) + } else { + return opts.Nonce.Uint64(), nil + } +} + +// transact executes an actual transaction invocation, first deriving any missing +// authorization fields, and then scheduling the transaction for execution. +func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) { + if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) { + return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } + // Create the transaction + var ( + rawTx *types.Transaction + err error + ) + if opts.GasPrice != nil { + rawTx, err = c.createLegacyTx(opts, contract, input) } else { - rawTx = types.NewTransaction(nonce, c.address, value, gasLimit, gasPrice, input) + // Only query for basefee if gasPrice not specified + if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil { + return nil, errHead + } else if head.BaseFee != nil { + rawTx, err = c.createDynamicTx(opts, contract, input, head) + } else { + // Chain is not London ready -> use legacy transaction + rawTx, err = c.createLegacyTx(opts, contract, input) + } } + if err != nil { + return nil, err + } + // Sign the transaction and schedule it for execution if opts.Signer == nil { return nil, errors.New("no signer to authorize the transaction with") } - signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx) + signedTx, err := opts.Signer(opts.From, rawTx) if err != nil { return nil, err } + if opts.NoSend { + return signedTx, nil + } if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx); err != nil { return nil, err } @@ -252,9 +407,9 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int opts = new(FilterOpts) } // Append the event selector to the query parameters and construct the topic set - query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...) + query = append([][]interface{}{{c.abi.Events[name].ID}}, query...) - topics, err := makeTopics(query...) + topics, err := abi.MakeTopics(query...) if err != nil { return nil, nil, err } @@ -301,9 +456,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter opts = new(WatchOpts) } // Append the event selector to the query parameters and construct the topic set - query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...) 
+ query = append([][]interface{}{{c.abi.Events[name].ID}}, query...) - topics, err := makeTopics(query...) + topics, err := abi.MakeTopics(query...) if err != nil { return nil, nil, err } @@ -326,8 +481,11 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter // UnpackLog unpacks a retrieved log into the provided output structure. func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error { + if log.Topics[0] != c.abi.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } if len(log.Data) > 0 { - if err := c.abi.Unpack(out, event, log.Data); err != nil { + if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil { return err } } @@ -337,11 +495,14 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) indexed = append(indexed, arg) } } - return parseTopics(out, indexed, log.Topics[1:]) + return abi.ParseTopics(out, indexed, log.Topics[1:]) } // UnpackLogIntoMap unpacks a retrieved log into the provided map. func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error { + if log.Topics[0] != c.abi.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } if len(log.Data) > 0 { if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil { return err @@ -353,14 +514,14 @@ func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event strin indexed = append(indexed, arg) } } - return parseTopicsIntoMap(out, indexed, log.Topics[1:]) + return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:]) } // ensureContext is a helper method to ensure a context is not nil, even if the // user specified it as such. func ensureContext(ctx context.Context) context.Context { if ctx == nil { - return context.TODO() + return context.Background() } return ctx } diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go index 3ae685e00f0a..08ba18f95e54 100644 --- a/accounts/abi/bind/base_test.go +++ b/accounts/abi/bind/base_test.go @@ -17,9 +17,9 @@ package bind_test import ( - "bytes" "context" "math/big" + "reflect" "strings" "testing" @@ -31,11 +31,54 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" ) +func mockSign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil } + +type mockTransactor struct { + baseFee *big.Int + gasTipCap *big.Int + gasPrice *big.Int + suggestGasTipCapCalled bool + suggestGasPriceCalled bool +} + +func (mt *mockTransactor) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + return &types.Header{BaseFee: mt.baseFee}, nil +} + +func (mt *mockTransactor) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + return []byte{1}, nil +} + +func (mt *mockTransactor) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + return 0, nil +} + +func (mt *mockTransactor) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + mt.suggestGasPriceCalled = true + return mt.gasPrice, nil +} + +func (mt *mockTransactor) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + mt.suggestGasTipCapCalled = true + return mt.gasTipCap, nil +} + +func (mt *mockTransactor) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { + return 0, nil +} + +func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transaction) error { + 
return nil +} + type mockCaller struct { - codeAtBlockNumber *big.Int - callContractBlockNumber *big.Int + codeAtBlockNumber *big.Int + callContractBlockNumber *big.Int + pendingCodeAtCalled bool + pendingCallContractCalled bool } func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { @@ -47,6 +90,16 @@ func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, b mc.callContractBlockNumber = blockNumber return nil, nil } + +func (mc *mockCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) { + mc.pendingCodeAtCalled = true + return nil, nil +} + +func (mc *mockCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) { + mc.pendingCallContractCalled = true + return nil, nil +} func TestPassingBlockNumber(t *testing.T) { mc := &mockCaller{} @@ -59,11 +112,10 @@ func TestPassingBlockNumber(t *testing.T) { }, }, }, mc, nil, nil) - var ret string blockNumber := big.NewInt(42) - bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something") + bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, nil, "something") if mc.callContractBlockNumber != blockNumber { t.Fatalf("CallContract() was not passed the block number") @@ -73,7 +125,7 @@ func TestPassingBlockNumber(t *testing.T) { t.Fatalf("CodeAt() was not passed the block number") } - bc.Call(&bind.CallOpts{}, &ret, "something") + bc.Call(&bind.CallOpts{}, nil, "something") if mc.callContractBlockNumber != nil { t.Fatalf("CallContract() was passed a block number when it should not have been") @@ -82,57 +134,39 @@ func TestPassingBlockNumber(t *testing.T) { if mc.codeAtBlockNumber != nil { t.Fatalf("CodeAt() was passed a block number when it should not have been") } + + bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, nil, "something") + + if !mc.pendingCallContractCalled { + t.Fatalf("CallContract() was not passed the block number") + } + + if !mc.pendingCodeAtCalled { + t.Fatalf("CodeAt() was not passed the block number") + } } const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158" func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) { hash := crypto.Keccak256Hash([]byte("testName")) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x0"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x0"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x0")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "name": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), 
"amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["name"] != expectedReceivedMap["name"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { @@ -141,51 +175,23 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { t.Fatal(err) } hash := crypto.Keccak256Hash(sliceBytes) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x0"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x0"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + crypto.Keccak256Hash([]byte("received(string[],address,uint256,bytes)")), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x0")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"names","type":"string[]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "names": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["names"] != expectedReceivedMap["names"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) { @@ -194,51 +200,23 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) { t.Fatal(err) } hash := crypto.Keccak256Hash(arrBytes) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x0"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x0"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + crypto.Keccak256Hash([]byte("received(address[2],address,uint256,bytes)")), 
+ hash, } + mockLog := newMockLog(topics, common.HexToHash("0x0")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"addresses","type":"address[2]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "addresses": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["addresses"] != expectedReceivedMap["addresses"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) { @@ -249,99 +227,117 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) { functionTyBytes := append(addrBytes, functionSelector...) var functionTy [24]byte copy(functionTy[:], functionTyBytes[0:24]) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), - common.BytesToHash(functionTyBytes), - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + topics := []common.Hash{ + crypto.Keccak256Hash([]byte("received(function,address,uint256,bytes)")), + common.BytesToHash(functionTyBytes), } - + mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"function","type":"function"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "function": functionTy, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { - t.Error(err) - } - - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") - } - if receivedMap["function"] != expectedReceivedMap["function"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - 
t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") - } + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) { - byts := []byte{1, 2, 3, 4, 5} - hash := crypto.Keccak256Hash(byts) - mockLog := types.Log{ - Address: common.HexToAddress("0x0"), - Topics: []common.Hash{ - common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"), - hash, - }, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"), - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, + bytes := []byte{1, 2, 3, 4, 5} + hash := crypto.Keccak256Hash(bytes) + topics := []common.Hash{ + crypto.Keccak256Hash([]byte("received(bytes,address,uint256,bytes)")), + hash, } + mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"content","type":"bytes"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - receivedMap := make(map[string]interface{}) expectedReceivedMap := map[string]interface{}{ "content": hash, "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), "amount": big.NewInt(1), "memo": []byte{88}, } - if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil { + unpackAndCheck(t, bc, expectedReceivedMap, mockLog) +} + +func TestTransactGasFee(t *testing.T) { + assert := assert.New(t) + + // GasTipCap and GasFeeCap + // When opts.GasTipCap and opts.GasFeeCap are nil + mt := &mockTransactor{baseFee: big.NewInt(100), gasTipCap: big.NewInt(5)} + bc := bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil) + opts := &bind.TransactOpts{Signer: mockSign} + tx, err := bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(5), tx.GasTipCap()) + assert.Equal(big.NewInt(205), tx.GasFeeCap()) + assert.Nil(opts.GasTipCap) + assert.Nil(opts.GasFeeCap) + assert.True(mt.suggestGasTipCapCalled) + + // Second call to Transact should use latest suggested GasTipCap + mt.gasTipCap = big.NewInt(6) + mt.suggestGasTipCapCalled = false + tx, err = bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(6), tx.GasTipCap()) + assert.Equal(big.NewInt(206), tx.GasFeeCap()) + assert.True(mt.suggestGasTipCapCalled) + + // GasPrice + // When opts.GasPrice is nil + mt = &mockTransactor{gasPrice: big.NewInt(5)} + bc = bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil) + opts = &bind.TransactOpts{Signer: mockSign} + tx, err = bc.Transact(opts, "") + assert.Nil(err) + assert.Equal(big.NewInt(5), tx.GasPrice()) + assert.Nil(opts.GasPrice) + assert.True(mt.suggestGasPriceCalled) + + // Second call to Transact should use latest suggested GasPrice + mt.gasPrice = big.NewInt(6) + mt.suggestGasPriceCalled = false + tx, err = bc.Transact(opts, "") + assert.Nil(err) 
+ assert.Equal(big.NewInt(6), tx.GasPrice()) + assert.True(mt.suggestGasPriceCalled) +} + +func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) { + received := make(map[string]interface{}) + if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil { t.Error(err) } - if len(receivedMap) != 4 { - t.Fatal("unpacked map expected to have length 4") + if len(received) != len(expected) { + t.Fatalf("unpacked map length %v not equal expected length of %v", len(received), len(expected)) } - if receivedMap["content"] != expectedReceivedMap["content"] { - t.Error("unpacked map does not match expected map") + for name, elem := range expected { + if !reflect.DeepEqual(elem, received[name]) { + t.Errorf("field %v does not match expected, want %v, got %v", name, elem, received[name]) + } } - if receivedMap["sender"] != expectedReceivedMap["sender"] { - t.Error("unpacked map does not match expected map") - } - if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 { - t.Error("unpacked map does not match expected map") - } - if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) { - t.Error("unpacked map does not match expected map") +} + +func newMockLog(topics []common.Hash, txHash common.Hash) types.Log { + return types.Log{ + Address: common.HexToAddress("0x0"), + Topics: topics, + Data: hexutil.MustDecode(hexData), + BlockNumber: uint64(26), + TxHash: txHash, + TxIndex: 111, + BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), + Index: 7, + Removed: false, } } diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index 13ac286b4552..ff69a78c642f 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -52,7 +52,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // contracts is the map of each individual contract requested binding contracts = make(map[string]*tmplContract) - // structs is the map of all reclared structs shared by passed contracts. + // structs is the map of all redeclared structs shared by passed contracts. structs = make(map[string]*tmplStruct) // isLib is the map used to flag each encountered library as such @@ -77,22 +77,31 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] calls = make(map[string]*tmplMethod) transacts = make(map[string]*tmplMethod) events = make(map[string]*tmplEvent) + fallback *tmplMethod + receive *tmplMethod - // identifiers are used to detect duplicated identifier of function - // and event. For all calls, transacts and events, abigen will generate + // identifiers are used to detect duplicated identifiers of functions + // and events. For all calls, transacts and events, abigen will generate // corresponding bindings. However we have to ensure there is no - // identifier coliision in the bindings of these categories. + // identifier collisions in the bindings of these categories. 
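Referring back to TestTransactGasFee above: the 205 and 206 GasFeeCap assertions follow from the default in createDynamicTx earlier in this diff, feeCap = tip + 2*baseFee, applied when the caller sets neither fee field. A minimal sketch of that arithmetic:

```go
// Sketch only: the default fee-cap heuristic exercised by TestTransactGasFee.
package main

import (
	"fmt"
	"math/big"
)

// defaultFeeCap mirrors createDynamicTx's fallback: tip + 2*baseFee.
func defaultFeeCap(baseFee, tip *big.Int) *big.Int {
	return new(big.Int).Add(tip, new(big.Int).Mul(baseFee, big.NewInt(2)))
}

func main() {
	fmt.Println(defaultFeeCap(big.NewInt(100), big.NewInt(5))) // 205, as asserted above
	fmt.Println(defaultFeeCap(big.NewInt(100), big.NewInt(6))) // 206
}
```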
callIdentifiers = make(map[string]bool) transactIdentifiers = make(map[string]bool) eventIdentifiers = make(map[string]bool) ) + + for _, input := range evmABI.Constructor.Inputs { + if hasStruct(input.Type) { + bindStructType[lang](input.Type, structs) + } + } + for _, original := range evmABI.Methods { // Normalize the method for capital cases and non-anonymous inputs/outputs normalized := original normalizedName := methodNormalizer[lang](alias(aliases, original.Name)) // Ensure there is no duplicated identifier var identifiers = callIdentifiers - if !original.Const { + if !original.IsConstant() { identifiers = transactIdentifiers } if identifiers[normalizedName] { @@ -121,7 +130,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] } } // Append the methods to the call or transact lists - if original.Const { + if original.IsConstant() { calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} } else { transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} @@ -156,7 +165,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // Append the event to the accumulator list events[original.Name] = &tmplEvent{Original: original, Normalized: normalized} } - + // Add two special fallback functions if they exist + if evmABI.HasFallback() { + fallback = &tmplMethod{Original: evmABI.Fallback} + } + if evmABI.HasReceive() { + receive = &tmplMethod{Original: evmABI.Receive} + } // There is no easy way to pass arbitrary java objects to the Go side. if len(structs) > 0 && lang == LangJava { return "", errors.New("java binding for tuple arguments is not supported yet") @@ -169,6 +184,8 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] Constructor: evmABI.Constructor, Calls: calls, Transacts: transacts, + Fallback: fallback, + Receive: receive, Events: events, Libraries: make(map[string]string), } @@ -210,8 +227,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] "bindtype": bindType[lang], "bindtopictype": bindTopicType[lang], "namedtype": namedType[lang], - "formatmethod": formatMethod, - "formatevent": formatEvent, "capitalise": capitalise, "decapitalise": decapitalise, } @@ -238,7 +253,7 @@ var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) stri LangJava: bindTypeJava, } -// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go one. +// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones. func bindBasicTypeGo(kind abi.Type) string { switch kind.T { case abi.AddressTy: @@ -278,7 +293,7 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { } } -// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java one. +// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones. func bindBasicTypeJava(kind abi.Type) string { switch kind.T { case abi.AddressTy: @@ -322,7 +337,7 @@ func bindBasicTypeJava(kind abi.Type) string { } // pluralizeJavaType explicitly converts multidimensional types to predefined -// type in go side. +// types in go side. 
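As an aside on the fallback/receive support added to Bind above: the special functions are detected via the ABI's HasFallback and HasReceive accessors and bound as dedicated template methods. A hedged sketch of the detection step only; the two-entry ABI JSON below is illustrative, and the exact JSON accepted for these entries is an assumption:

```go
// Illustrative only: detecting fallback/receive definitions in a parsed ABI.
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

const demoABI = `[
	{"stateMutability":"payable","type":"receive"},
	{"stateMutability":"payable","type":"fallback"}
]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(demoABI))
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.HasFallback(), parsed.HasReceive()) // expected: true true
}
```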
func pluralizeJavaType(typ string) string { switch typ { case "boolean": @@ -361,7 +376,7 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) } // bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same -// funcionality as for simple types, but dynamic types get converted to hashes. +// functionality as for simple types, but dynamic types get converted to hashes. func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeGo(kind, structs) @@ -378,7 +393,7 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { } // bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same -// funcionality as for simple types, but dynamic types get converted to hashes. +// functionality as for simple types, but dynamic types get converted to hashes. func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeJava(kind, structs) @@ -386,7 +401,7 @@ func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { // parameters that are not value types i.e. arrays and structs are not // stored directly but instead a keccak256-hash of an encoding is stored. // - // We only convert stringS and bytes to hash, still need to deal with + // We only convert strings and bytes to hash, still need to deal with // array(both fixed-size and dynamic-size) and struct. if bound == "String" || bound == "byte[]" { bound = "Hash" @@ -407,7 +422,7 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - // We compose raw struct name and canonical parameter expression + // We compose a raw struct name and a canonical parameter expression // together here. The reason is before solidity v0.5.11, kind.TupleRawName // is empty, so we use canonical parameter expression to distinguish // different struct definition. From the consideration of backward @@ -446,7 +461,7 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - // We compose raw struct name and canonical parameter expression + // We compose a raw struct name and a canonical parameter expression // together here. The reason is before solidity v0.5.11, kind.TupleRawName // is empty, so we use canonical parameter expression to distinguish // different struct definition. From the consideration of backward @@ -478,7 +493,7 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { } // namedType is a set of functions that transform language specific types to -// named versions that my be used inside method names. +// named versions that may be used inside method names. var namedType = map[Lang]func(string, abi.Type) string{ LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") }, LangJava: namedTypeJava, @@ -520,16 +535,14 @@ func alias(aliases map[string]string, n string) string { } // methodNormalizer is a name transformer that modifies Solidity method names to -// conform to target language naming concentions. +// conform to target language naming conventions. var methodNormalizer = map[Lang]func(string) string{ LangGo: abi.ToCamelCase, LangJava: decapitalise, } // capitalise makes a camel-case string which starts with an upper case character. 
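// A short illustration of why bindTopicTypeGo above maps indexed dynamic types to common.Hash:
// for an indexed string or bytes argument, the log topic stores the keccak256 hash of the
// value's bytes rather than the value itself, so a generated filter can only match on the hash.
// The helper below is a hypothetical example, not part of the generated bindings.
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// topicForIndexedString returns the topic a generated filter method would have to match
// for an indexed string argument equal to value.
func topicForIndexedString(value string) common.Hash {
	return crypto.Keccak256Hash([]byte(value))
}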
-func capitalise(input string) string { - return abi.ToCamelCase(input) -} +var capitalise = abi.ToCamelCase // decapitalise makes a camel-case string which starts with a lower case character. func decapitalise(input string) string { @@ -578,63 +591,3 @@ func hasStruct(t abi.Type) bool { return false } } - -// resolveArgName converts a raw argument representation into a user friendly format. -func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string { - var ( - prefix string - embedded string - typ = &arg.Type - ) -loop: - for { - switch typ.T { - case abi.SliceTy: - prefix += "[]" - case abi.ArrayTy: - prefix += fmt.Sprintf("[%d]", typ.Size) - default: - embedded = typ.TupleRawName + typ.String() - break loop - } - typ = typ.Elem - } - if s, exist := structs[embedded]; exist { - return prefix + s.Name - } else { - return arg.Type.String() - } -} - -// formatMethod transforms raw method representation into a user friendly one. -func formatMethod(method abi.Method, structs map[string]*tmplStruct) string { - inputs := make([]string, len(method.Inputs)) - for i, input := range method.Inputs { - inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name) - } - outputs := make([]string, len(method.Outputs)) - for i, output := range method.Outputs { - outputs[i] = resolveArgName(output, structs) - if len(output.Name) > 0 { - outputs[i] += fmt.Sprintf(" %v", output.Name) - } - } - constant := "" - if method.Const { - constant = "constant " - } - return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", ")) -} - -// formatEvent transforms raw event representation into a user friendly one. -func formatEvent(event abi.Event, structs map[string]*tmplStruct) string { - inputs := make([]string, len(event.Inputs)) - for i, input := range event.Inputs { - if input.Indexed { - inputs[i] = fmt.Sprintf("%v indexed %v", resolveArgName(input, structs), input.Name) - } else { - inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name) - } - } - return fmt.Sprintf("event %v(%v)", event.RawName, strings.Join(inputs, ", ")) -} diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index fa1a7b1ca77d..992497993ad3 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -199,7 +199,8 @@ var bindTests = []struct { {"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]}, {"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]}, {"type":"event","name":"anonymous","anonymous":true,"inputs":[]}, - {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]} + {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}, + {"type":"event","name":"unnamed","inputs":[{"name":"","type":"uint256","indexed": true},{"name":"","type":"uint256","indexed":true}]} ] `}, ` @@ -249,6 +250,12 @@ var bindTests = []struct { fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present fmt.Println(res, str, dat, hash, err) + + oit, err := e.FilterUnnamed(nil, []*big.Int{}, []*big.Int{}) + + arg0 := oit.Event.Arg0 // 
Make sure unnamed arguments are handled correctly + arg1 := oit.Event.Arg1 // Make sure unnamed arguments are handled correctly + fmt.Println(arg0, arg1) } // Run a tiny reflection test to ensure disallowed methods don't appear if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok { @@ -289,9 +296,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy an interaction tester contract and call a transaction on it @@ -344,9 +351,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tuple tester contract and execute a structured call on it @@ -390,9 +397,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tuple tester contract and execute a structured call on it @@ -448,9 +455,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a slice tester contract and execute a n array call on it @@ -496,9 +503,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a default method invoker contract and execute its default method @@ -522,6 +529,70 @@ var bindTests = []struct { nil, nil, }, + // Tests that structs are correctly unpacked + { + + `Structs`, + ` + pragma solidity ^0.6.5; + pragma experimental ABIEncoderV2; + contract Structs { + struct A { + bytes32 B; + } + + function F() public view returns (A[] memory a, uint256[] memory c, bool[] memory d) { + A[] memory a = new A[](2); + a[0].B = bytes32(uint256(1234) << 
96); + uint256[] memory c; + bool[] memory d; + return (a, c, d); + } + + function G() public view returns (A[] memory a) { + A[] memory a = new A[](2); + a[0].B = bytes32(uint256(1234) << 96); + return a; + } + } + `, + []string{`608060405234801561001057600080fd5b50610278806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806328811f591461003b5780636fecb6231461005b575b600080fd5b610043610070565b604051610052939291906101a0565b60405180910390f35b6100636100d6565b6040516100529190610186565b604080516002808252606082810190935282918291829190816020015b610095610131565b81526020019060019003908161008d575050805190915061026960611b9082906000906100be57fe5b60209081029190910101515293606093508392509050565b6040805160028082526060828101909352829190816020015b6100f7610131565b8152602001906001900390816100ef575050805190915061026960611b90829060009061012057fe5b602090810291909101015152905090565b60408051602081019091526000815290565b815260200190565b6000815180845260208085019450808401835b8381101561017b578151518752958201959082019060010161015e565b509495945050505050565b600060208252610199602083018461014b565b9392505050565b6000606082526101b3606083018661014b565b6020838203818501528186516101c98185610239565b91508288019350845b818110156101f3576101e5838651610143565b9484019492506001016101d2565b505084810360408601528551808252908201925081860190845b8181101561022b57825115158552938301939183019160010161020d565b509298975050505050505050565b9081526020019056fea2646970667358221220eb85327e285def14230424c52893aebecec1e387a50bb6b75fc4fdbed647f45f64736f6c63430006050033`}, + []string{`[{"inputs":[],"name":"F","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"},{"internalType":"uint256[]","name":"c","type":"uint256[]"},{"internalType":"bool[]","name":"d","type":"bool[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"G","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"}],"stateMutability":"view","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + // Generate a new random account and a funded simulator + key, _ := crypto.GenerateKey() + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) + defer sim.Close() + + // Deploy a structs method invoker contract and execute its default method + _, _, structs, err := DeployStructs(auth, sim) + if err != nil { + t.Fatalf("Failed to deploy defaulter contract: %v", err) + } + sim.Commit() + opts := bind.CallOpts{} + if _, err := structs.F(&opts); err != nil { + t.Fatalf("Failed to invoke F method: %v", err) + } + if _, err := structs.G(&opts); err != nil { + t.Fatalf("Failed to invoke G method: %v", err) + } + `, + nil, + nil, + nil, + nil, + }, // Tests that non-existent contracts are reported as such (though only simulator test) { `NonExistent`, @@ -562,6 +633,45 @@ var bindTests = []struct { nil, nil, }, + { + `NonExistentStruct`, + ` + contract NonExistentStruct { + function Struct() public view returns(uint256 a, uint256 b) { + return (10, 10); + } + } + `, + 
[]string{`6080604052348015600f57600080fd5b5060888061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063d5f6622514602d575b600080fd5b6033604c565b6040805192835260208301919091528051918290030190f35b600a809156fea264697066735822beefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeef64736f6c6343decafe0033`}, + []string{`[{"inputs":[],"name":"Struct","outputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"stateMutability":"pure","type":"function"}]`}, + ` + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + `, + ` + // Create a simulator and wrap a non-deployed contract + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000)) + defer sim.Close() + + nonexistent, err := NewNonExistentStruct(common.Address{}, sim) + if err != nil { + t.Fatalf("Failed to access non-existent contract: %v", err) + } + // Ensure that contract calls fail with the appropriate error + if res, err := nonexistent.Struct(nil); err == nil { + t.Fatalf("Call succeeded on non-existent contract: %v", res) + } else if (err != bind.ErrNoCode) { + t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode) + } + `, + nil, + nil, + nil, + nil, + }, // Tests that gas estimation works for contracts with weird gas mechanics too. { `FunkyGasPattern`, @@ -591,9 +701,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a funky gas pattern contract @@ -641,9 +751,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a sender tester contract and execute a structured call on it @@ -716,9 +826,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a underscorer tester contract and execute a structured call on it @@ -810,9 +920,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + 
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy an eventer contract @@ -1000,9 +1110,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() //deploy the test contract @@ -1135,9 +1245,9 @@ var bindTests = []struct { ` key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() _, _, contract, err := DeployTuple(auth, sim) @@ -1277,9 +1387,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() //deploy the test contract @@ -1343,8 +1453,8 @@ var bindTests = []struct { ` // Initialize test accounts key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // deploy the test contract @@ -1384,7 +1494,7 @@ var bindTests = []struct { if n != 3 { t.Fatalf("Invalid bar0 event") } - case <-time.NewTimer(100 * time.Millisecond).C: + case <-time.NewTimer(3 * time.Second).C: t.Fatalf("Wait bar0 event timeout") } @@ -1395,7 +1505,7 @@ var bindTests = []struct { if n != 1 { t.Fatalf("Invalid bar event") } - case <-time.NewTimer(100 * time.Millisecond).C: + case <-time.NewTimer(3 * time.Second).C: t.Fatalf("Wait bar event timeout") } close(stopCh) @@ -1434,10 +1544,10 @@ var bindTests = []struct { addr := crypto.PubkeyToAddress(key.PublicKey) // Deploy registrar contract - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, _, err := DeployIdentifierCollision(transactOpts, sim) if err != nil { t.Fatalf("failed to deploy contract: %v", err) @@ -1496,10 +1606,10 @@ var bindTests = []struct { addr := crypto.PubkeyToAddress(key.PublicKey) // Deploy registrar contract - sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) + sim := 
backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, c1, err := DeployContractOne(transactOpts, sim) if err != nil { t.Fatal("Failed to deploy contract") @@ -1556,9 +1666,9 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) defer sim.Close() // Deploy a tester contract and execute a structured call on it @@ -1585,6 +1695,266 @@ var bindTests = []struct { nil, nil, }, + // Test fallback separation introduced in v0.6.0 + { + `NewFallbacks`, + ` + pragma solidity >=0.6.0 <0.7.0; + + contract NewFallbacks { + event Fallback(bytes data); + fallback() external { + emit Fallback(msg.data); + } + + event Received(address addr, uint value); + receive() external payable { + emit Received(msg.sender, msg.value); + } + } + `, + []string{"6080604052348015600f57600080fd5b506101078061001f6000396000f3fe608060405236605f577f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1005b348015606a57600080fd5b507f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f98660003660405180806020018281038252848482818152602001925080828437600081840152601f19601f820116905080830192505050935050505060405180910390a100fea26469706673582212201f994dcfbc53bf610b19176f9a361eafa77b447fd9c796fa2c615dfd0aaf3b8b64736f6c634300060c0033"}, + []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Fallback","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"addr","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Received","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"stateMutability":"payable","type":"receive"}]`}, + ` + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000) + defer sim.Close() + + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + _, _, c, err := DeployNewFallbacks(opts, sim) + if err != nil { + t.Fatalf("Failed to deploy contract: %v", err) + } + sim.Commit() + + // Test receive function + opts.Value = big.NewInt(100) + c.Receive(opts) + sim.Commit() + + var gotEvent bool + iter, _ := c.FilterReceived(nil) + defer iter.Close() + for iter.Next() { + if iter.Event.Addr != addr { + t.Fatal("Msg.sender mismatch") + } + if iter.Event.Value.Uint64() != 100 { + t.Fatal("Msg.value mismatch") + } + gotEvent = true + break + } + if !gotEvent { + t.Fatal("Expect to receive event emitted by receive") + } + + // Test fallback function + gotEvent = 
false + opts.Value = nil + calldata := []byte{0x01, 0x02, 0x03} + c.Fallback(opts, calldata) + sim.Commit() + + iter2, _ := c.FilterFallback(nil) + defer iter2.Close() + for iter2.Next() { + if !bytes.Equal(iter2.Event.Data, calldata) { + t.Fatal("calldata mismatch") + } + gotEvent = true + break + } + if !gotEvent { + t.Fatal("Expect to receive event emitted by fallback") + } + `, + nil, + nil, + nil, + nil, + }, + // Test resolving single struct argument + { + `NewSingleStructArgument`, + ` + pragma solidity ^0.8.0; + + contract NewSingleStructArgument { + struct MyStruct{ + uint256 a; + uint256 b; + } + event StructEvent(MyStruct s); + function TestEvent() public { + emit StructEvent(MyStruct({a: 1, b: 2})); + } + } + `, + []string{"608060405234801561001057600080fd5b50610113806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806324ec1d3f14602d575b600080fd5b60336035565b005b7fb4b2ff75e30cb4317eaae16dd8a187dd89978df17565104caa6c2797caae27d460405180604001604052806001815260200160028152506040516078919060ba565b60405180910390a1565b6040820160008201516096600085018260ad565b50602082015160a7602085018260ad565b50505050565b60b48160d3565b82525050565b600060408201905060cd60008301846082565b92915050565b600081905091905056fea26469706673582212208823628796125bf9941ce4eda18da1be3cf2931b231708ab848e1bd7151c0c9a64736f6c63430008070033"}, + []string{`[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"indexed":false,"internalType":"struct Test.MyStruct","name":"s","type":"tuple"}],"name":"StructEvent","type":"event"},{"inputs":[],"name":"TestEvent","outputs":[],"stateMutability":"nonpayable","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, _, d, err := DeployNewSingleStructArgument(user, sim) + if err != nil { + t.Fatalf("Failed to deploy contract %v", err) + } + sim.Commit() + + _, err = d.TestEvent(user) + if err != nil { + t.Fatalf("Failed to call contract %v", err) + } + sim.Commit() + + it, err := d.FilterStructEvent(nil) + if err != nil { + t.Fatalf("Failed to filter contract event %v", err) + } + var count int + for it.Next() { + if it.Event.S.A.Cmp(big.NewInt(1)) != 0 { + t.Fatal("Unexpected contract event") + } + if it.Event.S.B.Cmp(big.NewInt(2)) != 0 { + t.Fatal("Unexpected contract event") + } + count += 1 + } + if count != 1 { + t.Fatal("Unexpected contract event number") + } + `, + nil, + nil, + nil, + nil, + }, + // Test errors introduced in v0.8.4 + { + `NewErrors`, + ` + pragma solidity >0.8.4; + + contract NewErrors { + error MyError(uint256); + error MyError1(uint256); + error MyError2(uint256, uint256); + error MyError3(uint256 a, uint256 b, uint256 c); + function Error() public pure { + revert MyError3(1,2,3); + } + } + `, + 
[]string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"}, + []string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, tx, contract, err := DeployNewErrors(user, sim) + if err != nil { + t.Fatal(err) + } + sim.Commit() + _, err = bind.WaitDeployed(nil, sim, tx) + if err != nil { + t.Error(err) + } + if err := contract.Error(new(bind.CallOpts)); err == nil { + t.Fatalf("expected contract to throw error") + } + // TODO (MariusVanDerWijden unpack error using abigen + // once that is implemented + `, + nil, + nil, + nil, + nil, + }, + { + name: `ConstructorWithStructParam`, + contract: ` + pragma solidity >=0.8.0 <0.9.0; + + contract ConstructorWithStructParam { + struct StructType { + uint256 field; + } + + constructor(StructType memory st) {} + } + `, + bytecode: []string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`}, + abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`}, + imports: ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + tester: ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, tx, _, err := DeployConstructorWithStructParam(user, sim, ConstructorWithStructParamStructType{Field: big.NewInt(42)}) + if err != nil { + t.Fatalf("DeployConstructorWithStructParam() got err %v; want nil err", err) + } + sim.Commit() + + if _, err = bind.WaitDeployed(nil, sim, tx); err != nil { + t.Logf("Deployment tx: %+v", tx) + t.Errorf("bind.WaitDeployed(nil, %T, ) got err %v; want nil err", sim, err) + } + `, + }, } // Tests that packages generated by the binder can be successfully compiled and @@ -1596,11 +1966,11 @@ func TestGolangBindings(t *testing.T) { t.Skip("go sdk not found for testing") } // Create a temporary workspace for the test suite - ws, err := ioutil.TempDir("", "") + ws, err := ioutil.TempDir("", "binding-test") if err != nil { t.Fatalf("failed to create temporary workspace: %v", err) } - defer os.RemoveAll(ws) + //defer os.RemoveAll(ws) pkg := filepath.Join(ws, "bindtest") if err = os.MkdirAll(pkg, 0700); err != nil { @@ -1608,22 +1978,23 @@ func TestGolangBindings(t *testing.T) { } // Generate the test suite for all the contracts for i, tt := range bindTests { - var types []string - if tt.types != nil { - types = tt.types - } else { - types = []string{tt.name} - } - // Generate the binding and create a Go source file in the workspace - bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } - if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) - } - // Generate the test file with the injected test code - code := fmt.Sprintf(` + t.Run(tt.name, func(t *testing.T) { + var types []string + if tt.types != nil { + types = tt.types + } else { + types = []string{tt.name} + } + // Generate the binding and create a Go source file in the workspace + bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) + if err != nil { + t.Fatalf("test %d: failed to generate binding: %v", i, err) + } + if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { + t.Fatalf("test %d: failed to write binding: %v", i, err) + } + // Generate the test file with the injected test code + code := fmt.Sprintf(` package bindtest import ( @@ -1635,9 +2006,10 @@ func TestGolangBindings(t *testing.T) { %s } `, tt.imports, tt.name, tt.tester) - if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { - t.Fatalf("test %d: failed to write tests: %v", i, err) - } + if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { + t.Fatalf("test %d: failed to write tests: %v", i, err) + } + }) } // Convert the package to go modules and use the current source for go-ethereum moder := exec.Command(gocmd, "mod", "init", "bindtest") @@ -1646,11 +2018,16 @@ func 
TestGolangBindings(t *testing.T) { t.Fatalf("failed to convert binding test to modules: %v\n%s", err, out) } pwd, _ := os.Getwd() - replacer := exec.Command(gocmd, "mod", "edit", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root + replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ethereum/go-ethereum@v0.0.0", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root replacer.Dir = pkg if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } + tidier := exec.Command(gocmd, "mod", "tidy") + tidier.Dir = pkg + if out, err := tidier.CombinedOutput(); err != nil { + t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) + } // Test the entire package and report any failures cmd := exec.Command(gocmd, "test", "-v", "-count", "1") cmd.Dir = pkg @@ -1720,13 +2097,10 @@ package bindtest; import org.ethereum.geth.*; import java.util.*; - - public class Test { // ABI is the input ABI used to generate the binding from. public final static String ABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"u16\",\"type\":\"uint16\"}],\"name\":\"setUint16\",\"outputs\":[{\"name\":\"\",\"type\":\"uint16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_a\",\"type\":\"bool[2]\"}],\"name\":\"setBoolArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_a\",\"type\":\"address[2]\"}],\"name\":\"setAddressArray\",\"outputs\":[{\"name\":\"\",\"type\":\"address[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_l\",\"type\":\"bytes[]\"}],\"name\":\"setBytesList\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u8\",\"type\":\"uint8\"}],\"name\":\"setUint8\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u32\",\"type\":\"uint32\"}],\"name\":\"setUint32\",\"outputs\":[{\"name\":\"\",\"type\":\"uint32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b\",\"type\":\"bool\"}],\"name\":\"setBool\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_l\",\"type\":\"int256[]\"}],\"name\":\"setInt256List\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_a\",\"type\":\"uint256[2]\"}],\"name\":\"setUint256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_l\",\"type\":\"bool[]\"}],\"name\":\"setBoolList\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_a\",\"type\":\"bytes[2]\"}],\"name\":\"setBytesArray\",\"outputs\":[{\"name\":\"\",\
"type\":\"bytes[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_l\",\"type\":\"address[]\"}],\"name\":\"setAddressList\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_a\",\"type\":\"int256[2]\"}],\"name\":\"setInt256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_a\",\"type\":\"string[2]\"}],\"name\":\"setStringArray\",\"outputs\":[{\"name\":\"\",\"type\":\"string[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s\",\"type\":\"string\"}],\"name\":\"setString\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u64\",\"type\":\"uint64\"}],\"name\":\"setUint64\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i16\",\"type\":\"int16\"}],\"name\":\"setInt16\",\"outputs\":[{\"name\":\"\",\"type\":\"int16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i8\",\"type\":\"int8\"}],\"name\":\"setInt8\",\"outputs\":[{\"name\":\"\",\"type\":\"int8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_l\",\"type\":\"uint256[]\"}],\"name\":\"setUint256List\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256\",\"type\":\"int256\"}],\"name\":\"setInt256\",\"outputs\":[{\"name\":\"\",\"type\":\"int256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i32\",\"type\":\"int32\"}],\"name\":\"setInt32\",\"outputs\":[{\"name\":\"\",\"type\":\"int32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b32\",\"type\":\"bytes32\"}],\"name\":\"setBytes32\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_l\",\"type\":\"string[]\"}],\"name\":\"setStringList\",\"outputs\":[{\"name\":\"\",\"type\":\"string[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256\",\"type\":\"uint256\"}],\"name\":\"setUint256\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs\",\"type\":\"bytes\"}],\"name\":\"setBytes\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i64\",\"type\":\"int64\"}],\"name\":\"setInt64\",
\"outputs\":[{\"name\":\"\",\"type\":\"int64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b1\",\"type\":\"bytes1\"}],\"name\":\"setBytes1\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes1\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"; - // BYTECODE is the compiled bytecode used for deploying new contracts. public final static String BYTECODE = "0x608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb91908101906114215
65b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002
610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b9150509291505
0565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b8252
5050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b4576
00080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037"; @@ -1734,8 +2108,6 @@ public class Test { public static Test deploy(TransactOpts auth, EthereumClient client) throws Exception { Interfaces args = Geth.newInterfaces(0); String bytecode = BYTECODE; - - return new Test(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); } @@ -1746,7 +2118,6 @@ public class Test { this.Contract = deployment; } - // Ethereum address where this contract is located at. public final Address Address; @@ -1761,9 +2132,6 @@ public class Test { this(Geth.bindContract(address, ABI, client)); } - - - // setAddress is a paid mutator transaction binding the contract method 0xe30081a0. 
// // Solidity: function setAddress(address a) returns(address) @@ -2043,9 +2411,7 @@ public class Test { return this.Contract.transact(opts, "setUint8" , args); } - } - `, }, } @@ -2054,7 +2420,22 @@ public class Test { if err != nil { t.Fatalf("test %d: failed to generate binding: %v", i, err) } - if binding != c.expected { + // Remove empty lines + removeEmptys := func(input string) string { + lines := strings.Split(input, "\n") + var index int + for _, line := range lines { + if strings.TrimSpace(line) != "" { + lines[index] = line + index += 1 + } + } + lines = lines[:index] + return strings.Join(lines, "\n") + } + binding = removeEmptys(binding) + expect := removeEmptys(c.expected) + if binding != expect { t.Fatalf("test %d: generated binding mismatch, has %s, want %s", i, binding, c.expected) } } diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index c96dd1b9955d..492bad8c577f 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -30,11 +30,13 @@ type tmplData struct { type tmplContract struct { Type string // Type name of the main contract binding InputABI string // JSON ABI used as the input to generate the binding from - InputBin string // Optional EVM bytecode used to denetare deploy code from + InputBin string // Optional EVM bytecode used to generate deploy code from FuncSigs map[string]string // Optional map: string signature -> 4-byte signature Constructor abi.Method // Contract constructor for deploy parametrization Calls map[string]*tmplMethod // Contract calls that only read state data Transacts map[string]*tmplMethod // Contract calls that write state data + Fallback *tmplMethod // Additional special fallback function + Receive *tmplMethod // Additional special receive function Events map[string]*tmplEvent // Contract events accessors Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs Library bool // Indicator whether the contract is a library @@ -48,7 +50,8 @@ type tmplMethod struct { Structured bool // Whether the returns should be accumulated into a struct } -// tmplEvent is a wrapper around an a +// tmplEvent is a wrapper around an abi.Event that contains a few preprocessed +// and cached data fields. type tmplEvent struct { Original abi.Event // Original event as parsed by the abi package Normalized abi.Event // Normalized version of the parsed fields @@ -62,7 +65,7 @@ type tmplField struct { SolKind abi.Type // Raw abi type information } -// tmplStruct is a wrapper around an abi.tuple contains a auto-generated +// tmplStruct is a wrapper around an abi.tuple and contains an auto-generated // struct name. type tmplStruct struct { Name string // Auto-generated struct name(before solidity v0.5.11) or raw name. @@ -76,8 +79,8 @@ var tmplSource = map[Lang]string{ LangJava: tmplSourceJava, } -// tmplSourceGo is the Go source template use to generate the contract binding -// based on. +// tmplSourceGo is the Go source template that the generated Go contract binding +// is based on. const tmplSourceGo = ` // Code generated - DO NOT EDIT. // This file is a generated binding and any manual changes will be lost. @@ -87,6 +90,7 @@ package {{.Package}} import ( "math/big" "strings" + "errors" ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -98,10 +102,10 @@ import ( // Reference imports to suppress errors if they are not otherwise used. 
var ( + _ = errors.New _ = big.NewInt _ = strings.NewReader _ = ethereum.NotFound - _ = abi.U256 _ = bind.Bind _ = common.Big1 _ = types.BloomLookup @@ -118,32 +122,48 @@ var ( {{end}} {{range $contract := .Contracts}} + // {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract. + var {{.Type}}MetaData = &bind.MetaData{ + ABI: "{{.InputABI}}", + {{if $contract.FuncSigs -}} + Sigs: map[string]string{ + {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}", + {{end}} + }, + {{end -}} + {{if .InputBin -}} + Bin: "0x{{.InputBin}}", + {{end}} + } // {{.Type}}ABI is the input ABI used to generate the binding from. - const {{.Type}}ABI = "{{.InputABI}}" + // Deprecated: Use {{.Type}}MetaData.ABI instead. + var {{.Type}}ABI = {{.Type}}MetaData.ABI {{if $contract.FuncSigs}} + // Deprecated: Use {{.Type}}MetaData.Sigs instead. // {{.Type}}FuncSigs maps the 4-byte function signature to its string representation. - var {{.Type}}FuncSigs = map[string]string{ - {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}", - {{end}} - } + var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs {{end}} {{if .InputBin}} // {{.Type}}Bin is the compiled bytecode used for deploying new contracts. - var {{.Type}}Bin = "0x{{.InputBin}}" + // Deprecated: Use {{.Type}}MetaData.Bin instead. + var {{.Type}}Bin = {{.Type}}MetaData.Bin // Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it. func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) { - parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI)) + parsed, err := {{.Type}}MetaData.GetAbi() if err != nil { return common.Address{}, nil, nil, err } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } {{range $pattern, $name := .Libraries}} {{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend) {{$contract.Type}}Bin = strings.Replace({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:], -1) {{end}} - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}}) + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}}) if err != nil { return common.Address{}, nil, nil, err } @@ -259,7 +279,7 @@ var ( // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. - func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...) } @@ -278,7 +298,7 @@ var ( // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. 
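For context on the MetaData refactor above: a minimal sketch of what a regenerated binding now starts with, assuming a hypothetical Storage contract (the contract name, ABI fragment and bytecode placeholder are illustrative; bind.MetaData, GetAbi and bind.DeployContract are the calls the template relies on).

package storage

import (
	"errors"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// StorageMetaData bundles what previously lived in separate ABI/FuncSigs/Bin globals.
var StorageMetaData = &bind.MetaData{
	ABI: `[{"inputs":[],"name":"retrieve","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]`,
	Bin: "0x608060405234801561001057600080fd5b50", // truncated placeholder bytecode
}

// Deprecated: Use StorageMetaData.ABI instead.
var StorageABI = StorageMetaData.ABI

// DeployStorage mirrors the generated deploy helper: the ABI is parsed once and
// cached behind MetaData.GetAbi instead of being re-parsed on every deployment.
func DeployStorage(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, error) {
	parsed, err := StorageMetaData.GetAbi()
	if err != nil {
		return common.Address{}, nil, err
	}
	if parsed == nil {
		return common.Address{}, nil, errors.New("GetABI returned nil")
	}
	address, tx, _, err := bind.DeployContract(auth, *parsed, common.FromHex(StorageMetaData.Bin), backend)
	return address, tx, err
}

Keeping the deprecated StorageABI alias (and the analogous Bin/FuncSigs ones in the full template) lets existing callers keep compiling while new code migrates to the MetaData struct.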
- func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...) } @@ -296,33 +316,40 @@ var ( {{range .Calls}} // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) { - {{if .Structured}}ret := new(struct{ - {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}} - {{end}} - }){{else}}var ( - {{range $i, $_ := .Normalized.Outputs}}ret{{$i}} = new({{bindtype .Type $structs}}) - {{end}} - ){{end}} - out := {{if .Structured}}ret{{else}}{{if eq (len .Normalized.Outputs) 1}}ret0{{else}}&[]interface{}{ - {{range $i, $_ := .Normalized.Outputs}}ret{{$i}}, - {{end}} - }{{end}}{{end}} - err := _{{$contract.Type}}.contract.Call(opts, out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) - return {{if .Structured}}*ret,{{else}}{{range $i, $_ := .Normalized.Outputs}}*ret{{$i}},{{end}}{{end}} err + var out []interface{} + err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) + {{if .Structured}} + outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} }) + if err != nil { + return *outstruct, err + } + {{range $i, $t := .Normalized.Outputs}} + outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} + + return *outstruct, err + {{else}} + if err != nil { + return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err + } + {{range $i, $t := .Normalized.Outputs}} + out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} + + return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err + {{end}} } // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}}) } // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. 
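To make the new calling convention concrete, here is a rough sketch of what the updated template emits for a single-output view method on the hypothetical Storage binding from the previous sketch; it is an excerpt of generated code (StorageCaller and its embedded contract field come from the rest of the binding), with abi.ConvertType doing the interface-to-concrete conversion instead of the old pre-allocated ret0 pointers.

// Retrieve is a free data retrieval call binding a hypothetical retrieve() method.
func (_Storage *StorageCaller) Retrieve(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _Storage.contract.Call(opts, &out, "retrieve")
	if err != nil {
		return *new(*big.Int), err
	}
	// Convert the raw interface{} output into the declared return type.
	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
	return out0, err
}

The same pattern generalizes to multi-output methods, where each out[i] is converted into either a positional return value or a field of the named-output struct.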
// - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}}) } @@ -331,26 +358,72 @@ var ( {{range .Transacts}} // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) { return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) } // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}}) } // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatmethod .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}}) } {{end}} + {{if .Fallback}} + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _{{$contract.Type}}.contract.RawTransact(opts, calldata) + } + + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) + } + + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) + } + {{end}} + + {{if .Receive}} + // Receive is a paid mutator transaction binding the contract receive function. 
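The Fallback wrapper above (and the Receive one that follows) gives callers a typed way to hit functions that have no ABI signature. A hedged usage sketch, assuming the hypothetical Storage binding was generated from a contract with both a fallback and a payable receive function:

// Send raw calldata through the fallback function.
tx, err := storage.Fallback(auth, []byte{0xde, 0xad, 0xbe, 0xef})
if err != nil {
	log.Fatal(err)
}
fmt.Println("fallback tx:", tx.Hash())

// Trigger the receive function with a plain ether transfer; calldata is disallowed here.
auth.Value = big.NewInt(1000000000000000) // 0.001 ether, illustrative amount
if tx, err = storage.Receive(auth); err != nil {
	log.Fatal(err)
}
fmt.Println("receive tx:", tx.Hash())

Both paths end up in BoundContract.RawTransact, bypassing method-name lookup entirely.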
+ // + // Solidity: {{.Receive.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function + } + + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) + } + + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) { + return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) + } + {{end}} + {{range .Events}} // {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract. type {{$contract.Type}}{{.Normalized.Name}}Iterator struct { @@ -424,7 +497,7 @@ var ( // Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatevent .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) { {{range .Normalized.Inputs}} {{if .Indexed}}var {{.Name}}Rule []interface{} @@ -441,7 +514,7 @@ var ( // Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatevent .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) { {{range .Normalized.Inputs}} {{if .Indexed}}var {{.Name}}Rule []interface{} @@ -483,12 +556,13 @@ var ( // Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{formatevent .Original $structs}} + // Solidity: {{.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) { event := new({{$contract.Type}}{{.Normalized.Name}}) if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil { return nil, err } + event.Raw = log return event, nil } @@ -496,8 +570,8 @@ var ( {{end}} ` -// tmplSourceJava is the Java source template use to generate the contract binding -// based on. +// tmplSourceJava is the Java source template that the generated Java contract binding +// is based on. const tmplSourceJava = ` // This file is an automatically generated Java binding. Do not modify as any // change will likely be lost upon the next re-generation! @@ -577,7 +651,7 @@ import java.util.*; // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. 
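The event templates above now also populate event.Raw in the Parse helper, so all three access paths carry the originating log. A usage sketch, assuming a hypothetical ValueChanged(uint256) event on the Storage binding:

// Backfill historical logs with an iterator.
iter, err := storage.FilterValueChanged(&bind.FilterOpts{Start: 0})
if err != nil {
	log.Fatal(err)
}
for iter.Next() {
	fmt.Println("change at block", iter.Event.Raw.BlockNumber)
}

// Subscribe to future logs.
sink := make(chan *StorageValueChanged)
sub, err := storage.WatchValueChanged(&bind.WatchOpts{}, sink)
if err != nil {
	log.Fatal(err)
}
defer sub.Unsubscribe()

// Decode a single raw log; Raw is filled in by the parser as of this change.
ev, err := storage.ParseValueChanged(rawLog)

Here rawLog stands in for a types.Log obtained elsewhere, e.g. from a transaction receipt.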
// // Solidity: {{.Original.String}} - public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { + public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}}); {{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); {{end}} @@ -611,6 +685,24 @@ import java.util.*; return this.Contract.transact(opts, "{{.Original.Name}}" , args); } {{end}} + + {{if .Fallback}} + // Fallback is a paid mutator transaction binding the contract fallback function. + // + // Solidity: {{.Fallback.Original.String}} + public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception { + return this.Contract.rawTransact(opts, calldata); + } + {{end}} + + {{if .Receive}} + // Receive is a paid mutator transaction binding the contract receive function. + // + // Solidity: {{.Receive.Original.String}} + public Transaction Receive(TransactOpts opts) throws Exception { + return this.Contract.rawTransact(opts, null); + } + {{end}} } {{end}} ` diff --git a/accounts/abi/bind/topics.go b/accounts/abi/bind/topics.go deleted file mode 100644 index c908c925821a..000000000000 --- a/accounts/abi/bind/topics.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bind - -import ( - "encoding/binary" - "errors" - "fmt" - "math/big" - "reflect" - - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -// makeTopics converts a filter query argument list into a filter topic set. 
-func makeTopics(query ...[]interface{}) ([][]common.Hash, error) { - topics := make([][]common.Hash, len(query)) - for i, filter := range query { - for _, rule := range filter { - var topic common.Hash - - // Try to generate the topic based on simple types - switch rule := rule.(type) { - case common.Hash: - copy(topic[:], rule[:]) - case common.Address: - copy(topic[common.HashLength-common.AddressLength:], rule[:]) - case *big.Int: - blob := rule.Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case bool: - if rule { - topic[common.HashLength-1] = 1 - } - case int8: - blob := big.NewInt(int64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case int16: - blob := big.NewInt(int64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case int32: - blob := big.NewInt(int64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case int64: - blob := big.NewInt(rule).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint8: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint16: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint32: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint64: - blob := new(big.Int).SetUint64(rule).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case string: - hash := crypto.Keccak256Hash([]byte(rule)) - copy(topic[:], hash[:]) - case []byte: - hash := crypto.Keccak256Hash(rule) - copy(topic[:], hash[:]) - - default: - // todo(rjl493456442) according solidity documentation, indexed event - // parameters that are not value types i.e. arrays and structs are not - // stored directly but instead a keccak256-hash of an encoding is stored. - // - // We only convert stringS and bytes to hash, still need to deal with - // array(both fixed-size and dynamic-size) and struct. - - // Attempt to generate the topic from funky types - val := reflect.ValueOf(rule) - switch { - // static byte array - case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: - reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) - default: - return nil, fmt.Errorf("unsupported indexed type: %T", rule) - } - } - topics[i] = append(topics[i], topic) - } - } - return topics, nil -} - -// Big batch of reflect types for topic reconstruction. -var ( - reflectHash = reflect.TypeOf(common.Hash{}) - reflectAddress = reflect.TypeOf(common.Address{}) - reflectBigInt = reflect.TypeOf(new(big.Int)) -) - -// parseTopics converts the indexed topic fields into actual log field values. -// -// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256 -// hashes as the topic value! 
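The makeTopics helper removed here encodes each indexed filter value into a 32-byte topic. A minimal sketch of the two most common padding rules it applies, plus the reverse mapping that parseTopics performs for addresses; this is illustrative only, not a drop-in replacement for the deleted file:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// addressTopic left-pads a 20-byte address into the low bytes of a 32-byte topic.
func addressTopic(addr common.Address) common.Hash {
	var topic common.Hash
	copy(topic[common.HashLength-common.AddressLength:], addr[:])
	return topic
}

// bigIntTopic right-aligns the big-endian bytes of a non-negative integer.
func bigIntTopic(n *big.Int) common.Hash {
	var topic common.Hash
	blob := n.Bytes()
	copy(topic[common.HashLength-len(blob):], blob)
	return topic
}

// addressFromTopic is the reverse mapping used when reconstructing indexed addresses.
func addressFromTopic(topic common.Hash) common.Address {
	var addr common.Address
	copy(addr[:], topic[common.HashLength-common.AddressLength:])
	return addr
}

func main() {
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	fmt.Println(addressTopic(addr).Hex())
	fmt.Println(bigIntTopic(big.NewInt(42)).Hex())
	fmt.Println(addressFromTopic(addressTopic(addr)).Hex())
}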
-func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error { - // Sanity check that the fields and topics match up - if len(fields) != len(topics) { - return errors.New("topic/field count mismatch") - } - // Iterate over all the fields and reconstruct them from topics - for _, arg := range fields { - if !arg.Indexed { - return errors.New("non-indexed field in topic reconstruction") - } - field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name)) - - // Try to parse the topic back into the fields based on primitive types - switch field.Kind() { - case reflect.Bool: - if topics[0][common.HashLength-1] == 1 { - field.Set(reflect.ValueOf(true)) - } - case reflect.Int8: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(int8(num.Int64()))) - - case reflect.Int16: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(int16(num.Int64()))) - - case reflect.Int32: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(int32(num.Int64()))) - - case reflect.Int64: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(num.Int64())) - - case reflect.Uint8: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(uint8(num.Uint64()))) - - case reflect.Uint16: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(uint16(num.Uint64()))) - - case reflect.Uint32: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(uint32(num.Uint64()))) - - case reflect.Uint64: - num := new(big.Int).SetBytes(topics[0][:]) - field.Set(reflect.ValueOf(num.Uint64())) - - default: - // Ran out of plain primitive types, try custom types - - switch field.Type() { - case reflectHash: // Also covers all dynamic types - field.Set(reflect.ValueOf(topics[0])) - - case reflectAddress: - var addr common.Address - copy(addr[:], topics[0][common.HashLength-common.AddressLength:]) - field.Set(reflect.ValueOf(addr)) - - case reflectBigInt: - num := new(big.Int).SetBytes(topics[0][:]) - if arg.Type.T == abi.IntTy { - if num.Cmp(abi.MaxInt256) > 0 { - num.Add(abi.MaxUint256, big.NewInt(0).Neg(num)) - num.Add(num, big.NewInt(1)) - num.Neg(num) - } - } - field.Set(reflect.ValueOf(num)) - - default: - // Ran out of custom types, try the crazies - switch { - // static byte array - case arg.Type.T == abi.FixedBytesTy: - reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size])) - default: - return fmt.Errorf("unsupported indexed type: %v", arg.Type) - } - } - } - topics = topics[1:] - } - return nil -} - -// parseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs -func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics []common.Hash) error { - // Sanity check that the fields and topics match up - if len(fields) != len(topics) { - return errors.New("topic/field count mismatch") - } - // Iterate over all the fields and reconstruct them from topics - for _, arg := range fields { - if !arg.Indexed { - return errors.New("non-indexed field in topic reconstruction") - } - - switch arg.Type.T { - case abi.BoolTy: - out[arg.Name] = topics[0][common.HashLength-1] == 1 - case abi.IntTy, abi.UintTy: - out[arg.Name] = abi.ReadInteger(arg.Type.T, arg.Type.Kind, topics[0].Bytes()) - case abi.AddressTy: - var addr common.Address - copy(addr[:], topics[0][common.HashLength-common.AddressLength:]) - out[arg.Name] = addr - case abi.HashTy: - out[arg.Name] = topics[0] - case abi.FixedBytesTy: - array, err := abi.ReadFixedBytes(arg.Type, 
topics[0].Bytes()) - if err != nil { - return err - } - out[arg.Name] = array - case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy: - // Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash - // whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash - out[arg.Name] = topics[0] - case abi.FunctionTy: - if garbage := binary.BigEndian.Uint64(topics[0][0:8]); garbage != 0 { - return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[0].Bytes()) - } - var tmp [24]byte - copy(tmp[:], topics[0][8:32]) - out[arg.Name] = tmp - default: // Not handling tuples - return fmt.Errorf("unsupported indexed type: %v", arg.Type) - } - - topics = topics[1:] - } - - return nil -} diff --git a/accounts/abi/bind/topics_test.go b/accounts/abi/bind/topics_test.go deleted file mode 100644 index c62f5bab323e..000000000000 --- a/accounts/abi/bind/topics_test.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bind - -import ( - "math/big" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" -) - -func TestMakeTopics(t *testing.T) { - type args struct { - query [][]interface{} - } - tests := []struct { - name string - args args - want [][]common.Hash - wantErr bool - }{ - { - "support fixed byte types, right padded to 32 bytes", - args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}}, - [][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}}, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := makeTopics(tt.args.query...) 
- if (err != nil) != tt.wantErr { - t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("makeTopics() = %v, want %v", got, tt.want) - } - }) - } -} - -type args struct { - createObj func() interface{} - resultObj func() interface{} - resultMap func() map[string]interface{} - fields abi.Arguments - topics []common.Hash -} - -type bytesStruct struct { - StaticBytes [5]byte -} -type int8Struct struct { - Int8Value int8 -} -type int256Struct struct { - Int256Value *big.Int -} - -type topicTest struct { - name string - args args - wantErr bool -} - -func setupTopicsTests() []topicTest { - bytesType, _ := abi.NewType("bytes5", "", nil) - int8Type, _ := abi.NewType("int8", "", nil) - int256Type, _ := abi.NewType("int256", "", nil) - - tests := []topicTest{ - { - name: "support fixed byte types, right padded to 32 bytes", - args: args{ - createObj: func() interface{} { return &bytesStruct{} }, - resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} }, - resultMap: func() map[string]interface{} { - return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}} - }, - fields: abi.Arguments{abi.Argument{ - Name: "staticBytes", - Type: bytesType, - Indexed: true, - }}, - topics: []common.Hash{ - {1, 2, 3, 4, 5}, - }, - }, - wantErr: false, - }, - { - name: "int8 with negative value", - args: args{ - createObj: func() interface{} { return &int8Struct{} }, - resultObj: func() interface{} { return &int8Struct{Int8Value: -1} }, - resultMap: func() map[string]interface{} { - return map[string]interface{}{"int8Value": int8(-1)} - }, - fields: abi.Arguments{abi.Argument{ - Name: "int8Value", - Type: int8Type, - Indexed: true, - }}, - topics: []common.Hash{ - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - }, - wantErr: false, - }, - { - name: "int256 with negative value", - args: args{ - createObj: func() interface{} { return &int256Struct{} }, - resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} }, - resultMap: func() map[string]interface{} { - return map[string]interface{}{"int256Value": big.NewInt(-1)} - }, - fields: abi.Arguments{abi.Argument{ - Name: "int256Value", - Type: int256Type, - Indexed: true, - }}, - topics: []common.Hash{ - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - }, - wantErr: false, - }, - } - - return tests -} - -func TestParseTopics(t *testing.T) { - tests := setupTopicsTests() - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - createObj := tt.args.createObj() - if err := parseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { - t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr) - } - resultObj := tt.args.resultObj() - if !reflect.DeepEqual(createObj, resultObj) { - t.Errorf("parseTopics() = %v, want %v", createObj, resultObj) - } - }) - } -} - -func TestParseTopicsIntoMap(t *testing.T) { - tests := setupTopicsTests() - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - outMap := make(map[string]interface{}) - if err := parseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { - t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr) - } - resultMap := 
tt.args.resultMap() - if !reflect.DeepEqual(outMap, resultMap) { - t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap) - } - }) - } -} diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go index d129993ca12f..b931fbb04d64 100644 --- a/accounts/abi/bind/util.go +++ b/accounts/abi/bind/util.go @@ -18,9 +18,10 @@ package bind import ( "context" - "fmt" + "errors" "time" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty logger := log.New("hash", tx.Hash()) for { receipt, err := b.TransactionReceipt(ctx, tx.Hash()) - if receipt != nil { + if err == nil { return receipt, nil } - if err != nil { - logger.Trace("Receipt retrieval failed", "err", err) - } else { + + if errors.Is(err, ethereum.NotFound) { logger.Trace("Transaction not yet mined") + } else { + logger.Trace("Receipt retrieval failed", "err", err) } + // Wait for the next round. select { case <-ctx.Done(): @@ -56,14 +59,14 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty // contract address when it is mined. It stops waiting when ctx is canceled. func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (common.Address, error) { if tx.To() != nil { - return common.Address{}, fmt.Errorf("tx is not contract creation") + return common.Address{}, errors.New("tx is not contract creation") } receipt, err := WaitMined(ctx, b, tx) if err != nil { return common.Address{}, err } if receipt.ContractAddress == (common.Address{}) { - return common.Address{}, fmt.Errorf("zero address") + return common.Address{}, errors.New("zero address") } // Check that code has indeed been deployed at the address. // This matters on pre-Homestead chains: OOG in the constructor diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index e0141f46e06f..75fbc91cebfe 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -18,6 +18,7 @@ package bind_test import ( "context" + "errors" "math/big" "testing" "time" @@ -55,14 +56,17 @@ func TestWaitDeployed(t *testing.T) { for name, test := range waitDeployedTests { backend := backends.NewSimulatedBackend( core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)}, + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, }, 10000000, ) defer backend.Close() - // Create the transaction. - tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code)) + // Create the transaction + head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) // Wait for it to get mined in the background. 
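The util_test.go changes above move the simulated backend to a London-style genesis, so every transaction has to price itself off the current base fee before WaitMined/WaitDeployed can ever see a receipt. A hedged end-to-end sketch of the same flow from client code (backend, testKey and code are assumed to be set up as in the test):

ctx := context.Background()

// Gas price must clear the base fee on the post-London simulated backend.
head, _ := backend.HeaderByNumber(ctx, nil)
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))

deployTx := types.NewContractCreation(0, big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
deployTx, _ = types.SignTx(deployTx, types.HomesteadSigner{}, testKey)

if err := backend.SendTransaction(ctx, deployTx); err != nil {
	log.Fatal(err)
}
backend.Commit()

// WaitDeployed blocks until the creation is mined and returns the new contract address.
addr, err := bind.WaitDeployed(ctx, backend, deployTx)
if err != nil {
	log.Fatal(err)
}
fmt.Println("deployed at", addr.Hex())

With the reworked WaitMined, a missing receipt is now detected via errors.Is(err, ethereum.NotFound) instead of inspecting a nil receipt, so backends that return other errors are still logged and retried rather than silently treated as "not yet mined".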
@@ -84,7 +88,7 @@ func TestWaitDeployed(t *testing.T) { select { case <-mined: if err != test.wantErr { - t.Errorf("test %q: error mismatch: got %q, want %q", name, err, test.wantErr) + t.Errorf("test %q: error mismatch: want %q, got %q", name, test.wantErr, err) } if address != test.wantAddress { t.Errorf("test %q: unexpected contract address %s", name, address.Hex()) @@ -94,3 +98,43 @@ func TestWaitDeployed(t *testing.T) { } } } + +func TestWaitDeployedCornerCases(t *testing.T) { + backend := backends.NewSimulatedBackend( + core.GenesisAlloc{ + crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, + }, + 10000000, + ) + defer backend.Close() + + head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough + gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) + + // Create a transaction to an account. + code := "6060604052600a8060106000396000f360606040526008565b00" + tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + backend.SendTransaction(ctx, tx) + backend.Commit() + notContentCreation := errors.New("tx is not contract creation") + if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() { + t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err) + } + + // Create a transaction that is not mined. + tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + + go func() { + contextCanceled := errors.New("context canceled") + if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() { + t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err) + } + }() + + backend.SendTransaction(ctx, tx) + cancel() +} diff --git a/accounts/abi/error.go b/accounts/abi/error.go index 9d8674ad088b..3d1a4877ddcd 100644 --- a/accounts/abi/error.go +++ b/accounts/abi/error.go @@ -1,4 +1,4 @@ -// Copyright 2016 The go-ethereum Authors +// Copyright 2021 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -17,68 +17,75 @@ package abi import ( + "bytes" "errors" "fmt" - "reflect" -) + "strings" -var ( - errBadBool = errors.New("abi: improperly encoded boolean value") + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ) -// formatSliceString formats the reflection kind with the given slice size -// and returns a formatted string representation. -func formatSliceString(kind reflect.Kind, sliceSize int) string { - if sliceSize == -1 { - return fmt.Sprintf("[]%v", kind) - } - return fmt.Sprintf("[%d]%v", sliceSize, kind) +type Error struct { + Name string + Inputs Arguments + str string + // Sig contains the string signature according to the ABI spec. + // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the event's signature used by the + // abi definition to identify event names and types. + ID common.Hash } -// sliceTypeCheck checks that the given slice can by assigned to the reflection -// type in t. 
-func sliceTypeCheck(t Type, val reflect.Value) error { - if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { - return typeErr(formatSliceString(t.Kind, t.Size), val.Type()) +func NewError(name string, inputs Arguments) Error { + // sanitize inputs to remove inputs without names + // and precompute string and sig representation. + names := make([]string, len(inputs)) + types := make([]string, len(inputs)) + for i, input := range inputs { + if input.Name == "" { + inputs[i] = Argument{ + Name: fmt.Sprintf("arg%d", i), + Indexed: input.Indexed, + Type: input.Type, + } + } else { + inputs[i] = input + } + // string representation + names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name) + if input.Indexed { + names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name) + } + // sig representation + types[i] = input.Type.String() } - if t.T == ArrayTy && val.Len() != t.Size { - return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len())) - } + str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", ")) + sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ",")) + id := common.BytesToHash(crypto.Keccak256([]byte(sig))) - if t.Elem.T == SliceTy { - if val.Len() > 0 { - return sliceTypeCheck(*t.Elem, val.Index(0)) - } - } else if t.Elem.T == ArrayTy { - return sliceTypeCheck(*t.Elem, val.Index(0)) + return Error{ + Name: name, + Inputs: inputs, + str: str, + Sig: sig, + ID: id, } +} - if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind { - return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type()) - } - return nil +func (e *Error) String() string { + return e.str } -// typeCheck checks that the given reflection value can be assigned to the reflection -// type in t. -func typeCheck(t Type, value reflect.Value) error { - if t.T == SliceTy || t.T == ArrayTy { - return sliceTypeCheck(t, value) +func (e *Error) Unpack(data []byte) (interface{}, error) { + if len(data) < 4 { + return "", errors.New("invalid data for unpacking") } - - // Check base type validity. Element types will be checked later on. - if t.Kind != value.Kind() { - return typeErr(t.Kind, value.Kind()) - } else if t.T == FixedBytesTy && t.Size != value.Len() { - return typeErr(t.Type, value.Type()) - } else { - return nil + if !bytes.Equal(data[:4], e.ID[:4]) { + return "", errors.New("invalid data for unpacking") } - -} - -// typeErr returns a formatted type casting error. -func typeErr(expected, got interface{}) error { - return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected) + return e.Inputs.Unpack(data[4:]) } diff --git a/accounts/abi/error_handling.go b/accounts/abi/error_handling.go new file mode 100644 index 000000000000..f0f71b6c9164 --- /dev/null +++ b/accounts/abi/error_handling.go @@ -0,0 +1,82 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + errBadBool = errors.New("abi: improperly encoded boolean value") +) + +// formatSliceString formats the reflection kind with the given slice size +// and returns a formatted string representation. +func formatSliceString(kind reflect.Kind, sliceSize int) string { + if sliceSize == -1 { + return fmt.Sprintf("[]%v", kind) + } + return fmt.Sprintf("[%d]%v", sliceSize, kind) +} + +// sliceTypeCheck checks that the given slice can by assigned to the reflection +// type in t. +func sliceTypeCheck(t Type, val reflect.Value) error { + if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { + return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type()) + } + + if t.T == ArrayTy && val.Len() != t.Size { + return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len())) + } + + if t.Elem.T == SliceTy || t.Elem.T == ArrayTy { + if val.Len() > 0 { + return sliceTypeCheck(*t.Elem, val.Index(0)) + } + } + + if val.Type().Elem().Kind() != t.Elem.GetType().Kind() { + return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type()) + } + return nil +} + +// typeCheck checks that the given reflection value can be assigned to the reflection +// type in t. +func typeCheck(t Type, value reflect.Value) error { + if t.T == SliceTy || t.T == ArrayTy { + return sliceTypeCheck(t, value) + } + + // Check base type validity. Element types will be checked later on. + if t.GetType().Kind() != value.Kind() { + return typeErr(t.GetType().Kind(), value.Kind()) + } else if t.T == FixedBytesTy && t.Size != value.Len() { + return typeErr(t.GetType(), value.Type()) + } else { + return nil + } + +} + +// typeErr returns a formatted type casting error. +func typeErr(expected, got interface{}) error { + return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected) +} diff --git a/accounts/abi/event.go b/accounts/abi/event.go index f1474813afe4..b238a36d7cea 100644 --- a/accounts/abi/event.go +++ b/accounts/abi/event.go @@ -32,7 +32,7 @@ type Event struct { // the raw name and a suffix will be added in the case of a event overload. // // e.g. - // There are two events have same name: + // These are two events that have the same name: // * foo(int,int) // * foo(uint,uint) // The event name of the first one wll be resolved as foo while the second one @@ -42,36 +42,59 @@ type Event struct { RawName string Anonymous bool Inputs Arguments + str string + // Sig contains the string signature according to the ABI spec. + // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the event's signature used by the + // abi definition to identify event names and types. + ID common.Hash } -func (e Event) String() string { - inputs := make([]string, len(e.Inputs)) - for i, input := range e.Inputs { - inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name) +// NewEvent creates a new Event. +// It sanitizes the input arguments to remove unnamed arguments. +// It also precomputes the id, signature and string representation +// of the event. 
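The rewritten accounts/abi/error.go above adds an Error type for Solidity custom errors, with the signature, selector and string form precomputed in NewError, and an Unpack method that checks the 4-byte selector prefix before decoding. A small usage sketch (the InsufficientBalance error and the revertData variable are hypothetical):

uintType, _ := abi.NewType("uint256", "", nil)
insufficient := abi.NewError("InsufficientBalance", abi.Arguments{
	{Name: "available", Type: uintType},
	{Name: "required", Type: uintType},
})

fmt.Println(insufficient.Sig)      // InsufficientBalance(uint256,uint256)
fmt.Println(insufficient.String()) // error InsufficientBalance(uint256 available, uint256 required)

// revertData is the raw return data of a reverted call; Unpack rejects it unless
// the first four bytes match the error's selector.
if values, err := insufficient.Unpack(revertData); err == nil {
	fmt.Println(values)
}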
+func NewEvent(name, rawName string, anonymous bool, inputs Arguments) Event { + // sanitize inputs to remove inputs without names + // and precompute string and sig representation. + names := make([]string, len(inputs)) + types := make([]string, len(inputs)) + for i, input := range inputs { + if input.Name == "" { + inputs[i] = Argument{ + Name: fmt.Sprintf("arg%d", i), + Indexed: input.Indexed, + Type: input.Type, + } + } else { + inputs[i] = input + } + // string representation + names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name) if input.Indexed { - inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name) + names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name) } + // sig representation + types[i] = input.Type.String() } - return fmt.Sprintf("event %v(%v)", e.RawName, strings.Join(inputs, ", ")) -} -// Sig returns the event string signature according to the ABI spec. -// -// Example -// -// event foo(uint32 a, int b) = "foo(uint32,int256)" -// -// Please note that "int" is substitute for its canonical representation "int256" -func (e Event) Sig() string { - types := make([]string, len(e.Inputs)) - for i, input := range e.Inputs { - types[i] = input.Type.String() + str := fmt.Sprintf("event %v(%v)", rawName, strings.Join(names, ", ")) + sig := fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ",")) + id := common.BytesToHash(crypto.Keccak256([]byte(sig))) + + return Event{ + Name: name, + RawName: rawName, + Anonymous: anonymous, + Inputs: inputs, + str: str, + Sig: sig, + ID: id, } - return fmt.Sprintf("%v(%v)", e.RawName, strings.Join(types, ",")) } -// ID returns the canonical representation of the event's signature used by the -// abi definition to identify event names and types. -func (e Event) ID() common.Hash { - return common.BytesToHash(crypto.Keccak256([]byte(e.Sig()))) +func (e Event) String() string { + return e.str } diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go index 090b9217dbdd..3332f8a07216 100644 --- a/accounts/abi/event_test.go +++ b/accounts/abi/event_test.go @@ -104,8 +104,8 @@ func TestEventId(t *testing.T) { } for name, event := range abi.Events { - if event.ID() != test.expectations[name] { - t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID()) + if event.ID != test.expectations[name] { + t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID) } } } @@ -147,10 +147,6 @@ func TestEventString(t *testing.T) { // TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array. 
func TestEventMultiValueWithArrayUnpack(t *testing.T) { definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]` - type testStruct struct { - Value1 [2]uint8 - Value2 uint8 - } abi, err := JSON(strings.NewReader(definition)) require.NoError(t, err) var b bytes.Buffer @@ -158,10 +154,10 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) { for ; i <= 3; i++ { b.Write(packNum(reflect.ValueOf(i))) } - var rst testStruct - require.NoError(t, abi.Unpack(&rst, "test", b.Bytes())) - require.Equal(t, [2]uint8{1, 2}, rst.Value1) - require.Equal(t, uint8(3), rst.Value2) + unpacked, err := abi.Unpack("test", b.Bytes()) + require.NoError(t, err) + require.Equal(t, [2]uint8{1, 2}, unpacked[0]) + require.Equal(t, uint8(3), unpacked[1]) } func TestEventTupleUnpack(t *testing.T) { @@ -312,14 +308,14 @@ func TestEventTupleUnpack(t *testing.T) { &[]interface{}{common.Address{}, new(big.Int)}, &[]interface{}{}, jsonEventPledge, - "abi: insufficient number of elements in the list/array for unpack, want 3, got 2", + "abi: insufficient number of arguments for unpack, want 3, got 2", "Can not unpack Pledge event into too short slice", }, { pledgeData1, new(map[string]interface{}), &[]interface{}{}, jsonEventPledge, - "abi: cannot unmarshal tuple into map[string]interface {}", + "abi:[2] cannot unmarshal tuple in to map[string]interface {}", "Can not unpack Pledge event into map", }, { mixedCaseData1, @@ -351,14 +347,14 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass var e Event assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI") a := ABI{Events: map[string]Event{"e": e}} - return a.Unpack(dest, "e", data) + return a.UnpackIntoInterface(dest, "e", data) } // TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder. func TestEventUnpackIndexed(t *testing.T) { definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]` type testStruct struct { - Value1 uint8 + Value1 uint8 // indexed Value2 uint8 } abi, err := JSON(strings.NewReader(definition)) @@ -366,16 +362,16 @@ func TestEventUnpackIndexed(t *testing.T) { var b bytes.Buffer b.Write(packNum(reflect.ValueOf(uint8(8)))) var rst testStruct - require.NoError(t, abi.Unpack(&rst, "test", b.Bytes())) + require.NoError(t, abi.UnpackIntoInterface(&rst, "test", b.Bytes())) require.Equal(t, uint8(0), rst.Value1) require.Equal(t, uint8(8), rst.Value2) } -// TestEventIndexedWithArrayUnpack verifies that decoder will not overlow when static array is indexed input. +// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input. 
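The event_test.go updates above track the reworked unpacking API: ABI.Unpack now returns a positional []interface{}, while UnpackIntoInterface fills a caller-supplied struct and simply skips indexed inputs. A short sketch of both paths against a simplified event with two non-indexed uint8 fields, in the spirit of the tests (data stands in for the ABI-encoded payload):

definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
parsedABI, _ := abi.JSON(strings.NewReader(definition))

// Variant 1: positional results.
unpacked, err := parsedABI.Unpack("test", data)
if err == nil {
	fmt.Println(unpacked[0], unpacked[1]) // both uint8 values
}

// Variant 2: decode into a struct; indexed inputs would be left zero-valued.
var out struct {
	Value1 uint8
	Value2 uint8
}
err = parsedABI.UnpackIntoInterface(&out, "test", data)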
func TestEventIndexedWithArrayUnpack(t *testing.T) { definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]` type testStruct struct { - Value1 [2]uint8 + Value1 [2]uint8 // indexed Value2 string } abi, err := JSON(strings.NewReader(definition)) @@ -388,7 +384,7 @@ func TestEventIndexedWithArrayUnpack(t *testing.T) { b.Write(common.RightPadBytes([]byte(stringOut), 32)) var rst testStruct - require.NoError(t, abi.Unpack(&rst, "test", b.Bytes())) + require.NoError(t, abi.UnpackIntoInterface(&rst, "test", b.Bytes())) require.Equal(t, [2]uint8{0, 0}, rst.Value1) require.Equal(t, stringOut, rst.Value2) } diff --git a/accounts/abi/method.go b/accounts/abi/method.go index 7da2e18fc61a..f69e3ee9b562 100644 --- a/accounts/abi/method.go +++ b/accounts/abi/method.go @@ -23,11 +23,29 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +// FunctionType represents different types of functions a contract might have. +type FunctionType int + +const ( + // Constructor represents the constructor of the contract. + // The constructor function is called while deploying a contract. + Constructor FunctionType = iota + // Fallback represents the fallback function. + // This function is executed if no other function matches the given function + // signature and no receive function is specified. + Fallback + // Receive represents the receive function. + // This function is executed on plain Ether transfers. + Receive + // Function represents a normal function. + Function +) + // Method represents a callable given a `Name` and whether the method is a constant. // If the method is `Const` no transaction needs to be created for this // particular Method call. It can easily be simulated using a local VM. // For example a `Balance()` method only needs to retrieve something -// from the storage and therefore requires no Tx to be send to the +// from the storage and therefore requires no Tx to be sent to the // network. A method such as `Transact` does require a Tx and thus will // be flagged `false`. // Input specifies the required input parameters for this gives method. @@ -36,55 +54,114 @@ type Method struct { // the raw name and a suffix will be added in the case of a function overload. // // e.g. - // There are two functions have same name: + // These are two functions that have the same name: // * foo(int,int) // * foo(uint,uint) // The method name of the first one will be resolved as foo while the second one // will be resolved as foo0. - Name string - // RawName is the raw method name parsed from ABI. - RawName string - Const bool + Name string + RawName string // RawName is the raw method name parsed from ABI + + // Type indicates whether the method is a + // special fallback introduced in solidity v0.6.0 + Type FunctionType + + // StateMutability indicates the mutability state of method, + // the default value is nonpayable. It can be empty if the abi + // is generated by legacy compiler. + StateMutability string + + // Legacy indicators generated by compiler before v0.6.0 + Constant bool + Payable bool + Inputs Arguments Outputs Arguments + str string + // Sig returns the methods string signature according to the ABI spec. + // e.g. 
function foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the method's signature used by the + // abi definition to identify method names and types. + ID []byte } -// Sig returns the methods string signature according to the ABI spec. -// -// Example -// -// function foo(uint32 a, int b) = "foo(uint32,int256)" -// -// Please note that "int" is substitute for its canonical representation "int256" -func (method Method) Sig() string { - types := make([]string, len(method.Inputs)) - for i, input := range method.Inputs { +// NewMethod creates a new Method. +// A method should always be created using NewMethod. +// It also precomputes the sig representation and the string representation +// of the method. +func NewMethod(name string, rawName string, funType FunctionType, mutability string, isConst, isPayable bool, inputs Arguments, outputs Arguments) Method { + var ( + types = make([]string, len(inputs)) + inputNames = make([]string, len(inputs)) + outputNames = make([]string, len(outputs)) + ) + for i, input := range inputs { + inputNames[i] = fmt.Sprintf("%v %v", input.Type, input.Name) types[i] = input.Type.String() } - return fmt.Sprintf("%v(%v)", method.RawName, strings.Join(types, ",")) -} - -func (method Method) String() string { - inputs := make([]string, len(method.Inputs)) - for i, input := range method.Inputs { - inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name) - } - outputs := make([]string, len(method.Outputs)) - for i, output := range method.Outputs { - outputs[i] = output.Type.String() + for i, output := range outputs { + outputNames[i] = output.Type.String() if len(output.Name) > 0 { - outputs[i] += fmt.Sprintf(" %v", output.Name) + outputNames[i] += fmt.Sprintf(" %v", output.Name) } } - constant := "" - if method.Const { - constant = "constant " + // calculate the signature and method id. Note only function + // has meaningful signature and id. + var ( + sig string + id []byte + ) + if funType == Function { + sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ",")) + id = crypto.Keccak256([]byte(sig))[:4] + } + // Extract meaningful state mutability of solidity method. + // If it's default value, never print it. + state := mutability + if state == "nonpayable" { + state = "" + } + if state != "" { + state = state + " " + } + identity := fmt.Sprintf("function %v", rawName) + if funType == Fallback { + identity = "fallback" + } else if funType == Receive { + identity = "receive" + } else if funType == Constructor { + identity = "constructor" + } + str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", ")) + + return Method{ + Name: name, + RawName: rawName, + Type: funType, + StateMutability: mutability, + Constant: isConst, + Payable: isPayable, + Inputs: inputs, + Outputs: outputs, + str: str, + Sig: sig, + ID: id, } - return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.RawName, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", ")) } -// ID returns the canonical representation of the method's signature used by the -// abi definition to identify method names and types. -func (method Method) ID() []byte { - return crypto.Keccak256([]byte(method.Sig()))[:4] +func (method Method) String() string { + return method.str +} + +// IsConstant returns the indicator whether the method is read-only. 
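// Illustrative sketch (not part of the patch above): Sig and ID are now
// precomputed fields on Method (NewMethod derives the 4-byte selector as the
// first four bytes of Keccak256 over the canonical signature), the legacy Const
// flag gives way to StateMutability plus the IsConstant/IsPayable helpers, and
// fallback/receive definitions surface as abi.Fallback and abi.Receive. The
// import path and ABI JSON below are assumptions for illustration only.

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi" // import path assumed
)

func main() {
	def := `[
		{"type":"function","name":"balance","stateMutability":"view"},
		{"type":"function","name":"transfer","stateMutability":"nonpayable",
		 "inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}]},
		{"stateMutability":"payable","type":"receive"}
	]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	transfer := parsed.Methods["transfer"]
	fmt.Println(transfer.Sig)       // transfer(address,uint256)
	fmt.Printf("%x\n", transfer.ID) // 4-byte selector, Keccak256(Sig)[:4]
	fmt.Println(parsed.Methods["balance"].IsConstant()) // true ("view")
	fmt.Println(parsed.Receive.IsPayable())             // true
	fmt.Println(parsed.Receive.String())                // receive() payable returns()
}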
+func (method Method) IsConstant() bool { + return method.StateMutability == "view" || method.StateMutability == "pure" || method.Constant +} + +// IsPayable returns the indicator whether the method can process +// plain ether transfers. +func (method Method) IsPayable() bool { + return method.StateMutability == "payable" || method.Payable } diff --git a/accounts/abi/method_test.go b/accounts/abi/method_test.go index 3ffdb702b35b..395a5289654a 100644 --- a/accounts/abi/method_test.go +++ b/accounts/abi/method_test.go @@ -23,13 +23,15 @@ import ( const methoddata = ` [ - {"type": "function", "name": "balance", "constant": true }, - {"type": "function", "name": "send", "constant": false, "inputs": [{ "name": "amount", "type": "uint256" }]}, - {"type": "function", "name": "transfer", "constant": false, "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]}, + {"type": "function", "name": "balance", "stateMutability": "view"}, + {"type": "function", "name": "send", "inputs": [{ "name": "amount", "type": "uint256" }]}, + {"type": "function", "name": "transfer", "inputs": [{"name": "from", "type": "address"}, {"name": "to", "type": "address"}, {"name": "value", "type": "uint256"}], "outputs": [{"name": "success", "type": "bool"}]}, {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple"}],"name":"tuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[]"}],"name":"tupleSlice","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5]"}],"name":"tupleArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"} + {"constant":false,"inputs":[{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"a","type":"tuple[5][]"}],"name":"complexTuple","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, + {"stateMutability":"nonpayable","type":"fallback"}, + {"stateMutability":"payable","type":"receive"} ]` func TestMethodString(t *testing.T) { @@ -39,7 +41,7 @@ func TestMethodString(t *testing.T) { }{ { method: "balance", - expectation: "function balance() constant returns()", + expectation: "function balance() view returns()", }, { method: "send", @@ -65,6 +67,14 @@ func TestMethodString(t *testing.T) { method: "complexTuple", expectation: "function complexTuple((uint256,uint256)[5][] a) returns()", }, + { + method: "fallback", + expectation: "fallback() returns()", + }, + { + method: "receive", + expectation: "receive() payable returns()", + }, } abi, err := JSON(strings.NewReader(methoddata)) @@ -73,7 +83,14 @@ func TestMethodString(t *testing.T) { } for _, test := range table { - got := abi.Methods[test.method].String() + var got string + if test.method == "fallback" { + got = abi.Fallback.String() + } else if test.method == "receive" { + got = abi.Receive.String() + } else { + got = 
abi.Methods[test.method].String() + } if got != test.expectation { t.Errorf("expected string to be %s, got %s", test.expectation, got) } @@ -120,7 +137,7 @@ func TestMethodSig(t *testing.T) { } for _, test := range cases { - got := abi.Methods[test.method].Sig() + got := abi.Methods[test.method].Sig if got != test.expect { t.Errorf("expected string to be %s, got %s", test.expect, got) } diff --git a/accounts/abi/pack.go b/accounts/abi/pack.go index dd1c9a5df84f..0cd91cb4fad9 100644 --- a/accounts/abi/pack.go +++ b/accounts/abi/pack.go @@ -17,6 +17,8 @@ package abi import ( + "errors" + "fmt" "math/big" "reflect" @@ -25,7 +27,7 @@ import ( ) // packBytesSlice packs the given bytes as [L, V] as the canonical representation -// bytes slice +// bytes slice. func packBytesSlice(bytes []byte, l int) []byte { len := packNum(reflect.ValueOf(l)) return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...) @@ -33,49 +35,51 @@ func packBytesSlice(bytes []byte, l int) []byte { // packElement packs the given reflect value according to the abi specification in // t. -func packElement(t Type, reflectValue reflect.Value) []byte { +func packElement(t Type, reflectValue reflect.Value) ([]byte, error) { switch t.T { case IntTy, UintTy: - return packNum(reflectValue) + return packNum(reflectValue), nil case StringTy: - return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()) + return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()), nil case AddressTy: if reflectValue.Kind() == reflect.Array { reflectValue = mustArrayToByteSlice(reflectValue) } - return common.LeftPadBytes(reflectValue.Bytes(), 32) + return common.LeftPadBytes(reflectValue.Bytes(), 32), nil case BoolTy: if reflectValue.Bool() { - return math.PaddedBigBytes(common.Big1, 32) + return math.PaddedBigBytes(common.Big1, 32), nil } - return math.PaddedBigBytes(common.Big0, 32) + return math.PaddedBigBytes(common.Big0, 32), nil case BytesTy: if reflectValue.Kind() == reflect.Array { reflectValue = mustArrayToByteSlice(reflectValue) } - return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()) + if reflectValue.Type() != reflect.TypeOf([]byte{}) { + return []byte{}, errors.New("Bytes type is neither slice nor array") + } + return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil case FixedBytesTy, FunctionTy: if reflectValue.Kind() == reflect.Array { reflectValue = mustArrayToByteSlice(reflectValue) } - return common.RightPadBytes(reflectValue.Bytes(), 32) + return common.RightPadBytes(reflectValue.Bytes(), 32), nil default: - panic("abi: fatal error") + return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T) } } -// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation +// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation. 
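// Illustrative sketch (not part of the patch above): packElement now reports
// errors instead of panicking, and packNum delegates to math.U256Bytes, which
// reduces the value modulo 2^256 and pads it to a 32-byte ABI word. U256Bytes
// mutates its argument, which is why packNum packs a copy of *big.Int values
// via new(big.Int).Set(...). Import path assumed.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math" // import path assumed
)

func main() {
	// An unsigned value becomes a zero-padded 32-byte word.
	fmt.Printf("%x\n", math.U256Bytes(new(big.Int).SetUint64(2)))
	// 0000000000000000000000000000000000000000000000000000000000000002

	// A negative value is encoded in two's complement over 256 bits.
	fmt.Printf("%x\n", math.U256Bytes(big.NewInt(-1)))
	// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
}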
func packNum(value reflect.Value) []byte { switch kind := value.Kind(); kind { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return U256(new(big.Int).SetUint64(value.Uint())) + return math.U256Bytes(new(big.Int).SetUint64(value.Uint())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return U256(big.NewInt(value.Int())) + return math.U256Bytes(big.NewInt(value.Int())) case reflect.Ptr: - return U256(new(big.Int).Set(value.Interface().(*big.Int))) + return math.U256Bytes(new(big.Int).Set(value.Interface().(*big.Int))) default: panic("abi: fatal error") } - } diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go index cf649b480734..5c7cb1cc1a24 100644 --- a/accounts/abi/pack_test.go +++ b/accounts/abi/pack_test.go @@ -18,623 +18,51 @@ package abi import ( "bytes" + "encoding/hex" + "fmt" "math" "math/big" "reflect" + "strconv" "strings" "testing" "github.com/ethereum/go-ethereum/common" ) +// TestPack tests the general pack/unpack tests in packing_test.go func TestPack(t *testing.T) { - for i, test := range []struct { - typ string - components []ArgumentMarshaling - input interface{} - output []byte - }{ - { - "uint8", - nil, - uint8(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint8[]", - nil, - []uint8{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint16", - nil, - uint16(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint16[]", - nil, - []uint16{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint32", - nil, - uint32(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint32[]", - nil, - []uint32{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint64", - nil, - uint64(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint64[]", - nil, - []uint64{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint256", - nil, - big.NewInt(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "uint256[]", - nil, - []*big.Int{big.NewInt(1), big.NewInt(2)}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int8", - nil, - int8(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int8[]", - nil, - []int8{1, 2}, - 
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int16", - nil, - int16(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int16[]", - nil, - []int16{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int32", - nil, - int32(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int32[]", - nil, - []int32{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int64", - nil, - int64(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int64[]", - nil, - []int64{1, 2}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int256", - nil, - big.NewInt(2), - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "int256[]", - nil, - []*big.Int{big.NewInt(1), big.NewInt(2)}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"), - }, - { - "bytes1", - nil, - [1]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes2", - nil, - [2]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes3", - nil, - [3]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes4", - nil, - [4]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes5", - nil, - [5]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes6", - nil, - [6]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes7", - nil, - [7]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes8", - nil, - [8]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes9", - nil, - [9]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes10", - nil, - [10]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes11", - nil, - [11]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes12", - nil, - [12]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes13", - nil, - [13]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes14", - nil, - [14]byte{1}, - 
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes15", - nil, - [15]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes16", - nil, - [16]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes17", - nil, - [17]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes18", - nil, - [18]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes19", - nil, - [19]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes20", - nil, - [20]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes21", - nil, - [21]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes22", - nil, - [22]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes23", - nil, - [23]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes24", - nil, - [24]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes25", - nil, - [25]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes26", - nil, - [26]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes27", - nil, - [27]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes28", - nil, - [28]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes29", - nil, - [29]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes30", - nil, - [30]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes31", - nil, - [31]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "bytes32", - nil, - [32]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "uint32[2][3][4]", - nil, - [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}}, - 
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"), - }, - { - "address[]", - nil, - []common.Address{{1}, {2}}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"), - }, - { - "bytes32[]", - nil, - []common.Hash{{1}, {2}}, - common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"), - }, - { - "function", - nil, - [24]byte{1}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - "string", - nil, - "foobar", - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"), - }, - { - "string[]", - nil, - []string{"hello", "foobar"}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 - "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 - "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1 - "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 - "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0] - "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 - "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1] - }, - { - "string[2]", - nil, - []string{"hello", "foobar"}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0 - "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1 - "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 - "68656c6c6f000000000000000000000000000000000000000000000000000000" + // 
str[0] - "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 - "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1] - }, - { - "bytes32[][]", - nil, - [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 - "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 - "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 - "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 - "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] - "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] - "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2] - }, - - { - "bytes32[][2]", - nil, - [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 - "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 - "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 - "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] - "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] - "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2] - }, - - { - "bytes32[3][2]", - nil, - [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}, - common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] - "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] - "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2] - "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2] - }, - { - // static tuple - "tuple", - []ArgumentMarshaling{ - {Name: "a", Type: "int64"}, - {Name: "b", Type: "int256"}, - {Name: "c", Type: "int256"}, - {Name: "d", Type: "bool"}, - {Name: "e", Type: "bytes32[3][2]"}, - }, - struct { - A int64 - B *big.Int - C *big.Int - D bool - E [][]common.Hash - }{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a] - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c] - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d] - "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0] - 
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1] - "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2] - "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0] - "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1] - "0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2] - }, - { - // dynamic tuple - "tuple", - []ArgumentMarshaling{ - {Name: "a", Type: "string"}, - {Name: "b", Type: "int64"}, - {Name: "c", Type: "bytes"}, - {Name: "d", Type: "string[]"}, - {Name: "e", Type: "int256[]"}, - {Name: "f", Type: "address[]"}, - }, - struct { - FieldA string `abi:"a"` // Test whether abi tag works - FieldB int64 `abi:"b"` - C []byte - D []string - E []*big.Int - F []common.Address - }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}}, - common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] - "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset - "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset - "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset - "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset - "0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length - "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar" - "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length - "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1} - "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length - "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset - "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset - "0000000000000000000000000000000000000000000000000000000000000003" + // foo length - "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo - "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset - "6261720000000000000000000000000000000000000000000000000000000000" + // bar - "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length - "0000000000000000000000000000000000000000000000000000000000000001" + // 1 - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1 - "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length - "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1} - "0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2} - }, - { - // nested tuple - "tuple", - []ArgumentMarshaling{ - {Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}}, - {Name: "b", Type: "int256[]"}, - }, - struct { - A struct { - FieldA *big.Int `abi:"a"` - B []*big.Int - } - B []*big.Int - }{ - A: struct { - FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple - B []*big.Int - }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}}, - B: []*big.Int{big.NewInt(1), 
big.NewInt(0)}}, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset - "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset - "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value - "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset - "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length - "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value - "0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value - "0000000000000000000000000000000000000000000000000000000000000002" + // b length - "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value - "0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value - }, - { - // tuple slice - "tuple[]", - []ArgumentMarshaling{ - {Name: "a", Type: "int256"}, - {Name: "b", Type: "int256[]"}, - }, - []struct { - A *big.Int - B []*big.Int - }{ - {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}}, - {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}}, - }, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length - "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset - "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A - "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value - "0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A - "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value - }, - { - // static tuple array - "tuple[2]", - []ArgumentMarshaling{ - {Name: "a", Type: "int256"}, - {Name: "b", Type: "int256"}, - }, - [2]struct { - A *big.Int - B *big.Int - }{ - {big.NewInt(-1), big.NewInt(1)}, - {big.NewInt(1), big.NewInt(-1)}, - }, - common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b - }, - { - // dynamic tuple array - "tuple[2]", - []ArgumentMarshaling{ - {Name: "a", Type: "int256[]"}, - }, - [2]struct { - A []*big.Int - }{ - {[]*big.Int{big.NewInt(-1), big.NewInt(1)}}, - {[]*big.Int{big.NewInt(1), big.NewInt(-1)}}, - }, - common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset - "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset - "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset - 
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0] - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1] - "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset - "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length - "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0] - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1] - }, - } { - typ, err := NewType(test.typ, "", test.components) - if err != nil { - t.Fatalf("%v failed. Unexpected parse error: %v", i, err) - } - output, err := typ.pack(reflect.ValueOf(test.input)) - if err != nil { - t.Fatalf("%v failed. Unexpected pack error: %v", i, err) - } - - if !bytes.Equal(output, test.output) { - t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output) - } + for i, test := range packUnpackTests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + encb, err := hex.DecodeString(test.packed) + if err != nil { + t.Fatalf("invalid hex %s: %v", test.packed, err) + } + inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "inputs": %s}]`, test.def) + inAbi, err := JSON(strings.NewReader(inDef)) + if err != nil { + t.Fatalf("invalid ABI definition %s, %v", inDef, err) + } + var packed []byte + packed, err = inAbi.Pack("method", test.unpacked) + + if err != nil { + t.Fatalf("test %d (%v) failed: %v", i, test.def, err) + } + if !reflect.DeepEqual(packed[4:], encb) { + t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, encb, packed[4:]) + } + }) } } func TestMethodPack(t *testing.T) { - abi, err := JSON(strings.NewReader(jsondata2)) + abi, err := JSON(strings.NewReader(jsondata)) if err != nil { t.Fatal(err) } - sig := abi.Methods["slice"].ID() + sig := abi.Methods["slice"].ID sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) @@ -648,7 +76,7 @@ func TestMethodPack(t *testing.T) { } var addrA, addrB = common.Address{1}, common.Address{2} - sig = abi.Methods["sliceAddress"].ID() + sig = abi.Methods["sliceAddress"].ID sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) sig = append(sig, common.LeftPadBytes(addrA[:], 32)...) @@ -663,7 +91,7 @@ func TestMethodPack(t *testing.T) { } var addrC, addrD = common.Address{3}, common.Address{4} - sig = abi.Methods["sliceMultiAddress"].ID() + sig = abi.Methods["sliceMultiAddress"].ID sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) @@ -681,7 +109,7 @@ func TestMethodPack(t *testing.T) { t.Errorf("expected %x got %x", sig, packed) } - sig = abi.Methods["slice256"].ID() + sig = abi.Methods["slice256"].ID sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) @@ -695,7 +123,7 @@ func TestMethodPack(t *testing.T) { } a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}} - sig = abi.Methods["nestedArray"].ID() + sig = abi.Methods["nestedArray"].ID sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...) 
@@ -712,7 +140,7 @@ func TestMethodPack(t *testing.T) { t.Errorf("expected %x got %x", sig, packed) } - sig = abi.Methods["nestedArray2"].ID() + sig = abi.Methods["nestedArray2"].ID sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...) @@ -728,7 +156,7 @@ func TestMethodPack(t *testing.T) { t.Errorf("expected %x got %x", sig, packed) } - sig = abi.Methods["nestedSlice"].ID() + sig = abi.Methods["nestedSlice"].ID sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...) sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...) diff --git a/accounts/abi/packing_test.go b/accounts/abi/packing_test.go new file mode 100644 index 000000000000..eae3b0df2056 --- /dev/null +++ b/accounts/abi/packing_test.go @@ -0,0 +1,990 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +type packUnpackTest struct { + def string + unpacked interface{} + packed string +} + +var packUnpackTests = []packUnpackTest{ + // Booleans + { + def: `[{ "type": "bool" }]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: true, + }, + { + def: `[{ "type": "bool" }]`, + packed: "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: false, + }, + // Integers + { + def: `[{ "type": "uint8" }]`, + unpacked: uint8(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{ "type": "uint8[]" }]`, + unpacked: []uint8{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{ "type": "uint16" }]`, + unpacked: uint16(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{ "type": "uint16[]" }]`, + unpacked: []uint16{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint17"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: big.NewInt(1), + }, + { + def: `[{"type": "uint32"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: uint32(1), + }, + { + def: `[{"type": "uint32[]"}]`, + unpacked: 
[]uint32{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint64"}]`, + unpacked: uint64(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint64[]"}]`, + unpacked: []uint64{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint256"}]`, + unpacked: big.NewInt(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "uint256[]"}]`, + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int8"}]`, + unpacked: int8(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int8[]"}]`, + unpacked: []int8{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int16"}]`, + unpacked: int16(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int16[]"}]`, + unpacked: []int16{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int17"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: big.NewInt(1), + }, + { + def: `[{"type": "int32"}]`, + unpacked: int32(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int32"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: int32(1), + }, + { + def: `[{"type": "int32[]"}]`, + unpacked: []int32{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int64"}]`, + unpacked: int64(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int64[]"}]`, + unpacked: []int64{1, 2}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + 
"0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int256"}]`, + unpacked: big.NewInt(2), + packed: "0000000000000000000000000000000000000000000000000000000000000002", + }, + { + def: `[{"type": "int256"}]`, + packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + unpacked: big.NewInt(-1), + }, + { + def: `[{"type": "int256[]"}]`, + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + }, + // Address + { + def: `[{"type": "address"}]`, + packed: "0000000000000000000000000100000000000000000000000000000000000000", + unpacked: common.Address{1}, + }, + { + def: `[{"type": "address[]"}]`, + unpacked: []common.Address{{1}, {2}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000100000000000000000000000000000000000000" + + "0000000000000000000000000200000000000000000000000000000000000000", + }, + // Bytes + { + def: `[{"type": "bytes1"}]`, + unpacked: [1]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes2"}]`, + unpacked: [2]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes3"}]`, + unpacked: [3]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes4"}]`, + unpacked: [4]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes5"}]`, + unpacked: [5]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes6"}]`, + unpacked: [6]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes7"}]`, + unpacked: [7]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes8"}]`, + unpacked: [8]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes9"}]`, + unpacked: [9]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes10"}]`, + unpacked: [10]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes11"}]`, + unpacked: [11]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes12"}]`, + unpacked: [12]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes13"}]`, + unpacked: [13]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes14"}]`, + unpacked: [14]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes15"}]`, + unpacked: [15]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes16"}]`, + unpacked: [16]byte{1}, + packed: 
"0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes17"}]`, + unpacked: [17]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes18"}]`, + unpacked: [18]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes19"}]`, + unpacked: [19]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes20"}]`, + unpacked: [20]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes21"}]`, + unpacked: [21]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes22"}]`, + unpacked: [22]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes23"}]`, + unpacked: [23]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes24"}]`, + unpacked: [24]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes25"}]`, + unpacked: [25]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes26"}]`, + unpacked: [26]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes27"}]`, + unpacked: [27]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes28"}]`, + unpacked: [28]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes29"}]`, + unpacked: [29]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes30"}]`, + unpacked: [30]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes31"}]`, + unpacked: [31]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes32"}]`, + unpacked: [32]byte{1}, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "bytes32"}]`, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + { + def: `[{"type": "bytes"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), + }, + { + def: `[{"type": "bytes32"}]`, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + // Functions + { + def: `[{"type": "function"}]`, + packed: "0100000000000000000000000000000000000000000000000000000000000000", + unpacked: [24]byte{1}, + }, + // Slice and Array + { + def: `[{"type": "uint8[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + 
"0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint8{1, 2}, + }, + { + def: `[{"type": "uint8[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: []uint8{}, + }, + { + def: `[{"type": "uint256[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: []*big.Int{}, + }, + { + def: `[{"type": "uint8[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint8{1, 2}, + }, + { + def: `[{"type": "int8[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int8{1, 2}, + }, + { + def: `[{"type": "int16[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []int16{1, 2}, + }, + { + def: `[{"type": "int16[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int16{1, 2}, + }, + { + def: `[{"type": "int32[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []int32{1, 2}, + }, + { + def: `[{"type": "int32[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int32{1, 2}, + }, + { + def: `[{"type": "int64[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []int64{1, 2}, + }, + { + def: `[{"type": "int64[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]int64{1, 2}, + }, + { + def: `[{"type": "int256[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"type": "int256[3]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003", + unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), 
big.NewInt(3)}, + }, + // multi dimensional, if these pass, all types that don't require length prefix should pass + { + def: `[{"type": "uint8[][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: [][]uint8{}, + }, + { + def: `[{"type": "uint8[][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "00000000000000000000000000000000000000000000000000000000000000a0" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [][]uint8{{1, 2}, {1, 2}}, + }, + { + def: `[{"type": "uint8[][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "00000000000000000000000000000000000000000000000000000000000000a0" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003", + unpacked: [][]uint8{{1, 2}, {1, 2, 3}}, + }, + { + def: `[{"type": "uint8[2][2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2][2]uint8{{1, 2}, {1, 2}}, + }, + { + def: `[{"type": "uint8[][2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000060" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000", + unpacked: [2][]uint8{{}, {}}, + }, + { + def: `[{"type": "uint8[][2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000080" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: [2][]uint8{{1}, {1}}, + }, + { + def: `[{"type": "uint8[2][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + 
"0000000000000000000000000000000000000000000000000000000000000000", + unpacked: [][2]uint8{}, + }, + { + def: `[{"type": "uint8[2][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [][2]uint8{{1, 2}}, + }, + { + def: `[{"type": "uint8[2][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [][2]uint8{{1, 2}, {1, 2}}, + }, + { + def: `[{"type": "uint16[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint16{1, 2}, + }, + { + def: `[{"type": "uint16[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint16{1, 2}, + }, + { + def: `[{"type": "uint32[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint32{1, 2}, + }, + { + def: `[{"type": "uint32[2][3][4]"}]`, + unpacked: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "0000000000000000000000000000000000000000000000000000000000000004" + + "0000000000000000000000000000000000000000000000000000000000000005" + + "0000000000000000000000000000000000000000000000000000000000000006" + + "0000000000000000000000000000000000000000000000000000000000000007" + + "0000000000000000000000000000000000000000000000000000000000000008" + + "0000000000000000000000000000000000000000000000000000000000000009" + + "000000000000000000000000000000000000000000000000000000000000000a" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "000000000000000000000000000000000000000000000000000000000000000c" + + "000000000000000000000000000000000000000000000000000000000000000d" + + "000000000000000000000000000000000000000000000000000000000000000e" + + "000000000000000000000000000000000000000000000000000000000000000f" + + "0000000000000000000000000000000000000000000000000000000000000010" + + "0000000000000000000000000000000000000000000000000000000000000011" + + "0000000000000000000000000000000000000000000000000000000000000012" + + "0000000000000000000000000000000000000000000000000000000000000013" + + "0000000000000000000000000000000000000000000000000000000000000014" + + 
"0000000000000000000000000000000000000000000000000000000000000015" + + "0000000000000000000000000000000000000000000000000000000000000016" + + "0000000000000000000000000000000000000000000000000000000000000017" + + "0000000000000000000000000000000000000000000000000000000000000018", + }, + + { + def: `[{"type": "bytes32[]"}]`, + unpacked: [][32]byte{{1}, {2}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0100000000000000000000000000000000000000000000000000000000000000" + + "0200000000000000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "uint32[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint32{1, 2}, + }, + { + def: `[{"type": "uint64[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []uint64{1, 2}, + }, + { + def: `[{"type": "uint64[2]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: [2]uint64{1, 2}, + }, + { + def: `[{"type": "uint256[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"type": "uint256[3]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000003", + unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, + }, + { + def: `[{"type": "string[4]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000080" + + "00000000000000000000000000000000000000000000000000000000000000c0" + + "0000000000000000000000000000000000000000000000000000000000000100" + + "0000000000000000000000000000000000000000000000000000000000000140" + + "0000000000000000000000000000000000000000000000000000000000000005" + + "48656c6c6f000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000005" + + "576f726c64000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "476f2d657468657265756d000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000008" + + "457468657265756d000000000000000000000000000000000000000000000000", + unpacked: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"}, + }, + { + def: `[{"type": "string[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000080" + + 
"0000000000000000000000000000000000000000000000000000000000000008" + + "457468657265756d000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "676f2d657468657265756d000000000000000000000000000000000000000000", + unpacked: []string{"Ethereum", "go-ethereum"}, + }, + { + def: `[{"type": "bytes[]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "0000000000000000000000000000000000000000000000000000000000000080" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "f0f0f00000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000003" + + "f0f0f00000000000000000000000000000000000000000000000000000000000", + unpacked: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}}, + }, + { + def: `[{"type": "uint256[2][][]"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000040" + + "00000000000000000000000000000000000000000000000000000000000000e0" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000000c8" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000003e8" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000000c8" + + "0000000000000000000000000000000000000000000000000000000000000001" + + "00000000000000000000000000000000000000000000000000000000000003e8", + unpacked: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}}, + }, + // struct outputs + { + def: `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + "0000000000000000000000000000000000000000000000000000000000000002", + unpacked: struct { + Int1 *big.Int + Int2 *big.Int + }{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"components": [{"name":"int_one","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"components": [{"name":"int__one","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"components": [{"name":"int_one_","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001", + unpacked: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"components": [{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}], "type":"tuple"}]`, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + + 
"0000000000000000000000000000000000000000000000000000000000000002", + unpacked: struct { + IntOne *big.Int + Intone *big.Int + }{big.NewInt(1), big.NewInt(2)}, + }, + { + def: `[{"type": "string"}]`, + unpacked: "foobar", + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000006" + + "666f6f6261720000000000000000000000000000000000000000000000000000", + }, + { + def: `[{"type": "string[]"}]`, + unpacked: []string{"hello", "foobar"}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 + "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 + "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1 + "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 + "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0] + "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 + "666f6f6261720000000000000000000000000000000000000000000000000000", // str[1] + }, + { + def: `[{"type": "string[2]"}]`, + unpacked: [2]string{"hello", "foobar"}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0 + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1 + "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5 + "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0] + "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6 + "666f6f6261720000000000000000000000000000000000000000000000000000", // str[1] + }, + { + def: `[{"type": "bytes32[][]"}]`, + unpacked: [][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2 + "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 + "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 + "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] + "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] + "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 + "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] + }, + { + def: `[{"type": "bytes32[][2]"}]`, + unpacked: [2][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0 + "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1 + "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2 + "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] + 
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] + "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3 + "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] + }, + { + def: `[{"type": "bytes32[3][2]"}]`, + unpacked: [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}, + packed: "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0] + "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1] + "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2] + "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2] + }, + { + // static tuple + def: `[{"components": [{"name":"a","type":"int64"}, + {"name":"b","type":"int256"}, + {"name":"c","type":"int256"}, + {"name":"d","type":"bool"}, + {"name":"e","type":"bytes32[3][2]"}], "type":"tuple"}]`, + unpacked: struct { + A int64 + B *big.Int + C *big.Int + D bool + E [2][3][32]byte + }{1, big.NewInt(1), big.NewInt(-1), true, [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000001" + // struct[a] + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c] + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d] + "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0] + "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1] + "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2] + "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0] + "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1] + "0500000000000000000000000000000000000000000000000000000000000000", // struct[e] array[1][2] + }, + { + def: `[{"components": [{"name":"a","type":"string"}, + {"name":"b","type":"int64"}, + {"name":"c","type":"bytes"}, + {"name":"d","type":"string[]"}, + {"name":"e","type":"int256[]"}, + {"name":"f","type":"address[]"}], "type":"tuple"}]`, + unpacked: struct { + A string + B int64 + C []byte + D []string + E []*big.Int + F []common.Address + }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + // struct a + "00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b] + "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset + "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset + "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset + "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset + 
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length + "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar" + "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length + "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1} + "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length + "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset + "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset + "0000000000000000000000000000000000000000000000000000000000000003" + // foo length + "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo + "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset + "6261720000000000000000000000000000000000000000000000000000000000" + // bar + "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length + "0000000000000000000000000000000000000000000000000000000000000001" + // 1 + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1 + "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length + "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1} + "0000000000000000000000000200000000000000000000000000000000000000", // common.Address{2} + }, + { + def: `[{"components": [{ "type": "tuple","components": [{"name": "a","type": "uint256"}, + {"name": "b","type": "uint256[]"}], + "name": "a","type": "tuple"}, + {"name": "b","type": "uint256[]"}], "type": "tuple"}]`, + unpacked: struct { + A struct { + A *big.Int + B []*big.Int + } + B []*big.Int + }{ + A: struct { + A *big.Int + B []*big.Int + }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(2)}}, + B: []*big.Int{big.NewInt(1), big.NewInt(2)}}, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + // struct a + "0000000000000000000000000000000000000000000000000000000000000040" + // a offset + "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset + "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value + "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset + "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length + "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value + "0000000000000000000000000000000000000000000000000000000000000002" + // a.b[1] value + "0000000000000000000000000000000000000000000000000000000000000002" + // b length + "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value + "0000000000000000000000000000000000000000000000000000000000000002", // b[1] value + }, + + { + def: `[{"components": [{"name": "a","type": "int256"}, + {"name": "b","type": "int256[]"}], + "name": "a","type": "tuple[]"}]`, + unpacked: []struct { + A *big.Int + B []*big.Int + }{ + {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(3)}}, + {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}}, + }, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple length + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset + "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset + 
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value + "0000000000000000000000000000000000000000000000000000000000000003" + // tuple[0].B[1] value + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].B[1] value + }, + { + def: `[{"components": [{"name": "a","type": "int256"}, + {"name": "b","type": "int256"}], + "name": "a","type": "tuple[2]"}]`, + unpacked: [2]struct { + A *big.Int + B *big.Int + }{ + {big.NewInt(-1), big.NewInt(1)}, + {big.NewInt(1), big.NewInt(-1)}, + }, + packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].b + }, + { + def: `[{"components": [{"name": "a","type": "int256[]"}], + "name": "a","type": "tuple[2]"}]`, + unpacked: [2]struct { + A []*big.Int + }{ + {[]*big.Int{big.NewInt(-1), big.NewInt(1)}}, + {[]*big.Int{big.NewInt(1), big.NewInt(-1)}}, + }, + packed: "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset + "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset + "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0] + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1] + "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset + "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length + "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0] + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].A[1] + }, +} diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 73ca8fa2bd7d..35e5556d2c5a 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -17,57 +17,74 @@ package abi import ( + "errors" "fmt" + "math/big" "reflect" "strings" ) -// indirect recursively dereferences the value until it either gets the value -// or finds a big.Int -func indirect(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbigT { - return indirect(v.Elem()) +// ConvertType converts an interface of a runtime type into a interface of the +// given type +// e.g. 
turn +// var fields []reflect.StructField +// fields = append(fields, reflect.StructField{ +// Name: "X", +// Type: reflect.TypeOf(new(big.Int)), +// Tag: reflect.StructTag("json:\"" + "x" + "\""), +// } +// into +// type TupleT struct { X *big.Int } +func ConvertType(in interface{}, proto interface{}) interface{} { + protoType := reflect.TypeOf(proto) + if reflect.TypeOf(in).ConvertibleTo(protoType) { + return reflect.ValueOf(in).Convert(protoType).Interface() } - return v + // Use set as a last ditch effort + if err := set(reflect.ValueOf(proto), reflect.ValueOf(in)); err != nil { + panic(err) + } + return proto } -// indirectInterfaceOrPtr recursively dereferences the value until value is not interface. -func indirectInterfaceOrPtr(v reflect.Value) reflect.Value { - if (v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr) && v.Elem().IsValid() { +// indirect recursively dereferences the value until it either gets the value +// or finds a big.Int +func indirect(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeOf(big.Int{}) { return indirect(v.Elem()) } return v } -// reflectIntKind returns the reflect using the given size and +// reflectIntType returns the reflect using the given size and // unsignedness. -func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type) { +func reflectIntType(unsigned bool, size int) reflect.Type { + if unsigned { + switch size { + case 8: + return reflect.TypeOf(uint8(0)) + case 16: + return reflect.TypeOf(uint16(0)) + case 32: + return reflect.TypeOf(uint32(0)) + case 64: + return reflect.TypeOf(uint64(0)) + } + } switch size { case 8: - if unsigned { - return reflect.Uint8, uint8T - } - return reflect.Int8, int8T + return reflect.TypeOf(int8(0)) case 16: - if unsigned { - return reflect.Uint16, uint16T - } - return reflect.Int16, int16T + return reflect.TypeOf(int16(0)) case 32: - if unsigned { - return reflect.Uint32, uint32T - } - return reflect.Int32, int32T + return reflect.TypeOf(int32(0)) case 64: - if unsigned { - return reflect.Uint64, uint64T - } - return reflect.Int64, int64T + return reflect.TypeOf(int64(0)) } - return reflect.Ptr, bigT + return reflect.TypeOf(&big.Int{}) } -// mustArrayToBytesSlice creates a new byte slice with the exact same size as value +// mustArrayToByteSlice creates a new byte slice with the exact same size as value // and copies the bytes in value to the new slice. 
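A usage sketch for the new ConvertType helper, assuming the snippet sits in package abi (so reflect and math/big are already imported); the Pair type and the values are hypothetical, while the call pattern mirrors the TestConvertType case added later in this patch.

// Sketch: converting a reflect.StructOf-built value into a hand-written struct.
type Pair struct {
	X *big.Int
	Y *big.Int
}

func convertTypeExample() Pair {
	fields := []reflect.StructField{
		{Name: "X", Type: reflect.TypeOf(new(big.Int)), Tag: `json:"x"`},
		{Name: "Y", Type: reflect.TypeOf(new(big.Int)), Tag: `json:"y"`},
	}
	v := reflect.New(reflect.StructOf(fields))
	v.Elem().Field(0).Set(reflect.ValueOf(big.NewInt(1)))
	v.Elem().Field(1).Set(reflect.ValueOf(big.NewInt(2)))
	// The on-the-fly struct is convertible to Pair (tags are ignored), so
	// ConvertType takes the reflect.Value.Convert path and returns a *Pair.
	return *ConvertType(v.Interface(), new(Pair)).(*Pair) // Pair{X: 1, Y: 2}
}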
func mustArrayToByteSlice(value reflect.Value) reflect.Value { slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len()) @@ -84,12 +101,16 @@ func set(dst, src reflect.Value) error { switch { case dstType.Kind() == reflect.Interface && dst.Elem().IsValid(): return set(dst.Elem(), src) - case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT: + case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}): return set(dst.Elem(), src) case srcType.AssignableTo(dstType) && dst.CanSet(): dst.Set(src) - case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice: + case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice && dst.CanSet(): return setSlice(dst, src) + case dstType.Kind() == reflect.Array: + return setArray(dst, src) + case dstType.Kind() == reflect.Struct: + return setStruct(dst, src) default: return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type()) } @@ -98,38 +119,52 @@ func set(dst, src reflect.Value) error { // setSlice attempts to assign src to dst when slices are not assignable by default // e.g. src: [][]byte -> dst: [][15]byte +// setSlice ignores if we cannot copy all of src' elements. func setSlice(dst, src reflect.Value) error { slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len()) for i := 0; i < src.Len(); i++ { - v := src.Index(i) - reflect.Copy(slice.Index(i), v) + if err := set(slice.Index(i), src.Index(i)); err != nil { + return err + } } - - dst.Set(slice) - return nil + if dst.CanSet() { + dst.Set(slice) + return nil + } + return errors.New("Cannot set slice, destination not settable") } -// requireAssignable assures that `dest` is a pointer and it's not an interface. -func requireAssignable(dst, src reflect.Value) error { - if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface { - return fmt.Errorf("abi: cannot unmarshal %v into %v", src.Type(), dst.Type()) +func setArray(dst, src reflect.Value) error { + if src.Kind() == reflect.Ptr { + return set(dst, indirect(src)) } - return nil + array := reflect.New(dst.Type()).Elem() + min := src.Len() + if src.Len() > dst.Len() { + min = dst.Len() + } + for i := 0; i < min; i++ { + if err := set(array.Index(i), src.Index(i)); err != nil { + return err + } + } + if dst.CanSet() { + dst.Set(array) + return nil + } + return errors.New("Cannot set array, destination not settable") } -// requireUnpackKind verifies preconditions for unpacking `args` into `kind` -func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind, - args Arguments) error { - - switch k { - case reflect.Struct: - case reflect.Slice, reflect.Array: - if minLen := args.LengthNonIndexed(); v.Len() < minLen { - return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d", - minLen, v.Len()) +func setStruct(dst, src reflect.Value) error { + for i := 0; i < src.NumField(); i++ { + srcField := src.Field(i) + dstField := dst.Field(i) + if !dstField.IsValid() || !srcField.IsValid() { + return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField) + } + if err := set(dstField, srcField); err != nil { + return err } - default: - return fmt.Errorf("abi: cannot unmarshal tuple into %v", t) } return nil } @@ -156,9 +191,8 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri continue } // skip fields that have no abi:"" tag. 
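A minimal sketch of the conversion path described by the setSlice comment above ([][]byte into a slice of fixed-size byte arrays); it assumes the code sits in package abi so the unexported set helper is reachable, and the sample values are hypothetical.

func setSliceExample() ([][15]byte, error) {
	src := [][]byte{{0xde, 0xad}, {0xbe, 0xef}}
	var dst [][15]byte
	// set dispatches to setSlice, which uses setArray for each
	// []byte -> [15]byte element; bytes beyond the source length stay zero.
	if err := set(reflect.ValueOf(&dst).Elem(), reflect.ValueOf(src)); err != nil {
		return nil, err
	}
	return dst, nil
}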
- var ok bool - var tagName string - if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok { + tagName, ok := typ.Field(i).Tag.Lookup("abi") + if !ok { continue } // check if tag is empty. diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go index c425e6e54bff..cf13a79da84e 100644 --- a/accounts/abi/reflect_test.go +++ b/accounts/abi/reflect_test.go @@ -17,6 +17,7 @@ package abi import ( + "math/big" "reflect" "testing" ) @@ -189,3 +190,72 @@ func TestReflectNameToStruct(t *testing.T) { }) } } + +func TestConvertType(t *testing.T) { + // Test Basic Struct + type T struct { + X *big.Int + Y *big.Int + } + // Create on-the-fly structure + var fields []reflect.StructField + fields = append(fields, reflect.StructField{ + Name: "X", + Type: reflect.TypeOf(new(big.Int)), + Tag: "json:\"" + "x" + "\"", + }) + fields = append(fields, reflect.StructField{ + Name: "Y", + Type: reflect.TypeOf(new(big.Int)), + Tag: "json:\"" + "y" + "\"", + }) + val := reflect.New(reflect.StructOf(fields)) + val.Elem().Field(0).Set(reflect.ValueOf(big.NewInt(1))) + val.Elem().Field(1).Set(reflect.ValueOf(big.NewInt(2))) + // ConvertType + out := *ConvertType(val.Interface(), new(T)).(*T) + if out.X.Cmp(big.NewInt(1)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out.X, big.NewInt(1)) + } + if out.Y.Cmp(big.NewInt(2)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out.Y, big.NewInt(2)) + } + // Slice Type + val2 := reflect.MakeSlice(reflect.SliceOf(reflect.StructOf(fields)), 2, 2) + val2.Index(0).Field(0).Set(reflect.ValueOf(big.NewInt(1))) + val2.Index(0).Field(1).Set(reflect.ValueOf(big.NewInt(2))) + val2.Index(1).Field(0).Set(reflect.ValueOf(big.NewInt(3))) + val2.Index(1).Field(1).Set(reflect.ValueOf(big.NewInt(4))) + out2 := *ConvertType(val2.Interface(), new([]T)).(*[]T) + if out2[0].X.Cmp(big.NewInt(1)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[0].X, big.NewInt(1)) + } + if out2[0].Y.Cmp(big.NewInt(2)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[1].Y, big.NewInt(2)) + } + if out2[1].X.Cmp(big.NewInt(3)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[0].X, big.NewInt(1)) + } + if out2[1].Y.Cmp(big.NewInt(4)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out2[1].Y, big.NewInt(2)) + } + // Array Type + val3 := reflect.New(reflect.ArrayOf(2, reflect.StructOf(fields))) + val3.Elem().Index(0).Field(0).Set(reflect.ValueOf(big.NewInt(1))) + val3.Elem().Index(0).Field(1).Set(reflect.ValueOf(big.NewInt(2))) + val3.Elem().Index(1).Field(0).Set(reflect.ValueOf(big.NewInt(3))) + val3.Elem().Index(1).Field(1).Set(reflect.ValueOf(big.NewInt(4))) + out3 := *ConvertType(val3.Interface(), new([2]T)).(*[2]T) + if out3[0].X.Cmp(big.NewInt(1)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[0].X, big.NewInt(1)) + } + if out3[0].Y.Cmp(big.NewInt(2)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[1].Y, big.NewInt(2)) + } + if out3[1].X.Cmp(big.NewInt(3)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[0].X, big.NewInt(1)) + } + if out3[1].Y.Cmp(big.NewInt(4)) != 0 { + t.Errorf("ConvertType failed, got %v want %v", out3[1].Y, big.NewInt(2)) + } +} diff --git a/accounts/abi/topics.go b/accounts/abi/topics.go new file mode 100644 index 000000000000..360df7d5e846 --- /dev/null +++ b/accounts/abi/topics.go @@ -0,0 +1,173 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "encoding/binary" + "errors" + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// MakeTopics converts a filter query argument list into a filter topic set. +func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) { + topics := make([][]common.Hash, len(query)) + for i, filter := range query { + for _, rule := range filter { + var topic common.Hash + + // Try to generate the topic based on simple types + switch rule := rule.(type) { + case common.Hash: + copy(topic[:], rule[:]) + case common.Address: + copy(topic[common.HashLength-common.AddressLength:], rule[:]) + case *big.Int: + blob := rule.Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case bool: + if rule { + topic[common.HashLength-1] = 1 + } + case int8: + copy(topic[:], genIntType(int64(rule), 1)) + case int16: + copy(topic[:], genIntType(int64(rule), 2)) + case int32: + copy(topic[:], genIntType(int64(rule), 4)) + case int64: + copy(topic[:], genIntType(rule, 8)) + case uint8: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint16: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint32: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint64: + blob := new(big.Int).SetUint64(rule).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case string: + hash := crypto.Keccak256Hash([]byte(rule)) + copy(topic[:], hash[:]) + case []byte: + hash := crypto.Keccak256Hash(rule) + copy(topic[:], hash[:]) + + default: + // todo(rjl493456442) according solidity documentation, indexed event + // parameters that are not value types i.e. arrays and structs are not + // stored directly but instead a keccak256-hash of an encoding is stored. + // + // We only convert stringS and bytes to hash, still need to deal with + // array(both fixed-size and dynamic-size) and struct. + + // Attempt to generate the topic from funky types + val := reflect.ValueOf(rule) + switch { + // static byte array + case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: + reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) + default: + return nil, fmt.Errorf("unsupported indexed type: %T", rule) + } + } + topics[i] = append(topics[i], topic) + } + } + return topics, nil +} + +func genIntType(rule int64, size uint) []byte { + var topic [common.HashLength]byte + if rule < 0 { + // if a rule is negative, we need to put it into two's complement. + // extended to common.HashLength bytes. 
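// Worked example (illustrative): int8(-2) arrives here as rule = -2 with
// size = 1. The topic is first filled with 0xff (the sign extension), then the
// loop below writes the low byte 0xfe into topic[31], producing 0xff..fe, the
// 32-byte big-endian two's complement value that TestMakeTopics expects.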
+ topic = [common.HashLength]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} + } + for i := uint(0); i < size; i++ { + topic[common.HashLength-i-1] = byte(rule >> (i * 8)) + } + return topic[:] +} + +// ParseTopics converts the indexed topic fields into actual log field values. +func ParseTopics(out interface{}, fields Arguments, topics []common.Hash) error { + return parseTopicWithSetter(fields, topics, + func(arg Argument, reconstr interface{}) { + field := reflect.ValueOf(out).Elem().FieldByName(ToCamelCase(arg.Name)) + field.Set(reflect.ValueOf(reconstr)) + }) +} + +// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs. +func ParseTopicsIntoMap(out map[string]interface{}, fields Arguments, topics []common.Hash) error { + return parseTopicWithSetter(fields, topics, + func(arg Argument, reconstr interface{}) { + out[arg.Name] = reconstr + }) +} + +// parseTopicWithSetter converts the indexed topic field-value pairs and stores them using the +// provided set function. +// +// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256 +// hashes as the topic value! +func parseTopicWithSetter(fields Arguments, topics []common.Hash, setter func(Argument, interface{})) error { + // Sanity check that the fields and topics match up + if len(fields) != len(topics) { + return errors.New("topic/field count mismatch") + } + // Iterate over all the fields and reconstruct them from topics + for i, arg := range fields { + if !arg.Indexed { + return errors.New("non-indexed field in topic reconstruction") + } + var reconstr interface{} + switch arg.Type.T { + case TupleTy: + return errors.New("tuple type in topic reconstruction") + case StringTy, BytesTy, SliceTy, ArrayTy: + // Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash + // whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash + reconstr = topics[i] + case FunctionTy: + if garbage := binary.BigEndian.Uint64(topics[i][0:8]); garbage != 0 { + return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[i].Bytes()) + } + var tmp [24]byte + copy(tmp[:], topics[i][8:32]) + reconstr = tmp + default: + var err error + reconstr, err = toGoType(0, arg.Type, topics[i].Bytes()) + if err != nil { + return err + } + } + // Use the setter function to store the value + setter(arg, reconstr) + } + + return nil +} diff --git a/accounts/abi/topics_test.go b/accounts/abi/topics_test.go new file mode 100644 index 000000000000..4a539a71166c --- /dev/null +++ b/accounts/abi/topics_test.go @@ -0,0 +1,381 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
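A hedged round-trip sketch of the two entry points exercised by the tests below; the argument name "value" is illustrative and the snippet is assumed to live in package abi alongside these tests, reusing their imports.

func topicsRoundTrip() (map[string]interface{}, error) {
	// Encode an indexed int8(-2) into a filter topic (sign-extended to 32 bytes)...
	topics, err := MakeTopics([]interface{}{int8(-2)})
	if err != nil {
		return nil, err
	}
	// ...and decode it back through the matching argument metadata.
	int8Type, _ := NewType("int8", "", nil)
	args := Arguments{{Name: "value", Type: int8Type, Indexed: true}}
	out := make(map[string]interface{})
	err = ParseTopicsIntoMap(out, args, []common.Hash{topics[0][0]})
	return out, err // out["value"] == int8(-2)
}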
+ +package abi + +import ( + "math/big" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func TestMakeTopics(t *testing.T) { + type args struct { + query [][]interface{} + } + tests := []struct { + name string + args args + want [][]common.Hash + wantErr bool + }{ + { + "support fixed byte types, right padded to 32 bytes", + args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}}, + [][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}}, + false, + }, + { + "support common hash types in topics", + args{[][]interface{}{{common.Hash{1, 2, 3, 4, 5}}}}, + [][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}}, + false, + }, + { + "support address types in topics", + args{[][]interface{}{{common.Address{1, 2, 3, 4, 5}}}}, + [][]common.Hash{{common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5}}}, + false, + }, + { + "support *big.Int types in topics", + args{[][]interface{}{{big.NewInt(1).Lsh(big.NewInt(2), 254)}}}, + [][]common.Hash{{common.Hash{128}}}, + false, + }, + { + "support boolean types in topics", + args{[][]interface{}{ + {true}, + {false}, + }}, + [][]common.Hash{ + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {common.Hash{0}}, + }, + false, + }, + { + "support int/uint(8/16/32/64) types in topics", + args{[][]interface{}{ + {int8(-2)}, + {int16(-3)}, + {int32(-4)}, + {int64(-5)}, + {int8(1)}, + {int16(256)}, + {int32(65536)}, + {int64(4294967296)}, + {uint8(1)}, + {uint16(256)}, + {uint32(65536)}, + {uint64(4294967296)}, + }}, + [][]common.Hash{ + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254}}, + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 253}}, + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 252}}, + {common.Hash{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}}, + {common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}}, + }, + false, + }, + { + "support string types in topics", + args{[][]interface{}{{"hello world"}}}, + [][]common.Hash{{crypto.Keccak256Hash([]byte("hello world"))}}, + false, + }, + { + "support byte slice types in topics", + args{[][]interface{}{{[]byte{1, 2, 3}}}}, + [][]common.Hash{{crypto.Keccak256Hash([]byte{1, 2, 3})}}, + false, + }, 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := MakeTopics(tt.args.query...) + if (err != nil) != tt.wantErr { + t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("makeTopics() = %v, want %v", got, tt.want) + } + }) + } +} + +type args struct { + createObj func() interface{} + resultObj func() interface{} + resultMap func() map[string]interface{} + fields Arguments + topics []common.Hash +} + +type bytesStruct struct { + StaticBytes [5]byte +} +type int8Struct struct { + Int8Value int8 +} +type int256Struct struct { + Int256Value *big.Int +} + +type hashStruct struct { + HashValue common.Hash +} + +type funcStruct struct { + FuncValue [24]byte +} + +type topicTest struct { + name string + args args + wantErr bool +} + +func setupTopicsTests() []topicTest { + bytesType, _ := NewType("bytes5", "", nil) + int8Type, _ := NewType("int8", "", nil) + int256Type, _ := NewType("int256", "", nil) + tupleType, _ := NewType("tuple(int256,int8)", "", nil) + stringType, _ := NewType("string", "", nil) + funcType, _ := NewType("function", "", nil) + + tests := []topicTest{ + { + name: "support fixed byte types, right padded to 32 bytes", + args: args{ + createObj: func() interface{} { return &bytesStruct{} }, + resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}} + }, + fields: Arguments{Argument{ + Name: "staticBytes", + Type: bytesType, + Indexed: true, + }}, + topics: []common.Hash{ + {1, 2, 3, 4, 5}, + }, + }, + wantErr: false, + }, + { + name: "int8 with negative value", + args: args{ + createObj: func() interface{} { return &int8Struct{} }, + resultObj: func() interface{} { return &int8Struct{Int8Value: -1} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"int8Value": int8(-1)} + }, + fields: Arguments{Argument{ + Name: "int8Value", + Type: int8Type, + Indexed: true, + }}, + topics: []common.Hash{ + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: false, + }, + { + name: "int256 with negative value", + args: args{ + createObj: func() interface{} { return &int256Struct{} }, + resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"int256Value": big.NewInt(-1)} + }, + fields: Arguments{Argument{ + Name: "int256Value", + Type: int256Type, + Indexed: true, + }}, + topics: []common.Hash{ + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: false, + }, + { + name: "hash type", + args: args{ + createObj: func() interface{} { return &hashStruct{} }, + resultObj: func() interface{} { return &hashStruct{crypto.Keccak256Hash([]byte("stringtopic"))} }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"hashValue": crypto.Keccak256Hash([]byte("stringtopic"))} + }, + fields: Arguments{Argument{ + Name: "hashValue", + Type: stringType, + Indexed: true, + }}, + topics: []common.Hash{ + crypto.Keccak256Hash([]byte("stringtopic")), + }, + }, + wantErr: false, + }, + { + name: "function type", + args: args{ + createObj: 
func() interface{} { return &funcStruct{} }, + resultObj: func() interface{} { + return &funcStruct{[24]byte{255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + }, + resultMap: func() map[string]interface{} { + return map[string]interface{}{"funcValue": [24]byte{255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + }, + fields: Arguments{Argument{ + Name: "funcValue", + Type: funcType, + Indexed: true, + }}, + topics: []common.Hash{ + {0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: false, + }, + { + name: "error on topic/field count mismatch", + args: args{ + createObj: func() interface{} { return nil }, + resultObj: func() interface{} { return nil }, + resultMap: func() map[string]interface{} { return make(map[string]interface{}) }, + fields: Arguments{Argument{ + Name: "tupletype", + Type: tupleType, + Indexed: true, + }}, + topics: []common.Hash{}, + }, + wantErr: true, + }, + { + name: "error on unindexed arguments", + args: args{ + createObj: func() interface{} { return &int256Struct{} }, + resultObj: func() interface{} { return &int256Struct{} }, + resultMap: func() map[string]interface{} { return make(map[string]interface{}) }, + fields: Arguments{Argument{ + Name: "int256Value", + Type: int256Type, + Indexed: false, + }}, + topics: []common.Hash{ + {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: true, + }, + { + name: "error on tuple in topic reconstruction", + args: args{ + createObj: func() interface{} { return &tupleType }, + resultObj: func() interface{} { return &tupleType }, + resultMap: func() map[string]interface{} { return make(map[string]interface{}) }, + fields: Arguments{Argument{ + Name: "tupletype", + Type: tupleType, + Indexed: true, + }}, + topics: []common.Hash{{0}}, + }, + wantErr: true, + }, + { + name: "error on improper encoded function", + args: args{ + createObj: func() interface{} { return &funcStruct{} }, + resultObj: func() interface{} { return &funcStruct{} }, + resultMap: func() map[string]interface{} { + return make(map[string]interface{}) + }, + fields: Arguments{Argument{ + Name: "funcValue", + Type: funcType, + Indexed: true, + }}, + topics: []common.Hash{ + {0, 0, 0, 0, 0, 0, 0, 128, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + }, + wantErr: true, + }, + } + + return tests +} + +func TestParseTopics(t *testing.T) { + tests := setupTopicsTests() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + createObj := tt.args.createObj() + if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { + t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr) + } + resultObj := tt.args.resultObj() + if !reflect.DeepEqual(createObj, resultObj) { + t.Errorf("parseTopics() = %v, want %v", createObj, resultObj) + } + }) + } +} + +func TestParseTopicsIntoMap(t *testing.T) { + tests := setupTopicsTests() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outMap := make(map[string]interface{}) + if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr { + 
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr) + } + resultMap := tt.args.resultMap() + if !reflect.DeepEqual(outMap, resultMap) { + t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap) + } + }) + } +} diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 4792283ee8ef..ffa3acafe9c2 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -23,6 +23,8 @@ import ( "regexp" "strconv" "strings" + + "github.com/ethereum/go-ethereum/common" ) // Type enumerator @@ -42,20 +44,19 @@ const ( FunctionTy ) -// Type is the reflection of the supported argument type +// Type is the reflection of the supported argument type. type Type struct { Elem *Type - Kind reflect.Kind - Type reflect.Type Size int T byte // Our own type checking stringKind string // holds the unparsed string for deriving signatures // Tuple relative fields - TupleRawName string // Raw struct name defined in source code, may be empty. - TupleElems []*Type // Type information of all tuple fields - TupleRawNames []string // Raw field name of all tuple fields + TupleRawName string // Raw struct name defined in source code, may be empty. + TupleElems []*Type // Type information of all tuple fields + TupleRawNames []string // Raw field name of all tuple fields + TupleType reflect.Type // Underlying struct of the tuple } var ( @@ -94,20 +95,16 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty if len(intz) == 0 { // is a slice typ.T = SliceTy - typ.Kind = reflect.Slice typ.Elem = &embeddedType - typ.Type = reflect.SliceOf(embeddedType.Type) typ.stringKind = embeddedType.stringKind + sliced } else if len(intz) == 1 { - // is a array + // is an array typ.T = ArrayTy - typ.Kind = reflect.Array typ.Elem = &embeddedType typ.Size, err = strconv.Atoi(intz[0]) if err != nil { return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err) } - typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type) typ.stringKind = embeddedType.stringKind + sliced } else { return Type{}, fmt.Errorf("invalid formatting of array type") @@ -139,36 +136,24 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty // varType is the parsed abi type switch varType := parsedType[1]; varType { case "int": - typ.Kind, typ.Type = reflectIntKindAndType(false, varSize) typ.Size = varSize typ.T = IntTy case "uint": - typ.Kind, typ.Type = reflectIntKindAndType(true, varSize) typ.Size = varSize typ.T = UintTy case "bool": - typ.Kind = reflect.Bool typ.T = BoolTy - typ.Type = reflect.TypeOf(bool(false)) case "address": - typ.Kind = reflect.Array - typ.Type = addressT typ.Size = 20 typ.T = AddressTy case "string": - typ.Kind = reflect.String - typ.Type = reflect.TypeOf("") typ.T = StringTy case "bytes": if varSize == 0 { typ.T = BytesTy - typ.Kind = reflect.Slice - typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0))) } else { typ.T = FixedBytesTy - typ.Kind = reflect.Array typ.Size = varSize - typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0))) } case "tuple": var ( @@ -178,17 +163,20 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty expression string // canonical parameter expression ) expression += "(" + overloadedNames := make(map[string]string) for idx, c := range components { cType, err := NewType(c.Type, c.InternalType, c.Components) if err != nil { return Type{}, err } - if ToCamelCase(c.Name) == "" { - return Type{}, errors.New("abi: purely anonymous or underscored field is not supported") + fieldName, err := 
overloadedArgName(c.Name, overloadedNames) + if err != nil { + return Type{}, err } + overloadedNames[fieldName] = fieldName fields = append(fields, reflect.StructField{ - Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any exported field. - Type: cType.Type, + Name: fieldName, // reflect.StructOf will panic for any exported field. + Type: cType.GetType(), Tag: reflect.StructTag("json:\"" + c.Name + "\""), }) elems = append(elems, &cType) @@ -199,8 +187,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } } expression += ")" - typ.Kind = reflect.Struct - typ.Type = reflect.StructOf(fields) + + typ.TupleType = reflect.StructOf(fields) typ.TupleElems = elems typ.TupleRawNames = names typ.T = TupleTy @@ -217,10 +205,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } case "function": - typ.Kind = reflect.Array typ.T = FunctionTy typ.Size = 24 - typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0))) default: return Type{}, fmt.Errorf("unsupported arg type: %s", t) } @@ -228,7 +214,57 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty return } -// String implements Stringer +// GetType returns the reflection type of the ABI type. +func (t Type) GetType() reflect.Type { + switch t.T { + case IntTy: + return reflectIntType(false, t.Size) + case UintTy: + return reflectIntType(true, t.Size) + case BoolTy: + return reflect.TypeOf(false) + case StringTy: + return reflect.TypeOf("") + case SliceTy: + return reflect.SliceOf(t.Elem.GetType()) + case ArrayTy: + return reflect.ArrayOf(t.Size, t.Elem.GetType()) + case TupleTy: + return t.TupleType + case AddressTy: + return reflect.TypeOf(common.Address{}) + case FixedBytesTy: + return reflect.ArrayOf(t.Size, reflect.TypeOf(byte(0))) + case BytesTy: + return reflect.SliceOf(reflect.TypeOf(byte(0))) + case HashTy: + // hashtype currently not used + return reflect.ArrayOf(32, reflect.TypeOf(byte(0))) + case FixedPointTy: + // fixedpoint type currently not used + return reflect.ArrayOf(32, reflect.TypeOf(byte(0))) + case FunctionTy: + return reflect.ArrayOf(24, reflect.TypeOf(byte(0))) + default: + panic("Invalid type") + } +} + +func overloadedArgName(rawName string, names map[string]string) (string, error) { + fieldName := ToCamelCase(rawName) + if fieldName == "" { + return "", errors.New("abi: purely anonymous or underscored field is not supported") + } + // Handle overloaded fieldNames + _, ok := names[fieldName] + for idx := 0; ok; idx++ { + fieldName = fmt.Sprintf("%s%d", ToCamelCase(rawName), idx) + _, ok = names[fieldName] + } + return fieldName, nil +} + +// String implements Stringer. 
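A short sketch of the lazily derived reflection types that replace the removed Kind/Type fields; the ABI type string used here is only an example.

func getTypeExample() reflect.Type {
	// The Go type is now computed on demand from the ABI metadata.
	typ, _ := NewType("uint256[2][]", "", nil)
	return typ.GetType() // reflect.TypeOf([][2]*big.Int{})
}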
func (t Type) String() (out string) { return t.stringKind } @@ -310,7 +346,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) { return append(ret, tail...), nil default: - return packElement(t, v), nil + return packElement(t, v) } } @@ -350,7 +386,7 @@ func isDynamicType(t Type) bool { func getTypeSize(t Type) int { if t.T == ArrayTy && !isDynamicType(*t.Elem) { // Recursively calculate type size if it is a nested array - if t.Elem.T == ArrayTy { + if t.Elem.T == ArrayTy || t.Elem.T == TupleTy { return t.Size * getTypeSize(*t.Elem) } return t.Size * 32 diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go index a2c78dc2e020..8c3aedca6a4d 100644 --- a/accounts/abi/type_test.go +++ b/accounts/abi/type_test.go @@ -36,58 +36,58 @@ func TestTypeRegexp(t *testing.T) { components []ArgumentMarshaling kind Type }{ - {"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}}, - {"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}}, - {"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}}, - {"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}}, - {"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}}, - {"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}}, - {"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}}, - {"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}}, - {"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, 
stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}}, - {"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}}, - {"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}}, - {"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}}, - {"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}}, - {"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}}, - {"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}}, - {"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}}, - {"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}}, - {"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}}, - {"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}}, - {"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}}, - {"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}}, - {"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}}, - {"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}}, - {"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}}, - {"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}}, - {"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}}, - {"uint8", nil, 
Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}}, - {"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}}, - {"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}}, - {"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}}, - {"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}}, - {"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}}, - {"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}}, - {"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}}, - {"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}}, - {"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}}, - {"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}}, - {"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}}, - {"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}}, - {"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}}, - {"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}}, - {"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}}, - {"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}}, - {"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}}, - {"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}}, - {"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: 
reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}}, - {"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}}, - {"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}}, - {"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}}, - {"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}}, - {"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}}, - {"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}}, + {"bool", nil, Type{T: BoolTy, stringKind: "bool"}}, + {"bool[]", nil, Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}}, + {"bool[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}}, + {"bool[2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}}, + {"bool[][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}}, + {"bool[][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}}, + {"bool[2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}}, + {"bool[2][][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}}, + {"bool[2][2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}}, + {"bool[][][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}}, + {"bool[][2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}}, + {"int8", nil, Type{Size: 8, T: IntTy, stringKind: "int8"}}, + {"int16", nil, Type{Size: 16, T: IntTy, stringKind: "int16"}}, + {"int32", nil, Type{Size: 32, T: IntTy, stringKind: "int32"}}, + {"int64", nil, Type{Size: 64, T: IntTy, stringKind: "int64"}}, + {"int256", nil, Type{Size: 256, T: IntTy, stringKind: "int256"}}, + {"int8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: 
"int8[]"}}, + {"int8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}}, + {"int16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}}, + {"int16[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}}, + {"int32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}}, + {"int32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}}, + {"int64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}}, + {"int64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}}, + {"int256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}}, + {"int256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}}, + {"uint8", nil, Type{Size: 8, T: UintTy, stringKind: "uint8"}}, + {"uint16", nil, Type{Size: 16, T: UintTy, stringKind: "uint16"}}, + {"uint32", nil, Type{Size: 32, T: UintTy, stringKind: "uint32"}}, + {"uint64", nil, Type{Size: 64, T: UintTy, stringKind: "uint64"}}, + {"uint256", nil, Type{Size: 256, T: UintTy, stringKind: "uint256"}}, + {"uint8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}}, + {"uint8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}}, + {"uint16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}}, + {"uint16[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}}, + {"uint32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}}, + {"uint32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}}, + {"uint64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}}, + {"uint64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}}, + {"uint256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}}, + {"uint256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}}, + {"bytes32", nil, Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}}, + {"bytes[]", nil, Type{T: SliceTy, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}}, + {"bytes[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}}, + {"bytes32[]", nil, Type{T: SliceTy, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}}, + {"bytes32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[2]"}}, + {"string", nil, Type{T: StringTy, stringKind: "string"}}, + {"string[]", nil, Type{T: SliceTy, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[]"}}, + {"string[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[2]"}}, + 
{"address", nil, Type{Size: 20, T: AddressTy, stringKind: "address"}}, + {"address[]", nil, Type{T: SliceTy, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}}, + {"address[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}}, // TODO when fixed types are implemented properly // {"fixed", nil, Type{}}, // {"fixed128x128", nil, Type{}}, @@ -95,14 +95,14 @@ func TestTypeRegexp(t *testing.T) { // {"fixed[2]", nil, Type{}}, // {"fixed128x128[]", nil, Type{}}, // {"fixed128x128[2]", nil, Type{}}, - {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct { + {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct { A int64 `json:"a"` }{}), stringKind: "(int64)", - TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}}, - {"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct { + TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}}, + {"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct { ATypicalParamName int64 `json:"aTypicalParamName"` }{}), stringKind: "(int64)", - TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}}, + TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}}, } for _, tt := range tests { @@ -255,7 +255,7 @@ func TestTypeCheck(t *testing.T) { {"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"}, {"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"}, {"string", nil, "hello world", ""}, - {"string", nil, string(""), ""}, + {"string", nil, "", ""}, {"string", nil, []byte{}, "abi: cannot use slice as type string as argument"}, {"bytes32[]", nil, [][32]byte{{}}, ""}, {"function", nil, [24]byte{}, ""}, @@ -306,3 +306,63 @@ func TestTypeCheck(t *testing.T) { } } } + +func TestInternalType(t *testing.T) { + components := []ArgumentMarshaling{{Name: "a", Type: "int64"}} + internalType := "struct a.b[]" + kind := Type{ + T: TupleTy, + TupleType: reflect.TypeOf(struct { + A int64 `json:"a"` + }{}), + stringKind: "(int64)", + TupleRawName: "ab[]", + TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, + TupleRawNames: []string{"a"}, + } + + blob := "tuple" + typ, err := NewType(blob, internalType, components) + if err != nil { + t.Errorf("type %q: failed to parse type string: %v", blob, err) + } + if !reflect.DeepEqual(typ, kind) { + t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(kind))) + } +} + +func TestGetTypeSize(t *testing.T) { + var testCases = []struct { + typ string + components []ArgumentMarshaling + typSize int + }{ + // simple array + {"uint256[2]", nil, 32 * 2}, + {"address[3]", nil, 32 * 3}, + {"bytes32[4]", nil, 32 * 4}, + // array array + {"uint256[2][3][4]", nil, 32 * (2 * 3 * 4)}, + // array tuple + {"tuple[2]", []ArgumentMarshaling{{Name: "x", Type: "bytes32"}, {Name: "y", Type: "bytes32"}}, (32 * 2) * 2}, + 
// simple tuple + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "uint256"}, {Name: "y", Type: "uint256"}}, 32 * 2}, + // tuple array + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "bytes32[2]"}}, 32 * 2}, + // tuple tuple + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "tuple", Components: []ArgumentMarshaling{{Name: "x", Type: "bytes32"}}}}, 32}, + {"tuple", []ArgumentMarshaling{{Name: "x", Type: "tuple", Components: []ArgumentMarshaling{{Name: "x", Type: "bytes32[2]"}, {Name: "y", Type: "uint256"}}}}, 32 * (2 + 1)}, + } + + for i, data := range testCases { + typ, err := NewType(data.typ, "", data.components) + if err != nil { + t.Errorf("type %q: failed to parse type string: %v", data.typ, err) + } + + result := getTypeSize(typ) + if result != data.typSize { + t.Errorf("case %d type %q: get type size error: actual: %d expected: %d", i, data.typ, result, data.typSize) + } + } +} diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index 2a5db3b315a7..43cd6c64575c 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -26,54 +26,54 @@ import ( ) var ( - // MaxUint256 is the maximum value that can be represented by a uint256 - MaxUint256 = big.NewInt(0).Add( - big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil), - big.NewInt(-1)) - // MaxInt256 is the maximum value that can be represented by a int256 - MaxInt256 = big.NewInt(0).Add( - big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil), - big.NewInt(-1)) + // MaxUint256 is the maximum value that can be represented by a uint256. + MaxUint256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1) + // MaxInt256 is the maximum value that can be represented by a int256. + MaxInt256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 255), common.Big1) ) -// ReadInteger reads the integer based on its kind and returns the appropriate value -func ReadInteger(typ byte, kind reflect.Kind, b []byte) interface{} { - switch kind { - case reflect.Uint8: - return b[len(b)-1] - case reflect.Uint16: - return binary.BigEndian.Uint16(b[len(b)-2:]) - case reflect.Uint32: - return binary.BigEndian.Uint32(b[len(b)-4:]) - case reflect.Uint64: - return binary.BigEndian.Uint64(b[len(b)-8:]) - case reflect.Int8: +// ReadInteger reads the integer based on its kind and returns the appropriate value. +func ReadInteger(typ Type, b []byte) interface{} { + if typ.T == UintTy { + switch typ.Size { + case 8: + return b[len(b)-1] + case 16: + return binary.BigEndian.Uint16(b[len(b)-2:]) + case 32: + return binary.BigEndian.Uint32(b[len(b)-4:]) + case 64: + return binary.BigEndian.Uint64(b[len(b)-8:]) + default: + // the only case left for unsigned integer is uint256. + return new(big.Int).SetBytes(b) + } + } + switch typ.Size { + case 8: return int8(b[len(b)-1]) - case reflect.Int16: + case 16: return int16(binary.BigEndian.Uint16(b[len(b)-2:])) - case reflect.Int32: + case 32: return int32(binary.BigEndian.Uint32(b[len(b)-4:])) - case reflect.Int64: + case 64: return int64(binary.BigEndian.Uint64(b[len(b)-8:])) default: - // the only case lefts for integer is int256/uint256. - // big.SetBytes can't tell if a number is negative, positive on itself. + // the only case left for integer is int256 + // big.SetBytes can't tell if a number is negative or positive in itself. // On EVM, if the returned number > max int256, it is negative. + // A number is > max int256 if the bit at position 255 is set. 
ret := new(big.Int).SetBytes(b) - if typ == UintTy { - return ret - } - - if ret.Cmp(MaxInt256) > 0 { - ret.Add(MaxUint256, big.NewInt(0).Neg(ret)) - ret.Add(ret, big.NewInt(1)) + if ret.Bit(255) == 1 { + ret.Add(MaxUint256, new(big.Int).Neg(ret)) + ret.Add(ret, common.Big1) ret.Neg(ret) } return ret } } -// reads a bool +// readBool reads a bool. func readBool(word []byte) (bool, error) { for _, b := range word[:31] { if b != 0 { @@ -91,7 +91,8 @@ func readBool(word []byte) (bool, error) { } // A function type is simply the address with the function selection signature at the end. -// This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) +// +// readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { if t.T != FunctionTy { return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array") @@ -104,20 +105,20 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { return } -// ReadFixedBytes uses reflection to create a fixed array to be read from +// ReadFixedBytes uses reflection to create a fixed array to be read from. func ReadFixedBytes(t Type, word []byte) (interface{}, error) { if t.T != FixedBytesTy { return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array") } // convert - array := reflect.New(t.Type).Elem() + array := reflect.New(t.GetType()).Elem() reflect.Copy(array, reflect.ValueOf(word[0:t.Size])) return array.Interface(), nil } -// iteratively unpack elements +// forEachUnpack iteratively unpack elements. func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) { if size < 0 { return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size) @@ -131,10 +132,10 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) if t.T == SliceTy { // declare our slice - refSlice = reflect.MakeSlice(t.Type, size, size) + refSlice = reflect.MakeSlice(t.GetType(), size, size) } else if t.T == ArrayTy { // declare our array - refSlice = reflect.New(t.Type).Elem() + refSlice = reflect.New(t.GetType()).Elem() } else { return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage") } @@ -158,7 +159,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) } func forTupleUnpack(t Type, output []byte) (interface{}, error) { - retval := reflect.New(t.Type).Elem() + retval := reflect.New(t.GetType()).Elem() virtualArgs := 0 for index, elem := range t.TupleElems { marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output) @@ -218,21 +219,23 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { return nil, err } return forTupleUnpack(t, output[begin:]) - } else { - return forTupleUnpack(t, output[index:]) } + return forTupleUnpack(t, output[index:]) case SliceTy: return forEachUnpack(t, output[begin:], 0, length) case ArrayTy: if isDynamicType(*t.Elem) { - offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:])) + offset := binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]) + if offset > uint64(len(output)) { + return nil, fmt.Errorf("abi: toGoType offset greater than output length: offset: %d, len(output): %d", offset, len(output)) + } return forEachUnpack(t, output[offset:], 0, t.Size) } return forEachUnpack(t, output[index:], 0, t.Size) case StringTy: // variable arrays are written at the end of the return bytes 
return string(output[begin : begin+length]), nil case IntTy, UintTy: - return ReadInteger(t.T, t.Kind, returnOutput), nil + return ReadInteger(t, returnOutput), nil case BoolTy: return readBool(returnOutput) case AddressTy: @@ -250,7 +253,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { } } -// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type. +// lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type. func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) { bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32]) bigOffsetEnd.Add(bigOffsetEnd, common.Big32) @@ -287,7 +290,7 @@ func tuplePointsTo(index int, output []byte) (start int, err error) { offset := big.NewInt(0).SetBytes(output[index : index+32]) outputLen := big.NewInt(int64(len(output))) - if offset.Cmp(big.NewInt(int64(len(output)))) > 0 { + if offset.Cmp(outputLen) > 0 { return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen) } if offset.BitLen() > 63 { diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index dfea8db671d8..e617f8abc51d 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -30,6 +30,32 @@ import ( "github.com/stretchr/testify/require" ) +// TestUnpack tests the general pack/unpack tests in packing_test.go +func TestUnpack(t *testing.T) { + for i, test := range packUnpackTests { + t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) { + //Unpack + def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def) + abi, err := JSON(strings.NewReader(def)) + if err != nil { + t.Fatalf("invalid ABI definition %s: %v", def, err) + } + encb, err := hex.DecodeString(test.packed) + if err != nil { + t.Fatalf("invalid hex %s: %v", test.packed, err) + } + out, err := abi.Unpack("method", encb) + if err != nil { + t.Errorf("test %d (%v) failed: %v", i, test.def, err) + return + } + if !reflect.DeepEqual(test.unpacked, ConvertType(out[0], test.unpacked)) { + t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.unpacked, out[0]) + } + }) + } +} + type unpackTest struct { def string // ABI definition JSON enc string // evm return data @@ -52,16 +78,6 @@ func (test unpackTest) checkError(err error) error { var unpackTests = []unpackTest{ // Bools - { - def: `[{ "type": "bool" }]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: true, - }, - { - def: `[{ "type": "bool" }]`, - enc: "0000000000000000000000000000000000000000000000000000000000000000", - want: false, - }, { def: `[{ "type": "bool" }]`, enc: "0000000000000000000000000000000000000000000000000001000000000001", @@ -75,11 +91,6 @@ var unpackTests = []unpackTest{ err: "abi: improperly encoded boolean value", }, // Integers - { - def: `[{"type": "uint32"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: uint32(1), - }, { def: `[{"type": "uint32"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000001", @@ -92,16 +103,6 @@ var unpackTests = []unpackTest{ want: uint16(0), err: "abi: cannot unmarshal *big.Int in to uint16", }, - { - def: `[{"type": "uint17"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: big.NewInt(1), - }, - { - def: `[{"type": "int32"}]`, - enc: 
"0000000000000000000000000000000000000000000000000000000000000001", - want: int32(1), - }, { def: `[{"type": "int32"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000001", @@ -114,38 +115,10 @@ var unpackTests = []unpackTest{ want: int16(0), err: "abi: cannot unmarshal *big.Int in to int16", }, - { - def: `[{"type": "int17"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001", - want: big.NewInt(1), - }, - { - def: `[{"type": "int256"}]`, - enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - want: big.NewInt(-1), - }, - // Address - { - def: `[{"type": "address"}]`, - enc: "0000000000000000000000000100000000000000000000000000000000000000", - want: common.Address{1}, - }, - // Bytes - { - def: `[{"type": "bytes32"}]`, - enc: "0100000000000000000000000000000000000000000000000000000000000000", - want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, { def: `[{"type": "bytes"}]`, enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000", - want: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"), - }, - { - def: `[{"type": "bytes"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000", - want: [32]byte{}, - err: "abi: cannot unmarshal []uint8 in to [32]uint8", + want: [32]byte{1}, }, { def: `[{"type": "bytes32"}]`, @@ -153,245 +126,13 @@ var unpackTests = []unpackTest{ want: []byte(nil), err: "abi: cannot unmarshal [32]uint8 in to []uint8", }, - { - def: `[{"type": "bytes32"}]`, - enc: "0100000000000000000000000000000000000000000000000000000000000000", - want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, - // Functions - { - def: `[{"type": "function"}]`, - enc: "0100000000000000000000000000000000000000000000000000000000000000", - want: [24]byte{1}, - }, - // Slice and Array - { - def: `[{"type": "uint8[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []uint8{1, 2}, - }, - { - def: `[{"type": "uint8[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: []uint8{}, - }, - { - def: `[{"type": "uint256[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: []*big.Int{}, - }, - { - def: `[{"type": "uint8[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint8{1, 2}, - }, - // multi dimensional, if these pass, all types that don't require length prefix should pass - { - def: `[{"type": "uint8[][]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: [][]uint8{}, - }, - { - def: `[{"type": "uint8[][]"}]`, - enc: 
"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [][]uint8{{1, 2}, {1, 2}}, - }, - { - def: `[{"type": "uint8[][]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", - want: [][]uint8{{1, 2}, {1, 2, 3}}, - }, - { - def: `[{"type": "uint8[2][2]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2][2]uint8{{1, 2}, {1, 2}}, - }, - { - def: `[{"type": "uint8[][2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - want: [2][]uint8{{}, {}}, - }, - { - def: `[{"type": "uint8[][2]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", - want: [2][]uint8{{1}, {1}}, - }, - { - def: `[{"type": "uint8[2][]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", - want: [][2]uint8{}, - }, - { - def: `[{"type": "uint8[2][]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [][2]uint8{{1, 2}}, - }, - { - def: `[{"type": "uint8[2][]"}]`, - enc: 
"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [][2]uint8{{1, 2}, {1, 2}}, - }, - { - def: `[{"type": "uint16[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []uint16{1, 2}, - }, - { - def: `[{"type": "uint16[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint16{1, 2}, - }, - { - def: `[{"type": "uint32[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []uint32{1, 2}, - }, - { - def: `[{"type": "uint32[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint32{1, 2}, - }, - { - def: `[{"type": "uint32[2][3][4]"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018", - want: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}}, - }, - { - def: `[{"type": "uint64[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - 
want: []uint64{1, 2}, - }, - { - def: `[{"type": "uint64[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]uint64{1, 2}, - }, - { - def: `[{"type": "uint256[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []*big.Int{big.NewInt(1), big.NewInt(2)}, - }, - { - def: `[{"type": "uint256[3]"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", - want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, - }, - { - def: `[{"type": "string[4]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000", - want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"}, - }, - { - def: `[{"type": "string[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000", - want: []string{"Ethereum", "go-ethereum"}, - }, - { - def: `[{"type": "bytes[]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000", - want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}}, - }, - { - def: `[{"type": "uint256[2][][]"}]`, - enc: 
"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8", - want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}}, - }, - { - def: `[{"type": "int8[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int8{1, 2}, - }, - { - def: `[{"type": "int8[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int8{1, 2}, - }, - { - def: `[{"type": "int16[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int16{1, 2}, - }, - { - def: `[{"type": "int16[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int16{1, 2}, - }, - { - def: `[{"type": "int32[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int32{1, 2}, - }, - { - def: `[{"type": "int32[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int32{1, 2}, - }, - { - def: `[{"type": "int64[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []int64{1, 2}, - }, - { - def: `[{"type": "int64[2]"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: [2]int64{1, 2}, - }, - { - def: `[{"type": "int256[]"}]`, - enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: []*big.Int{big.NewInt(1), 
big.NewInt(2)}, - }, - { - def: `[{"type": "int256[3]"}]`, - enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", - want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, - }, - // struct outputs - { - def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - Int1 *big.Int - Int2 *big.Int - }{big.NewInt(1), big.NewInt(2)}, - }, - { - def: `[{"name":"int_one","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - }{big.NewInt(1)}, - }, - { - def: `[{"name":"int__one","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - }{big.NewInt(1)}, - }, - { - def: `[{"name":"int_one_","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - }{big.NewInt(1)}, - }, - { - def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`, - enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - want: struct { - IntOne *big.Int - Intone *big.Int - }{big.NewInt(1), big.NewInt(2)}, - }, { def: `[{"name":"___","type":"int256"}]`, enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", want: struct { IntOne *big.Int Intone *big.Int - }{}, - err: "abi: purely underscored output cannot unpack to struct", + }{IntOne: big.NewInt(1)}, }, { def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`, @@ -438,12 +179,37 @@ var unpackTests = []unpackTest{ }{}, err: "abi: purely underscored output cannot unpack to struct", }, + // Make sure only the first argument is consumed + { + def: `[{"name":"int_one","type":"int256"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + want: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"name":"int__one","type":"int256"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + want: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, + { + def: `[{"name":"int_one_","type":"int256"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + want: struct { + IntOne *big.Int + }{big.NewInt(1)}, + }, } -func TestUnpack(t *testing.T) { +// TestLocalUnpackTests runs test specially designed only for unpacking. 
+// All test cases that can be used to test packing and unpacking should move to packing_test.go +func TestLocalUnpackTests(t *testing.T) { for i, test := range unpackTests { t.Run(strconv.Itoa(i), func(t *testing.T) { - def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def) + //Unpack + def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def) abi, err := JSON(strings.NewReader(def)) if err != nil { t.Fatalf("invalid ABI definition %s: %v", def, err) @@ -453,7 +219,7 @@ func TestUnpack(t *testing.T) { t.Fatalf("invalid hex %s: %v", test.enc, err) } outptr := reflect.New(reflect.TypeOf(test.want)) - err = abi.Unpack(outptr.Interface(), "method", encb) + err = abi.UnpackIntoInterface(outptr.Interface(), "method", encb) if err := test.checkError(err); err != nil { t.Errorf("test %d (%v) failed: %v", i, test.def, err) return @@ -466,7 +232,7 @@ func TestUnpack(t *testing.T) { } } -func TestUnpackSetDynamicArrayOutput(t *testing.T) { +func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) { abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`)) if err != nil { t.Fatal(err) @@ -481,7 +247,7 @@ func TestUnpackSetDynamicArrayOutput(t *testing.T) { ) // test 32 - err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32) + err = abi.UnpackIntoInterface(&out32, "testDynamicFixedBytes32", marshalledReturn32) if err != nil { t.Fatal(err) } @@ -498,7 +264,7 @@ func TestUnpackSetDynamicArrayOutput(t *testing.T) { } // test 15 - err = abi.Unpack(&out15, "testDynamicFixedBytes32", marshalledReturn15) + err = abi.UnpackIntoInterface(&out15, "testDynamicFixedBytes32", marshalledReturn15) if err != nil { t.Fatal(err) } @@ -522,7 +288,7 @@ type methodMultiOutput struct { func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) { const definition = `[ - { "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]` + { "name" : "multi", "type": "function", "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]` var expected = methodMultiOutput{big.NewInt(1), "hello"} abi, err := JSON(strings.NewReader(definition)) @@ -592,14 +358,14 @@ func TestMethodMultiReturn(t *testing.T) { }, { &[]interface{}{new(int)}, &[]interface{}{}, - "abi: insufficient number of elements in the list/array for unpack, want 2, got 1", + "abi: insufficient number of arguments for unpack, want 2, got 1", "Can not unpack into a slice with wrong types", }} for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { require := require.New(t) - err := abi.Unpack(tc.dest, "multi", data) + err := abi.UnpackIntoInterface(tc.dest, "multi", data) if tc.error == "" { require.Nil(err, "Should be able to unpack method outputs.") require.Equal(tc.expected, tc.dest) @@ -611,7 +377,7 @@ func TestMethodMultiReturn(t *testing.T) { } func TestMultiReturnWithArray(t *testing.T) { - const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]` abi, err := 
JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -622,7 +388,7 @@ func TestMultiReturnWithArray(t *testing.T) { ret1, ret1Exp := new([3]uint64), [3]uint64{9, 9, 9} ret2, ret2Exp := new(uint64), uint64(8) - if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -634,7 +400,7 @@ func TestMultiReturnWithArray(t *testing.T) { } func TestMultiReturnWithStringArray(t *testing.T) { - const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]` abi, err := JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -646,7 +412,7 @@ func TestMultiReturnWithStringArray(t *testing.T) { ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f") ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"} ret4, ret4Exp := new(bool), false - if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -664,7 +430,7 @@ func TestMultiReturnWithStringArray(t *testing.T) { } func TestMultiReturnWithStringSlice(t *testing.T) { - const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]` abi, err := JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -684,7 +450,7 @@ func TestMultiReturnWithStringSlice(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"} ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)} - if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -700,7 +466,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) { // values of nested static arrays count towards the size as well, and any element following // after such nested array argument should be read with the correct offset, // so that it does not read content from the previous array argument. 
- const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]` + const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]` abi, err := JSON(strings.NewReader(definition)) if err != nil { t.Fatal(err) @@ -724,7 +490,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) { {{0x411, 0x412, 0x413}, {0x421, 0x422, 0x423}}, } ret2, ret2Exp := new(uint64), uint64(0x9876) - if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { + if err := abi.UnpackIntoInterface(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil { t.Fatal(err) } if !reflect.DeepEqual(*ret1, ret1Exp) { @@ -737,15 +503,15 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) { func TestUnmarshal(t *testing.T) { const definition = `[ - { "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] }, - { "name" : "bool", "constant" : false, "outputs": [ { "type": "bool" } ] }, - { "name" : "bytes", "constant" : false, "outputs": [ { "type": "bytes" } ] }, - { "name" : "fixed", "constant" : false, "outputs": [ { "type": "bytes32" } ] }, - { "name" : "multi", "constant" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] }, - { "name" : "intArraySingle", "constant" : false, "outputs": [ { "type": "uint256[3]" } ] }, - { "name" : "addressSliceSingle", "constant" : false, "outputs": [ { "type": "address[]" } ] }, - { "name" : "addressSliceDouble", "constant" : false, "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] }, - { "name" : "mixedBytes", "constant" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]` + { "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] }, + { "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] }, + { "name" : "bytes", "type": "function", "outputs": [ { "type": "bytes" } ] }, + { "name" : "fixed", "type": "function", "outputs": [ { "type": "bytes32" } ] }, + { "name" : "multi", "type": "function", "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] }, + { "name" : "intArraySingle", "type": "function", "outputs": [ { "type": "uint256[3]" } ] }, + { "name" : "addressSliceSingle", "type": "function", "outputs": [ { "type": "address[]" } ] }, + { "name" : "addressSliceDouble", "type": "function", "outputs": [ { "name": "a", "type": "address[]" }, { "name": "b", "type": "address[]" } ] }, + { "name" : "mixedBytes", "type": "function", "stateMutability" : "view", "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]` abi, err := JSON(strings.NewReader(definition)) if err != nil { @@ -763,7 +529,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000a")) buff.Write(common.Hex2Bytes("0102000000000000000000000000000000000000000000000000000000000000")) - err = abi.Unpack(&mixedBytes, "mixedBytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&mixedBytes, "mixedBytes", buff.Bytes()) if err != nil { t.Error(err) } else { @@ -778,7 +544,7 @@ func TestUnmarshal(t *testing.T) { // marshal int var Int *big.Int - err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) + err = abi.UnpackIntoInterface(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) if err != nil { t.Error(err) } @@ -789,7 +555,7 @@ 
func TestUnmarshal(t *testing.T) { // marshal bool var Bool bool - err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) + err = abi.UnpackIntoInterface(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) if err != nil { t.Error(err) } @@ -806,7 +572,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(bytesOut) var Bytes []byte - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -822,7 +588,7 @@ func TestUnmarshal(t *testing.T) { bytesOut = common.RightPadBytes([]byte("hello"), 64) buff.Write(bytesOut) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -838,7 +604,7 @@ func TestUnmarshal(t *testing.T) { bytesOut = common.RightPadBytes([]byte("hello"), 64) buff.Write(bytesOut) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -848,7 +614,7 @@ func TestUnmarshal(t *testing.T) { } // marshal dynamic bytes output empty - err = abi.Unpack(&Bytes, "bytes", nil) + err = abi.UnpackIntoInterface(&Bytes, "bytes", nil) if err == nil { t.Error("expected error") } @@ -859,7 +625,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005")) buff.Write(common.RightPadBytes([]byte("hello"), 32)) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err != nil { t.Error(err) } @@ -873,7 +639,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.RightPadBytes([]byte("hello"), 32)) var hash common.Hash - err = abi.Unpack(&hash, "fixed", buff.Bytes()) + err = abi.UnpackIntoInterface(&hash, "fixed", buff.Bytes()) if err != nil { t.Error(err) } @@ -886,12 +652,12 @@ func TestUnmarshal(t *testing.T) { // marshal error buff.Reset() buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020")) - err = abi.Unpack(&Bytes, "bytes", buff.Bytes()) + err = abi.UnpackIntoInterface(&Bytes, "bytes", buff.Bytes()) if err == nil { t.Error("expected error") } - err = abi.Unpack(&Bytes, "multi", make([]byte, 64)) + err = abi.UnpackIntoInterface(&Bytes, "multi", make([]byte, 64)) if err == nil { t.Error("expected error") } @@ -902,7 +668,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003")) // marshal int array var intArray [3]*big.Int - err = abi.Unpack(&intArray, "intArraySingle", buff.Bytes()) + err = abi.UnpackIntoInterface(&intArray, "intArraySingle", buff.Bytes()) if err != nil { t.Error(err) } @@ -923,7 +689,7 @@ func TestUnmarshal(t *testing.T) { buff.Write(common.Hex2Bytes("0000000000000000000000000100000000000000000000000000000000000000")) var outAddr []common.Address - err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes()) + err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes()) if err != nil { t.Fatal("didn't expect error:", err) } @@ -950,7 +716,7 @@ func TestUnmarshal(t *testing.T) { A []common.Address B []common.Address } - err = abi.Unpack(&outAddrStruct, "addressSliceDouble", buff.Bytes()) + err = abi.UnpackIntoInterface(&outAddrStruct, "addressSliceDouble", buff.Bytes()) if err != nil { t.Fatal("didn't expect error:", err) } @@ -978,14 
+744,14 @@ func TestUnmarshal(t *testing.T) { buff.Reset() buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000100")) - err = abi.Unpack(&outAddr, "addressSliceSingle", buff.Bytes()) + err = abi.UnpackIntoInterface(&outAddr, "addressSliceSingle", buff.Bytes()) if err == nil { t.Fatal("expected error:", err) } } func TestUnpackTuple(t *testing.T) { - const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]` + const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]` abi, err := JSON(strings.NewReader(simpleTuple)) if err != nil { t.Fatal(err) @@ -996,25 +762,29 @@ func TestUnpackTuple(t *testing.T) { buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1 // If the result is single tuple, use struct as return value container directly. - v := struct { + type v struct { A *big.Int B *big.Int - }{new(big.Int), new(big.Int)} + } + type r struct { + Result v + } + var ret0 = new(r) + err = abi.UnpackIntoInterface(ret0, "tuple", buff.Bytes()) - err = abi.Unpack(&v, "tuple", buff.Bytes()) if err != nil { t.Error(err) } else { - if v.A.Cmp(big.NewInt(1)) != 0 { - t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A) + if ret0.Result.A.Cmp(big.NewInt(1)) != 0 { + t.Errorf("unexpected value unpacked: want %x, got %x", 1, ret0.Result.A) } - if v.B.Cmp(big.NewInt(-1)) != 0 { - t.Errorf("unexpected value unpacked: want %x, got %x", v.B, -1) + if ret0.Result.B.Cmp(big.NewInt(-1)) != 0 { + t.Errorf("unexpected value unpacked: want %x, got %x", -1, ret0.Result.B) } } // Test nested tuple - const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[ + const nestedTuple = `[{"name":"tuple","type":"function","outputs":[ {"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]}, {"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}, {"type":"uint256","name":"a"} @@ -1073,7 +843,7 @@ func TestUnpackTuple(t *testing.T) { A: big.NewInt(1), } - err = abi.Unpack(&ret, "tuple", buff.Bytes()) + err = abi.UnpackIntoInterface(&ret, "tuple", buff.Bytes()) if err != nil { t.Error(err) } @@ -1136,7 +906,7 @@ func TestOOMMaliciousInput(t *testing.T) { }, } for i, test := range oomTests { - def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def) + def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def) abi, err := JSON(strings.NewReader(def)) if err != nil { t.Fatalf("invalid ABI definition %s: %v", def, err) diff --git a/accounts/accounts.go b/accounts/accounts.go index bf5190ad98f7..179a33c59fd3 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -21,7 +21,7 @@ import ( "fmt" "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" @@ -46,7 +46,7 @@ const ( // accounts (derived from the same seed). type Wallet interface { // URL retrieves the canonical path under which this wallet is reachable. 
It is - // user by upper layers to define a sorting order over all wallets from multiple + // used by upper layers to define a sorting order over all wallets from multiple // backends. URL() URL @@ -88,8 +88,8 @@ type Wallet interface { // to discover non zero accounts and automatically add them to list of tracked // accounts. // - // Note, self derivaton will increment the last component of the specified path - // opposed to decending into a child path to allow discovering accounts starting + // Note, self derivation will increment the last component of the specified path + // opposed to descending into a child path to allow discovering accounts starting // from non zero components. // // Some hardware wallets switched derivation paths through their evolution, so @@ -105,7 +105,7 @@ type Wallet interface { // or optionally with the aid of any location metadata from the embedded URL field. // // If the wallet requires additional authentication to sign the request (e.g. - // a password to decrypt the account, or a PIN code o verify the transaction), + // a password to decrypt the account, or a PIN code to verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing // the needed details via SignDataWithPassphrase, or by other means (e.g. unlock @@ -113,7 +113,7 @@ type Wallet interface { SignData(account Account, mimeType string, data []byte) ([]byte, error) // SignDataWithPassphrase is identical to SignData, but also takes a password - // NOTE: there's an chance that an erroneous call might mistake the two strings, and + // NOTE: there's a chance that an erroneous call might mistake the two strings, and // supply password in the mimetype field, or vice versa. Thus, an implementation // should never echo the mimetype or return the mimetype in the error-response SignDataWithPassphrase(account Account, passphrase, mimeType string, data []byte) ([]byte, error) @@ -124,11 +124,13 @@ type Wallet interface { // or optionally with the aid of any location metadata from the embedded URL field. // // If the wallet requires additional authentication to sign the request (e.g. - // a password to decrypt the account, or a PIN code o verify the transaction), + // a password to decrypt the account, or a PIN code to verify the transaction), // an AuthNeededError instance will be returned, containing infos for the user // about which fields or actions are needed. The user may retry by providing - // the needed details via SignHashWithPassphrase, or by other means (e.g. unlock + // the needed details via SignTextWithPassphrase, or by other means (e.g. unlock // the account in a keystore). + // + // This method should return the signature in 'canonical' format, with v 0 or 1. SignText(account Account, text []byte) ([]byte, error) // SignTextWithPassphrase is identical to Signtext, but also takes a password @@ -174,7 +176,7 @@ type Backend interface { // TextHash is a helper function that calculates a hash for the given message that can be // safely used to calculate a signature from. // -// The hash is calulcated as +// The hash is calculated as // keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. 
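The TextHash doc comment above spells out the personal-message digest: keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). A minimal sketch of that calculation, assuming only go-ethereum's crypto.Keccak256 helper and a made-up input message:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Hypothetical message; any byte string is handled the same way.
	msg := []byte("hello world")

	// Prefix with "\x19Ethereum Signed Message:\n" plus the decimal message
	// length, exactly as described in the TextHash comment, then keccak256 it.
	prefixed := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(msg), msg)
	hash := crypto.Keccak256([]byte(prefixed))

	fmt.Printf("0x%x\n", hash)
}
```

This is the same digest the accounts.TextHash helper documents for a given message; the prefix gives context to the signed text and prevents the signature from doubling as a transaction signature.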
@@ -186,7 +188,7 @@ func TextHash(data []byte) []byte { // TextAndHash is a helper function that calculates a hash for the given message that can be // safely used to calculate a signature from. // -// The hash is calulcated as +// The hash is calculated as // keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. diff --git a/accounts/errors.go b/accounts/errors.go index 2fed35f9d074..727e5329befa 100644 --- a/accounts/errors.go +++ b/accounts/errors.go @@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password") var ErrWalletAlreadyOpen = errors.New("wallet already open") // ErrWalletClosed is returned if a wallet is attempted to be opened the -// secodn time. +// second time. var ErrWalletClosed = errors.New("wallet closed") // AuthNeededError is returned by backends for signing requests where the user diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 6089ca984446..e3f754eafcc4 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -27,10 +27,9 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" ) type ExternalBackend struct { @@ -131,6 +130,12 @@ func (api *ExternalSigner) Accounts() []accounts.Account { func (api *ExternalSigner) Contains(account accounts.Account) bool { api.cacheMu.RLock() defer api.cacheMu.RUnlock() + if api.cache == nil { + // If we haven't already fetched the accounts, it's time to do so now + api.cacheMu.RUnlock() + api.Accounts() + api.cacheMu.RLock() + } for _, a := range api.cache { if a.Address == account.Address && (account.URL == (accounts.URL{}) || account.URL == api.URL()) { return true @@ -161,7 +166,7 @@ func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, d hexutil.Encode(data)); err != nil { return nil, err } - // If V is on 27/28-form, convert to to 0/1 for Clique + // If V is on 27/28-form, convert to 0/1 for Clique if mimeType == accounts.MimetypeClique && (res[64] == 27 || res[64] == 28) { res[64] -= 27 // Transform V from 27/28 to 0/1 for Clique use } @@ -169,34 +174,71 @@ func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, d } func (api *ExternalSigner) SignText(account accounts.Account, text []byte) ([]byte, error) { - var res hexutil.Bytes + var signature hexutil.Bytes var signAddress = common.NewMixedcaseAddress(account.Address) - if err := api.client.Call(&res, "account_signData", + if err := api.client.Call(&signature, "account_signData", accounts.MimetypeTextPlain, &signAddress, // Need to use the pointer here, because of how MarshalJSON is defined hexutil.Encode(text)); err != nil { return nil, err } - return res, nil + if signature[64] == 27 || signature[64] == 28 { + // If clef is used as a backend, it may already have transformed + // the signature to ethereum-type signature. + signature[64] -= 27 // Transform V from Ethereum-legacy to 0/1 + } + return signature, nil } +// signTransactionResult represents the signing result returned by clef. 
+type signTransactionResult struct { + Raw hexutil.Bytes `json:"raw"` + Tx *types.Transaction `json:"tx"` +} + +// SignTx sends the transaction to the external signer. +// If chainID is nil, or tx.ChainID is zero, the chain ID will be assigned +// by the external signer. For non-legacy transactions, the chain ID of the +// transaction overrides the chainID parameter. func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - res := ethapi.SignTransactionResult{} data := hexutil.Bytes(tx.Data()) var to *common.MixedcaseAddress if tx.To() != nil { t := common.NewMixedcaseAddress(*tx.To()) to = &t } - args := &core.SendTxArgs{ - Data: &data, - Nonce: hexutil.Uint64(tx.Nonce()), - Value: hexutil.Big(*tx.Value()), - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: hexutil.Big(*tx.GasPrice()), - To: to, - From: common.NewMixedcaseAddress(account.Address), + args := &apitypes.SendTxArgs{ + Data: &data, + Nonce: hexutil.Uint64(tx.Nonce()), + Value: hexutil.Big(*tx.Value()), + Gas: hexutil.Uint64(tx.Gas()), + To: to, + From: common.NewMixedcaseAddress(account.Address), + } + switch tx.Type() { + case types.LegacyTxType, types.AccessListTxType: + args.GasPrice = (*hexutil.Big)(tx.GasPrice()) + case types.DynamicFeeTxType: + args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap()) + args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap()) + default: + return nil, fmt.Errorf("unsupported tx type %d", tx.Type()) + } + // We should request the default chain id that we're operating with + // (the chain we're executing on) + if chainID != nil && chainID.Sign() != 0 { + args.ChainID = (*hexutil.Big)(chainID) + } + if tx.Type() != types.LegacyTxType { + // However, if the user asked for a particular chain id, then we should + // use that instead. + if tx.ChainId().Sign() != 0 { + args.ChainID = (*hexutil.Big)(tx.ChainId()) + } + accessList := tx.AccessList() + args.AccessList = &accessList } + var res signTransactionResult if err := api.client.Call(&res, "account_signTransaction", args); err != nil { return nil, err } diff --git a/accounts/hd.go b/accounts/hd.go index 75c47611061c..54acea3b261d 100644 --- a/accounts/hd.go +++ b/accounts/hd.go @@ -150,3 +150,31 @@ func (path *DerivationPath) UnmarshalJSON(b []byte) error { *path, err = ParseDerivationPath(dp) return err } + +// DefaultIterator creates a BIP-32 path iterator, which progresses by increasing the last component: +// i.e. m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2, ... m/44'/60'/0'/0/N. +func DefaultIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[len(path)-1]-- + return func() DerivationPath { + path[len(path)-1]++ + return path + } +} + +// LedgerLiveIterator creates a bip44 path iterator for Ledger Live. +// Ledger Live increments the third component rather than the fifth component +// i.e. m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0, ... m/44'/60'/N'/0/0. 
+func LedgerLiveIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[2]-- + return func() DerivationPath { + // ledgerLivePathIterator iterates on the third component + path[2]++ + return path + } +} diff --git a/accounts/hd_test.go b/accounts/hd_test.go index b6b23230dc74..0743bbe66628 100644 --- a/accounts/hd_test.go +++ b/accounts/hd_test.go @@ -17,6 +17,7 @@ package accounts import ( + "fmt" "reflect" "testing" ) @@ -61,7 +62,7 @@ func TestHDPathParsing(t *testing.T) { // Weird inputs just to ensure they work {" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}}, - // Invaid derivation paths + // Invalid derivation paths {"", nil}, // Empty relative derivation path {"m", nil}, // Empty absolute derivation path {"m/", nil}, // Missing last derivation component @@ -77,3 +78,41 @@ func TestHDPathParsing(t *testing.T) { } } } + +func testDerive(t *testing.T, next func() DerivationPath, expected []string) { + t.Helper() + for i, want := range expected { + if have := next(); fmt.Sprintf("%v", have) != want { + t.Errorf("step %d, have %v, want %v", i, have, want) + } + } +} + +func TestHdPathIteration(t *testing.T) { + testDerive(t, DefaultIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1", + "m/44'/60'/0'/0/2", "m/44'/60'/0'/0/3", + "m/44'/60'/0'/0/4", "m/44'/60'/0'/0/5", + "m/44'/60'/0'/0/6", "m/44'/60'/0'/0/7", + "m/44'/60'/0'/0/8", "m/44'/60'/0'/0/9", + }) + + testDerive(t, DefaultIterator(LegacyLedgerBaseDerivationPath), + []string{ + "m/44'/60'/0'/0", "m/44'/60'/0'/1", + "m/44'/60'/0'/2", "m/44'/60'/0'/3", + "m/44'/60'/0'/4", "m/44'/60'/0'/5", + "m/44'/60'/0'/6", "m/44'/60'/0'/7", + "m/44'/60'/0'/8", "m/44'/60'/0'/9", + }) + + testDerive(t, LedgerLiveIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/1'/0/0", + "m/44'/60'/2'/0/0", "m/44'/60'/3'/0/0", + "m/44'/60'/4'/0/0", "m/44'/60'/5'/0/0", + "m/44'/60'/6'/0/0", "m/44'/60'/7'/0/0", + "m/44'/60'/8'/0/0", "m/44'/60'/9'/0/0", + }) +} diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 8f660e282f57..a3ec6e9c5606 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -262,7 +262,7 @@ func (ac *accountCache) scanAccounts() error { switch { case err != nil: log.Debug("Failed to decode keystore key", "path", path, "err", err) - case (addr == common.Address{}): + case addr == common.Address{}: log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address") default: return &accounts.Account{ diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index fe9233c046e7..a847545bc8e1 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -96,7 +96,7 @@ func TestWatchNoDir(t *testing.T) { // Create ks but not the directory that it watches. 
rand.Seed(time.Now().UnixNano()) - dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) + dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) list := ks.Accounts() @@ -322,7 +322,7 @@ func TestUpdatedKeyfileContents(t *testing.T) { // Create a temporary kesytore to test with rand.Seed(time.Now().UnixNano()) - dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) + dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) list := ks.Accounts() diff --git a/accounts/keystore/file_cache.go b/accounts/keystore/file_cache.go index 73ff6ae9ee6f..8b309321d370 100644 --- a/accounts/keystore/file_cache.go +++ b/accounts/keystore/file_cache.go @@ -32,7 +32,7 @@ import ( type fileCache struct { all mapset.Set // Set of all files from the keystore folder lastMod time.Time // Last time instance when a file was modified - mu sync.RWMutex + mu sync.Mutex } // scan performs a new scan on the given directory, compares against the already diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go index 38046e39e07b..54e5c0564261 100644 --- a/accounts/keystore/key.go +++ b/accounts/keystore/key.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" ) const ( @@ -110,7 +110,10 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) { } u := new(uuid.UUID) - *u = uuid.Parse(keyJSON.Id) + *u, err = uuid.Parse(keyJSON.Id) + if err != nil { + return err + } k.Id = *u addr, err := hex.DecodeString(keyJSON.Address) if err != nil { @@ -128,7 +131,10 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) { } func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { - id := uuid.NewRandom() + id, err := uuid.NewRandom() + if err != nil { + panic(fmt.Sprintf("Could not create random uuid: %v", err)) + } key := &Key{ Id: id, Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 5b55175b1f3e..88dcfbeb69e0 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -24,7 +24,6 @@ import ( "crypto/ecdsa" crand "crypto/rand" "errors" - "fmt" "math/big" "os" "path/filepath" @@ -44,6 +43,10 @@ var ( ErrLocked = accounts.NewAuthNeededError("password or unlock") ErrNoMatch = errors.New("no key for given address or file") ErrDecrypt = errors.New("could not decrypt key with given password") + + // ErrAccountAlreadyExists is returned if an account attempted to import is + // already present in the keystore. + ErrAccountAlreadyExists = errors.New("account already exists") ) // KeyStoreType is the reflect type of a keystore backend. 
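The key.go hunk above replaces github.com/pborman/uuid with github.com/google/uuid, whose Parse and NewRandom also return an error alongside the UUID. A small, self-contained sketch of the new call pattern (the literal UUID string is a made-up example):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parse now reports malformed IDs through the error return.
	id, err := uuid.Parse("123e4567-e89b-12d3-a456-426614174000") // example value only
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed:", id)

	// NewRandom can also fail (e.g. if no entropy is available), so the
	// caller must handle the error rather than assume success.
	rnd, err := uuid.NewRandom()
	if err != nil {
		panic(err)
	}
	fmt.Println("random:", rnd, "raw bytes:", rnd[:])
}
```

This is why the patched keystore code now propagates or panics on these errors instead of using the returned ID unconditionally.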
@@ -67,7 +70,8 @@ type KeyStore struct { updateScope event.SubscriptionScope // Subscription scope tracking current live listeners updating bool // Whether the event notification loop is running - mu sync.RWMutex + mu sync.RWMutex + importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing } type unlocked struct { @@ -279,11 +283,9 @@ func (ks *KeyStore) SignTx(a accounts.Account, tx *types.Transaction, chainID *b if !found { return nil, ErrLocked } - // Depending on the presence of the chain ID, sign with EIP155 or homestead - if chainID != nil { - return types.SignTx(tx, types.NewEIP155Signer(chainID), unlockedKey.PrivateKey) - } - return types.SignTx(tx, types.HomesteadSigner{}, unlockedKey.PrivateKey) + // Depending on the presence of the chain ID, sign with 2718 or homestead + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, unlockedKey.PrivateKey) } // SignHashWithPassphrase signs hash if the private key matching the given address @@ -306,12 +308,9 @@ func (ks *KeyStore) SignTxWithPassphrase(a accounts.Account, passphrase string, return nil, err } defer zeroKey(key.PrivateKey) - - // Depending on the presence of the chain ID, sign with EIP155 or homestead - if chainID != nil { - return types.SignTx(tx, types.NewEIP155Signer(chainID), key.PrivateKey) - } - return types.SignTx(tx, types.HomesteadSigner{}, key.PrivateKey) + // Depending on the presence of the chain ID, sign with or without replay protection. + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, key.PrivateKey) } // Unlock unlocks the given account indefinitely. @@ -443,14 +442,27 @@ func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (ac if err != nil { return accounts.Account{}, err } + ks.importMu.Lock() + defer ks.importMu.Unlock() + + if ks.cache.hasAddress(key.Address) { + return accounts.Account{ + Address: key.Address, + }, ErrAccountAlreadyExists + } return ks.importKey(key, newPassphrase) } // ImportECDSA stores the given key into the key directory, encrypting it with the passphrase. func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) { + ks.importMu.Lock() + defer ks.importMu.Unlock() + key := newKeyFromECDSA(priv) if ks.cache.hasAddress(key.Address) { - return accounts.Account{}, fmt.Errorf("account already exists") + return accounts.Account{ + Address: key.Address, + }, ErrAccountAlreadyExists } return ks.importKey(key, passphrase) } diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go index a691c5062706..cb5de11c0ddb 100644 --- a/accounts/keystore/keystore_test.go +++ b/accounts/keystore/keystore_test.go @@ -23,11 +23,14 @@ import ( "runtime" "sort" "strings" + "sync" + "sync/atomic" "testing" "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" ) @@ -333,11 +336,95 @@ func TestWalletNotifications(t *testing.T) { // Shut down the event collector and check events. sub.Unsubscribe() - <-updates + for ev := range updates { + events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]}) + } checkAccounts(t, live, ks.Wallets()) checkEvents(t, wantEvents, events) } +// TestImportExport tests the import functionality of a keystore. 
+func TestImportECDSA(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + key, err := crypto.GenerateKey() + if err != nil { + t.Fatalf("failed to generate key: %v", key) + } + if _, err = ks.ImportECDSA(key, "old"); err != nil { + t.Errorf("importing failed: %v", err) + } + if _, err = ks.ImportECDSA(key, "old"); err == nil { + t.Errorf("importing same key twice succeeded") + } + if _, err = ks.ImportECDSA(key, "new"); err == nil { + t.Errorf("importing same key twice succeeded") + } +} + +// TestImportECDSA tests the import and export functionality of a keystore. +func TestImportExport(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + acc, err := ks.NewAccount("old") + if err != nil { + t.Fatalf("failed to create account: %v", acc) + } + json, err := ks.Export(acc, "old", "new") + if err != nil { + t.Fatalf("failed to export account: %v", acc) + } + dir2, ks2 := tmpKeyStore(t, true) + defer os.RemoveAll(dir2) + if _, err = ks2.Import(json, "old", "old"); err == nil { + t.Errorf("importing with invalid password succeeded") + } + acc2, err := ks2.Import(json, "new", "new") + if err != nil { + t.Errorf("importing failed: %v", err) + } + if acc.Address != acc2.Address { + t.Error("imported account does not match exported account") + } + if _, err = ks2.Import(json, "new", "new"); err == nil { + t.Errorf("importing a key twice succeeded") + } + +} + +// TestImportRace tests the keystore on races. +// This test should fail under -race if importing races. +func TestImportRace(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + acc, err := ks.NewAccount("old") + if err != nil { + t.Fatalf("failed to create account: %v", acc) + } + json, err := ks.Export(acc, "old", "new") + if err != nil { + t.Fatalf("failed to export account: %v", acc) + } + dir2, ks2 := tmpKeyStore(t, true) + defer os.RemoveAll(dir2) + var atom uint32 + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + if _, err := ks2.Import(json, "new", "new"); err != nil { + atomic.AddUint32(&atom, 1) + } + + }() + } + wg.Wait() + if atom != 1 { + t.Errorf("Import is racy") + } +} + // checkAccounts checks that all known live accounts are present in the wallet list. 
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) { if len(live) != len(wallets) { diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 89cdf0bfca14..3b3e63188840 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -42,7 +42,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" ) @@ -228,9 +228,12 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { return nil, err } key := crypto.ToECDSAUnsafe(keyBytes) - + id, err := uuid.FromBytes(keyId) + if err != nil { + return nil, err + } return &Key{ - Id: uuid.UUID(keyId), + Id: id, Address: crypto.PubkeyToAddress(key.PublicKey), PrivateKey: key, }, nil @@ -276,7 +279,11 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt if keyProtected.Version != version { return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version) } - keyId = uuid.Parse(keyProtected.Id) + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] plainText, err := DecryptDataV3(keyProtected.Crypto, auth) if err != nil { return nil, nil, err @@ -285,7 +292,11 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt } func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) { - keyId = uuid.Parse(keyProtected.Id) + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] mac, err := hex.DecodeString(keyProtected.Crypto.MAC) if err != nil { return nil, nil, err diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go index 03055245f5e7..0664dc2cdd05 100644 --- a/accounts/keystore/presale.go +++ b/accounts/keystore/presale.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" ) @@ -37,7 +37,10 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou if err != nil { return accounts.Account{}, nil, err } - key.Id = uuid.NewRandom() + key.Id, err = uuid.NewRandom() + if err != nil { + return accounts.Account{}, nil, err + } a := accounts.Account{ Address: key.Address, URL: accounts.URL{ @@ -86,7 +89,7 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error ecKey := crypto.ToECDSAUnsafe(ethPriv) key = &Key{ - Id: nil, + Id: uuid.UUID{}, Address: crypto.PubkeyToAddress(ecKey.PublicKey), PrivateKey: ecKey, } diff --git a/accounts/keystore/wallet.go b/accounts/keystore/wallet.go index 498067d49730..1066095f6d07 100644 --- a/accounts/keystore/wallet.go +++ b/accounts/keystore/wallet.go @@ -19,7 +19,7 @@ package keystore import ( "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -58,7 +58,7 @@ func (w *keystoreWallet) Open(passphrase string) error { return nil } func (w *keystoreWallet) Close() error { return nil } // Accounts implements accounts.Wallet, returning an account list consisting of -// a single account that the plain kestore wallet contains. 
+// a single account that the plain keystore wallet contains. func (w *keystoreWallet) Accounts() []accounts.Account { return []accounts.Account{w.account} } @@ -93,12 +93,12 @@ func (w *keystoreWallet) signHash(account accounts.Account, hash []byte) ([]byte return w.keystore.SignHash(account, hash) } -// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { return w.signHash(account, crypto.Keccak256(data)) } -// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { @@ -108,12 +108,14 @@ func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passph return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data)) } +// SignText implements accounts.Wallet, attempting to sign the hash of +// the given text with the given account. func (w *keystoreWallet) SignText(account accounts.Account, text []byte) ([]byte, error) { return w.signHash(account, accounts.TextHash(text)) } // SignTextWithPassphrase implements accounts.Wallet, attempting to sign the -// given hash with the given account using passphrase as extra authentication. +// hash of the given text with the given account using passphrase as extra authentication. func (w *keystoreWallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go index d6ef53327d43..ad176040d68c 100644 --- a/accounts/keystore/watch.go +++ b/accounts/keystore/watch.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris // +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris package keystore diff --git a/accounts/keystore/watch_fallback.go b/accounts/keystore/watch_fallback.go index de0e87f8a5a7..e40eca42fe75 100644 --- a/accounts/keystore/watch_fallback.go +++ b/accounts/keystore/watch_fallback.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris) // +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris // This is the fallback implementation of directory watching. diff --git a/accounts/manager.go b/accounts/manager.go index 731d12ea30a9..1e111d19487b 100644 --- a/accounts/manager.go +++ b/accounts/manager.go @@ -25,6 +25,10 @@ import ( "github.com/ethereum/go-ethereum/event" ) +// managerSubBufferSize determines how many incoming wallet events +// the manager will buffer in its channel. 
+const managerSubBufferSize = 50 + // Config contains the settings of the global account manager. // // TODO(rjl493456442, karalabe, holiman): Get rid of this when account management @@ -33,18 +37,27 @@ type Config struct { InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed } +// newBackendEvent lets the manager know it should +// track the given backend for wallet updates. +type newBackendEvent struct { + backend Backend + processed chan struct{} // Informs event emitter that backend has been integrated +} + // Manager is an overarching account manager that can communicate with various // backends for signing transactions. type Manager struct { - config *Config // Global account manager configurations - backends map[reflect.Type][]Backend // Index of backends currently registered - updaters []event.Subscription // Wallet update subscriptions for all backends - updates chan WalletEvent // Subscription sink for backend wallet changes - wallets []Wallet // Cache of all wallets from all registered backends + config *Config // Global account manager configurations + backends map[reflect.Type][]Backend // Index of backends currently registered + updaters []event.Subscription // Wallet update subscriptions for all backends + updates chan WalletEvent // Subscription sink for backend wallet changes + newBackends chan newBackendEvent // Incoming backends to be tracked by the manager + wallets []Wallet // Cache of all wallets from all registered backends feed event.Feed // Wallet feed notifying of arrivals/departures quit chan chan error + term chan struct{} // Channel is closed upon termination of the update loop lock sync.RWMutex } @@ -57,7 +70,7 @@ func NewManager(config *Config, backends ...Backend) *Manager { wallets = merge(wallets, backend.Wallets()...) } // Subscribe to wallet notifications from all backends - updates := make(chan WalletEvent, 4*len(backends)) + updates := make(chan WalletEvent, managerSubBufferSize) subs := make([]event.Subscription, len(backends)) for i, backend := range backends { @@ -65,12 +78,14 @@ func NewManager(config *Config, backends ...Backend) *Manager { } // Assemble the account manager and return am := &Manager{ - config: config, - backends: make(map[reflect.Type][]Backend), - updaters: subs, - updates: updates, - wallets: wallets, - quit: make(chan chan error), + config: config, + backends: make(map[reflect.Type][]Backend), + updaters: subs, + updates: updates, + newBackends: make(chan newBackendEvent), + wallets: wallets, + quit: make(chan chan error), + term: make(chan struct{}), } for _, backend := range backends { kind := reflect.TypeOf(backend) @@ -93,6 +108,14 @@ func (am *Manager) Config() *Config { return am.config } +// AddBackend starts the tracking of an additional backend for wallet updates. +// cmd/geth assumes once this func returns the backends have been already integrated. +func (am *Manager) AddBackend(backend Backend) { + done := make(chan struct{}) + am.newBackends <- newBackendEvent{backend, done} + <-done +} + // update is the wallet event loop listening for notifications from the backends // and updating the cache of wallets. func (am *Manager) update() { @@ -122,10 +145,22 @@ func (am *Manager) update() { // Notify any listeners of the event am.feed.Send(event) - + case event := <-am.newBackends: + am.lock.Lock() + // Update caches + backend := event.backend + am.wallets = merge(am.wallets, backend.Wallets()...) 
+ am.updaters = append(am.updaters, backend.Subscribe(am.updates)) + kind := reflect.TypeOf(backend) + am.backends[kind] = append(am.backends[kind], backend) + am.lock.Unlock() + close(event.processed) case errc := <-am.quit: // Manager terminating, return errc <- nil + // Signals event emitters the loop is not receiving values + // to prevent them from getting stuck. + close(am.term) return } } @@ -133,6 +168,9 @@ func (am *Manager) update() { // Backends retrieves the backend(s) with the given type from the account manager. func (am *Manager) Backends(kind reflect.Type) []Backend { + am.lock.RLock() + defer am.lock.RUnlock() + return am.backends[kind] } @@ -141,6 +179,11 @@ func (am *Manager) Wallets() []Wallet { am.lock.RLock() defer am.lock.RUnlock() + return am.walletsNoLock() +} + +// walletsNoLock returns all registered wallets. Callers must hold am.lock. +func (am *Manager) walletsNoLock() []Wallet { cpy := make([]Wallet, len(am.wallets)) copy(cpy, am.wallets) return cpy @@ -155,7 +198,7 @@ func (am *Manager) Wallet(url string) (Wallet, error) { if err != nil { return nil, err } - for _, wallet := range am.Wallets() { + for _, wallet := range am.walletsNoLock() { if wallet.URL() == parsed { return wallet, nil } diff --git a/accounts/scwallet/README.md b/accounts/scwallet/README.md index cfca916b3ae8..4313d9c6b2f8 100644 --- a/accounts/scwallet/README.md +++ b/accounts/scwallet/README.md @@ -31,12 +31,16 @@ Write down the URL (`keycard://044def09` in this example). Then ask `geth` to open the wallet: ``` - > personal.openWallet("keycard://044def09") - Please enter the pairing password: + > personal.openWallet("keycard://044def09", "pairing password") ``` - Enter the pairing password that you have received during card initialization. Same with the PIN that you will subsequently be - asked for. + The pairing password has been generated during the card initialization process. + + The process needs to be repeated once more with the PIN: + + ``` + > personal.openWallet("keycard://044def09", "PIN number") + ``` If everything goes well, you should see your new account when typing `personal` on the console: diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go index 5f939c658627..811f8c695e48 100644 --- a/accounts/scwallet/hub.go +++ b/accounts/scwallet/hub.go @@ -220,7 +220,7 @@ func (hub *Hub) refreshWallets() { // Mark the reader as present seen[reader] = struct{}{} - // If we alreay know about this card, skip to the next reader, otherwise clean up + // If we already know about this card, skip to the next reader, otherwise clean up if wallet, ok := hub.wallets[reader]; ok { if err := wallet.ping(); err == nil { continue diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index 9b70c69dccdd..10887a8b43d0 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto/aes" "crypto/cipher" + "crypto/elliptic" "crypto/rand" "crypto/sha256" "crypto/sha512" @@ -27,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" pcsc "github.com/gballet/go-libpcsclite" - "github.com/wsddn/go-ecdh" "golang.org/x/crypto/pbkdf2" "golang.org/x/text/unicode/norm" ) @@ -63,26 +63,19 @@ type SecureChannelSession struct { // NewSecureChannelSession creates a new secure channel for the given card and public key. 
func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSession, error) { // Generate an ECDSA keypair for ourselves - gen := ecdh.NewEllipticECDH(crypto.S256()) - private, public, err := gen.GenerateKey(rand.Reader) + key, err := crypto.GenerateKey() if err != nil { return nil, err } - - cardPublic, ok := gen.Unmarshal(keyData) - if !ok { - return nil, fmt.Errorf("could not unmarshal public key from card") - } - - secret, err := gen.GenerateSharedSecret(private, cardPublic) + cardPublic, err := crypto.UnmarshalPubkey(keyData) if err != nil { - return nil, err + return nil, fmt.Errorf("could not unmarshal public key from card: %v", err) } - + secret, _ := key.Curve.ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes()) return &SecureChannelSession{ card: card, - secret: secret, - publicKey: gen.Marshal(public), + secret: secret.Bytes(), + publicKey: elliptic.Marshal(crypto.S256(), key.PublicKey.X, key.PublicKey.Y), }, nil } diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index dd9266cb3124..2a2b83bd1b15 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -33,7 +33,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -312,15 +312,15 @@ func (w *Wallet) Status() (string, error) { } switch { case !w.session.verified && status.PinRetryCount == 0 && status.PukRetryCount == 0: - return fmt.Sprintf("Bricked, waiting for full wipe"), nil + return "Bricked, waiting for full wipe", nil case !w.session.verified && status.PinRetryCount == 0: return fmt.Sprintf("Blocked, waiting for PUK (%d attempts left) and new PIN", status.PukRetryCount), nil case !w.session.verified: return fmt.Sprintf("Locked, waiting for PIN (%d attempts left)", status.PinRetryCount), nil case !status.Initialized: - return fmt.Sprintf("Empty, waiting for initialization"), nil + return "Empty, waiting for initialization", nil default: - return fmt.Sprintf("Online"), nil + return "Online", nil } } @@ -362,7 +362,7 @@ func (w *Wallet) Open(passphrase string) error { return err } // Pairing succeeded, fall through to PIN checks. This will of course fail, - // but we can't return ErrPINNeeded directly here becase we don't know whether + // but we can't return ErrPINNeeded directly here because we don't know whether // a PIN check or a PIN reset is needed. passphrase = "" } @@ -637,8 +637,8 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun // to discover non zero accounts and automatically add them to list of tracked // accounts. // -// Note, self derivaton will increment the last component of the specified path -// opposed to decending into a child path to allow discovering accounts starting +// Note, self derivation will increment the last component of the specified path +// opposed to descending into a child path to allow discovering accounts starting // from non zero components. // // Some hardware wallets switched derivation paths through their evolution, so @@ -699,7 +699,7 @@ func (w *Wallet) signHash(account accounts.Account, hash []byte) ([]byte, error) // the needed details via SignTxWithPassphrase, or by other means (e.g. unlock // the account in a keystore). 
func (w *Wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - signer := types.NewEIP155Signer(chainID) + signer := types.LatestSignerForChainID(chainID) hash := signer.Hash(tx) sig, err := w.signHash(account, hash[:]) if err != nil { diff --git a/accounts/url.go b/accounts/url.go index a5add1021687..12a84414a057 100644 --- a/accounts/url.go +++ b/accounts/url.go @@ -64,7 +64,7 @@ func (u URL) String() string { func (u URL) TerminalString() string { url := u.String() if len(url) > 32 { - return url[:31] + "…" + return url[:31] + ".." } return url } diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go index 64eae64f689a..3de3b4091cfc 100644 --- a/accounts/usbwallet/ledger.go +++ b/accounts/usbwallet/ledger.go @@ -52,8 +52,10 @@ const ( ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration + ledgerOpSignTypedMessage ledgerOpcode = 0x0c // Signs an Ethereum message following the EIP 712 specification ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet + ledgerP1InitTypedMessageData ledgerParam1 = 0x00 // First chunk of Typed Message data ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address @@ -162,7 +164,7 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return common.Address{}, nil, accounts.ErrWalletClosed } // Ensure the wallet is capable of signing the given transaction - if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 { + if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 { //lint:ignore ST1005 brand name displayed on the console return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2]) } @@ -170,6 +172,24 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return w.ledgerSign(path, tx, chainID) } +// SignTypedMessage implements usbwallet.driver, sending the message to the Ledger and +// waiting for the user to sign or deny the transaction. +// +// Note: this was introduced in the ledger 1.5.0 firmware +func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) { + // If the Ethereum app doesn't run, abort + if w.offline() { + return nil, accounts.ErrWalletClosed + } + // Ensure the wallet is capable of signing the given transaction + if w.version[0] < 1 && w.version[1] < 5 { + //lint:ignore ST1005 brand name displayed on the console + return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2]) + } + // All infos gathered and metadata checks out, request signing + return w.ledgerSignTypedMessage(path, domainHash, messageHash) +} + // ledgerVersion retrieves the current version of the Ethereum wallet app running // on the Ledger wallet. 
// @@ -367,6 +387,68 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction return sender, signed, nil } +// ledgerSignTypedMessage sends the transaction to the Ledger wallet, and waits for the user +// to confirm or deny the transaction. +// +// The signing protocol is defined as follows: +// +// CLA | INS | P1 | P2 | Lc | Le +// ----+-----+----+-----------------------------+-----+--- +// E0 | 0C | 00 | implementation version : 00 | variable | variable +// +// Where the input is: +// +// Description | Length +// -------------------------------------------------+---------- +// Number of BIP 32 derivations to perform (max 10) | 1 byte +// First derivation index (big endian) | 4 bytes +// ... | 4 bytes +// Last derivation index (big endian) | 4 bytes +// domain hash | 32 bytes +// message hash | 32 bytes +// +// +// +// And the output data is: +// +// Description | Length +// ------------+--------- +// signature V | 1 byte +// signature R | 32 bytes +// signature S | 32 bytes +func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) { + // Flatten the derivation path into the Ledger request + path := make([]byte, 1+4*len(derivationPath)) + path[0] = byte(len(derivationPath)) + for i, component := range derivationPath { + binary.BigEndian.PutUint32(path[1+4*i:], component) + } + // Create the 712 message + payload := append(path, domainHash...) + payload = append(payload, messageHash...) + + // Send the request and wait for the response + var ( + op = ledgerP1InitTypedMessageData + reply []byte + err error + ) + + // Send the message over, ensuring it's processed correctly + reply, err = w.ledgerExchange(ledgerOpSignTypedMessage, op, 0, payload) + + if err != nil { + return nil, err + } + + // Extract the Ethereum signature and do a sanity validation + if len(reply) != crypto.SignatureLength { + return nil, errors.New("reply lacks signature") + } + signature := append(reply[1:], reply[0]) + return signature, nil +} + // ledgerExchange performs a data exchange with the Ledger wallet, sending it a // message and retrieving the response. // diff --git a/accounts/usbwallet/trezor.go b/accounts/usbwallet/trezor.go index 1892097baf65..c2182b88d03b 100644 --- a/accounts/usbwallet/trezor.go +++ b/accounts/usbwallet/trezor.go @@ -185,6 +185,10 @@ func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio return w.trezorSign(path, tx, chainID) } +func (w *trezorDriver) SignTypedMessage(path accounts.DerivationPath, domainHash []byte, messageHash []byte) ([]byte, error) { + return nil, accounts.ErrNotSupported +} + // trezorDerive sends a derivation request to the Trezor device and returns the // Ethereum address located on that path. func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) { @@ -255,9 +259,11 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction if chainID == nil { signer = new(types.HomesteadSigner) } else { + // Trezor backend does not support typed transactions yet. 
signer = types.NewEIP155Signer(chainID) signature[64] -= byte(chainID.Uint64()*2 + 35) } + // Inject the final signature into the transaction and sanity check the sender signed, err := tx.WithSignature(signer, signature) if err != nil { diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index ee539d96535d..382f3ddaee21 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -25,7 +25,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -67,6 +67,8 @@ type driver interface { // SignTx sends the transaction to the USB device and waits for the user to confirm // or deny the transaction. SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) + + SignTypedMessage(path accounts.DerivationPath, messageHash []byte, domainHash []byte) ([]byte, error) } // wallet represents the common functionality shared by all USB hardware @@ -368,18 +370,22 @@ func (w *wallet) selfDerive() { w.log.Warn("USB wallet nonce retrieval failed", "err", err) break } - // If the next account is empty, stop self-derivation, but add for the last base path + // We've just self-derived a new account, start tracking it locally + // unless the account was empty. + path := make(accounts.DerivationPath, len(nextPaths[i])) + copy(path[:], nextPaths[i][:]) if balance.Sign() == 0 && nonce == 0 { empty = true + // If it indeed was empty, make a log output for it anyway. In the case + // of legacy-ledger, the first account on the legacy-path will + // be shown to the user, even if we don't actively track it if i < len(nextAddrs)-1 { + w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(,, false) to track", + "path", path, "address", nextAddrs[i]) break } } - // We've just self-derived a new account, start tracking it locally - path := make(accounts.DerivationPath, len(nextPaths[i])) - copy(path[:], nextPaths[i][:]) paths = append(paths, path) - account := accounts.Account{ Address: nextAddrs[i], URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)}, @@ -489,8 +495,8 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun // to discover non zero accounts and automatically add them to list of tracked // accounts. // -// Note, self derivaton will increment the last component of the specified path -// opposed to decending into a child path to allow discovering accounts starting +// Note, self derivation will increment the last component of the specified path +// opposed to descending into a child path to allow discovering accounts starting // from non zero components. // // Some hardware wallets switched derivation paths through their evolution, so @@ -520,7 +526,46 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error) // SignData signs keccak256(data). 
The mimetype parameter describes the type of data being signed func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { - return w.signHash(account, crypto.Keccak256(data)) + + // Unless we are doing 712 signing, simply dispatch to signHash + if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) { + return w.signHash(account, crypto.Keccak256(data)) + } + + // dispatch to 712 signing if the mimetype is TypedData and the format matches + w.stateLock.RLock() // Comms have own mutex, this is for the state fields + defer w.stateLock.RUnlock() + + // If the wallet is closed, abort + if w.device == nil { + return nil, accounts.ErrWalletClosed + } + // Make sure the requested account is contained within + path, ok := w.paths[account.Address] + if !ok { + return nil, accounts.ErrUnknownAccount + } + // All infos gathered and metadata checks out, request signing + <-w.commsLock + defer func() { w.commsLock <- struct{}{} }() + + // Ensure the device isn't screwed with while user confirmation is pending + // TODO(karalabe): remove if hotplug lands on Windows + w.hub.commsLock.Lock() + w.hub.commsPend++ + w.hub.commsLock.Unlock() + + defer func() { + w.hub.commsLock.Lock() + w.hub.commsPend-- + w.hub.commsLock.Unlock() + }() + // Sign the transaction + signature, err := w.driver.SignTypedMessage(path, data[2:34], data[34:66]) + if err != nil { + return nil, err + } + return signature, nil } // SignDataWithPassphrase implements accounts.Wallet, attempting to sign the given diff --git a/appveyor.yml b/appveyor.yml index 8fcacfd5fe7d..65b5f96841e2 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,40 +1,57 @@ -os: Visual Studio 2015 - -# Clone directly into GOPATH. -clone_folder: C:\gopath\src\github.com\ethereum\go-ethereum clone_depth: 5 version: "{branch}.{build}" + +image: + - Ubuntu + - Visual Studio 2019 + environment: - global: - GOPATH: C:\gopath - CC: gcc.exe matrix: - GETH_ARCH: amd64 - MSYS2_ARCH: x86_64 - MSYS2_BITS: 64 - MSYSTEM: MINGW64 - PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH% + GETH_MINGW: 'C:\msys64\mingw64' - GETH_ARCH: 386 - MSYS2_ARCH: i686 - MSYS2_BITS: 32 - MSYSTEM: MINGW32 - PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH% + GETH_MINGW: 'C:\msys64\mingw32' install: - - git submodule update --init - - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.13.6.windows-%GETH_ARCH%.zip - - 7z x go1.13.6.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - git submodule update --init --depth 1 - go version - - gcc --version -build_script: - - go run build\ci.go install +for: + # Linux has its own script without -arch and -cc. + # The linux builder also runs lint. + - matrix: + only: + - image: Ubuntu + build_script: + - go run build/ci.go lint + - go run build/ci.go install -dlgo + test_script: + - go run build/ci.go test -dlgo -coverage -after_build: - - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - - go run build\ci.go nsis -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + # linux/386 is disabled. + - matrix: + exclude: + - image: Ubuntu + GETH_ARCH: 386 -test_script: - - set CGO_ENABLED=1 - - go run build\ci.go test -coverage + # Windows builds for amd64 + 386. + - matrix: + only: + - image: Visual Studio 2019 + environment: + # We use gcc from MSYS2 because it is the most recent compiler version available on + # AppVeyor. 
Note: gcc.exe only works properly if the corresponding bin/ directory is + # contained in PATH. + GETH_CC: '%GETH_MINGW%\bin\gcc.exe' + PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%' + build_script: + - 'echo %GETH_ARCH%' + - 'echo %GETH_CC%' + - '%GETH_CC% --version' + - go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC% + after_build: + # Upload builds. Note that ci.go makes this a no-op PR builds. + - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds + test_script: + - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage diff --git a/build/checksums.txt b/build/checksums.txt index 44530ce4bec3..5df27bbf6173 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,19 +1,37 @@ # This file contains sha256 checksums of optional build dependencies. -aae5be954bdc40bcf8006eb77e8d8a5dde412722bc8effcdaf9772620d06420c go1.13.6.src.tar.gz +3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d go1.17.5.src.tar.gz +2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264 go1.17.5.darwin-amd64.tar.gz +111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0 go1.17.5.darwin-arm64.tar.gz +443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c go1.17.5.freebsd-386.tar.gz +17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3 go1.17.5.freebsd-amd64.tar.gz +4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f go1.17.5.linux-386.tar.gz +bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e go1.17.5.linux-amd64.tar.gz +6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e go1.17.5.linux-arm64.tar.gz +aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de go1.17.5.linux-armv6l.tar.gz +3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70 go1.17.5.linux-ppc64le.tar.gz +8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b go1.17.5.linux-s390x.tar.gz +6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe go1.17.5.windows-386.zip +671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9 go1.17.5.windows-amd64.zip +45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030 go1.17.5.windows-arm64.zip -478994633b0f5121a7a8d4f368078093e21014fdc7fb2c0ceeae63668c13c5b6 golangci-lint-1.22.2-freebsd-amd64.tar.gz -fcf80824c21567eb0871055711bf9bdca91cf9a081122e2a45f1d11fed754600 golangci-lint-1.22.2-darwin-amd64.tar.gz -cda85c72fc128b2ea0ae05baea7b91172c63aea34064829f65285f1dd536f1e0 golangci-lint-1.22.2-windows-386.zip -94f04899f620aadc9c1524e5482e415efdbd993fa2b2918c4fec2798f030ac1c golangci-lint-1.22.2-linux-armv7.tar.gz -0e72a87d71edde00b6e37e84a99841833ad55fee83e20d21130a7a622b2860bb golangci-lint-1.22.2-freebsd-386.tar.gz -86def2f31fe8fd7c05674104ed2a4bef3e44b7132b93c6ad2f52f198b3d01801 golangci-lint-1.22.2-linux-s390x.tar.gz -b0df4546d36be94e8107733ba290b98dd9b7e41a42d3fb202e87fc7e4ee800c3 golangci-lint-1.22.2-freebsd-armv6.tar.gz -3d45958dcf6a8d195086d2fced1a21db42a90815dfd156d180efa62dbdda6724 golangci-lint-1.22.2-darwin-386.tar.gz -7ee29f35c74fab017a454237990c74d984ce3855960f2c10509238992bb781f9 golangci-lint-1.22.2-linux-arm64.tar.gz -52086ac52a502b68578e58e35d3964f127c16d7a90b9ffcb399a004d055ded51 golangci-lint-1.22.2-linux-386.tar.gz -c2e4df1fab2ae53762f9baac6041503eeeaa968ce38ea41779f7cb526751c667 golangci-lint-1.22.2-windows-amd64.zip 
-109d38cdc89f271392f5a138d6782657157f9f496fd4801956efa2d0428e0cbe golangci-lint-1.22.2-linux-amd64.tar.gz -f08aae4868d4828c8f07deb0dcd941a1da695b97e58d15e9f3d1d07dcc7a0c84 golangci-lint-1.22.2-linux-armv6.tar.gz -37af03d9c144d527cb15c46a07e6a22d3f62b5491e34ad6f3bfe6bb0b0b597d4 golangci-lint-1.22.2-linux-ppc64le.tar.gz -251a1081d53944f1d5f86216d752837b23079f90605c9d1cc628da1ffcd2e749 golangci-lint-1.22.2-freebsd-armv7.tar.gz +d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz +e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz +14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz +337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz +6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz +878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz +42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz +6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz +2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz +08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz +c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz +3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz +f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz +1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz +8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz +5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz +e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip +7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip +59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip +65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip diff --git a/build/ci.go b/build/ci.go index 49f0673cbf0b..381f6115b385 100644 --- a/build/ci.go +++ b/build/ci.go @@ -14,6 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +//go:build none // +build none /* @@ -26,13 +27,12 @@ Available commands are: install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables test [ -coverage ] [ packages... 
] -- runs the tests lint -- runs certain pre-selected linters - archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts + archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package nsis -- creates a Windows NSIS installer aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework - xgo [ -alltools ] [ options ] -- cross builds according to options purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore For all commands, -n prevents execution of external programs (dry run mode). @@ -46,19 +46,20 @@ import ( "encoding/base64" "flag" "fmt" - "go/parser" - "go/token" "io/ioutil" "log" "os" "os/exec" + "path" "path/filepath" "regexp" "runtime" + "strconv" "strings" "time" "github.com/cespare/cp" + "github.com/ethereum/go-ethereum/crypto/signify" "github.com/ethereum/go-ethereum/internal/build" "github.com/ethereum/go-ethereum/params" ) @@ -79,7 +80,6 @@ var ( executablePath("geth"), executablePath("puppeth"), executablePath("rlpdump"), - executablePath("wnode"), executablePath("clef"), } @@ -109,10 +109,6 @@ var ( BinaryName: "rlpdump", Description: "Developer utility tool that prints RLP structures.", }, - { - BinaryName: "wnode", - Description: "Ethereum Whisper diagnostic tool", - }, { BinaryName: "clef", Description: "Ethereum account management tool.", @@ -120,7 +116,6 @@ var ( } // A debian package is created for all executables listed here. - debEthereum = debPackage{ Name: "ethereum", Version: params.Version, @@ -134,24 +129,25 @@ var ( // Distros for which packages are created. // Note: vivid is unsupported because there is no golang-1.6 package for it. - // Note: wily is unsupported because it was officially deprecated on Launchpad. - // Note: yakkety is unsupported because it was officially deprecated on Launchpad. - // Note: zesty is unsupported because it was officially deprecated on Launchpad. - // Note: artful is unsupported because it was officially deprecated on Launchpad. - // Note: cosmic is unsupported because it was officially deprecated on Launchpad. 
+ // Note: the following Ubuntu releases have been officially deprecated on Launchpad: + // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy debDistroGoBoots = map[string]string{ - "trusty": "golang-1.11", - "xenial": "golang-go", - "bionic": "golang-go", - "disco": "golang-go", - "eoan": "golang-go", - "focal": "golang-go", + "trusty": "golang-1.11", + "xenial": "golang-go", + "bionic": "golang-go", + "focal": "golang-go", + "hirsute": "golang-go", } debGoBootPaths = map[string]string{ "golang-1.11": "/usr/lib/go-1.11", "golang-go": "/usr/lib/go", } + + // This is the version of go that will be downloaded by + // + // go run ci.go install -dlgo + dlgoVersion = "1.17.5" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -181,6 +177,8 @@ func main() { doLint(os.Args[2:]) case "archive": doArchive(os.Args[2:]) + case "docker": + doDocker(os.Args[2:]) case "debsrc": doDebianSource(os.Args[2:]) case "nsis": @@ -189,8 +187,6 @@ func main() { doAndroidArchive(os.Args[2:]) case "xcode": doXCodeFramework(os.Args[2:]) - case "xgo": - doXgo(os.Args[2:]) case "purge": doPurge(os.Args[2:]) default: @@ -202,144 +198,122 @@ func main() { func doInstall(cmdline []string) { var ( + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") arch = flag.String("arch", "", "Architecture to cross build for") cc = flag.String("cc", "", "C compiler to cross build with") tags = flag.String("tags", "", "Build tags") ) flag.CommandLine.Parse(cmdline) + + // Configure the toolchain. + tc := build.GoToolchain{GOARCH: *arch, CC: *cc} + if *dlgo { + csdb := build.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb, dlgoVersion) + } + + // Configure the build. env := build.Env() + gobuild := tc.Go("build", buildFlags(env)...) - // Check Go version. People regularly open issues about compilation - // failure with outdated Go. This should save them the trouble. - if !strings.Contains(runtime.Version(), "devel") { - // Figure out the minor version number since we can't textually compare (1.10 < 1.9) - var minor int - fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor) - - if minor < 11 { - log.Println("You have Go version", runtime.Version()) - log.Println("go-ethereum requires at least Go version 1.11 and cannot") - log.Println("be compiled with an earlier version. Please upgrade your Go installation.") - os.Exit(1) - } + // arm64 CI builders are memory-constrained and can't handle concurrent builds, + // better disable it. This check isn't the best, it should probably + // check for something in env instead. + if env.CI && runtime.GOARCH == "arm64" { + gobuild.Args = append(gobuild.Args, "-p", "1") } - // Compile packages given as arguments, or everything if there are no arguments. - packages := []string{"./..."} - if flag.NArg() > 0 { - packages = flag.Args() + + // We use -trimpath to avoid leaking local paths into the built executables. + gobuild.Args = append(gobuild.Args, "-trimpath") + + // Show packages during build. + gobuild.Args = append(gobuild.Args, "-v") + + // For rocksdb, we still use tags + if len(*tags) > 0 { + gobuild.Args = append(gobuild.Args, []string{"-tags", *tags}...) } - if *arch == "" || *arch == runtime.GOARCH { - goinstall := goTool("install", buildFlags(env)...) - if len(*tags) > 0 { - goinstall.Args = append(goinstall.Args, []string{"-tags", *tags}...) 
- } - if runtime.GOARCH == "arm64" { - goinstall.Args = append(goinstall.Args, "-p", "1") - } - goinstall.Args = append(goinstall.Args, "-v") - goinstall.Args = append(goinstall.Args, packages...) - build.MustRun(goinstall) - return + // Now we choose what we're even building. + // Default: collect all 'main' packages in cmd/ and build those. + packages := flag.Args() + if len(packages) == 0 { + packages = build.FindMainPackages("./cmd") } - // Seems we are cross compiling, work around forbidden GOBIN - goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...) - if len(*tags) > 0 { - goinstall.Args = append(goinstall.Args, []string{"-tags", *tags}...) - } - goinstall.Args = append(goinstall.Args, "-v") - goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...) - goinstall.Args = append(goinstall.Args, packages...) - build.MustRun(goinstall) - - if cmds, err := ioutil.ReadDir("cmd"); err == nil { - for _, cmd := range cmds { - pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(".", "cmd", cmd.Name()), nil, parser.PackageClauseOnly) - if err != nil { - log.Fatal(err) - } - for name := range pkgs { - if name == "main" { - gobuild := goToolArch(*arch, *cc, "build", buildFlags(env)...) - gobuild.Args = append(gobuild.Args, "-v") - gobuild.Args = append(gobuild.Args, []string{"-o", executablePath(cmd.Name())}...) - gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name())) - build.MustRun(gobuild) - break - } - } - } + // Do the build! + for _, pkg := range packages { + args := make([]string, len(gobuild.Args)) + copy(args, gobuild.Args) + args = append(args, "-o", executablePath(path.Base(pkg))) + args = append(args, pkg) + build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env}) } } +// buildFlags returns the go tool flags for building. func buildFlags(env build.Environment) (flags []string) { var ld []string if env.Commit != "" { ld = append(ld, "-X", "main.gitCommit="+env.Commit) ld = append(ld, "-X", "main.gitDate="+env.Date) } + // Strip DWARF on darwin. This used to be required for certain things, + // and there is no downside to this, so we just keep doing it. if runtime.GOOS == "darwin" { ld = append(ld, "-s") } - + // Enforce the stacksize to 8M, which is the case on most platforms apart from + // alpine Linux. + if runtime.GOOS == "linux" { + ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000") + } if len(ld) > 0 { flags = append(flags, "-ldflags", strings.Join(ld, " ")) } return flags } -func goTool(subcmd string, args ...string) *exec.Cmd { - return goToolArch(runtime.GOARCH, os.Getenv("CC"), subcmd, args...) -} - -func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd { - cmd := build.GoTool(subcmd, args...) - if arch == "" || arch == runtime.GOARCH { - cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) - } else { - cmd.Env = append(cmd.Env, "CGO_ENABLED=1") - cmd.Env = append(cmd.Env, "GOARCH="+arch) - } - if cc != "" { - cmd.Env = append(cmd.Env, "CC="+cc) - } - for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOBIN=") { - continue - } - cmd.Env = append(cmd.Env, e) - } - return cmd -} - // Running The Tests // // "tests" also includes static analysis tools such as vet. 
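For orientation, the rewritten doInstall above and the doTest that follows are driven by the flags documented in the usage text earlier. A few illustrative invocations, assuming they are run from the repository root (the cross compiler name is only an example):

```shell
# Build every main package under cmd/ with a freshly downloaded Go toolchain.
go run build/ci.go install -dlgo

# Cross-build a single command for arm64 with an explicit C compiler.
go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc ./cmd/geth

# Run the tests of one package tree with coverage and the race detector enabled.
go run build/ci.go test -dlgo -coverage -race ./core/...
```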
func doTest(cmdline []string) { - coverage := flag.Bool("coverage", false, "Whether to record code coverage") - verbose := flag.Bool("v", false, "Whether to log verbosely") + var ( + dlgo = flag.Bool("dlgo", false, "Download Go and build with it") + arch = flag.String("arch", "", "Run tests for given architecture") + cc = flag.String("cc", "", "Sets C compiler binary") + coverage = flag.Bool("coverage", false, "Whether to record code coverage") + verbose = flag.Bool("v", false, "Whether to log verbosely") + race = flag.Bool("race", false, "Execute the race detector") + ) flag.CommandLine.Parse(cmdline) - env := build.Env() - packages := []string{"./..."} - if len(flag.CommandLine.Args()) > 0 { - packages = flag.CommandLine.Args() + // Configure the toolchain. + tc := build.GoToolchain{GOARCH: *arch, CC: *cc} + if *dlgo { + csdb := build.MustLoadChecksums("build/checksums.txt") + tc.Root = build.DownloadGo(csdb, dlgoVersion) } + gotest := tc.Go("test") - // Run the actual tests. // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. - gotest := goTool("test", buildFlags(env)...) - gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m") + gotest.Args = append(gotest.Args, "-p", "1") if *coverage { gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") } if *verbose { gotest.Args = append(gotest.Args, "-v") } + if *race { + gotest.Args = append(gotest.Args, "-race") + } + packages := []string{"./..."} + if len(flag.CommandLine.Args()) > 0 { + packages = flag.CommandLine.Args() + } gotest.Args = append(gotest.Args, packages...) build.MustRun(gotest) } @@ -363,16 +337,20 @@ func doLint(cmdline []string) { // downloadLinter downloads and unpacks golangci-lint. func downloadLinter(cachedir string) string { - const version = "1.22.2" + const version = "1.42.0" csdb := build.MustLoadChecksums("build/checksums.txt") - base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH) + arch := runtime.GOARCH + if arch == "arm" { + arch += "v" + os.Getenv("GOARM") + } + base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch) url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base) archivePath := filepath.Join(cachedir, base+".tar.gz") if err := csdb.DownloadFile(url, archivePath); err != nil { log.Fatal(err) } - if err := build.ExtractTarballArchive(archivePath, cachedir); err != nil { + if err := build.ExtractArchive(archivePath, cachedir); err != nil { log.Fatal(err) } return filepath.Join(cachedir, base, "golangci-lint") @@ -381,11 +359,12 @@ func downloadLinter(cachedir string) string { // Release Packaging func doArchive(cmdline []string) { var ( - arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") - atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) - ext string + arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") + atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. 
LINUX_SIGNIFY_KEY)`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + ext string ) flag.CommandLine.Parse(cmdline) switch *atype { @@ -398,8 +377,7 @@ func doArchive(cmdline []string) { } var ( - env = build.Env() - + env = build.Env() basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit)) geth = "geth-" + basegeth + ext alltools = "geth-alltools-" + basegeth + ext @@ -412,7 +390,7 @@ func doArchive(cmdline []string) { log.Fatal(err) } for _, archive := range []string{geth, alltools} { - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -432,7 +410,7 @@ func archiveBasename(arch string, archiveVersion string) string { return platform + "-" + archiveVersion } -func archiveUpload(archive string, blobstore string, signer string) error { +func archiveUpload(archive string, blobstore string, signer string, signifyVar string) error { // If signing was requested, generate the signature files if signer != "" { key := getenvBase64(signer) @@ -440,6 +418,14 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + key := os.Getenv(signifyVar) + untrustedComment := "verify with geth-release.pub" + trustedComment := fmt.Sprintf("%s (%s)", archive, time.Now().UTC().Format(time.RFC1123)) + if err := signify.SignFile(archive, archive+".sig", key, untrustedComment, trustedComment); err != nil { + return err + } + } // If uploading to Azure was requested, push the archive possibly with its signature if blobstore != "" { auth := build.AzureBlobstoreConfig{ @@ -455,6 +441,11 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + if err := build.AzureBlobstoreUpload(archive+".sig", filepath.Base(archive+".sig"), auth); err != nil { + return err + } + } } return nil } @@ -462,33 +453,199 @@ func archiveUpload(archive string, blobstore string, signer string) error { // skips archiving for some build configurations. func maybeSkipArchive(env build.Environment) { if env.IsPullRequest { - log.Printf("skipping because this is a PR build") + log.Printf("skipping archive creation because this is a PR build") os.Exit(0) } if env.IsCronJob { - log.Printf("skipping because this is a cron job") + log.Printf("skipping archive creation because this is a cron job") os.Exit(0) } if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") { - log.Printf("skipping because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag) + log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag) os.Exit(0) } } +// Builds the docker images and optionally uploads them to Docker Hub. 
+func doDocker(cmdline []string) { + var ( + image = flag.Bool("image", false, `Whether to build and push an arch specific docker image`) + manifest = flag.String("manifest", "", `Push a multi-arch docker image for the specified architectures (usually "amd64,arm64")`) + upload = flag.String("upload", "", `Where to upload the docker image (usually "ethereum/client-go")`) + ) + flag.CommandLine.Parse(cmdline) + + // Skip building and pushing docker images for PR builds + env := build.Env() + maybeSkipArchive(env) + + // Retrieve the upload credentials and authenticate + user := getenvBase64("DOCKER_HUB_USERNAME") + pass := getenvBase64("DOCKER_HUB_PASSWORD") + + if len(user) > 0 && len(pass) > 0 { + auther := exec.Command("docker", "login", "-u", string(user), "--password-stdin") + auther.Stdin = bytes.NewReader(pass) + build.MustRun(auther) + } + // Retrieve the version infos to build and push to the following paths: + // - ethereum/client-go:latest - Pushes to the master branch, Geth only + // - ethereum/client-go:stable - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-latest - Pushes to the master branch, Geth & tools + // - ethereum/client-go:alltools-stable - Version tag publish on GitHub, Geth & tools + // - ethereum/client-go:release-. - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-release-. - Version tag publish on GitHub, Geth & tools + // - ethereum/client-go:v.. - Version tag publish on GitHub, Geth only + // - ethereum/client-go:alltools-v.. - Version tag publish on GitHub, Geth & tools + var tags []string + + switch { + case env.Branch == "master": + tags = []string{"latest"} + case strings.HasPrefix(env.Tag, "v1."): + tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version} + } + // If architecture specific image builds are requested, build and push them + if *image { + build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:TAG", *upload), ".") + build.MustRunCommand("docker", "build", "--build-arg", "COMMIT="+env.Commit, "--build-arg", "VERSION="+params.VersionWithMeta, "--build-arg", "BUILDNUM="+env.Buildnum, "--tag", fmt.Sprintf("%s:alltools-TAG", *upload), "-f", "Dockerfile.alltools", ".") + + // Tag and upload the images to Docker Hub + for _, tag := range tags { + gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, runtime.GOARCH) + toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, runtime.GOARCH) + + // If the image already exists (non version tag), check the build + // number to prevent overwriting a newer commit if concurrent builds + // are running. This is still a tiny bit racey if two published are + // done at the same time, but that's extremely unlikely even on the + // master branch. 
+ for _, img := range []string{gethImage, toolImage} { + if exec.Command("docker", "pull", img).Run() != nil { + continue // Generally the only failure is a missing image, which is good + } + buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput() + if err != nil { + log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum)) + } + buildnum = bytes.TrimSpace(buildnum) + + if len(buildnum) > 0 && len(env.Buildnum) > 0 { + oldnum, err := strconv.Atoi(string(buildnum)) + if err != nil { + log.Fatalf("Failed to parse old image build number: %v", err) + } + newnum, err := strconv.Atoi(env.Buildnum) + if err != nil { + log.Fatalf("Failed to parse current build number: %v", err) + } + if oldnum > newnum { + log.Fatalf("Current build number %d not newer than existing %d", newnum, oldnum) + } else { + log.Printf("Updating %s from build %d to %d", img, oldnum, newnum) + } + } + } + build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:TAG", *upload), gethImage) + build.MustRunCommand("docker", "image", "tag", fmt.Sprintf("%s:alltools-TAG", *upload), toolImage) + build.MustRunCommand("docker", "push", gethImage) + build.MustRunCommand("docker", "push", toolImage) + } + } + // If multi-arch image manifest push is requested, assemble it + if len(*manifest) != 0 { + // Since different architectures are pushed by different builders, wait + // until all required images are updated. + var mismatch bool + for i := 0; i < 2; i++ { // 2 attempts, second is race check + mismatch = false // hope there's no mismatch now + + for _, tag := range tags { + for _, arch := range strings.Split(*manifest, ",") { + gethImage := fmt.Sprintf("%s:%s-%s", *upload, tag, arch) + toolImage := fmt.Sprintf("%s:alltools-%s-%s", *upload, tag, arch) + + for _, img := range []string{gethImage, toolImage} { + if out, err := exec.Command("docker", "pull", img).CombinedOutput(); err != nil { + log.Printf("Required image %s unavailable: %v\nOutput: %s", img, err, out) + mismatch = true + break + } + buildnum, err := exec.Command("docker", "inspect", "--format", "{{index .Config.Labels \"buildnum\"}}", img).CombinedOutput() + if err != nil { + log.Fatalf("Failed to inspect container: %v\nOutput: %s", err, string(buildnum)) + } + buildnum = bytes.TrimSpace(buildnum) + + if string(buildnum) != env.Buildnum { + log.Printf("Build number mismatch on %s: want %s, have %s", img, env.Buildnum, buildnum) + mismatch = true + break + } + } + if mismatch { + break + } + } + if mismatch { + break + } + } + if mismatch { + // Build numbers mismatching, retry in a short time to + // avoid concurrent failes in both publisher images. If + // however the retry failed too, it means the concurrent + // builder is still crunching, let that do the publish. + if i == 0 { + time.Sleep(30 * time.Second) + } + continue + } + break + } + if mismatch { + log.Println("Relinquishing publish to other builder") + return + } + // Assemble and push the Geth manifest image + for _, tag := range tags { + gethImage := fmt.Sprintf("%s:%s", *upload, tag) + + var gethSubImages []string + for _, arch := range strings.Split(*manifest, ",") { + gethSubImages = append(gethSubImages, gethImage+"-"+arch) + } + build.MustRunCommand("docker", append([]string{"manifest", "create", gethImage}, gethSubImages...)...) 
+ build.MustRunCommand("docker", "manifest", "push", gethImage) + } + // Assemble and push the alltools manifest image + for _, tag := range tags { + toolImage := fmt.Sprintf("%s:alltools-%s", *upload, tag) + + var toolSubImages []string + for _, arch := range strings.Split(*manifest, ",") { + toolSubImages = append(toolSubImages, toolImage+"-"+arch) + } + build.MustRunCommand("docker", append([]string{"manifest", "create", toolImage}, toolSubImages...)...) + build.MustRunCommand("docker", "manifest", "push", toolImage) + } + } +} + // Debian Packaging func doDebianSource(cmdline []string) { var ( - goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`) - cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`) - signer = flag.String("signer", "", `Signing key name, also used as package author`) - upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) - sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) - workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) - now = time.Now() + cachedir = flag.String("cachedir", "./build/cache", `Filesystem path to cache the downloaded Go bundles at`) + signer = flag.String("signer", "", `Signing key name, also used as package author`) + upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) + sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) + workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) + now = time.Now() ) flag.CommandLine.Parse(cmdline) *workdir = makeWorkdir(*workdir) env := build.Env() + tc := new(build.GoToolchain) maybeSkipArchive(env) // Import the signing key. @@ -499,15 +656,15 @@ func doDebianSource(cmdline []string) { } // Download and verify the Go source package. - gobundle := downloadGoSources(*goversion, *cachedir) + gobundle := downloadGoSources(*cachedir) // Download all the dependencies needed to build the sources and run the ci script - srcdepfetch := goTool("install", "-n", "./...") - srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) + srcdepfetch := tc.Go("mod", "download") + srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) build.MustRun(srcdepfetch) - cidepfetch := goTool("run", "./build/ci.go") - cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath")) + cidepfetch := tc.Go("run", "./build/ci.go") + cidepfetch.Env = append(cidepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) cidepfetch.Run() // Command fails, don't care, we only need the deps to start it // Create Debian packages and upload them. 
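Stepping back to the doDocker flow above: per architecture it performs ordinary docker build/tag/push calls, and the last builder stitches the per-arch images into multi-arch manifests. Roughly the equivalent manual sequence, using the usual ethereum/client-go repository and illustrative tags and commit values:

```shell
# On each architecture-specific builder (here amd64):
docker build --build-arg COMMIT=abc1234 --tag ethereum/client-go:TAG .
docker image tag ethereum/client-go:TAG ethereum/client-go:latest-amd64
docker push ethereum/client-go:latest-amd64

# On the last builder, assemble and publish the multi-arch manifest:
docker manifest create ethereum/client-go:latest \
    ethereum/client-go:latest-amd64 ethereum/client-go:latest-arm64
docker manifest push ethereum/client-go:latest
```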
@@ -518,7 +675,7 @@ func doDebianSource(cmdline []string) { pkgdir := stageDebianSource(*workdir, meta) // Add Go source code - if err := build.ExtractTarballArchive(gobundle, pkgdir); err != nil { + if err := build.ExtractArchive(gobundle, pkgdir); err != nil { log.Fatalf("Failed to extract Go sources: %v", err) } if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil { @@ -535,24 +692,26 @@ func doDebianSource(cmdline []string) { build.MustRun(debuild) var ( - basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString()) - source = filepath.Join(*workdir, basename+".tar.xz") - dsc = filepath.Join(*workdir, basename+".dsc") - changes = filepath.Join(*workdir, basename+"_source.changes") + basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString()) + source = filepath.Join(*workdir, basename+".tar.xz") + dsc = filepath.Join(*workdir, basename+".dsc") + changes = filepath.Join(*workdir, basename+"_source.changes") + buildinfo = filepath.Join(*workdir, basename+"_source.buildinfo") ) if *signer != "" { build.MustRunCommand("debsign", changes) } if *upload != "" { - ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes}) + ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes, buildinfo}) } } } } -func downloadGoSources(version string, cachedir string) string { +// downloadGoSources downloads the Go source tarball. +func downloadGoSources(cachedir string) string { csdb := build.MustLoadChecksums("build/checksums.txt") - file := fmt.Sprintf("go%s.src.tar.gz", version) + file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion) url := "https://dl.google.com/go/" + file dst := filepath.Join(cachedir, file) if err := csdb.DownloadFile(url, dst); err != nil { @@ -756,6 +915,7 @@ func doWindowsInstaller(cmdline []string) { var ( arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging") signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`) + signify = flag.String("signify key", "", `Environment variable holding the signify signing key (e.g. WINDOWS_SIGNIFY_KEY)`) upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) ) @@ -817,7 +977,7 @@ func doWindowsInstaller(cmdline []string) { filepath.Join(*workdir, "geth.nsi"), ) // Sign and publish installer. - if err := archiveUpload(installer, *upload, *signer); err != nil { + if err := archiveUpload(installer, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -826,25 +986,37 @@ func doWindowsInstaller(cmdline []string) { func doAndroidArchive(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) - upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. 
ANDROID_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) + upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() + tc := new(build.GoToolchain) // Sanity check that the SDK and NDK are installed and set if os.Getenv("ANDROID_HOME") == "" { log.Fatal("Please ensure ANDROID_HOME points to your Android SDK") } + + // Build gomobile. + install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest") + install.Env = append(install.Env) + build.MustRun(install) + + // Ensure all dependencies are available. This is required to make + // gomobile bind work because it expects go.sum to contain all checksums. + build.MustRun(tc.Go("mod", "download")) + // Build the Android archive and Maven resources - build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile")) if *local { // If we're building locally, copy bundle to build dir and skip Maven os.Rename("geth.aar", filepath.Join(GOBIN, "geth.aar")) + os.Rename("geth-sources.jar", filepath.Join(GOBIN, "geth-sources.jar")) return } meta := newMavenMetadata(env) @@ -857,7 +1029,7 @@ func doAndroidArchive(cmdline []string) { archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar" os.Rename("geth.aar", archive) - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } // Sign and upload all the artifacts to Maven Central @@ -891,11 +1063,12 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd { "PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"), } for _, e := range os.Environ() { - if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") { + if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") { continue } cmd.Env = append(cmd.Env, e) } + cmd.Env = append(cmd.Env, "GOBIN="+GOBIN) return cmd } @@ -949,45 +1122,52 @@ func newMavenMetadata(env build.Environment) mavenMetadata { func doXCodeFramework(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. IOS_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() + tc := new(build.GoToolchain) + + // Build gomobile. 
+ build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest")) + + // Ensure all dependencies are available. This is required to make + // gomobile bind work because it expects go.sum to contain all checksums. + build.MustRun(tc.Go("mod", "download")) // Build the iOS XCode framework - build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind")) - build.MustRun(gomobileTool("init")) bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile") if *local { // If we're building locally, use the build folder and stop afterwards - bind.Dir, _ = filepath.Abs(GOBIN) + bind.Dir = GOBIN build.MustRun(bind) return } + + // Create the archive. + maybeSkipArchive(env) archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit)) - if err := os.Mkdir(archive, os.ModePerm); err != nil { + if err := os.MkdirAll(archive, 0755); err != nil { log.Fatal(err) } bind.Dir, _ = filepath.Abs(archive) build.MustRun(bind) build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive) - // Skip CocoaPods deploy and Azure upload for PR builds - maybeSkipArchive(env) - // Sign and upload the framework to Azure - if err := archiveUpload(archive+".tar.gz", *upload, *signer); err != nil { + if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil { log.Fatal(err) } // Prepare and upload a PodSpec to CocoaPods if *deploy != "" { meta := newPodMetadata(env, archive) build.Render("build/pod.podspec", "Geth.podspec", 0755, meta) - build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings", "--verbose") + build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings") } } @@ -1036,52 +1216,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata { } } -// Cross compilation - -func doXgo(cmdline []string) { - var ( - alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`) - ) - flag.CommandLine.Parse(cmdline) - env := build.Env() - - // Make sure xgo is available for cross compilation - gogetxgo := goTool("get", "github.com/karalabe/xgo") - build.MustRun(gogetxgo) - - // If all tools building is requested, build everything the builder wants - args := append(buildFlags(env), flag.Args()...) - - if *alltools { - args = append(args, []string{"--dest", GOBIN}...) - for _, res := range allToolsArchiveFiles { - if strings.HasPrefix(res, GOBIN) { - // Binary tool found, cross build it explicitly - args = append(args, "./"+filepath.Join("cmd", filepath.Base(res))) - xgo := xgoTool(args) - build.MustRun(xgo) - args = args[:len(args)-1] - } - } - return - } - // Otherwise xxecute the explicit cross compilation - path := args[len(args)-1] - args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...) - - xgo := xgoTool(args) - build.MustRun(xgo) -} - -func xgoTool(args []string) *exec.Cmd { - cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...) - cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, []string{ - "GOBIN=" + GOBIN, - }...) 
- return cmd -} - // Binary distribution cleanups func doPurge(cmdline []string) { @@ -1105,6 +1239,8 @@ func doPurge(cmdline []string) { if err != nil { log.Fatal(err) } + fmt.Printf("Found %d blobs\n", len(blobs)) + // Iterate over the blobs, collect and sort all unstable builds for i := 0; i < len(blobs); i++ { if !strings.Contains(blobs[i].Name, "unstable") { @@ -1126,6 +1262,7 @@ func doPurge(cmdline []string) { break } } + fmt.Printf("Deleting %d blobs\n", len(blobs)) // Delete all marked as such and return if err := build.AzureBlobstoreDelete(auth, blobs); err != nil { log.Fatal(err) diff --git a/build/nsis.envvarupdate.nsh b/build/nsis.envvarupdate.nsh index 9c3ecbe3372f..95c2f1f6392b 100644 --- a/build/nsis.envvarupdate.nsh +++ b/build/nsis.envvarupdate.nsh @@ -43,7 +43,7 @@ !ifndef Un${StrFuncName}_INCLUDED ${Un${StrFuncName}} !endif - !define un.${StrFuncName} "${Un${StrFuncName}}" + !define un.${StrFuncName} '${Un${StrFuncName}}' !macroend !insertmacro _IncludeStrFunction StrTok diff --git a/build/nsis.install.nsh b/build/nsis.install.nsh index 57ef5a37c6a4..9b73148a4497 100644 --- a/build/nsis.install.nsh +++ b/build/nsis.install.nsh @@ -19,9 +19,9 @@ Section "Geth" GETH_IDX # Create start menu launcher createDirectory "$SMPROGRAMS\${APPNAME}" - createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" "--fast" "--cache=512" - createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" "" "" - createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "" "" + createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" + createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" + createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" # Firewall - remove rules (if exists) SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)" diff --git a/cmd/abidump/main.go b/cmd/abidump/main.go new file mode 100644 index 000000000000..4f942749dfdf --- /dev/null +++ b/cmd/abidump/main.go @@ -0,0 +1,74 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package main + +import ( + "encoding/hex" + "flag" + "fmt" + "os" + "strings" + + "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/ethereum/go-ethereum/signer/fourbyte" +) + +func init() { + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, ` +Parses the given ABI data and tries to interpret it from the fourbyte database.`) + } +} + +func parse(data []byte) { + db, err := fourbyte.New() + if err != nil { + die(err) + } + messages := apitypes.ValidationMessages{} + db.ValidateCallData(nil, data, &messages) + for _, m := range messages.Messages { + fmt.Printf("%v: %v\n", m.Typ, m.Message) + } +} + +// Example +// ./abidump a9059cbb000000000000000000000000ea0e2dc7d65a50e77fc7e84bff3fd2a9e781ff5c0000000000000000000000000000000000000000000000015af1d78b58c40000 +func main() { + flag.Parse() + + switch { + case flag.NArg() == 1: + hexdata := flag.Arg(0) + data, err := hex.DecodeString(strings.TrimPrefix(hexdata, "0x")) + if err != nil { + die(err) + } + parse(data) + default: + fmt.Fprintln(os.Stderr, "Error: one argument needed") + flag.Usage() + os.Exit(2) + } +} + +func die(args ...interface{}) { + fmt.Fprintln(os.Stderr, args...) + os.Exit(1) +} diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index ed4a3b88709a..7b3b35e4e54f 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/compiler" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" ) @@ -95,12 +96,12 @@ var ( } aliasFlag = cli.StringFlag{ Name: "alias", - Usage: "Comma separated aliases for function and event renaming, e.g. foo=bar", + Usage: "Comma separated aliases for function and event renaming, e.g. 
original1=alias1, original2=alias2", } ) func init() { - app = utils.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") + app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app.Flags = []cli.Flag{ abiFlag, binFlag, @@ -117,7 +118,7 @@ func init() { aliasFlag, } app.Action = utils.MigrateFlags(abigen) - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } func abigen(c *cli.Context) error { diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index f6e2a14c3bd2..036b968ef83d 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" @@ -44,7 +43,7 @@ func main() { natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:)") netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)") runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode") - verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)") + verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)") vmodule = flag.String("vmodule", "", "log verbosity pattern") nodeKey *ecdsa.PrivateKey @@ -121,17 +120,17 @@ func main() { printNotice(&nodeKey.PublicKey, *realaddr) + db, _ := enode.OpenDB("") + ln := enode.NewLocalNode(db, nodeKey) + cfg := discover.Config{ + PrivateKey: nodeKey, + NetRestrict: restrictList, + } if *runv5 { - if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil { + if _, err := discover.ListenV5(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } } else { - db, _ := enode.OpenDB("") - ln := enode.NewLocalNode(db, nodeKey) - cfg := discover.Config{ - PrivateKey: nodeKey, - NetRestrict: restrictList, - } if _, err := discover.ListenUDP(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } diff --git a/cmd/checkpoint-admin/README.md b/cmd/checkpoint-admin/README.md new file mode 100644 index 000000000000..43e3785ec2fa --- /dev/null +++ b/cmd/checkpoint-admin/README.md @@ -0,0 +1,103 @@ +## Checkpoint-admin + +Checkpoint-admin is a tool for updating checkpoint oracle status. It provides a series of functions including deploying checkpoint oracle contract, signing for new checkpoints, and updating checkpoints in the checkpoint oracle contract. + +### Checkpoint + +In the LES protocol, there is an important concept called checkpoint. In simple terms, whenever a certain number of blocks are generated on the blockchain, a new checkpoint is generated which contains some important information such as + +* Block hash at checkpoint +* Canonical hash trie root at checkpoint +* Bloom trie root at checkpoint + +*For a more detailed introduction to checkpoint, please see the LES [spec](https://github.com/ethereum/devp2p/blob/master/caps/les.md).* + +Using this information, light clients can skip all historical block headers when synchronizing data and start synchronization from this checkpoint. Therefore, as long as the light client can obtain some latest and correct checkpoints, the amount of data and time for synchronization will be greatly reduced. 
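In go-ethereum these fields are carried by the `params.TrustedCheckpoint` type, and the hash of that structure is what the trusted signers actually sign. A minimal sketch, assuming the current `params` API and purely illustrative values:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Illustrative values only; real checkpoints are produced by an les server.
	cp := params.TrustedCheckpoint{
		SectionIndex: 280,                        // checkpoint section number
		SectionHead:  common.HexToHash("0x1111"), // block hash at the checkpoint
		CHTRoot:      common.HexToHash("0x2222"), // canonical hash trie root
		BloomRoot:    common.HexToHash("0x3333"), // bloom trie root
	}
	fmt.Println("checkpoint hash to be signed:", cp.Hash().Hex())
}
```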
+ +However, from a security perspective, the most critical step in a synchronization algorithm based on checkpoints is to determine whether the checkpoint used by the light client is correct. Otherwise, all blockchain data synchronized based on this checkpoint may be wrong. For this, we provide two different ways to ensure the correctness of the checkpoint used by the light client. + +#### Hardcoded checkpoint + +There are several hardcoded checkpoints in the [source code](https://github.com/ethereum/go-ethereum/blob/master/params/config.go#L38) of the go-ethereum project. These checkpoints are updated by go-ethereum developers when new versions of the software are released. Because light client users trust Geth developers to some extent, hardcoded checkpoints in the code can also be considered correct. + +#### Checkpoint oracle + +Hardcoded checkpoints solve the problem of verifying the correctness of checkpoints (although this is a more centralized solution). The pain point of this solution is that developers can only update checkpoints when a new version of the software is released. In addition, light client users usually do not keep their Geth version up to date, so the hardcoded checkpoints they use are generally stale, and a large amount of blockchain data still has to be downloaded during synchronization. + +The checkpoint oracle is a more flexible solution. In simple terms, it is a smart contract deployed on the blockchain that records several designated trusted signers. Whenever enough trusted signers have issued their signatures for the same checkpoint, the checkpoint can be considered authenticated by the signers, and checkpoints authenticated by trusted signers can be considered correct. + +This way, even without updating the software version, as long as the trusted signers keep the checkpoint in the oracle up to date, the light client can always use the latest, verified checkpoint for data synchronization. + +### Usage + +Checkpoint-admin is a command line tool designed for the checkpoint oracle. Users can easily deploy contracts and update checkpoints through this tool. + +#### Install + +```shell +go get github.com/ethereum/go-ethereum/cmd/checkpoint-admin +``` + +#### Deploy + +Deploys the checkpoint oracle contract. `--signers` indicates the specified trusted signers, and `--threshold` indicates the minimum number of signatures required from trusted signers to update a checkpoint. + +```shell +checkpoint-admin deploy --rpc --clef --signer --signers --threshold 1 +``` + +It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text (checkpoints). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/clef/tutorial). + +#### Sign + +Checkpoint-admin provides two different modes of signing. You can automatically obtain the current stable checkpoint and sign it interactively, or you can use the information provided via command line flags to sign a checkpoint offline. + +**Interactive mode** + +```shell +checkpoint-admin sign --clef --signer --rpc +``` + +*It is worth noting that the connected Geth node can be a full node or a light client. If it is a full node, you must enable the LES protocol, e.g. by adding `--light.serve 50` to the startup command line flags*.
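Purely as an illustration of the commands above, a populated deploy plus an interactive sign could look like the following; every endpoint, address and the threshold are hypothetical:

```shell
# Deploy the oracle with two trusted signers, both of which must sign updates.
checkpoint-admin deploy --rpc http://localhost:8545 --clef http://localhost:8550 \
    --signer 0xbea9183f8f4f03d427f6bcea17388bdff1cab133 \
    --signers 0xbea9183f8f4f03d427f6bcea17388bdff1cab133,0xafb2f771f58513609765698f65d3f2f0224a956f \
    --threshold 2

# Interactively sign the latest stable checkpoint served by the connected node.
checkpoint-admin sign --clef http://localhost:8550 --rpc http://localhost:8545 \
    --signer 0xbea9183f8f4f03d427f6bcea17388bdff1cab133
```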
+ +**Offline mode** + +```shell +checkpoint-admin sign --clef --signer --index --hash --oracle +``` + +*CHECKPOINT_HASH is obtained based on this [calculation method](https://github.com/ethereum/go-ethereum/blob/master/params/config.go#L251).* + +#### Publish + +Collect enough signatures from different trusted signers for the same checkpoint and submit them to oracle to update the "authenticated" checkpoint in the contract. + +```shell +checkpoint-admin publish --clef --rpc --signer --index --signatures +``` + +#### Status query + +Check the latest status of checkpoint oracle. + +```shell +checkpoint-admin status --rpc +``` + +### Enable checkpoint oracle in your private network + +Currently, only the Ethereum mainnet and the default supported test networks (ropsten, rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract. + +* Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml` +* Edit the configuration file and add the following information + +```toml +[Eth.CheckpointOracle] +Address = CHECKPOINT_ORACLE_ADDRESS +Signers = [TRUSTED_SIGNER_1, ..., TRUSTED_SIGNER_N] +Threshold = THRESHOLD +``` + +* Start geth with the modified configuration file + +*In the private network, all fullnodes and light clients need to be started using the same checkpoint oracle settings.* \ No newline at end of file diff --git a/cmd/checkpoint-admin/main.go b/cmd/checkpoint-admin/main.go index b4d8e0db5acf..0fb553214778 100644 --- a/cmd/checkpoint-admin/main.go +++ b/cmd/checkpoint-admin/main.go @@ -22,8 +22,8 @@ import ( "fmt" "os" - "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/fdlimit" + "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" ) @@ -37,7 +37,7 @@ var ( var app *cli.App func init() { - app = utils.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") + app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app.Commands = []cli.Command{ commandStatus, commandDeploy, @@ -48,7 +48,7 @@ func init() { oracleFlag, nodeURLFlag, } - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } // Commonly used command line flags. diff --git a/cmd/clef/README.md b/cmd/clef/README.md index 90afe8c8c8e7..27c62817f925 100644 --- a/cmd/clef/README.md +++ b/cmd/clef/README.md @@ -9,7 +9,7 @@ Clef can run as a daemon on the same machine, off a usb-stick like [USB armory]( Check out the * [CLI tutorial](tutorial.md) for some concrete examples on how Clef works. -* [Setup docs](docs/setup.md) for infos on how to configure Clef on QubesOS or USB Armory. +* [Setup docs](docs/setup.md) for information on how to configure Clef on QubesOS or USB Armory. * [Data types](datatypes.md) for details on the communication messages between Clef and an external UI. 
## Command line flags @@ -33,12 +33,12 @@ GLOBAL OPTIONS: --lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength --nousb Disables monitoring for and managing USB hardware wallets --pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm") - --rpcaddr value HTTP-RPC server listening interface (default: "localhost") - --rpcvhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost") + --http.addr value HTTP-RPC server listening interface (default: "localhost") + --http.vhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost") --ipcdisable Disable the IPC-RPC server --ipcpath Filename for IPC socket/pipe within the datadir (explicit paths escape it) - --rpc Enable the HTTP-RPC server - --rpcport value HTTP-RPC server listening port (default: 8550) + --http Enable the HTTP-RPC server + --http.port value HTTP-RPC server listening port (default: 8550) --signersecret value A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash --4bytedb-custom value File used for writing new 4byte-identifiers submitted via API (default: "./4byte-custom.json") --auditlog value File used to emit audit logs. Set to "" to disable (default: "audit.log") @@ -46,6 +46,7 @@ GLOBAL OPTIONS: --stdio-ui Use STDIN/STDOUT as a channel for an external UI. This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user interface, and can be used when Clef is started by an external process. --stdio-ui-test Mechanism to test interface between Clef and UI. Requires 'stdio-ui'. --advanced If enabled, issues warnings instead of rejections for suspicious requests. Default off + --suppress-bootwarn If set, does not show the warning during boot --help, -h show help --version, -v print the version ``` @@ -112,11 +113,11 @@ Some snags and todos ### External API -Clef listens to HTTP requests on `rpcaddr`:`rpcport` (or to IPC on `ipcpath`), with the same JSON-RPC standard as Geth. The messages are expected to be [JSON-RPC 2.0 standard](https://www.jsonrpc.org/specification). +Clef listens to HTTP requests on `http.addr`:`http.port` (or to IPC on `ipcpath`), with the same JSON-RPC standard as Geth. The messages are expected to be [JSON-RPC 2.0 standard](https://www.jsonrpc.org/specification). -Some of these call can require user interaction. Clients must be aware that responses may be delayed significantly or may never be received if a users decides to ignore the confirmation request. +Some of these calls can require user interaction. Clients must be aware that responses may be delayed significantly or may never be received if a user decides to ignore the confirmation request. -The External API is **untrusted**: it does not accept credentials over this API, nor does it expect that requests have any authority. +The External API is **untrusted**: it does not accept credentials, nor does it expect that requests have any authority. ### Internal UI API @@ -145,13 +146,11 @@ See the [external API changelog](extapi_changelog.md) for information about chan All hex encoded values must be prefixed with `0x`. 
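To make the conventions above concrete (JSON-RPC 2.0 over the `--http.addr`/`--http.port` endpoint, with the caveat that a response may be delayed or never arrive while the user decides), here is a hypothetical Go client that calls `account_list` on the default port 8550. It is a sketch for illustration, not part of this patch, and assumes Clef was started with `--http`.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type rpcRequest struct {
	JSONRPC string        `json:"jsonrpc"`
	ID      int           `json:"id"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
}

type rpcResponse struct {
	Result json.RawMessage `json:"result"`
	Error  *struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
	} `json:"error"`
}

func main() {
	// account_list takes no parameters and returns the signer's addresses.
	body, _ := json.Marshal(rpcRequest{JSONRPC: "2.0", ID: 1, Method: "account_list", Params: []interface{}{}})

	resp, err := http.Post("http://localhost:8550/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err) // Clef not running, or not listening on --http.port 8550
	}
	defer resp.Body.Close()

	var out rpcResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	if out.Error != nil {
		fmt.Println("request denied:", out.Error.Message)
		return
	}
	fmt.Println("accounts:", string(out.Result))
}
```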
-## Methods - ### account_new #### Create new password protected account -The signer will generate a new private key, encrypts it according to [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and stores it in the keystore directory. +The signer will generate a new private key, encrypt it according to [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and store it in the keystore directory. The client is responsible for creating a backup of the keystore. If the keystore is lost there is no method of retrieving lost accounts. #### Arguments @@ -160,7 +159,6 @@ None #### Result - address [string]: account address that is derived from the generated key - - url [string]: location of the keyfile #### Sample call ```json @@ -172,14 +170,11 @@ None } ``` Response -``` +```json { "id": 0, "jsonrpc": "2.0", - "result": { - "address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133", - "url": "keystore:///my/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133" - } + "result": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133" } ``` @@ -195,8 +190,6 @@ None #### Result - array with account records: - account.address [string]: account address that is derived from the generated key - - account.type [string]: type of the - - account.url [string]: location of the account #### Sample call ```json @@ -207,21 +200,13 @@ None } ``` Response -``` +```json { "id": 1, "jsonrpc": "2.0", "result": [ - { - "address": "0xafb2f771f58513609765698f65d3f2f0224a956f", - "type": "account", - "url": "keystore:///tmp/keystore/UTC--2017-08-24T07-26-47.162109726Z--afb2f771f58513609765698f65d3f2f0224a956f" - }, - { - "address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133", - "type": "account", - "url": "keystore:///tmp/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133" - } + "0xafb2f771f58513609765698f65d3f2f0224a956f", + "0xbea9183f8f4f03d427f6bcea17388bdff1cab133" ] } ``` @@ -229,10 +214,10 @@ Response ### account_signTransaction #### Sign transactions - Signs a transactions and responds with the signed transaction in RLP encoded form. + Signs a transaction and responds with the signed transaction in RLP-encoded and JSON forms. #### Arguments - 2. transaction object: + 1. transaction object: - `from` [address]: account to send the transaction from - `to` [address]: receiver account. If omitted or `0x`, will cause contract creation. - `gas` [number]: maximum amount of gas to burn @@ -240,12 +225,13 @@ Response - `value` [number:optional]: amount of Wei to send with the transaction - `data` [data:optional]: input data - `nonce` [number]: account nonce - 3. method signature [string:optional] + 1. method signature [string:optional] - The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected. 
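The method signature is only a decoding hint, but a signer can at least sanity-check it against the calldata. The following hypothetical Go helper does exactly that: it hashes the signature with Keccak-256, takes the first four bytes as the selector, and compares them with the start of the supplied `data`. It is illustrative only and does not reflect how Clef's own 4byte handling is structured.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
)

// selectorMatches reports whether calldata starts with the 4-byte selector
// derived from a human-readable method signature such as
// "transfer(uint256,address)".
func selectorMatches(methodSig string, calldata []byte) bool {
	selector := crypto.Keccak256([]byte(methodSig))[:4]
	return len(calldata) >= 4 && bytes.Equal(calldata[:4], selector)
}

func main() {
	// Calldata and method signature taken from the sample calls in this README.
	data, _ := hexutil.Decode("0x4401a6e40000000000000000000000000000000000000000000000000000000000000012")
	fmt.Println(selectorMatches("safeSend(address)", data))
}
```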
#### Result - - signed transaction in RLP encoded form [data] + - raw [data]: signed transaction in RLP encoded form + - tx [json]: signed transaction in JSON form #### Sample call ```json @@ -270,11 +256,22 @@ Response ```json { - "id": 2, "jsonrpc": "2.0", - "error": { - "code": -32000, - "message": "Request denied" + "id": 2, + "result": { + "raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "tx": { + "nonce": "0x0", + "gasPrice": "0x1234", + "gas": "0x55555", + "to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0", + "value": "0x1234", + "input": "0xabcd", + "v": "0x26", + "r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e", + "s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e" + } } } ``` @@ -326,7 +323,7 @@ Response Bash example: ```bash -#curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/ +> curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/ {"jsonrpc":"2.0","id":67,"result":{"raw":"0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","tx":{"nonce":"0x0","gasPrice":"0x1","gas":"0x333","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0","value":"0x0","input":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012","v":"0x26","r":"0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e","s":"0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","hash":"0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"}}} ``` @@ -373,7 +370,7 @@ Response ### account_signTypedData #### Sign data - Signs a chunk of structured data conformant to [EIP712]([EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md)) and returns the calculated signature. + Signs a chunk of structured data conformant to [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md) and returns the calculated signature. #### Arguments - account [address]: account to sign with @@ -469,7 +466,7 @@ Response ### account_ecRecover -#### Sign data +#### Recover the signing address Derive the address from the account that was used to sign data with content type `text/plain` and the signature. 
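For `text/plain` content the recovery works over the prefixed "Ethereum Signed Message" hash (the same prefix that appears in the `ApproveSignData` example further down). The sketch below is a rough Go equivalent of the call, using go-ethereum's `accounts.TextHash` and `crypto.SigToPub`; it is an illustration, not Clef's implementation, and the sample inputs are taken from the request shown next.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
)

// ecRecover returns the address whose key produced sig (65 bytes, R || S || V)
// over the "\x19Ethereum Signed Message:\n<length>" hash of msg.
func ecRecover(msg, sig []byte) (string, error) {
	if len(sig) != crypto.SignatureLength {
		return "", fmt.Errorf("signature must be %d bytes", crypto.SignatureLength)
	}
	// Clients typically send V as 27/28; go-ethereum's recovery expects 0/1.
	cpy := make([]byte, len(sig))
	copy(cpy, sig)
	if cpy[64] >= 27 {
		cpy[64] -= 27
	}
	pub, err := crypto.SigToPub(accounts.TextHash(msg), cpy)
	if err != nil {
		return "", err
	}
	return crypto.PubkeyToAddress(*pub).Hex(), nil
}

func main() {
	msg := []byte{0xaa, 0xbb, 0xcc, 0xdd} // the sample request's data
	sig, _ := hexutil.Decode("0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c")
	fmt.Println(ecRecover(msg, sig))
}
```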
@@ -487,7 +484,6 @@ Derive the address from the account that was used to sign data with content type "jsonrpc": "2.0", "method": "account_ecRecover", "params": [ - "data/plain", "0xaabbccdd", "0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c" ] @@ -503,117 +499,36 @@ Response } ``` -### account_import +### account_version -#### Import account - Import a private key into the keystore. The imported key is expected to be encrypted according to the web3 keystore - format. - -#### Arguments - - account [object]: key in [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) (retrieved with account_export) +#### Get external API version -#### Result - - imported key [object]: - - key.address [address]: address of the imported key - - key.type [string]: type of the account - - key.url [string]: key URL - -#### Sample call -```json -{ - "id": 6, - "jsonrpc": "2.0", - "method": "account_import", - "params": [ - { - "address": "c7412fc59930fd90099c917a50e5f11d0934b2f5", - "crypto": { - "cipher": "aes-128-ctr", - "cipherparams": { - "iv": "401c39a7c7af0388491c3d3ecb39f532" - }, - "ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204", - "kdf": "scrypt", - "kdfparams": { - "dklen": 32, - "n": 262144, - "p": 1, - "r": 8, - "salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a" - }, - "mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806" - }, - "id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9", - "version": 3 - } - ] -} -``` -Response - -```json -{ - "id": 6, - "jsonrpc": "2.0", - "result": { - "address": "0xc7412fc59930fd90099c917a50e5f11d0934b2f5", - "type": "account", - "url": "keystore:///tmp/keystore/UTC--2017-08-24T11-00-42.032024108Z--c7412fc59930fd90099c917a50e5f11d0934b2f5" - } -} -``` - -### account_export - -#### Export account from keystore - Export a private key from the keystore. The exported private key is encrypted with the original password. When the - key is imported later this password is required. +Get the version of the external API used by Clef. #### Arguments - - account [address]: export private key that is associated with this account + +None #### Result - - exported key, see [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) for - more information + +* external API version [string] #### Sample call ```json { - "id": 5, + "id": 0, "jsonrpc": "2.0", - "method": "account_export", - "params": [ - "0xc7412fc59930fd90099c917a50e5f11d0934b2f5" - ] + "method": "account_version", + "params": [] } ``` -Response +Response ```json { - "id": 5, - "jsonrpc": "2.0", - "result": { - "address": "c7412fc59930fd90099c917a50e5f11d0934b2f5", - "crypto": { - "cipher": "aes-128-ctr", - "cipherparams": { - "iv": "401c39a7c7af0388491c3d3ecb39f532" - }, - "ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204", - "kdf": "scrypt", - "kdfparams": { - "dklen": 32, - "n": 262144, - "p": 1, - "r": 8, - "salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a" - }, - "mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806" - }, - "id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9", - "version": 3 - } + "id": 0, + "jsonrpc": "2.0", + "result": "6.0.0" } ``` @@ -625,7 +540,7 @@ By starting the signer with the switch `--stdio-ui-test`, the signer will invoke denials. 
This can be used during development to ensure that the API is (at least somewhat) correctly implemented. See `pythonsigner`, which can be invoked via `python3 pythonsigner.py test` to perform the 'denial-handshake-test'. -All methods in this API uses object-based parameters, so that there can be no mixups of parameters: each piece of data is accessed by key. +All methods in this API use object-based parameters, so that there can be no mixup of parameters: each piece of data is accessed by key. See the [ui API changelog](intapi_changelog.md) for information about changes to this API. @@ -784,12 +699,10 @@ Invoked when a request for account listing has been made. { "accounts": [ { - "type": "Account", "url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-20T14-44-54.089682944Z--123409812340981234098123409812deadbeef42", "address": "0x123409812340981234098123409812deadbeef42" }, { - "type": "Account", "url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-23T21-59-03.199240693Z--cafebabedeadbeef34098123409812deadbeef42", "address": "0xcafebabedeadbeef34098123409812deadbeef42" } @@ -819,7 +732,13 @@ Invoked when a request for account listing has been made. { "address": "0x123409812340981234098123409812deadbeef42", "raw_data": "0x01020304", - "message": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004", + "messages": [ + { + "name": "message", + "value": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004", + "type": "text/plain" + } + ], "hash": "0x7e3a4e7a9d1744bc5c675c25e1234ca8ed9162bd17f78b9085e48047c15ac310", "meta": { "remote": "signer binary", @@ -829,12 +748,34 @@ Invoked when a request for account listing has been made. } ] } +``` + +### ApproveNewAccount / `ui_approveNewAccount` + +Invoked when a request for creating a new account has been made. +#### Sample call + +```json +{ + "jsonrpc": "2.0", + "id": 4, + "method": "ui_approveNewAccount", + "params": [ + { + "meta": { + "remote": "signer binary", + "local": "main", + "scheme": "in-proc" + } + } + ] +} ``` ### ShowInfo / `ui_showInfo` -The UI should show the info to the user. Does not expect response. +The UI should show the info (a single message) to the user. Does not expect response. #### Sample call @@ -844,9 +785,7 @@ The UI should show the info to the user. Does not expect response. "id": 9, "method": "ui_showInfo", "params": [ - { - "text": "Tests completed" - } + "Tests completed" ] } @@ -854,18 +793,16 @@ The UI should show the info to the user. Does not expect response. ### ShowError / `ui_showError` -The UI should show the info to the user. Does not expect response. +The UI should show the error (a single message) to the user. Does not expect response. ```json { "jsonrpc": "2.0", "id": 2, - "method": "ShowError", + "method": "ui_showError", "params": [ - { - "text": "Testing 'ShowError'" - } + "Something bad happened!" ] } @@ -879,9 +816,36 @@ When implementing rate-limited rules, this callback should be used. TLDR; Use this method to keep track of signed transactions, instead of using the data in `ApproveTx`. 
+Example call: +```json + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "ui_onApprovedTx", + "params": [ + { + "raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "tx": { + "nonce": "0x0", + "gasPrice": "0x1", + "gas": "0x333", + "to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0", + "value": "0x0", + "input": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012", + "v": "0x26", + "r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e", + "s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663", + "hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e" + } + } + ] +} +``` + ### OnSignerStartup / `ui_onSignerStartup` -This method provide the UI with information about what API version the signer uses (both internal and external) aswell as build-info and external API, +This method provides the UI with information about what API version the signer uses (both internal and external) as well as build-info and external API, in k/v-form. Example call: @@ -905,6 +869,27 @@ Example call: ``` +### OnInputRequired / `ui_onInputRequired` + +Invoked when Clef requires user input (e.g. a password). + +Example call: +```json + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "ui_onInputRequired", + "params": [ + { + "title": "Account password", + "prompt": "Please enter the password for account 0x694267f14675d7e1b9494fd8d72fefe1755710fa", + "isPassword": true + } + ] +} +``` + ### Rules for UI apis @@ -934,4 +919,4 @@ There are a couple of implementation for a UI. We'll try to keep this list up to | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)| | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: | | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: | -| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)| +| Clef UI| https://github.com/ethereum/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)| diff --git a/cmd/clef/datatypes.md b/cmd/clef/datatypes.md index 5ebf9adc9712..dd8cda584649 100644 --- a/cmd/clef/datatypes.md +++ b/cmd/clef/datatypes.md @@ -3,7 +3,7 @@ These data types are defined in the channel between clef and the UI ### SignDataRequest -SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to presentthe user with the contents of the `message` +SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. 
Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to present the user with the contents of the `message` Example: ```json diff --git a/cmd/clef/docs/setup.md b/cmd/clef/docs/setup.md index 33d2b0381f06..6cc7a4120d97 100644 --- a/cmd/clef/docs/setup.md +++ b/cmd/clef/docs/setup.md @@ -34,7 +34,7 @@ There are two ways that this can be achieved: integrated via Qubes or integrated #### 1. Qubes Integrated -Qubes provdes a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request +Qubes provides a facility for inter-qubes communication via `qrexec`. A qube can request to make a cross-qube RPC request to another qube. The OS then asks the user if the call is permitted. ![Example](qubes/qrexec-example.png) @@ -48,7 +48,7 @@ This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented. ![Clef via qrexec](qubes/clef_qubes_qrexec.png) -On the `target` qubes, we need to define the rpc service. +On the `target` qubes, we need to define the RPC service. [qubes.Clefsign](qubes/qubes.Clefsign): @@ -94,7 +94,7 @@ with minimal requirements. On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it. -[qubes-client.py](qubes/client/qubes-client.py): +[qubes-client.py](qubes/qubes-client.py): ```python @@ -135,11 +135,11 @@ $ cat newaccnt.json $ cat newaccnt.json| qrexec-client-vm debian-work qubes.Clefsign ``` -This should pop up first a dialog to allow the IPC call: +A dialog should pop up first to allow the IPC call: ![one](qubes/qubes_newaccount-1.png) -Followed by a GTK-dialog to approve the operation +Followed by a GTK-dialog to approve the operation: ![two](qubes/qubes_newaccount-2.png) @@ -169,7 +169,7 @@ However, it comes with a couple of drawbacks: - The `Origin` header must be forwarded - Information about the remote ip must be added as a `X-Forwarded-For`. However, Clef cannot always trust an `XFF` header, since malicious clients may lie about `XFF` in order to fool the http server into believing it comes from another address. -- Even with a policy in place to allow rpc-calls between `caller` and `target`, there will be several popups: +- Even with a policy in place to allow RPC calls between `caller` and `target`, there will be several popups: - One qubes-specific where the user specifies the `target` vm - One clef-specific to approve the transaction @@ -177,7 +177,7 @@ However, it comes with a couple of drawbacks: #### 2. Network integrated The second way to set up Clef on a qubes system is to allow networking, and have Clef listen to a port which is accessible -form other qubes. +from other qubes. ![Clef via http](qubes/clef_qubes_http.png) @@ -186,13 +186,13 @@ form other qubes. ## USBArmory -The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 Mhz ARM processor. It is a pocket-size +The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-size computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network to your computer. Over this new network interface, you can SSH into the device. Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only ever connects to a local network between your computer and the device itself. 
-Needless to say, the while this model should be fairly secure against remote attacks, an attacker with physical access +Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access to the USB Armory would trivially be able to extract the contents of the device filesystem. diff --git a/cmd/clef/extapi_changelog.md b/cmd/clef/extapi_changelog.md index dbc302631bc1..31554f079020 100644 --- a/cmd/clef/extapi_changelog.md +++ b/cmd/clef/extapi_changelog.md @@ -10,6 +10,64 @@ TL;DR: Given a version number MAJOR.MINOR.PATCH, increment the: Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. +### 6.1.0 + +The API-method `account_signGnosisSafeTx` was added. This method takes two parameters, +`[address, safeTx]`. The latter, `safeTx`, can be copy-pasted from the gnosis relay. For example: + +``` +{ + "jsonrpc": "2.0", + "method": "account_signGnosisSafeTx", + "params": ["0xfd1c4226bfD1c436672092F4eCbfC270145b7256", + { + "safe": "0x25a6c4BBd32B2424A9c99aEB0584Ad12045382B3", + "to": "0xB372a646f7F05Cc1785018dBDA7EBc734a2A20E2", + "value": "20000000000000000", + "data": null, + "operation": 0, + "gasToken": "0x0000000000000000000000000000000000000000", + "safeTxGas": 27845, + "baseGas": 0, + "gasPrice": "0", + "refundReceiver": "0x0000000000000000000000000000000000000000", + "nonce": 2, + "executionDate": null, + "submissionDate": "2020-09-15T21:54:49.617634Z", + "modified": "2020-09-15T21:54:49.617634Z", + "blockNumber": null, + "transactionHash": null, + "safeTxHash": "0x2edfbd5bc113ff18c0631595db32eb17182872d88d9bf8ee4d8c2dd5db6d95e2", + "executor": null, + "isExecuted": false, + "isSuccessful": null, + "ethGasPrice": null, + "gasUsed": null, + "fee": null, + "origin": null, + "dataDecoded": null, + "confirmationsRequired": null, + "confirmations": [ + { + "owner": "0xAd2e180019FCa9e55CADe76E4487F126Fd08DA34", + "submissionDate": "2020-09-15T21:54:49.663299Z", + "transactionHash": null, + "confirmationType": "CONFIRMATION", + "signature": "0x95a7250bb645f831c86defc847350e7faff815b2fb586282568e96cc859e39315876db20a2eed5f7a0412906ec5ab57652a6f645ad4833f345bda059b9da2b821c", + "signatureType": "EOA" + } + ], + "signatures": null + } + ], + "id": 67 +} +``` + +Not all fields are required, though. This method is really just a UX helper, which massages the +input to conform to the `EIP-712` [specification](https://docs.gnosis.io/safe/docs/contracts_tx_execution/#transaction-hash) +for the Gnosis Safe, and making the output be directly importable to by a relay service. + ### 6.0.0 diff --git a/cmd/clef/intapi_changelog.md b/cmd/clef/intapi_changelog.md index 38424f06b9fd..eaeb2e68620b 100644 --- a/cmd/clef/intapi_changelog.md +++ b/cmd/clef/intapi_changelog.md @@ -10,6 +10,17 @@ TL;DR: Given a version number MAJOR.MINOR.PATCH, increment the: Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format. +### 7.0.1 + +Added `clef_New` to the internal API callable from a UI. + +> `New` creates a new password protected Account. The private key is protected with +> the given password. Users are responsible to backup the private key that is stored +> in the keystore location that was specified when this API was created. 
+> This method is the same as New on the external API, the difference being that +> this implementation does not ask for confirmation, since it's initiated by +> the user + ### 7.0.0 - The `message` field was renamed to `messages` in all data signing request methods to better reflect that it's a list, not a value. @@ -150,7 +161,7 @@ UserInputResponse struct { #### 1.2.0 * Add `OnStartup` method, to provide the UI with information about what API version -the signer uses (both internal and external) aswell as build-info and external api. +the signer uses (both internal and external) as well as build-info and external api. Example call: ```json diff --git a/cmd/clef/main.go b/cmd/clef/main.go index b2c8812ab269..3aaf898db2e6 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -29,9 +29,9 @@ import ( "math/big" "os" "os/signal" - "os/user" "path/filepath" "runtime" + "sort" "strings" "time" @@ -40,20 +40,21 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/ethereum/go-ethereum/signer/fourbyte" "github.com/ethereum/go-ethereum/signer/rules" "github.com/ethereum/go-ethereum/signer/storage" - colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" ) @@ -82,6 +83,10 @@ var ( Name: "advanced", Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off", } + acceptFlag = cli.BoolFlag{ + Name: "suppress-bootwarn", + Usage: "If set, does not show the warning during boot", + } keystoreFlag = cli.StringFlag{ Name: "keystore", Value: filepath.Join(node.DefaultDataDir(), "keystore"), @@ -98,7 +103,7 @@ var ( Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)", } rpcPortFlag = cli.IntFlag{ - Name: "rpcport", + Name: "http.port", Usage: "HTTP-RPC server listening port", Value: node.DefaultHTTPPort + 5, } @@ -187,6 +192,22 @@ The setpw command stores a password for a given address (keyfile). Description: ` The delpw command removes a password for a given address (keyfile). `} + newAccountCommand = cli.Command{ + Action: utils.MigrateFlags(newAccount), + Name: "newaccount", + Usage: "Create a new account", + ArgsUsage: "", + Flags: []cli.Flag{ + logLevelFlag, + keystoreFlag, + utils.LightKDFFlag, + acceptFlag, + }, + Description: ` +The newaccount command creates a new keystore-backed account. It is a convenience-method +which can be used in lieu of an external UI.`, + } + gendocCommand = cli.Command{ Action: GenDoc, Name: "gendoc", @@ -196,6 +217,36 @@ The gendoc generates example structures of the json-rpc communication types. `} ) +// AppHelpFlagGroups is the application flags, grouped by functionality. 
+var AppHelpFlagGroups = []flags.FlagGroup{ + { + Name: "FLAGS", + Flags: []cli.Flag{ + logLevelFlag, + keystoreFlag, + configdirFlag, + chainIdFlag, + utils.LightKDFFlag, + utils.NoUSBFlag, + utils.SmartCardDaemonPathFlag, + utils.HTTPListenAddrFlag, + utils.HTTPVirtualHostsFlag, + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.HTTPEnabledFlag, + rpcPortFlag, + signerSecretFlag, + customDBFlag, + auditLogFlag, + ruleFlag, + stdiouiFlag, + testFlag, + advancedMode, + acceptFlag, + }, + }, +} + func init() { app.Name = "Clef" app.Usage = "Manage Ethereum account operations" @@ -207,11 +258,11 @@ func init() { utils.LightKDFFlag, utils.NoUSBFlag, utils.SmartCardDaemonPathFlag, - utils.RPCListenAddrFlag, - utils.RPCVirtualHostsFlag, + utils.HTTPListenAddrFlag, + utils.HTTPVirtualHostsFlag, utils.IPCDisabledFlag, utils.IPCPathFlag, - utils.RPCEnabledFlag, + utils.HTTPEnabledFlag, rpcPortFlag, signerSecretFlag, customDBFlag, @@ -220,10 +271,50 @@ func init() { stdiouiFlag, testFlag, advancedMode, + acceptFlag, } app.Action = signer - app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand} - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + app.Commands = []cli.Command{initCommand, + attestCommand, + setCredentialCommand, + delCredentialCommand, + newAccountCommand, + gendocCommand} + cli.CommandHelpTemplate = flags.CommandHelpTemplate + // Override the default app help template + cli.AppHelpTemplate = flags.ClefAppHelpTemplate + + // Override the default app help printer, but only for the global app help + originalHelpPrinter := cli.HelpPrinter + cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) { + if tmpl == flags.ClefAppHelpTemplate { + // Render out custom usage screen + originalHelpPrinter(w, tmpl, flags.HelpData{App: data, FlagGroups: AppHelpFlagGroups}) + } else if tmpl == flags.CommandHelpTemplate { + // Iterate over all command specific flags and categorize them + categorized := make(map[string][]cli.Flag) + for _, flag := range data.(cli.Command).Flags { + if _, ok := categorized[flag.String()]; !ok { + categorized[flags.FlagCategory(flag, AppHelpFlagGroups)] = append(categorized[flags.FlagCategory(flag, AppHelpFlagGroups)], flag) + } + } + + // sort to get a stable ordering + sorted := make([]flags.FlagGroup, 0, len(categorized)) + for cat, flgs := range categorized { + sorted = append(sorted, flags.FlagGroup{Name: cat, Flags: flgs}) + } + sort.Sort(flags.ByCategory(sorted)) + + // add sorted array to data and render with default printer + originalHelpPrinter(w, tmpl, map[string]interface{}{ + "cmd": data, + "categorizedFlags": sorted, + }) + } else { + originalHelpPrinter(w, tmpl, data) + } + } } func main() { @@ -263,7 +354,7 @@ func initializeSecrets(c *cli.Context) error { text := "The master seed of clef will be locked with a password.\nPlease specify a password. Do not forget this password!" 
var password string for { - password = getPassPhrase(text, true) + password = utils.GetPassPhrase(text, true) if err := core.ValidatePasswordFormat(password); err != nil { fmt.Printf("invalid password: %v\n", err) } else { @@ -336,7 +427,7 @@ func setCredential(ctx *cli.Context) error { utils.Fatalf("Invalid address specified: %s", addr) } address := common.HexToAddress(addr) - password := getPassPhrase("Please enter a password to store for this address:", true) + password := utils.GetPassPhrase("Please enter a password to store for this address:", true) fmt.Println() stretchedKey, err := readMasterKey(ctx, nil) @@ -382,14 +473,41 @@ func removeCredential(ctx *cli.Context) error { return nil } +func newAccount(c *cli.Context) error { + if err := initialize(c); err != nil { + return err + } + // The newaccount is meant for users using the CLI, since 'real' external + // UIs can use the UI-api instead. So we'll just use the native CLI UI here. + var ( + ui = core.NewCommandlineUI() + pwStorage storage.Storage = &storage.NoStorage{} + ksLoc = c.GlobalString(keystoreFlag.Name) + lightKdf = c.GlobalBool(utils.LightKDFFlag.Name) + ) + log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf) + am := core.StartClefAccountManager(ksLoc, true, lightKdf, "") + // This gives is us access to the external API + apiImpl := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage) + // This gives us access to the internal API + internalApi := core.NewUIServerAPI(apiImpl) + addr, err := internalApi.New(context.Background()) + if err == nil { + fmt.Printf("Generated account %v\n", addr.String()) + } + return err +} + func initialize(c *cli.Context) error { // Set up the logger to print everything logOutput := os.Stdout if c.GlobalBool(stdiouiFlag.Name) { logOutput = os.Stderr // If using the stdioui, we can't do the 'confirm'-flow - fmt.Fprint(logOutput, legalWarning) - } else { + if !c.GlobalBool(acceptFlag.Name) { + fmt.Fprint(logOutput, legalWarning) + } + } else if !c.GlobalBool(acceptFlag.Name) { if !confirm(legalWarning) { return fmt.Errorf("aborted by user") } @@ -457,7 +575,6 @@ func signer(c *cli.Context) error { api core.ExternalAPI pwStorage storage.Storage = &storage.NoStorage{} ) - configDir := c.GlobalString(configdirFlag.Name) if stretchedKey, err := readMasterKey(c, ui); err != nil { log.Warn("Failed to open master, rules disabled", "err", err) @@ -535,22 +652,33 @@ func signer(c *cli.Context) error { Service: api, Version: "1.0"}, } - if c.GlobalBool(utils.RPCEnabledFlag.Name) { - vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name)) - cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name)) + if c.GlobalBool(utils.HTTPEnabledFlag.Name) { + vhosts := utils.SplitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name)) + cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) + + srv := rpc.NewServer() + err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false) + if err != nil { + utils.Fatalf("Could not register API: %w", err) + } + handler := node.NewHTTPHandlerStack(srv, cors, vhosts) + + // set port + port := c.Int(rpcPortFlag.Name) // start http server - httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name)) - listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts) + httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), port) + httpServer, addr, err := 
node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler) if err != nil { utils.Fatalf("Could not start RPC api: %v", err) } - extapiURL = fmt.Sprintf("http://%s", httpEndpoint) + extapiURL = fmt.Sprintf("http://%v/", addr) log.Info("HTTP endpoint opened", "url", extapiURL) defer func() { - listener.Close() - log.Info("HTTP endpoint closed", "url", httpEndpoint) + // Don't bother imposing a timeout here. + httpServer.Shutdown(context.Background()) + log.Info("HTTP endpoint closed", "url", extapiURL) }() } if !c.GlobalBool(utils.IPCDisabledFlag.Name) { @@ -589,21 +717,11 @@ func signer(c *cli.Context) error { return nil } -// splitAndTrim splits input separated by a comma -// and trims excessive white space from the substrings. -func splitAndTrim(input string) []string { - result := strings.Split(input, ",") - for i, r := range result { - result[i] = strings.TrimSpace(r) - } - return result -} - // DefaultConfigDir is the default config directory to use for the vaults and other // persistence requirements. func DefaultConfigDir() string { // Try to place the data folder in the user's home dir - home := homeDir() + home := utils.HomeDir() if home != "" { if runtime.GOOS == "darwin" { return filepath.Join(home, "Library", "Signer") @@ -611,26 +729,15 @@ func DefaultConfigDir() string { appdata := os.Getenv("APPDATA") if appdata != "" { return filepath.Join(appdata, "Signer") - } else { - return filepath.Join(home, "AppData", "Roaming", "Signer") } - } else { - return filepath.Join(home, ".clef") + return filepath.Join(home, "AppData", "Roaming", "Signer") } + return filepath.Join(home, ".clef") } // As we cannot guess a stable location, return empty and handle later return "" } -func homeDir() string { - if home := os.Getenv("HOME"); home != "" { - return home - } - if usr, err := user.Current(); err == nil { - return usr.HomeDir - } - return "" -} func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { var ( file string @@ -660,7 +767,7 @@ func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { } password = resp.Text } else { - password = getPassPhrase("Decrypt master seed of clef", false) + password = utils.GetPassPhrase("Decrypt master seed of clef", false) } masterSeed, err := decryptSeed(cipherKey, password) if err != nil { @@ -680,14 +787,16 @@ func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { // checkFile is a convenience function to check if a file // * exists -// * is mode 0400 +// * is mode 0400 (unix only) func checkFile(filename string) error { info, err := os.Stat(filename) if err != nil { return fmt.Errorf("failed stat on %s: %v", filename, err) } // Check the unix permission bits - if info.Mode().Perm()&0377 != 0 { + // However, on windows, we cannot use the unix perm-bits, see + // https://github.com/ethereum/go-ethereum/issues/20123 + if runtime.GOOS != "windows" && info.Mode().Perm()&0377 != 0 { return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String()) } return nil @@ -789,7 +898,7 @@ func testExternalUI(api *core.SignerAPI) { addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899") data := 
`{"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"test","type":"uint8"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":"1","verifyingContract":"0xCCCcccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","test":"3","wallet":"0xcD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB","test":"2"},"contents":"Hello, Bob!"}}` //_, err := api.SignData(ctx, accounts.MimetypeTypedData, *addr, hexutil.Encode([]byte(data))) - var typedData core.TypedData + var typedData apitypes.TypedData json.Unmarshal([]byte(data), &typedData) _, err := api.SignTypedData(ctx, *addr, typedData) expectApprove("sign 712 typed data", err) @@ -814,13 +923,13 @@ func testExternalUI(api *core.SignerAPI) { time.Sleep(delay) data := hexutil.Bytes([]byte{}) to := common.NewMixedcaseAddress(a) - tx := core.SendTxArgs{ + tx := apitypes.SendTxArgs{ Data: &data, Nonce: 0x1, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: &to, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, } @@ -855,27 +964,6 @@ func testExternalUI(api *core.SignerAPI) { } -// getPassPhrase retrieves the password associated with clef, either fetched -// from a list of preloaded passphrases, or requested interactively from the user. -// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one. 
-func getPassPhrase(prompt string, confirmation bool) string { - fmt.Println(prompt) - password, err := console.Stdin.PromptPassword("Password: ") - if err != nil { - utils.Fatalf("Failed to read password: %v", err) - } - if confirmation { - confirm, err := console.Stdin.PromptPassword("Repeat password: ") - if err != nil { - utils.Fatalf("Failed to read password confirmation: %v", err) - } - if password != confirm { - utils.Fatalf("Passwords do not match") - } - } - return password -} - type encryptedSeedStorage struct { Description string `json:"description"` Version int `json:"version"` @@ -926,7 +1014,7 @@ func GenDoc(ctx *cli.Context) { if data, err := json.MarshalIndent(v, "", " "); err == nil { output = append(output, fmt.Sprintf("### %s\n\n%s\n\nExample:\n```json\n%s\n```", name, desc, data)) } else { - log.Error("Error generating output", err) + log.Error("Error generating output", "err", err) } } ) @@ -937,7 +1025,7 @@ func GenDoc(ctx *cli.Context) { "of the work in canonicalizing and making sense of the data, and it's up to the UI to present" + "the user with the contents of the `message`" sighash, msg := accounts.TextAndHash([]byte("hello world")) - messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}} + messages := []*apitypes.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}} add("SignDataRequest", desc, &core.SignDataRequest{ Address: common.NewMixedcaseAddress(a), @@ -967,17 +1055,17 @@ func GenDoc(ctx *cli.Context) { data := hexutil.Bytes([]byte{0x01, 0x02, 0x03, 0x04}) add("SignTxRequest", desc, &core.SignTxRequest{ Meta: meta, - Callinfo: []core.ValidationInfo{ + Callinfo: []apitypes.ValidationInfo{ {Typ: "Warning", Message: "Something looks odd, show this message as a warning"}, {Typ: "Info", Message: "User should see this as well"}, }, - Transaction: core.SendTxArgs{ + Transaction: apitypes.SendTxArgs{ Data: &data, Nonce: 0x1, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: nil, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, }}) @@ -987,13 +1075,13 @@ func GenDoc(ctx *cli.Context) { add("SignTxResponse - approve", "Response to request to sign a transaction. 
This response needs to contain the `transaction`"+ ", because the UI is free to make modifications to the transaction.", &core.SignTxResponse{Approved: true, - Transaction: core.SendTxArgs{ + Transaction: apitypes.SendTxArgs{ Data: &data, Nonce: 0x4, Value: hexutil.Big(*big.NewInt(6)), From: common.NewMixedcaseAddress(a), To: nil, - GasPrice: hexutil.Big(*big.NewInt(5)), + GasPrice: (*hexutil.Big)(big.NewInt(5)), Gas: 1000, Input: nil, }}) @@ -1018,7 +1106,7 @@ func GenDoc(ctx *cli.Context) { rlpdata := common.FromHex("0xf85d640101948a8eafb1cf62bfbeb1741769dae1a9dd47996192018026a0716bd90515acb1e68e5ac5867aa11a1e65399c3349d479f5fb698554ebc6f293a04e8a4ebfff434e971e0ef12c5bf3a881b06fd04fc3f8b8a7291fb67a26a1d4ed") var tx types.Transaction - rlp.DecodeBytes(rlpdata, &tx) + tx.UnmarshalBinary(rlpdata) add("OnApproved - SignTransactionResult", desc, ðapi.SignTransactionResult{Raw: rlpdata, Tx: &tx}) } diff --git a/cmd/clef/sign_flow.png b/cmd/clef/sign_flow.png index 93ef81a32e8e..e7010ab43f3a 100644 Binary files a/cmd/clef/sign_flow.png and b/cmd/clef/sign_flow.png differ diff --git a/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json b/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json new file mode 100644 index 000000000000..c5a133686085 --- /dev/null +++ b/cmd/clef/testdata/sign_1559_missing_field_exp_fail.json @@ -0,0 +1,16 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json b/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json new file mode 100644 index 000000000000..df69231d7efe --- /dev/null +++ b/cmd/clef/testdata/sign_1559_missing_maxfeepergas_exp_fail.json @@ -0,0 +1,16 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxPriorityFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_1559_tx.json b/cmd/clef/testdata/sign_1559_tx.json new file mode 100644 index 000000000000..29355f6cf5bd --- /dev/null +++ b/cmd/clef/testdata/sign_1559_tx.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to": "0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "maxPriorityFeePerGas": "0x123", + "maxFeePerGas": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_bad_checksum_exp_fail.json b/cmd/clef/testdata/sign_bad_checksum_exp_fail.json new file mode 100644 index 000000000000..21ba7b3fc090 --- /dev/null +++ b/cmd/clef/testdata/sign_bad_checksum_exp_fail.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from":"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "to":"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "gas": "0x333", + "gasPrice": "0x123", + "nonce": 
"0x0", + "value": "0x10", + "data": + "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/testdata/sign_normal_exp_ok.json b/cmd/clef/testdata/sign_normal_exp_ok.json new file mode 100644 index 000000000000..7f3a9202a07d --- /dev/null +++ b/cmd/clef/testdata/sign_normal_exp_ok.json @@ -0,0 +1,17 @@ +{ + "jsonrpc": "2.0", + "method": "account_signTransaction", + "params": [ + { + "from":"0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "to":"0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192", + "gas": "0x333", + "gasPrice": "0x123", + "nonce": "0x0", + "value": "0x10", + "data": + "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012" + } + ], + "id": 67 +} diff --git a/cmd/clef/tutorial.md b/cmd/clef/tutorial.md index 4453472e2d1c..3ea662b5d4c7 100644 --- a/cmd/clef/tutorial.md +++ b/cmd/clef/tutorial.md @@ -1,6 +1,6 @@ ## Initializing Clef -First thing's first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password: +First things first, Clef needs to store some data itself. Since that data might be sensitive (passwords, signing rules, accounts), Clef's entire storage is encrypted. To support encrypting data, the first step is to initialize Clef with a random master seed, itself too encrypted with your chosen password: ```text $ clef init diff --git a/cmd/dbbench/main.go b/cmd/dbbench/main.go index 7226da3d3016..a6efd001b14e 100644 --- a/cmd/dbbench/main.go +++ b/cmd/dbbench/main.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "math/big" "math/rand" "os" "os/exec" @@ -19,22 +18,19 @@ import ( "sync/atomic" "time" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" ) -var ( - big1 = big.NewInt(1) -) - func pack4(s []byte) []byte { - if len(s) % 4 == 0 { + if len(s)%4 == 0 { return s } else { - l := (len(s) + 3) / 4 * 4; + l := (len(s) + 3) / 4 * 4 b := make([]byte, l) copy(b, s) for i := len(s); i < l; i++ { - b[i] = ' '; + b[i] = ' ' } return b } @@ -91,7 +87,7 @@ func read(db ethdb.Database, prefix string, start, end, numThreads int, verbose return err } if verbose { - fmt.Printf("%s(<-%s): %d %s\n", hex.EncodeToString(k[:]), string(ks), len(v), hex.EncodeToString(v)) + fmt.Printf("%s(<-%s): %d %s\n", hex.EncodeToString(k[:]), ks, len(v), hex.EncodeToString(v)) } return nil } @@ -135,7 +131,7 @@ func rread(db ethdb.Database, prefix string, count, numThreads int, verbose bool return err } if verbose { - fmt.Printf("%s(<-%s): %d %s\n", hex.EncodeToString(k[:]), string(ks), len(v), hex.EncodeToString(v)) + fmt.Printf("%s(<-%s): %d %s\n", hex.EncodeToString(k[:]), ks, len(v), hex.EncodeToString(v)) } return nil } @@ -285,7 +281,7 @@ func post(dbPath, device, header string, ot time.Time, count int, ss []uint64) { } fmt.Printf("%s,%d,%d,%.3f,%d", header, ot.Unix(), dur/1000, - float64(count) * 1000.0 / float64(dur), du) + float64(count)*1000.0/float64(dur), du) for i := 0; i < len(se); i++ { v := se[i] // 1: disk read bytes @@ -396,13 +392,13 @@ func main() { ethdb.EnableStats(true) switch which { case "leveldb": - db, err = ethdb.NewLDBDatabase(dbPath, 1024, 1024) + db, err = rawdb.NewLevelDBDatabase(dbPath, 1024, 1024, "", false) if err != nil { fmt.Printf("Cannot open DB %s: %v\n", dbPath, err) return } case "rocksdb": - db, err 
= ethdb.NewRDBDatabase(dbPath, 1024, 1024) + db, err = rawdb.NewRocksDBDatabase(dbPath, 1024, 1024, "", false) if err != nil { fmt.Printf("Cannot open DB %s: %v\n", dbPath, err) return diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md new file mode 100644 index 000000000000..7f816b602e3d --- /dev/null +++ b/cmd/devp2p/README.md @@ -0,0 +1,140 @@ +# The devp2p command + +The devp2p command line tool is a utility for low-level peer-to-peer debugging and +protocol development purposes. It can do many things. + +### ENR Decoding + +Use `devp2p enrdump ` to verify and display an Ethereum Node Record. + +### Node Key Management + +The `devp2p key ...` command family deals with node key files. + +Run `devp2p key generate mynode.key` to create a new node key in the `mynode.key` file. + +Run `devp2p key to-enode mynode.key -ip 127.0.0.1 -tcp 30303` to create an enode:// URL +corresponding to the given node key and address information. + +### Maintaining DNS Discovery Node Lists + +The devp2p command can create and publish DNS discovery node lists. + +Run `devp2p dns sign ` to update the signature of a DNS discovery tree. + +Run `devp2p dns sync ` to download a complete DNS discovery tree. + +Run `devp2p dns to-cloudflare ` to publish a tree to CloudFlare DNS. + +Run `devp2p dns to-route53 ` to publish a tree to Amazon Route53. + +You can find more information about these commands in the [DNS Discovery Setup Guide][dns-tutorial]. + +### Node Set Utilities + +There are several commands for working with JSON node set files. These files are generated +by the discovery crawlers and DNS client commands. Node sets also used as the input of the +DNS deployer commands. + +Run `devp2p nodeset info ` to display statistics of a node set. + +Run `devp2p nodeset filter ` to write a new, filtered node +set to standard output. The following filters are supported: + +- `-limit ` limits the output set to N entries, taking the top N nodes by score +- `-ip ` filters nodes by IP subnet +- `-min-age ` filters nodes by 'first seen' time +- `-eth-network ` filters nodes by "eth" ENR entry +- `-les-server` filters nodes by LES server support +- `-snap` filters nodes by snap protocol support + +For example, given a node set in `nodes.json`, you could create a filtered set containing +up to 20 eth mainnet nodes which also support snap sync using this command: + + devp2p nodeset filter nodes.json -eth-network mainnet -snap -limit 20 + +### Discovery v4 Utilities + +The `devp2p discv4 ...` command family deals with the [Node Discovery v4][discv4] +protocol. + +Run `devp2p discv4 ping ` to ping a node. + +Run `devp2p discv4 resolve ` to find the most recent node record of a node in +the DHT. + +Run `devp2p discv4 crawl ` to create or update a JSON node set. + +### Discovery v5 Utilities + +The `devp2p discv5 ...` command family deals with the [Node Discovery v5][discv5] +protocol. This protocol is currently under active development. + +Run `devp2p discv5 ping ` to ping a node. + +Run `devp2p discv5 resolve ` to find the most recent node record of a node in +the discv5 DHT. + +Run `devp2p discv5 listen` to run a Discovery v5 node. + +Run `devp2p discv5 crawl ` to create or update a JSON node set containing +discv5 nodes. + +### Discovery Test Suites + +The devp2p command also contains interactive test suites for Discovery v4 and Discovery +v5. + +To run these tests against your implementation, you need to set up a networking +environment where two separate UDP listening addresses are available on the same machine. 
+The two listening addresses must also be routed such that they are able to reach the node +you want to test. + +For example, if you want to run the test on your local host, and the node under test is +also on the local host, you need to assign two IP addresses (or a larger range) to your +loopback interface. On macOS, this can be done by executing the following command: + + sudo ifconfig lo0 add 127.0.0.2 + +You can now run either test suite as follows: Start the node under test first, ensuring +that it won't talk to the Internet (i.e. disable bootstrapping). An easy way to prevent +unintended connections to the global DHT is listening on `127.0.0.1`. + +Now get the ENR of your node and store it in the `NODE` environment variable. + +Start the test by running `devp2p discv5 test -listen1 127.0.0.1 -listen2 127.0.0.2 $NODE`. + +### Eth Protocol Test Suite + +The Eth Protocol test suite is a conformance test suite for the [eth protocol][eth]. + +To run the eth protocol test suite against your implementation, the node needs to be initialized as such: + +1. initialize the geth node with the `genesis.json` file contained in the `testdata` directory +2. import the `halfchain.rlp` file in the `testdata` directory +3. run geth with the following flags: +``` +geth --datadir --nodiscover --nat=none --networkid 19763 --verbosity 5 +``` + +Then, run the following command, replacing `` with the enode of the geth node: + ``` + devp2p rlpx eth-test cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json +``` + +Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again. + +#### Eth66 Test Suite + +The Eth66 test suite is also a conformance test suite for the eth 66 protocol version specifically. 
+To run the eth66 protocol test suite, initialize a geth node as described above and run the following command, +replacing `` with the enode of the geth node: + + ``` + devp2p rlpx eth66-test cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json +``` + +[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md +[dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup +[discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md +[discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go index 92aaad72a372..9259b4894c9a 100644 --- a/cmd/devp2p/crawl.go +++ b/cmd/devp2p/crawl.go @@ -20,14 +20,13 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" ) type crawler struct { input nodeSet output nodeSet - disc *discover.UDPv4 + disc resolver iters []enode.Iterator inputIter enode.Iterator ch chan *enode.Node @@ -37,7 +36,11 @@ type crawler struct { revalidateInterval time.Duration } -func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler { +type resolver interface { + RequestENR(*enode.Node) (*enode.Node, error) +} + +func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler { c := &crawler{ input: input, output: make(nodeSet, len(input)), @@ -63,6 +66,7 @@ func (c *crawler) run(timeout time.Duration) nodeSet { doneCh = make(chan enode.Iterator, len(c.iters)) liveIters = len(c.iters) ) + defer timeoutTimer.Stop() for _, it := range c.iters { go c.runIterator(doneCh, it) } diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 9525bec66817..3b6dc09a1cc8 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -22,6 +22,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/discover" @@ -40,6 +41,7 @@ var ( discv4ResolveCommand, discv4ResolveJSONCommand, discv4CrawlCommand, + discv4TestCommand, }, } discv4PingCommand = cli.Command{ @@ -74,6 +76,18 @@ var ( Action: discv4Crawl, Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, } + discv4TestCommand = cli.Command{ + Name: "test", + Usage: "Runs tests against a node", + Action: discv4Test, + Flags: []cli.Flag{ + remoteEnodeFlag, + testPatternFlag, + testTAPFlag, + testListen1Flag, + testListen2Flag, + }, + } ) var ( @@ -81,11 +95,28 @@ var ( Name: "bootnodes", Usage: "Comma separated nodes used for bootstrapping", } + nodekeyFlag = cli.StringFlag{ + Name: "nodekey", + Usage: "Hex-encoded node key", + } + nodedbFlag = cli.StringFlag{ + Name: "nodedb", + Usage: "Nodes database location", + } + listenAddrFlag = cli.StringFlag{ + Name: "addr", + Usage: "Listening address", + } crawlTimeoutFlag = cli.DurationFlag{ Name: "timeout", Usage: "Time limit for the crawl.", Value: 30 * time.Minute, } + remoteEnodeFlag = cli.StringFlag{ + Name: "remote", + Usage: "Enode of the remote node under test", + EnvVar: "REMOTE_ENODE", + } ) func discv4Ping(ctx *cli.Context) error { @@ -172,28 +203,42 @@ func discv4Crawl(ctx *cli.Context) error { return nil } -func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) { - s := params.RinkebyBootnodes - if ctx.IsSet(bootnodesFlag.Name) { - s = strings.Split(ctx.String(bootnodesFlag.Name), ",") - } - nodes := make([]*enode.Node, len(s)) - var err 
error - for i, record := range s { - nodes[i], err = parseNode(record) - if err != nil { - return nil, fmt.Errorf("invalid bootstrap node: %v", err) - } +// discv4Test runs the protocol test suite. +func discv4Test(ctx *cli.Context) error { + // Configure test package globals. + if !ctx.IsSet(remoteEnodeFlag.Name) { + return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name) } - return nodes, nil + v4test.Remote = ctx.String(remoteEnodeFlag.Name) + v4test.Listen1 = ctx.String(testListen1Flag.Name) + v4test.Listen2 = ctx.String(testListen2Flag.Name) + return runTests(ctx, v4test.AllTests) } // startV4 starts an ephemeral discovery V4 node. func startV4(ctx *cli.Context) *discover.UDPv4 { - socket, ln, cfg, err := listen() + ln, config := makeDiscoveryConfig(ctx) + socket := listen(ln, ctx.String(listenAddrFlag.Name)) + disc, err := discover.ListenV4(socket, ln, config) if err != nil { exit(err) } + return disc +} + +func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) { + var cfg discover.Config + + if ctx.IsSet(nodekeyFlag.Name) { + key, err := crypto.HexToECDSA(ctx.String(nodekeyFlag.Name)) + if err != nil { + exit(fmt.Errorf("-%s: %v", nodekeyFlag.Name, err)) + } + cfg.PrivateKey = key + } else { + cfg.PrivateKey, _ = crypto.GenerateKey() + } + if commandHasFlag(ctx, bootnodesFlag) { bn, err := parseBootnodes(ctx) if err != nil { @@ -201,26 +246,51 @@ func startV4(ctx *cli.Context) *discover.UDPv4 { } cfg.Bootnodes = bn } - disc, err := discover.ListenV4(socket, ln, cfg) + + dbpath := ctx.String(nodedbFlag.Name) + db, err := enode.OpenDB(dbpath) if err != nil { exit(err) } - return disc -} - -func listen() (*net.UDPConn, *enode.LocalNode, discover.Config, error) { - var cfg discover.Config - cfg.PrivateKey, _ = crypto.GenerateKey() - db, _ := enode.OpenDB("") ln := enode.NewLocalNode(db, cfg.PrivateKey) + return ln, cfg +} - socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{0, 0, 0, 0}}) +func listen(ln *enode.LocalNode, addr string) *net.UDPConn { + if addr == "" { + addr = "0.0.0.0:0" + } + socket, err := net.ListenPacket("udp4", addr) if err != nil { - db.Close() - return nil, nil, cfg, err + exit(err) + } + usocket := socket.(*net.UDPConn) + uaddr := socket.LocalAddr().(*net.UDPAddr) + if uaddr.IP.IsUnspecified() { + ln.SetFallbackIP(net.IP{127, 0, 0, 1}) + } else { + ln.SetFallbackIP(uaddr.IP) + } + ln.SetFallbackUDP(uaddr.Port) + return usocket +} + +func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) { + s := params.RinkebyBootnodes + if ctx.IsSet(bootnodesFlag.Name) { + input := ctx.String(bootnodesFlag.Name) + if input == "" { + return nil, nil + } + s = strings.Split(input, ",") } - addr := socket.LocalAddr().(*net.UDPAddr) - ln.SetFallbackIP(net.IP{127, 0, 0, 1}) - ln.SetFallbackUDP(addr.Port) - return socket, ln, cfg, nil + nodes := make([]*enode.Node, len(s)) + var err error + for i, record := range s { + nodes[i], err = parseNode(record) + if err != nil { + return nil, fmt.Errorf("invalid bootstrap node: %v", err) + } + } + return nodes, nil } diff --git a/cmd/devp2p/discv5cmd.go b/cmd/devp2p/discv5cmd.go new file mode 100644 index 000000000000..e20d7c9cfae6 --- /dev/null +++ b/cmd/devp2p/discv5cmd.go @@ -0,0 +1,146 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. 
+// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/p2p/discover" + "gopkg.in/urfave/cli.v1" +) + +var ( + discv5Command = cli.Command{ + Name: "discv5", + Usage: "Node Discovery v5 tools", + Subcommands: []cli.Command{ + discv5PingCommand, + discv5ResolveCommand, + discv5CrawlCommand, + discv5TestCommand, + discv5ListenCommand, + }, + } + discv5PingCommand = cli.Command{ + Name: "ping", + Usage: "Sends ping to a node", + Action: discv5Ping, + } + discv5ResolveCommand = cli.Command{ + Name: "resolve", + Usage: "Finds a node in the DHT", + Action: discv5Resolve, + Flags: []cli.Flag{bootnodesFlag}, + } + discv5CrawlCommand = cli.Command{ + Name: "crawl", + Usage: "Updates a nodes.json file with random nodes found in the DHT", + Action: discv5Crawl, + Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, + } + discv5TestCommand = cli.Command{ + Name: "test", + Usage: "Runs protocol tests against a node", + Action: discv5Test, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + testListen1Flag, + testListen2Flag, + }, + } + discv5ListenCommand = cli.Command{ + Name: "listen", + Usage: "Runs a node", + Action: discv5Listen, + Flags: []cli.Flag{ + bootnodesFlag, + nodekeyFlag, + nodedbFlag, + listenAddrFlag, + }, + } +) + +func discv5Ping(ctx *cli.Context) error { + n := getNodeArg(ctx) + disc := startV5(ctx) + defer disc.Close() + + fmt.Println(disc.Ping(n)) + return nil +} + +func discv5Resolve(ctx *cli.Context) error { + n := getNodeArg(ctx) + disc := startV5(ctx) + defer disc.Close() + + fmt.Println(disc.Resolve(n)) + return nil +} + +func discv5Crawl(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("need nodes file as argument") + } + nodesFile := ctx.Args().First() + var inputSet nodeSet + if common.FileExist(nodesFile) { + inputSet = loadNodesJSON(nodesFile) + } + + disc := startV5(ctx) + defer disc.Close() + c := newCrawler(inputSet, disc, disc.RandomNodes()) + c.revalidateInterval = 10 * time.Minute + output := c.run(ctx.Duration(crawlTimeoutFlag.Name)) + writeNodesJSON(nodesFile, output) + return nil +} + +// discv5Test runs the protocol test suite. +func discv5Test(ctx *cli.Context) error { + suite := &v5test.Suite{ + Dest: getNodeArg(ctx), + Listen1: ctx.String(testListen1Flag.Name), + Listen2: ctx.String(testListen2Flag.Name), + } + return runTests(ctx, suite.AllTests()) +} + +func discv5Listen(ctx *cli.Context) error { + disc := startV5(ctx) + defer disc.Close() + + fmt.Println(disc.Self()) + select {} +} + +// startV5 starts an ephemeral discovery v5 node. 
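The startV4 helper earlier in this file and the startV5 helper just below share the same plumbing: build a discover.Config around a private key, open a node database, wrap it in an enode.LocalNode, and bind a UDP socket. A minimal standalone sketch of that pattern, assuming an in-memory node DB and an OS-assigned port (illustrative only, not part of the patch):

```
package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Ephemeral identity and in-memory node database, as in makeDiscoveryConfig.
	key, _ := crypto.GenerateKey()
	db, _ := enode.OpenDB("")
	ln := enode.NewLocalNode(db, key)

	// Bind an OS-assigned UDP port and set fallback endpoint info,
	// mirroring the listen helper above.
	socket, err := net.ListenPacket("udp4", "0.0.0.0:0")
	if err != nil {
		panic(err)
	}
	usocket := socket.(*net.UDPConn)
	uaddr := socket.LocalAddr().(*net.UDPAddr)
	ln.SetFallbackIP(net.IP{127, 0, 0, 1})
	ln.SetFallbackUDP(uaddr.Port)

	// Start an ephemeral discv5 listener and print its node record.
	disc, err := discover.ListenV5(usocket, ln, discover.Config{PrivateKey: key})
	if err != nil {
		panic(err)
	}
	defer disc.Close()
	fmt.Println(disc.Self())
}
```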
+func startV5(ctx *cli.Context) *discover.UDPv5 { + ln, config := makeDiscoveryConfig(ctx) + socket := listen(ln, ctx.String(listenAddrFlag.Name)) + disc, err := discover.ListenV5(socket, ln, config) + if err != nil { + exit(err) + } + return disc +} diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go index a4d10dcfdd3c..d67aaea1a7fb 100644 --- a/cmd/devp2p/dns_cloudflare.go +++ b/cmd/devp2p/dns_cloudflare.go @@ -17,6 +17,7 @@ package main import ( + "context" "fmt" "strings" @@ -79,7 +80,7 @@ func (c *cloudflareClient) checkZone(name string) error { c.zoneID = id } log.Info(fmt.Sprintf("Checking Permissions on zone %s", c.zoneID)) - zone, err := c.ZoneDetails(c.zoneID) + zone, err := c.ZoneDetails(context.Background(), c.zoneID) if err != nil { return err } @@ -112,7 +113,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) records = lrecords log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name)) - entries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: "TXT"}) + entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"}) if err != nil { return err } @@ -132,16 +133,18 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) log.Info(fmt.Sprintf("Creating %s = %q", path, val)) ttl := rootTTL if path != name { - ttl = treeNodeTTL // Max TTL permitted by Cloudflare + ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare + } - _, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}) + record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl} + _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record) } else if old.Content != val { // Entry already exists, only change its content. log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val)) old.Content = val - err = c.UpdateDNSRecord(c.zoneID, old.ID, old) + err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old) } else { - log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + log.Debug(fmt.Sprintf("Skipping %s = %q", path, val)) } if err != nil { return fmt.Errorf("failed to publish %s: %v", path, err) @@ -155,7 +158,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string) } // Stale entry, nuke it. log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content)) - if err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil { + if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil { return fmt.Errorf("failed to delete %s: %v", path, err) } } diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go index bdad5c2e9c90..1d4f975dda0b 100644 --- a/cmd/devp2p/dns_route53.go +++ b/cmd/devp2p/dns_route53.go @@ -17,24 +17,32 @@ package main import ( + "context" "errors" "fmt" "sort" "strconv" "strings" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/route53" + "github.com/aws/aws-sdk-go-v2/service/route53/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "gopkg.in/urfave/cli.v1" ) -// The Route53 limits change sets to this size. 
DNS changes need to be split -// up into multiple batches to work around the limit. -const route53ChangeLimit = 30000 +const ( + // Route53 limits change sets to 32k of 'RDATA size'. Change sets are also limited to + // 1000 items. UPSERTs count double. + // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-requests-changeresourcerecordsets + route53ChangeSizeLimit = 32000 + route53ChangeCountLimit = 1000 + maxRetryLimit = 60 +) var ( route53AccessKeyFlag = cli.StringFlag{ @@ -51,10 +59,15 @@ var ( Name: "zone-id", Usage: "Route53 Zone ID", } + route53RegionFlag = cli.StringFlag{ + Name: "aws-region", + Usage: "AWS Region", + Value: "eu-central-1", + } ) type route53Client struct { - api *route53.Route53 + api *route53.Client zoneID string } @@ -68,15 +81,16 @@ func newRoute53Client(ctx *cli.Context) *route53Client { akey := ctx.String(route53AccessKeyFlag.Name) asec := ctx.String(route53AccessSecretFlag.Name) if akey == "" || asec == "" { - exit(fmt.Errorf("need Route53 Access Key ID and secret proceed")) + exit(fmt.Errorf("need Route53 Access Key ID and secret to proceed")) } - config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")} - session, err := session.NewSession(config) + creds := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(akey, asec, "")) + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithCredentialsProvider(creds)) if err != nil { - exit(fmt.Errorf("can't create AWS session: %v", err)) + exit(fmt.Errorf("can't initialize AWS configuration: %v", err)) } + cfg.Region = ctx.String(route53RegionFlag.Name) return &route53Client{ - api: route53.New(session), + api: route53.NewFromConfig(cfg), zoneID: ctx.String(route53ZoneIDFlag.Name), } } @@ -93,31 +107,74 @@ func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error { return err } log.Info(fmt.Sprintf("Found %d TXT records", len(existing))) - records := t.ToTXT(name) changes := c.computeChanges(name, records, existing) + + // Submit to API. + comment := fmt.Sprintf("enrtree update of %s at seq %d", name, t.Seq()) + return c.submitChanges(changes, comment) +} + +// deleteDomain removes all TXT records of the given domain. +func (c *route53Client) deleteDomain(name string) error { + if err := c.checkZone(name); err != nil { + return err + } + + // Compute DNS changes. + existing, err := c.collectRecords(name) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Found %d TXT records", len(existing))) + changes := makeDeletionChanges(existing, nil) + + // Submit to API. + comment := "enrtree delete of " + name + return c.submitChanges(changes, comment) +} + +// submitChanges submits the given DNS changes to Route53. +func (c *route53Client) submitChanges(changes []types.Change, comment string) error { if len(changes) == 0 { log.Info("No DNS changes needed") return nil } - // Submit change batches. 
- batches := splitChanges(changes, route53ChangeLimit) + var err error + batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit) + changesToCheck := make([]*route53.ChangeResourceRecordSetsOutput, len(batches)) for i, changes := range batches { log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes))) - batch := new(route53.ChangeBatch) - batch.SetChanges(changes) - batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())) + batch := &types.ChangeBatch{ + Changes: changes, + Comment: aws.String(fmt.Sprintf("%s (%d/%d)", comment, i+1, len(batches))), + } req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch} - resp, err := c.api.ChangeResourceRecordSets(req) + changesToCheck[i], err = c.api.ChangeResourceRecordSets(context.TODO(), req) if err != nil { return err } + } - log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id)) - wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id} - if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil { - return err + // Wait for all change batches to propagate. + for _, change := range changesToCheck { + log.Info(fmt.Sprintf("Waiting for change request %s", *change.ChangeInfo.Id)) + wreq := &route53.GetChangeInput{Id: change.ChangeInfo.Id} + var count int + for { + wresp, err := c.api.GetChange(context.TODO(), wreq) + if err != nil { + return err + } + + count++ + + if wresp.ChangeInfo.Status == types.ChangeStatusInsync || count >= maxRetryLimit { + break + } + + time.Sleep(30 * time.Second) } } return nil @@ -136,7 +193,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name)) var req route53.ListHostedZonesByNameInput for { - resp, err := c.api.ListHostedZonesByName(&req) + resp, err := c.api.ListHostedZonesByName(context.TODO(), &req) if err != nil { return "", err } @@ -145,7 +202,7 @@ func (c *route53Client) findZoneID(name string) (string, error) { return *zone.Id, nil } } - if !*resp.IsTruncated { + if !resp.IsTruncated { break } req.DNSName = resp.NextDNSName @@ -154,8 +211,9 @@ func (c *route53Client) findZoneID(name string) (string, error) { return "", errors.New("can't find zone ID for " + name) } -// computeChanges creates DNS changes for the given record. -func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change { +// computeChanges creates DNS changes for the given set of DNS discovery records. +// The 'existing' arg is the set of records that already exist on Route53. +func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []types.Change { // Convert all names to lowercase. lrecords := make(map[string]string, len(records)) for name, r := range records { @@ -163,73 +221,93 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e } records = lrecords - var changes []*route53.Change - for path, val := range records { + var changes []types.Change + for path, newValue := range records { + prevRecords, exists := existing[path] + prevValue := strings.Join(prevRecords.values, "") + + // prevValue contains quoted strings, encode newValue to compare. + newValue = splitTXT(newValue) + + // Assign TTL. 
ttl := int64(rootTTL) if path != name { ttl = int64(treeNodeTTL) } - prevRecords, exists := existing[path] - prevValue := combineTXT(prevRecords.values) if !exists { // Entry is unknown, push a new one - log.Info(fmt.Sprintf("Creating %s = %q", path, val)) - changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val))) - } else if prevValue != val { + log.Info(fmt.Sprintf("Creating %s = %s", path, newValue)) + changes = append(changes, newTXTChange("CREATE", path, ttl, newValue)) + } else if prevValue != newValue || prevRecords.ttl != ttl { // Entry already exists, only change its content. - log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val)) - changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val))) + log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue)) + changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue)) } else { - log.Info(fmt.Sprintf("Skipping %s = %q", path, val)) + log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue)) } } // Iterate over the old records and delete anything stale. - for path, set := range existing { - if _, ok := records[path]; ok { + changes = append(changes, makeDeletionChanges(existing, records)...) + + // Ensure changes are in the correct order. + sortChanges(changes) + return changes +} + +// makeDeletionChanges creates record changes which delete all records not contained in 'keep'. +func makeDeletionChanges(records map[string]recordSet, keep map[string]string) []types.Change { + var changes []types.Change + for path, set := range records { + if _, ok := keep[path]; ok { continue } - // Stale entry, nuke it. - log.Info(fmt.Sprintf("Deleting %s = %q", path, combineTXT(set.values))) - changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values)) + log.Info(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, ""))) + changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...)) } - - sortChanges(changes) return changes } // sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order. -func sortChanges(changes []*route53.Change) { +func sortChanges(changes []types.Change) { score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3} sort.Slice(changes, func(i, j int) bool { - if *changes[i].Action == *changes[j].Action { + if changes[i].Action == changes[j].Action { return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name } - return score[*changes[i].Action] < score[*changes[j].Action] + return score[string(changes[i].Action)] < score[string(changes[j].Action)] }) } // splitChanges splits up DNS changes such that each change batch // is smaller than the given RDATA limit. -func splitChanges(changes []*route53.Change, limit int) [][]*route53.Change { - var batches [][]*route53.Change - var batchSize int +func splitChanges(changes []types.Change, sizeLimit, countLimit int) [][]types.Change { + var ( + batches [][]types.Change + batchSize int + batchCount int + ) for _, ch := range changes { // Start new batch if this change pushes the current one over the limit. 
- size := changeSize(ch) - if len(batches) == 0 || batchSize+size > limit { + count := changeCount(ch) + size := changeSize(ch) * count + overSize := batchSize+size > sizeLimit + overCount := batchCount+count > countLimit + if len(batches) == 0 || overSize || overCount { batches = append(batches, nil) batchSize = 0 + batchCount = 0 } batches[len(batches)-1] = append(batches[len(batches)-1], ch) batchSize += size + batchCount += count } return batches } // changeSize returns the RDATA size of a DNS change. -func changeSize(ch *route53.Change) int { +func changeSize(ch types.Change) int { size := 0 for _, rr := range ch.ResourceRecordSet.ResourceRecords { if rr.Value != nil { @@ -239,15 +317,26 @@ func changeSize(ch *route53.Change) int { return size } +func changeCount(ch types.Change) int { + if ch.Action == types.ChangeActionUpsert { + return 2 + } + return 1 +} + // collectRecords collects all TXT records below the given name. func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) { - log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID)) var req route53.ListResourceRecordSetsInput - req.SetHostedZoneId(c.zoneID) + req.HostedZoneId = &c.zoneID existing := make(map[string]recordSet) - err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool { + for page := 0; ; page++ { + log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page) + resp, err := c.api.ListResourceRecordSets(context.TODO(), &req) + if err != nil { + return existing, err + } for _, set := range resp.ResourceRecordSets { - if !isSubdomain(*set.Name, name) || *set.Type != "TXT" { + if !isSubdomain(*set.Name, name) || set.Type != types.RRTypeTxt { continue } s := recordSet{ttl: *set.TTL} @@ -257,28 +346,44 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error name := strings.TrimSuffix(*set.Name, ".") existing[name] = s } - return true - }) - return existing, err + + if !resp.IsTruncated { + break + } + // Set the cursor to the next batch. From the AWS docs: + // + // To display the next page of results, get the values of NextRecordName, + // NextRecordType, and NextRecordIdentifier (if any) from the response. Then submit + // another ListResourceRecordSets request, and specify those values for + // StartRecordName, StartRecordType, and StartRecordIdentifier. + req.StartRecordIdentifier = resp.NextRecordIdentifier + req.StartRecordName = resp.NextRecordName + req.StartRecordType = resp.NextRecordType + } + + return existing, nil } // newTXTChange creates a change to a TXT record. -func newTXTChange(action, name string, ttl int64, values []string) *route53.Change { - var c route53.Change - var r route53.ResourceRecordSet - var rrs []*route53.ResourceRecord +func newTXTChange(action, name string, ttl int64, values ...string) types.Change { + r := types.ResourceRecordSet{ + Type: types.RRTypeTxt, + Name: &name, + TTL: &ttl, + } + var rrs []types.ResourceRecord for _, val := range values { - rr := new(route53.ResourceRecord) - rr.SetValue(val) + var rr types.ResourceRecord + rr.Value = aws.String(val) rrs = append(rrs, rr) } - r.SetType("TXT") - r.SetName(name) - r.SetTTL(ttl) - r.SetResourceRecords(rrs) - c.SetAction(action) - c.SetResourceRecordSet(&r) - return &c + + r.ResourceRecords = rrs + + return types.Change{ + Action: types.ChangeAction(action), + ResourceRecordSet: &r, + } } // isSubdomain returns true if name is a subdomain of domain. 
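The batching rules in splitChanges above interact with changeCount in a slightly subtle way: an UPSERT counts as two changes against the per-request count limit, and its RDATA size is weighted by that count as well, per the AWS limits linked in the constant block. A simplified standalone sketch of the same batching idea, with plain integers standing in for Route53 changes:

```
// splitByLimits groups items into batches so that no batch exceeds either the
// total-size limit or the item-count limit. Sizes are weighted by their count,
// the way splitChanges weights UPSERTs (count == 2) double.
func splitByLimits(sizes, counts []int, sizeLimit, countLimit int) [][]int {
	var (
		batches    [][]int
		batchSize  int
		batchCount int
	)
	for i := range sizes {
		weighted := sizes[i] * counts[i]
		overSize := batchSize+weighted > sizeLimit
		overCount := batchCount+counts[i] > countLimit
		if len(batches) == 0 || overSize || overCount {
			batches = append(batches, nil)
			batchSize, batchCount = 0, 0
		}
		batches[len(batches)-1] = append(batches[len(batches)-1], i)
		batchSize += weighted
		batchCount += counts[i]
	}
	return batches
}
```

With the constants above, a change batch consisting only of UPSERTs can therefore carry at most 500 record sets per request.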
@@ -288,28 +393,16 @@ func isSubdomain(name, domain string) bool { return strings.HasSuffix("."+name, "."+domain) } -// combineTXT concatenates the given quoted strings into a single unquoted string. -func combineTXT(values []string) string { - result := "" - for _, v := range values { - if v[0] == '"' { - v = v[1 : len(v)-1] - } - result += v - } - return result -} - // splitTXT splits value into a list of quoted 255-character strings. -func splitTXT(value string) []string { - var result []string +func splitTXT(value string) string { + var result strings.Builder for len(value) > 0 { rlen := len(value) if rlen > 253 { rlen = 253 } - result = append(result, strconv.Quote(value[:rlen])) + result.WriteString(strconv.Quote(value[:rlen])) value = value[rlen:] } - return result + return result.String() } diff --git a/cmd/devp2p/dns_route53_test.go b/cmd/devp2p/dns_route53_test.go index c64f1d169814..e6eb516e6bbc 100644 --- a/cmd/devp2p/dns_route53_test.go +++ b/cmd/devp2p/dns_route53_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go-v2/service/route53/types" ) // This test checks that computeChanges/splitChanges create DNS changes in @@ -28,8 +28,7 @@ import ( func TestRoute53ChangeSort(t *testing.T) { testTree0 := map[string]recordSet{ "2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{ - `"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-"`, - `"vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`, + `"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`, }}, "fdxn3sn67na5dka4j2gok7bvqi.n": {ttl: treeNodeTTL, values: []string{`"enrtree-branch:"`}}, "n": {ttl: rootTTL, values: []string{`"enrtree-root:v1 e=2KFJOGVXDQTXXUGBH7GS7NAAAI l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=0 sig=v_-J_q_9ICQg5ztExFvLQhDBGMb0lZPJLhe3ts9LAcgqhOhtT3YFJsl8BWNDSwGtamUdR-9xl88_w-X42SVpjwE"`}}, @@ -44,94 +43,93 @@ func TestRoute53ChangeSort(t *testing.T) { "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", } - wantChanges := []*route53.Change{ + wantChanges := []types.Change{ { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"), - 
ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("CREATE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "CREATE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("mhtdo6tmubria2xwg5ludack24.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("UPSERT"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "UPSERT", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`), }}, TTL: ip(rootTTL), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("DELETE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "DELETE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"), - ResourceRecords: []*route53.ResourceRecord{ - {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-"`)}, - {Value: sp(`"vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)}, + ResourceRecords: []types.ResourceRecord{ + {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)}, }, TTL: ip(3333), - Type: sp("TXT"), + Type: "TXT", }, }, { - Action: sp("DELETE"), - ResourceRecordSet: &route53.ResourceRecordSet{ + Action: "DELETE", + ResourceRecordSet: &types.ResourceRecordSet{ Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"), - ResourceRecords: []*route53.ResourceRecord{{ + ResourceRecords: []types.ResourceRecord{{ Value: sp(`"enrtree-branch:"`), }}, TTL: ip(treeNodeTTL), - Type: sp("TXT"), + Type: "TXT", }, }, } @@ -142,14 +140,50 @@ func TestRoute53ChangeSort(t *testing.T) { t.Fatalf("wrong changes (got %d, want %d)", len(changes), len(wantChanges)) } - wantSplit := [][]*route53.Change{ + // Check splitting according to size. + wantSplit := [][]types.Change{ wantChanges[:4], - wantChanges[4:8], + wantChanges[4:6], + wantChanges[6:], } - split := splitChanges(changes, 600) + split := splitChanges(changes, 600, 4000) if !reflect.DeepEqual(split, wantSplit) { t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit)) } + + // Check splitting according to count. 
+ wantSplit = [][]types.Change{ + wantChanges[:5], + wantChanges[5:], + } + split = splitChanges(changes, 10000, 6) + if !reflect.DeepEqual(split, wantSplit) { + t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit)) + } +} + +// This test checks that computeChanges compares the quoted value of the records correctly. +func TestRoute53NoChange(t *testing.T) { + // Existing record set. + testTree0 := map[string]recordSet{ + "n": {ttl: rootTTL, values: []string{ + `"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`, + }}, + "2xs2367yhaxjfglzhvawlqd4zy.n": {ttl: treeNodeTTL, values: []string{ + `"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`, + }}, + } + // New set. + testTree1 := map[string]string{ + "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA", + "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", + } + + var client route53Client + changes := client.computeChanges("n", testTree1, testTree0) + if len(changes) > 0 { + t.Fatalf("wrong changes (got %d, want 0)", len(changes)) + } } func sp(s string) *string { return &s } diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index 7c9ccd31f4c2..85f28b8cb182 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -27,10 +27,10 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var ( @@ -43,6 +43,7 @@ var ( dnsTXTCommand, dnsCloudflareCommand, dnsRoute53Command, + dnsRoute53NukeCommand, }, } dnsSyncCommand = cli.Command{ @@ -77,7 +78,24 @@ var ( Usage: "Deploy DNS TXT records to Amazon Route53", ArgsUsage: "", Action: dnsToRoute53, - Flags: []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag}, + Flags: []cli.Flag{ + route53AccessKeyFlag, + route53AccessSecretFlag, + route53ZoneIDFlag, + route53RegionFlag, + }, + } + dnsRoute53NukeCommand = cli.Command{ + Name: "nuke-route53", + Usage: "Deletes DNS TXT records of a subdomain on Amazon Route53", + ArgsUsage: "", + Action: dnsNukeRoute53, + Flags: []cli.Flag{ + route53AccessKeyFlag, + route53AccessSecretFlag, + route53ZoneIDFlag, + route53RegionFlag, + }, } ) @@ -97,8 +115,9 @@ var ( ) const ( - rootTTL = 1 - treeNodeTTL = 2147483647 + rootTTL = 30 * 60 // 30 min + treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks + treeNodeTTLCloudflare = 24 * 60 * 60 // 1 day ) // dnsSync performs dnsSyncCommand. @@ -169,6 +188,9 @@ func dnsSign(ctx *cli.Context) error { return nil } +// directoryName returns the directory name of the given path. +// For example, when dir is "foo/bar", it returns "bar". +// When dir is ".", and the working directory is "example/foo", it returns "foo". 
func directoryName(dir string) string { abs, err := filepath.Abs(dir) if err != nil { @@ -177,7 +199,7 @@ func directoryName(dir string) string { return filepath.Base(abs) } -// dnsToTXT peforms dnsTXTCommand. +// dnsToTXT performs dnsTXTCommand. func dnsToTXT(ctx *cli.Context) error { if ctx.NArg() < 1 { return fmt.Errorf("need tree definition directory as argument") @@ -194,9 +216,9 @@ func dnsToTXT(ctx *cli.Context) error { return nil } -// dnsToCloudflare peforms dnsCloudflareCommand. +// dnsToCloudflare performs dnsCloudflareCommand. func dnsToCloudflare(ctx *cli.Context) error { - if ctx.NArg() < 1 { + if ctx.NArg() != 1 { return fmt.Errorf("need tree definition directory as argument") } domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0)) @@ -207,9 +229,9 @@ func dnsToCloudflare(ctx *cli.Context) error { return client.deploy(domain, t) } -// dnsToRoute53 peforms dnsRoute53Command. +// dnsToRoute53 performs dnsRoute53Command. func dnsToRoute53(ctx *cli.Context) error { - if ctx.NArg() < 1 { + if ctx.NArg() != 1 { return fmt.Errorf("need tree definition directory as argument") } domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0)) @@ -220,13 +242,22 @@ func dnsToRoute53(ctx *cli.Context) error { return client.deploy(domain, t) } +// dnsNukeRoute53 performs dnsRoute53NukeCommand. +func dnsNukeRoute53(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("need domain name as argument") + } + client := newRoute53Client(ctx) + return client.deleteDomain(ctx.Args().First()) +} + // loadSigningKey loads a private key in Ethereum keystore format. func loadSigningKey(keyfile string) *ecdsa.PrivateKey { keyjson, err := ioutil.ReadFile(keyfile) if err != nil { exit(fmt.Errorf("failed to read the keyfile at '%s': %v", keyfile, err)) } - password, _ := console.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ") + password, _ := prompt.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ") key, err := keystore.DecryptKey(keyjson, password) if err != nil { exit(fmt.Errorf("error decrypting key: %v", err)) diff --git a/cmd/devp2p/enrcmd.go b/cmd/devp2p/enrcmd.go index 15d77dd011a8..48ede616ee16 100644 --- a/cmd/devp2p/enrcmd.go +++ b/cmd/devp2p/enrcmd.go @@ -21,6 +21,7 @@ import ( "encoding/base64" "encoding/hex" "fmt" + "io" "io/ioutil" "net" "os" @@ -69,22 +70,30 @@ func enrdump(ctx *cli.Context) error { if err != nil { return fmt.Errorf("INVALID: %v", err) } - fmt.Print(dumpRecord(r)) + dumpRecord(os.Stdout, r) return nil } // dumpRecord creates a human-readable description of the given node record. 
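The rework of dumpRecord below writes to an io.Writer and, when the record carries a secp256k1 key, also prints the node's v4 URL. A minimal sketch of the same enode APIs used outside the command (the base64 record is read from the command line here purely for illustration):

```
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Any signed "enr:..." record works as input; os.Args[1] is just a stand-in.
	n, err := enode.Parse(enode.ValidSchemes, os.Args[1])
	if err != nil {
		fmt.Fprintf(os.Stderr, "INVALID: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("Node ID:", n.ID())

	// Only records with a secp256k1 key have a v4 URL, mirroring dumpNodeURL.
	var key enode.Secp256k1
	if n.Load(&key) == nil {
		fmt.Println("URLv4:", n.URLv4())
	}
}
```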
-func dumpRecord(r *enr.Record) string { - out := new(bytes.Buffer) - if n, err := enode.New(enode.ValidSchemes, r); err != nil { +func dumpRecord(out io.Writer, r *enr.Record) { + n, err := enode.New(enode.ValidSchemes, r) + if err != nil { fmt.Fprintf(out, "INVALID: %v\n", err) } else { fmt.Fprintf(out, "Node ID: %v\n", n.ID()) + dumpNodeURL(out, n) } kv := r.AppendElements(nil)[1:] fmt.Fprintf(out, "Record has sequence number %d and %d key/value pairs.\n", r.Seq(), len(kv)/2) fmt.Fprint(out, dumpRecordKV(kv, 2)) - return out.String() +} + +func dumpNodeURL(out io.Writer, n *enode.Node) { + var key enode.Secp256k1 + if n.Load(&key) != nil { + return // no secp256k1 public key + } + fmt.Fprintf(out, "URLv4: %s\n", n.URLv4()) } func dumpRecordKV(kv []interface{}, indent int) string { diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go new file mode 100644 index 000000000000..d0d55a455d30 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -0,0 +1,196 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +type Chain struct { + genesis core.Genesis + blocks []*types.Block + chainConfig *params.ChainConfig +} + +// Len returns the length of the chain. +func (c *Chain) Len() int { + return len(c.blocks) +} + +// TD calculates the total difficulty of the chain at the +// chain head. +func (c *Chain) TD() *big.Int { + sum := big.NewInt(0) + for _, block := range c.blocks[:c.Len()] { + sum.Add(sum, block.Difficulty()) + } + return sum +} + +// TotalDifficultyAt calculates the total difficulty of the chain +// at the given block height. +func (c *Chain) TotalDifficultyAt(height int) *big.Int { + sum := big.NewInt(0) + if height >= c.Len() { + return sum + } + for _, block := range c.blocks[:height+1] { + sum.Add(sum, block.Difficulty()) + } + return sum +} + +func (c *Chain) RootAt(height int) common.Hash { + if height < c.Len() { + return c.blocks[height].Root() + } + return common.Hash{} +} + +// ForkID gets the fork id of the chain. 
+func (c *Chain) ForkID() forkid.ID { + return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len())) +} + +// Shorten returns a copy chain of a desired height from the imported +func (c *Chain) Shorten(height int) *Chain { + blocks := make([]*types.Block, height) + copy(blocks, c.blocks[:height]) + + config := *c.chainConfig + return &Chain{ + blocks: blocks, + chainConfig: &config, + } +} + +// Head returns the chain head. +func (c *Chain) Head() *types.Block { + return c.blocks[c.Len()-1] +} + +func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) { + if req.Amount < 1 { + return nil, fmt.Errorf("no block headers requested") + } + + headers := make(BlockHeaders, req.Amount) + var blockNumber uint64 + + // range over blocks to check if our chain has the requested header + for _, block := range c.blocks { + if block.Hash() == req.Origin.Hash || block.Number().Uint64() == req.Origin.Number { + headers[0] = block.Header() + blockNumber = block.Number().Uint64() + } + } + if headers[0] == nil { + return nil, fmt.Errorf("no headers found for given origin number %v, hash %v", req.Origin.Number, req.Origin.Hash) + } + + if req.Reverse { + for i := 1; i < int(req.Amount); i++ { + blockNumber -= (1 - req.Skip) + headers[i] = c.blocks[blockNumber].Header() + + } + + return headers, nil + } + + for i := 1; i < int(req.Amount); i++ { + blockNumber += (1 + req.Skip) + headers[i] = c.blocks[blockNumber].Header() + } + + return headers, nil +} + +// loadChain takes the given chain.rlp file, and decodes and returns +// the blocks from the file. +func loadChain(chainfile string, genesis string) (*Chain, error) { + gen, err := loadGenesis(genesis) + if err != nil { + return nil, err + } + gblock := gen.ToBlock(nil) + + blocks, err := blocksFromFile(chainfile, gblock) + if err != nil { + return nil, err + } + + c := &Chain{genesis: gen, blocks: blocks, chainConfig: gen.Config} + return c, nil +} + +func loadGenesis(genesisFile string) (core.Genesis, error) { + chainConfig, err := ioutil.ReadFile(genesisFile) + if err != nil { + return core.Genesis{}, err + } + var gen core.Genesis + if err := json.Unmarshal(chainConfig, &gen); err != nil { + return core.Genesis{}, err + } + return gen, nil +} + +func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) { + // Load chain.rlp. + fh, err := os.Open(chainfile) + if err != nil { + return nil, err + } + defer fh.Close() + var reader io.Reader = fh + if strings.HasSuffix(chainfile, ".gz") { + if reader, err = gzip.NewReader(reader); err != nil { + return nil, err + } + } + stream := rlp.NewStream(reader, 0) + var blocks = make([]*types.Block, 1) + blocks[0] = gblock + for i := 0; ; i++ { + var b types.Block + if err := stream.Decode(&b); err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("at block index %d: %v", i, err) + } + if b.NumberU64() != uint64(i+1) { + return nil, fmt.Errorf("block at index %d has wrong number %d", i, b.NumberU64()) + } + blocks = append(blocks, &b) + } + return blocks, nil +} diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go new file mode 100644 index 000000000000..ec98833ab529 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -0,0 +1,201 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "path/filepath" + "strconv" + "testing" + + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/p2p" + "github.com/stretchr/testify/assert" +) + +// TestEthProtocolNegotiation tests whether the test suite +// can negotiate the highest eth protocol in a status message exchange +func TestEthProtocolNegotiation(t *testing.T) { + var tests = []struct { + conn *Conn + caps []p2p.Cap + expected uint32 + }{ + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 64, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + expected: 64, + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 0}, + {Name: "eth", Version: 89}, + {Name: "eth", Version: 65}, + }, + expected: uint32(65), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 64, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "wrongProto", Version: 65}, + }, + expected: uint32(64), + }, + { + conn: &Conn{ + ourHighestProtoVersion: 65, + }, + caps: []p2p.Cap{ + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "wrongProto", Version: 65}, + }, + expected: uint32(64), + }, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + tt.conn.negotiateEthProtocol(tt.caps) + assert.Equal(t, tt.expected, uint32(tt.conn.negotiatedProtoVersion)) + }) + } +} + +// TestChain_GetHeaders tests whether the test suite can correctly +// respond to a GetBlockHeaders request from a node. 
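For reference when reading the cases below: Skip is the number of headers omitted between consecutive entries, so an origin of 2 with Amount 5 and Skip 1 selects headers 2, 4, 6, 8 and 10, while Reverse walks from the origin towards the genesis block. A small model of that selection rule (a simplified sketch; the reverse cases in the test below only use Skip 0):

```
// selectedNumbers models which block numbers a GetBlockHeaders request picks
// on a sufficiently long chain. It ignores hash origins and bounds checks.
func selectedNumbers(origin, amount, skip uint64, reverse bool) []uint64 {
	step := skip + 1
	numbers := make([]uint64, 0, amount)
	for n, i := origin, uint64(0); i < amount; i++ {
		numbers = append(numbers, n)
		if reverse {
			if n < step {
				break // would step past the genesis block
			}
			n -= step
		} else {
			n += step
		}
	}
	return numbers
}
```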
+func TestChain_GetHeaders(t *testing.T) { + chainFile, err := filepath.Abs("./testdata/chain.rlp") + if err != nil { + t.Fatal(err) + } + genesisFile, err := filepath.Abs("./testdata/genesis.json") + if err != nil { + t.Fatal(err) + } + + chain, err := loadChain(chainFile, genesisFile) + if err != nil { + t.Fatal(err) + } + + var tests = []struct { + req GetBlockHeaders + expected BlockHeaders + }{ + { + req: GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Number: uint64(2), + }, + Amount: uint64(5), + Skip: 1, + Reverse: false, + }, + expected: BlockHeaders{ + chain.blocks[2].Header(), + chain.blocks[4].Header(), + chain.blocks[6].Header(), + chain.blocks[8].Header(), + chain.blocks[10].Header(), + }, + }, + { + req: GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Number: uint64(chain.Len() - 1), + }, + Amount: uint64(3), + Skip: 0, + Reverse: true, + }, + expected: BlockHeaders{ + chain.blocks[chain.Len()-1].Header(), + chain.blocks[chain.Len()-2].Header(), + chain.blocks[chain.Len()-3].Header(), + }, + }, + { + req: GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: chain.Head().Hash(), + }, + Amount: uint64(1), + Skip: 0, + Reverse: false, + }, + expected: BlockHeaders{ + chain.Head().Header(), + }, + }, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + headers, err := chain.GetHeaders(tt.req) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, headers, tt.expected) + }) + } +} diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go new file mode 100644 index 000000000000..dd9dfd861981 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -0,0 +1,789 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "fmt" + "net" + "reflect" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" +) + +var ( + pretty = spew.ConfigState{ + Indent: " ", + DisableCapacities: true, + DisablePointerAddresses: true, + SortKeys: true, + } + timeout = 20 * time.Second +) + +// Is_66 checks if the node supports the eth66 protocol version, +// and if not, exists the test suite +func (s *Suite) Is_66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.handshake(); err != nil { + t.Fatalf("handshake failed: %v", err) + } + if conn.negotiatedProtoVersion < 66 { + t.Fail() + } +} + +// dial attempts to dial the given node and perform a handshake, +// returning the created Conn if successful. 
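The dialing helpers defined below are normally paired with peer(), which runs both the devp2p handshake and the eth Status exchange before any protocol messages are sent. A typical usage sketch inside a test, assuming a Suite s and a *utesting.T t, with error handling collapsed into t.Fatalf:

```
conn, err := s.dial66()
if err != nil {
	t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err := conn.peer(s.chain, nil); err != nil {
	t.Fatalf("peering failed: %v", err)
}
// conn is now ready for protocol-level requests such as headersRequest.
```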
+func (s *Suite) dial() (*Conn, error) { + // dial + fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) + if err != nil { + return nil, err + } + conn := Conn{Conn: rlpx.NewConn(fd, s.Dest.Pubkey())} + // do encHandshake + conn.ourKey, _ = crypto.GenerateKey() + _, err = conn.Handshake(conn.ourKey) + if err != nil { + conn.Close() + return nil, err + } + // set default p2p capabilities + conn.caps = []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + } + conn.ourHighestProtoVersion = 65 + return &conn, nil +} + +// dial66 attempts to dial the given node and perform a handshake, +// returning the created Conn with additional eth66 capabilities if +// successful +func (s *Suite) dial66() (*Conn, error) { + conn, err := s.dial() + if err != nil { + return nil, fmt.Errorf("dial failed: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) + conn.ourHighestProtoVersion = 66 + return conn, nil +} + +// dial66 attempts to dial the given node and perform a handshake, +// returning the created Conn with additional snap/1 capabilities if +// successful. +func (s *Suite) dialSnap() (*Conn, error) { + conn, err := s.dial66() + if err != nil { + return nil, fmt.Errorf("dial failed: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1}) + conn.ourHighestSnapProtoVersion = 1 + return conn, nil +} + +// peer performs both the protocol handshake and the status message +// exchange with the node in order to peer with it. +func (c *Conn) peer(chain *Chain, status *Status) error { + if err := c.handshake(); err != nil { + return fmt.Errorf("handshake failed: %v", err) + } + if _, err := c.statusExchange(chain, status); err != nil { + return fmt.Errorf("status exchange failed: %v", err) + } + return nil +} + +// handshake performs a protocol handshake with the node. +func (c *Conn) handshake() error { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(10 * time.Second)) + // write hello to client + pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] + ourHandshake := &Hello{ + Version: 5, + Caps: c.caps, + ID: pub0, + } + if err := c.Write(ourHandshake); err != nil { + return fmt.Errorf("write to connection failed: %v", err) + } + // read hello from client + switch msg := c.Read().(type) { + case *Hello: + // set snappy if version is at least 5 + if msg.Version >= 5 { + c.SetSnappy(true) + } + c.negotiateEthProtocol(msg.Caps) + if c.negotiatedProtoVersion == 0 { + return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion) + } + // If we require snap, verify that it was negotiated + if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion { + return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion) + } + return nil + default: + return fmt.Errorf("bad handshake: %#v", msg) + } +} + +// negotiateEthProtocol sets the Conn's eth protocol version to highest +// advertised capability from peer. 
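The rule implemented just below is: for each capability name, take the highest version supported by both sides, and ignore capability names the suite does not care about. A hypothetical extra test case spelling that out (the existing TestEthProtocolNegotiation above already covers these combinations):

```
func TestNegotiateCapsExample(t *testing.T) {
	conn := &Conn{ourHighestProtoVersion: 64}
	conn.negotiateEthProtocol([]p2p.Cap{
		{Name: "eth", Version: 63},
		{Name: "eth", Version: 64},
		{Name: "eth", Version: 65},        // newer than we support, ignored
		{Name: "wrongProto", Version: 99}, // unknown capability name, ignored
	})
	if conn.negotiatedProtoVersion != 64 {
		t.Fatalf("negotiated eth/%d, want eth/64", conn.negotiatedProtoVersion)
	}
}
```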
+func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { + var highestEthVersion uint + var highestSnapVersion uint + for _, capability := range caps { + switch capability.Name { + case "eth": + if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { + highestEthVersion = capability.Version + } + case "snap": + if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion { + highestSnapVersion = capability.Version + } + } + } + c.negotiatedProtoVersion = highestEthVersion + c.negotiatedSnapProtoVersion = highestSnapVersion +} + +// statusExchange performs a `Status` message exchange with the given node. +func (c *Conn) statusExchange(chain *Chain, status *Status) (Message, error) { + defer c.SetDeadline(time.Time{}) + c.SetDeadline(time.Now().Add(20 * time.Second)) + + // read status message from client + var message Message +loop: + for { + switch msg := c.Read().(type) { + case *Status: + if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want { + return nil, fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x", + want, chain.blocks[chain.Len()-1].NumberU64(), have) + } + if have, want := msg.TD.Cmp(chain.TD()), 0; have != want { + return nil, fmt.Errorf("wrong TD in status: have %v want %v", have, want) + } + if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { + return nil, fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want) + } + if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) { + return nil, fmt.Errorf("wrong protocol version: have %v, want %v", have, want) + } + message = msg + break loop + case *Disconnect: + return nil, fmt.Errorf("disconnect received: %v", msg.Reason) + case *Ping: + c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error + // (PINGs should not be a response upon fresh connection) + default: + return nil, fmt.Errorf("bad status message: %s", pretty.Sdump(msg)) + } + } + // make sure eth protocol version is set for negotiation + if c.negotiatedProtoVersion == 0 { + return nil, fmt.Errorf("eth protocol version must be set in Conn") + } + if status == nil { + // default status message + status = &Status{ + ProtocolVersion: uint32(c.negotiatedProtoVersion), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } + } + if err := c.Write(status); err != nil { + return nil, fmt.Errorf("write to connection failed: %v", err) + } + return message, nil +} + +// createSendAndRecvConns creates two connections, one for sending messages to the +// node, and one for receiving messages from the node. 
+func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) { + var ( + sendConn *Conn + recvConn *Conn + err error + ) + if isEth66 { + sendConn, err = s.dial66() + if err != nil { + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + recvConn, err = s.dial66() + if err != nil { + sendConn.Close() + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + } else { + sendConn, err = s.dial() + if err != nil { + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + recvConn, err = s.dial() + if err != nil { + sendConn.Close() + return nil, nil, fmt.Errorf("dial failed: %v", err) + } + } + return sendConn, recvConn, nil +} + +func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { + if c.negotiatedProtoVersion == 66 { + _, msg := c.readAndServe66(chain, timeout) + return msg + } + return c.readAndServe65(chain, timeout) +} + +// readAndServe serves GetBlockHeaders requests while waiting +// on another message from the node. +func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message { + start := time.Now() + for time.Since(start) < timeout { + c.SetReadDeadline(time.Now().Add(5 * time.Second)) + switch msg := c.Read().(type) { + case *Ping: + c.Write(&Pong{}) + case *GetBlockHeaders: + req := *msg + headers, err := chain.GetHeaders(req) + if err != nil { + return errorf("could not get headers for inbound header request: %v", err) + } + if err := c.Write(headers); err != nil { + return errorf("could not write to connection: %v", err) + } + default: + return msg + } + } + return errorf("no message received within %v", timeout) +} + +// readAndServe66 serves eth66 GetBlockHeaders requests while waiting +// on another message from the node. +func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) { + start := time.Now() + for time.Since(start) < timeout { + c.SetReadDeadline(time.Now().Add(10 * time.Second)) + + reqID, msg := c.Read66() + + switch msg := msg.(type) { + case *Ping: + c.Write(&Pong{}) + case GetBlockHeaders: + headers, err := chain.GetHeaders(msg) + if err != nil { + return 0, errorf("could not get headers for inbound header request: %v", err) + } + resp := ð.BlockHeadersPacket66{ + RequestId: reqID, + BlockHeadersPacket: eth.BlockHeadersPacket(headers), + } + if err := c.Write66(resp, BlockHeaders{}.Code()); err != nil { + return 0, errorf("could not write to connection: %v", err) + } + default: + return reqID, msg + } + } + return 0, errorf("no message received within %v", timeout) +} + +// headersRequest executes the given `GetBlockHeaders` request. 
+// headersRequest executes the given `GetBlockHeaders` request.
+func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bool, reqID uint64) (BlockHeaders, error) {
+    defer c.SetReadDeadline(time.Time{})
+    c.SetReadDeadline(time.Now().Add(20 * time.Second))
+    // if on eth66 connection, perform eth66 GetBlockHeaders request
+    if isEth66 {
+        return getBlockHeaders66(chain, c, request, reqID)
+    }
+    if err := c.Write(request); err != nil {
+        return nil, err
+    }
+    switch msg := c.readAndServe(chain, timeout).(type) {
+    case *BlockHeaders:
+        return *msg, nil
+    default:
+        return nil, fmt.Errorf("invalid message: %s", pretty.Sdump(msg))
+    }
+}
+
+func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
+    defer c.SetReadDeadline(time.Time{})
+    c.SetReadDeadline(time.Now().Add(5 * time.Second))
+    if err := c.Write(msg); err != nil {
+        return nil, fmt.Errorf("could not write to connection: %v", err)
+    }
+    return c.ReadSnap(id)
+}
+
+// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
+func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
+    // write request
+    packet := eth.GetBlockHeadersPacket(*request)
+    req := &eth.GetBlockHeadersPacket66{
+        RequestId:             id,
+        GetBlockHeadersPacket: &packet,
+    }
+    if err := conn.Write66(req, GetBlockHeaders{}.Code()); err != nil {
+        return nil, fmt.Errorf("could not write to connection: %v", err)
+    }
+    // wait for response
+    msg := conn.waitForResponse(chain, timeout, req.RequestId)
+    headers, ok := msg.(BlockHeaders)
+    if !ok {
+        return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
+    }
+    return headers, nil
+}
+
+// headersMatch returns whether the received headers match the expected headers.
+func headersMatch(expected BlockHeaders, headers BlockHeaders) bool {
+    return reflect.DeepEqual(expected, headers)
+}
+
+// waitForResponse reads from the connection until a response with the expected
+// request ID is received.
+func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message {
+    for {
+        id, msg := c.readAndServe66(chain, timeout)
+        if id == requestID {
+            return msg
+        }
+    }
+}
+
+// sendNextBlock broadcasts the next block in the chain and waits
+// for the node to propagate the block and import it into its chain.
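headersRequest and the tests that call it build GetBlockHeaders queries from an origin (hash or number), an Amount, a Skip and a Reverse flag. Assuming the usual eth wire-protocol semantics, where Skip blocks are skipped between consecutive headers and Reverse walks toward the genesis block, the selected block numbers can be sketched with the illustrative helper below (not part of the patch); the sendNextBlock helper announced just above follows after it.

```go
package main

import "fmt"

// headerNumbers sketches which block numbers a GetBlockHeaders query selects,
// assuming Skip blocks are skipped between consecutive headers and Reverse
// walks toward the genesis block.
func headerNumbers(origin, amount, skip uint64, reverse bool) []uint64 {
    step := int64(skip) + 1
    if reverse {
        step = -step
    }
    var numbers []uint64
    cur := int64(origin)
    for i := uint64(0); i < amount && cur >= 0; i++ {
        numbers = append(numbers, uint64(cur))
        cur += step
    }
    return numbers
}

func main() {
    // Origin block 1, Amount 2, Skip 1 – the shape used by the header tests here.
    fmt.Println(headerNumbers(1, 2, 1, false)) // [1 3]
    fmt.Println(headerNumbers(3, 2, 1, true))  // [3 1]
}
```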
+func (s *Suite) sendNextBlock(isEth66 bool) error { + // set up sending and receiving connections + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create new block announcement + nextBlock := s.fullChain.blocks[s.chain.Len()] + blockAnnouncement := &NewBlock{ + Block: nextBlock, + TD: s.fullChain.TotalDifficultyAt(s.chain.Len()), + } + // send announcement and wait for node to request the header + if err = s.testAnnounce(sendConn, recvConn, blockAnnouncement); err != nil { + return fmt.Errorf("failed to announce block: %v", err) + } + // wait for client to update its chain + if err = s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("failed to receive confirmation of block import: %v", err) + } + // update test suite chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} + +// testAnnounce writes a block announcement to the node and waits for the node +// to propagate it. +func (s *Suite) testAnnounce(sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) error { + if err := sendConn.Write(blockAnnouncement); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + return s.waitAnnounce(receiveConn, blockAnnouncement) +} + +// waitAnnounce waits for a NewBlock or NewBlockHashes announcement from the node. +func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error { + for { + switch msg := conn.readAndServe(s.chain, timeout).(type) { + case *NewBlock: + if !reflect.DeepEqual(blockAnnouncement.Block.Header(), msg.Block.Header()) { + return fmt.Errorf("wrong header in block announcement: \nexpected %v "+ + "\ngot %v", blockAnnouncement.Block.Header(), msg.Block.Header()) + } + if !reflect.DeepEqual(blockAnnouncement.TD, msg.TD) { + return fmt.Errorf("wrong TD in announcement: expected %v, got %v", blockAnnouncement.TD, msg.TD) + } + return nil + case *NewBlockHashes: + hashes := *msg + if blockAnnouncement.Block.Hash() != hashes[0].Hash { + return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) + } + return nil + case *NewPooledTransactionHashes: + // ignore tx announcements from previous tests + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } +} + +func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool) error { + defer conn.SetReadDeadline(time.Time{}) + conn.SetReadDeadline(time.Now().Add(20 * time.Second)) + // create request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: block.Hash(), + }, + Amount: 1, + } + // loop until BlockHeaders response contains desired block, confirming the + // node imported the block + for { + var ( + headers BlockHeaders + err error + ) + if isEth66 { + requestID := uint64(54) + headers, err = conn.headersRequest(req, s.chain, eth66, requestID) + } else { + headers, err = conn.headersRequest(req, s.chain, eth65, 0) + } + if err != nil { + return fmt.Errorf("GetBlockHeader request failed: %v", err) + } + // if headers response is empty, node hasn't imported block yet, try again + if len(headers) == 0 { + time.Sleep(100 * time.Millisecond) + continue + } + if !reflect.DeepEqual(block.Header(), headers[0]) { + 
return fmt.Errorf("wrong header returned: wanted %v, got %v", block.Header(), headers[0]) + } + return nil + } +} + +func (s *Suite) oldAnnounce(isEth66 bool) error { + sendConn, receiveConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer receiveConn.Close() + if err := sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err := receiveConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // create old block announcement + oldBlockAnnounce := &NewBlock{ + Block: s.chain.blocks[len(s.chain.blocks)/2], + TD: s.chain.blocks[len(s.chain.blocks)/2].Difficulty(), + } + if err := sendConn.Write(oldBlockAnnounce); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // wait to see if the announcement is propagated + switch msg := receiveConn.readAndServe(s.chain, time.Second*8).(type) { + case *NewBlock: + block := *msg + if block.Block.Hash() == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block propagated: %s", pretty.Sdump(msg)) + } + case *NewBlockHashes: + hashes := *msg + for _, hash := range hashes { + if hash.Hash == oldBlockAnnounce.Block.Hash() { + return fmt.Errorf("unexpected: block announced: %s", pretty.Sdump(msg)) + } + } + case *Error: + errMsg := *msg + // check to make sure error is timeout (propagation didn't come through == test successful) + if !strings.Contains(errMsg.String(), "timeout") { + return fmt.Errorf("unexpected error: %v", pretty.Sdump(msg)) + } + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + return nil +} + +func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error { + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + defer conn.Close() + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + if err := conn.Write(handshake); err != nil { + return fmt.Errorf("could not write to connection: %v", err) + } + // check that the peer disconnected + for i := 0; i < 2; i++ { + switch msg := conn.readAndServe(s.chain, 20*time.Second).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Discard one hello as Hello's are sent concurrently + continue + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + } + // dial for the next round + if isEth66 { + conn, err = s.dial66() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } else { + conn, err = s.dial() + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + } + } + return nil +} + +func (s *Suite) 
maliciousStatus(conn *Conn) error {
+    if err := conn.handshake(); err != nil {
+        return fmt.Errorf("handshake failed: %v", err)
+    }
+    status := &Status{
+        ProtocolVersion: uint32(conn.negotiatedProtoVersion),
+        NetworkID:       s.chain.chainConfig.ChainID.Uint64(),
+        TD:              largeNumber(2),
+        Head:            s.chain.blocks[s.chain.Len()-1].Hash(),
+        Genesis:         s.chain.blocks[0].Hash(),
+        ForkID:          s.chain.ForkID(),
+    }
+    // get status
+    msg, err := conn.statusExchange(s.chain, status)
+    if err != nil {
+        return fmt.Errorf("status exchange failed: %v", err)
+    }
+    switch msg := msg.(type) {
+    case *Status:
+    default:
+        return fmt.Errorf("expected status, got: %#v ", msg)
+    }
+    // wait for disconnect
+    switch msg := conn.readAndServe(s.chain, timeout).(type) {
+    case *Disconnect:
+        return nil
+    case *Error:
+        return nil
+    default:
+        return fmt.Errorf("expected disconnect, got: %s", pretty.Sdump(msg))
+    }
+}
+
+func (s *Suite) hashAnnounce(isEth66 bool) error {
+    // create connections
+    sendConn, recvConn, err := s.createSendAndRecvConns(isEth66)
+    if err != nil {
+        return fmt.Errorf("failed to create connections: %v", err)
+    }
+    defer sendConn.Close()
+    defer recvConn.Close()
+    if err := sendConn.peer(s.chain, nil); err != nil {
+        return fmt.Errorf("peering failed: %v", err)
+    }
+    if err := recvConn.peer(s.chain, nil); err != nil {
+        return fmt.Errorf("peering failed: %v", err)
+    }
+    // create NewBlockHashes announcement
+    type anno struct {
+        Hash   common.Hash // Hash of one particular block being announced
+        Number uint64      // Number of one particular block being announced
+    }
+    nextBlock := s.fullChain.blocks[s.chain.Len()]
+    announcement := anno{Hash: nextBlock.Hash(), Number: nextBlock.Number().Uint64()}
+    newBlockHash := &NewBlockHashes{announcement}
+    if err := sendConn.Write(newBlockHash); err != nil {
+        return fmt.Errorf("failed to write to connection: %v", err)
+    }
+    // Announcement sent, now wait for a header request
+    var (
+        id             uint64
+        msg            Message
+        blockHeaderReq GetBlockHeaders
+    )
+    if isEth66 {
+        id, msg = sendConn.Read66()
+        switch msg := msg.(type) {
+        case GetBlockHeaders:
+            blockHeaderReq = msg
+        default:
+            return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
+        }
+        if blockHeaderReq.Amount != 1 {
+            return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount)
+        }
+        if blockHeaderReq.Origin.Hash != announcement.Hash {
+            return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v",
+                pretty.Sdump(announcement),
+                pretty.Sdump(blockHeaderReq))
+        }
+        if err := sendConn.Write66(&eth.BlockHeadersPacket66{
+            RequestId: id,
+            BlockHeadersPacket: eth.BlockHeadersPacket{
+                nextBlock.Header(),
+            },
+        }, BlockHeaders{}.Code()); err != nil {
+            return fmt.Errorf("failed to write to connection: %v", err)
+        }
+    } else {
+        msg = sendConn.Read()
+        switch msg := msg.(type) {
+        case *GetBlockHeaders:
+            blockHeaderReq = *msg
+        default:
+            return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
+        }
+        if blockHeaderReq.Amount != 1 {
+            return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount)
+        }
+        if blockHeaderReq.Origin.Hash != announcement.Hash {
+            return fmt.Errorf("unexpected block header requested. 
Announced:\n %v\n Remote request:\n%v", + pretty.Sdump(announcement), + pretty.Sdump(blockHeaderReq)) + } + if err := sendConn.Write(&BlockHeaders{nextBlock.Header()}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + } + // wait for block announcement + msg = recvConn.readAndServe(s.chain, timeout) + switch msg := msg.(type) { + case *NewBlockHashes: + hashes := *msg + if len(hashes) != 1 { + return fmt.Errorf("unexpected new block hash announcement: wanted 1 announcement, got %d", len(hashes)) + } + if nextBlock.Hash() != hashes[0].Hash { + return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(), + hashes[0].Hash) + } + case *NewBlock: + // node should only propagate NewBlock without having requested the body if the body is empty + nextBlockBody := nextBlock.Body() + if len(nextBlockBody.Transactions) != 0 || len(nextBlockBody.Uncles) != 0 { + return fmt.Errorf("unexpected non-empty new block propagated: %s", pretty.Sdump(msg)) + } + if msg.Block.Hash() != nextBlock.Hash() { + return fmt.Errorf("mismatched hash of propagated new block: wanted %v, got %v", + nextBlock.Hash(), msg.Block.Hash()) + } + // check to make sure header matches header that was sent to the node + if !reflect.DeepEqual(nextBlock.Header(), msg.Block.Header()) { + return fmt.Errorf("incorrect header received: wanted %v, got %v", nextBlock.Header(), msg.Block.Header()) + } + default: + return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) + } + // confirm node imported block + if err := s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { + return fmt.Errorf("error waiting for node to import new block: %v", err) + } + // update the chain + s.chain.blocks = append(s.chain.blocks, nextBlock) + return nil +} diff --git a/cmd/devp2p/internal/ethtest/large.go b/cmd/devp2p/internal/ethtest/large.go new file mode 100644 index 000000000000..22421355abd4 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/large.go @@ -0,0 +1,80 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "crypto/rand" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +// largeNumber returns a very large big.Int. +func largeNumber(megabytes int) *big.Int { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + bigint := new(big.Int) + bigint.SetBytes(buf) + return bigint +} + +// largeBuffer returns a very large buffer. +func largeBuffer(megabytes int) []byte { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return buf +} + +// largeString returns a very large string. 
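A quick size check on the helpers just above: largeNumber and largeBuffer both allocate megabytes*1024*1024 random bytes, so largeNumber(2) yields an integer of roughly 16.7 million bits, which is what makes it useful as an absurd total difficulty in the malicious status tests. A self-contained sketch of the same construction (not part of the patch); largeString, described in the comment above, follows next.

```go
package main

import (
    "crypto/rand"
    "fmt"
    "math/big"
)

func main() {
    // The same construction as largeNumber(2) above: 2 MiB of random bytes read
    // as a single unsigned integer, i.e. on the order of 16.7 million bits.
    buf := make([]byte, 2*1024*1024)
    rand.Read(buf)
    n := new(big.Int).SetBytes(buf)
    fmt.Println(n.BitLen()) // ~16777216
}
```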
+func largeString(megabytes int) string { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return hexutil.Encode(buf) +} + +func largeBlock() *types.Block { + return types.NewBlockWithHeader(largeHeader()) +} + +// Returns a random hash +func randHash() common.Hash { + var h common.Hash + rand.Read(h[:]) + return h +} + +func largeHeader() *types.Header { + return &types.Header{ + MixDigest: randHash(), + ReceiptHash: randHash(), + TxHash: randHash(), + Nonce: types.BlockNonce{}, + Extra: []byte{}, + Bloom: types.Bloom{}, + GasUsed: 0, + Coinbase: common.Address{}, + GasLimit: 0, + UncleHash: types.EmptyUncleHash, + Time: 1337, + ParentHash: randHash(), + Root: randHash(), + Number: largeNumber(2), + Difficulty: largeNumber(2), + } +} diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go new file mode 100644 index 000000000000..95dd90fd3b4b --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -0,0 +1,675 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "bytes" + "errors" + "fmt" + "math/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/snap" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +func (s *Suite) TestSnapStatus(t *utesting.T) { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } +} + +type accRangeTest struct { + nBytes uint64 + root common.Hash + origin common.Hash + limit common.Hash + + expAccounts int + expFirst common.Hash + expLast common.Hash +} + +// TestSnapGetAccountRange various forms of GetAccountRange requests. 
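For orientation before the table-driven test that follows: each accRangeTest case above is turned into a snap GetAccountRange query with a state root, an origin/limit account-hash window and a byte budget, and the expected first/last account hashes are checked against the reply. The sketch below lists those fields with a local stand-in type; it is illustrative only and not the real snap packet type.

```go
package main

import "fmt"

// accountRangeQuery lists the fields the suite fills in for a snap GetAccountRange
// request; it is an illustrative stand-in, not the real snap packet type.
type accountRangeQuery struct {
    ID     uint64   // request id, echoed by the responder
    Root   [32]byte // state root the range is read from
    Origin [32]byte // lowest account hash of the requested range
    Limit  [32]byte // highest account hash of the requested range
    Bytes  uint64   // soft size budget for the response
}

func main() {
    // An "everything from the start of the trie" query with a 4000-byte budget,
    // mirroring the first cases of the table below.
    q := accountRangeQuery{ID: 1, Bytes: 4000}
    for i := range q.Limit {
        q.Limit[i] = 0xff
    }
    fmt.Printf("origin %x... limit %x... budget %d bytes\n", q.Origin[:2], q.Limit[:2], q.Bytes)
}
```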
+func (s *Suite) TestSnapGetAccountRange(t *utesting.T) { + var ( + root = s.chain.RootAt(999) + ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + zero = common.Hash{} + firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29") + firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b") + secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606") + storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790") + ) + for i, tc := range []accRangeTest{ + // Tests decreasing the number of bytes + {4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")}, + {3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")}, + {2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")}, + {1, root, zero, ffHash, 1, firstKey, firstKey}, + + // Tests variations of the range + // + // [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds + {4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey}, + // [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds) + {4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey}, + {4000, root, zero, zero, 1, firstKey, firstKey}, + {4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")}, + {4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")}, + + // Test different root hashes + // + // A stateroot that does not exist + {4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero}, + // The genesis stateroot (we expect it to not be served) + {4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero}, + // A 127 block old stateroot, expected to be served + {4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")}, + // A root which is not actually an account root, but a storage orot + {4000, storageRoot, zero, ffHash, 0, zero, zero}, + + // And some non-sensical requests + // + // range from [0xFF to 0x00], wrong order. Expect not to be serviced + {4000, root, ffHash, zero, 0, zero, zero}, + // range from [firstkey, firstkey-1], wrong order. Expect to get first key. + {4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey}, + // range from [firstkey, 0], wrong order. Expect to get first key. + {4000, root, firstKey, zero, 1, firstKey, firstKey}, + // Max bytes: 0. Expect to deliver one account. 
+ {0, root, zero, ffHash, 1, firstKey, firstKey}, + } { + if err := s.snapGetAccountRange(t, &tc); err != nil { + t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err) + } + } +} + +type stRangesTest struct { + root common.Hash + accounts []common.Hash + origin []byte + limit []byte + nBytes uint64 + + expSlots int +} + +// TestSnapGetStorageRange various forms of GetStorageRanges requests. +func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) { + var ( + ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + zero = common.Hash{} + firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606") + ) + for i, tc := range []stRangesTest{ + { + root: s.chain.RootAt(999), + accounts: []common.Hash{secondKey, firstKey}, + origin: zero[:], + limit: ffHash[:], + nBytes: 500, + expSlots: 0, + }, + + /* + Some tests against this account: + { + "balance": "0", + "nonce": 1, + "root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790", + "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "storage": { + "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01", + "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03" + }, + "key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844" + } + */ + { // [:] -> [slot1, slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: zero[:], + limit: ffHash[:], + nBytes: 500, + expSlots: 3, + }, + { // [slot1:] -> [slot1, slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"), + limit: ffHash[:], + nBytes: 500, + expSlots: 3, + }, + { // [slot1+ :] -> [slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"), + limit: ffHash[:], + nBytes: 500, + expSlots: 2, + }, + { // [slot1:slot2] -> [slot1, slot2] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"), + limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"), + nBytes: 500, + expSlots: 2, + }, + { // [slot1+:slot2+] -> [slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"), + nBytes: 500, + expSlots: 2, + }, + } { + if err := s.snapGetStorageRanges(t, &tc); err != nil { + t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v", + i, tc.root, tc.origin, 
tc.limit, tc.nBytes, len(tc.accounts), err) + } + } +} + +type byteCodesTest struct { + nBytes uint64 + hashes []common.Hash + + expHashes int +} + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") +) + +// TestSnapGetByteCodes various forms of GetByteCodes requests. +func (s *Suite) TestSnapGetByteCodes(t *utesting.T) { + // The halfchain import should yield these bytecodes + var hcBytecodes []common.Hash + for _, s := range []string{ + "0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317", + "0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3", + "0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44", + "0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6", + "0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c", + "0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04", + "0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49", + "0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142", + "0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1", + "0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb", + "0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d", + "0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732", + "0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77", + "0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f", + "0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373", + "0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb", + "0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e", + "0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6", + "0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35", + "0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b", + "0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f", + "0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce", + "0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b", + "0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a", + "0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49", + } { + hcBytecodes = append(hcBytecodes, common.HexToHash(s)) + } + + for i, tc := range []byteCodesTest{ + // A few stateroots + { + nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)}, + expHashes: 0, + }, + { + nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)}, + expHashes: 0, + }, + // Empties + { + nBytes: 10000, hashes: []common.Hash{emptyRoot}, + expHashes: 0, + }, + { + nBytes: 10000, hashes: []common.Hash{emptyCode}, + expHashes: 1, + }, + { + nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode}, + expHashes: 3, + }, + // The existing bytecodes + { + nBytes: 10000, hashes: hcBytecodes, + expHashes: len(hcBytecodes), + }, + // The existing, with limited byte arg + { + nBytes: 1, hashes: hcBytecodes, + expHashes: 1, + }, + { + nBytes: 0, hashes: hcBytecodes, + expHashes: 1, + }, + { + nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]}, + expHashes: 4, + }, + } { + if err := s.snapGetByteCodes(t, &tc); err 
!= nil { + t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err) + } + } +} + +type trieNodesTest struct { + root common.Hash + paths []snap.TrieNodePathSet + nBytes uint64 + + expHashes []common.Hash + expReject bool +} + +func decodeNibbles(nibbles []byte, bytes []byte) { + for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 { + bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1] + } +} + +// hasTerm returns whether a hex key has the terminator flag. +func hasTerm(s []byte) bool { + return len(s) > 0 && s[len(s)-1] == 16 +} + +func keybytesToHex(str []byte) []byte { + l := len(str)*2 + 1 + var nibbles = make([]byte, l) + for i, b := range str { + nibbles[i*2] = b / 16 + nibbles[i*2+1] = b % 16 + } + nibbles[l-1] = 16 + return nibbles +} + +func hexToCompact(hex []byte) []byte { + terminator := byte(0) + if hasTerm(hex) { + terminator = 1 + hex = hex[:len(hex)-1] + } + buf := make([]byte, len(hex)/2+1) + buf[0] = terminator << 5 // the flag byte + if len(hex)&1 == 1 { + buf[0] |= 1 << 4 // odd flag + buf[0] |= hex[0] // first nibble is contained in the first byte + hex = hex[1:] + } + decodeNibbles(hex, buf[1:]) + return buf +} + +// TestSnapTrieNodes various forms of GetTrieNodes requests. +func (s *Suite) TestSnapTrieNodes(t *utesting.T) { + + key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + // helper function to iterate the key, and generate the compact-encoded + // trie paths along the way. + pathTo := func(length int) snap.TrieNodePathSet { + hex := keybytesToHex(key)[:length] + hex[len(hex)-1] = 0 // remove term flag + hKey := hexToCompact(hex) + return snap.TrieNodePathSet{hKey} + } + var accPaths []snap.TrieNodePathSet + for i := 1; i <= 65; i++ { + accPaths = append(accPaths, pathTo(i)) + } + empty := emptyCode + for i, tc := range []trieNodesTest{ + { + root: s.chain.RootAt(999), + paths: nil, + nBytes: 500, + expHashes: nil, + }, + { + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off + snap.TrieNodePathSet{[]byte{0}}, + }, + nBytes: 5000, + expHashes: []common.Hash{}, + expReject: true, + }, + { + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0}}, + snap.TrieNodePathSet{[]byte{1}, []byte{0}}, + }, + nBytes: 5000, + //0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf + expHashes: []common.Hash{s.chain.RootAt(999)}, + }, + { // nonsensically long path + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}}, + }, + nBytes: 5000, + expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")}, + }, + { + root: s.chain.RootAt(0), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0}}, + snap.TrieNodePathSet{[]byte{1}, []byte{0}}, + }, + nBytes: 5000, + expHashes: []common.Hash{}, + }, + { + // The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures. 
+ root: s.chain.RootAt(999), + paths: accPaths, + nBytes: 5000, + expHashes: []common.Hash{ + common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), + common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"), + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty}, + }, + { + // Basically the same as above, with different ordering + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + accPaths[10], accPaths[1], accPaths[0], + }, + nBytes: 5000, + expHashes: []common.Hash{ + empty, + common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"), + common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), + }, + }, + } { + if err := s.snapGetTrieNodes(t, &tc); err != nil { + t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err) + } + } +} + +func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetAccountRange{ + ID: uint64(rand.Int63()), + Root: tc.root, + Origin: tc.origin, + Limit: tc.limit, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("account range request failed: %v", err) + } + var res *snap.AccountRangePacket + if r, ok := resp.(*AccountRange); !ok { + return fmt.Errorf("account range response wrong: %T %v", resp, resp) + } else { + res = (*snap.AccountRangePacket)(r) + } + if exp, got := tc.expAccounts, len(res.Accounts); exp != got { + return fmt.Errorf("expected %d accounts, got %d", exp, got) + } + // Check that the encoding order is correct + for i := 1; i < len(res.Accounts); i++ { + if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 { + return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:]) + } + } + var ( + hashes []common.Hash + accounts [][]byte + proof = res.Proof + ) + hashes, accounts, err = res.Unpack() + if err != nil { + return err + } + if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 { + return nil + } + if len(hashes) > 0 { + if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got { + return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got) + } + if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got { + return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got) + } + } + // Reconstruct a partial trie from the response and verify it + keys := make([][]byte, len(hashes)) + for i, key := range hashes { + keys[i] = common.CopyBytes(key[:]) + } + nodes := make(light.NodeList, len(proof)) + for i, node := range proof { + nodes[i] = node + } + proofdb := nodes.NodeSet() + + var end []byte + if len(keys) > 0 { + end = keys[len(keys)-1] + } + _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, 
proofdb) + return err +} + +func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetStorageRanges{ + ID: uint64(rand.Int63()), + Root: tc.root, + Accounts: tc.accounts, + Origin: tc.origin, + Limit: tc.limit, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("account range request failed: %v", err) + } + var res *snap.StorageRangesPacket + if r, ok := resp.(*StorageRanges); !ok { + return fmt.Errorf("account range response wrong: %T %v", resp, resp) + } else { + res = (*snap.StorageRangesPacket)(r) + } + gotSlots := 0 + // Ensure the ranges are monotonically increasing + for i, slots := range res.Slots { + gotSlots += len(slots) + for j := 1; j < len(slots); j++ { + if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 { + return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:]) + } + } + } + if exp, got := tc.expSlots, gotSlots; exp != got { + return fmt.Errorf("expected %d slots, got %d", exp, got) + } + return nil +} + +func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetByteCodes{ + ID: uint64(rand.Int63()), + Hashes: tc.hashes, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("getBytecodes request failed: %v", err) + } + var res *snap.ByteCodesPacket + if r, ok := resp.(*ByteCodes); !ok { + return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp) + } else { + res = (*snap.ByteCodesPacket)(r) + } + if exp, got := tc.expHashes, len(res.Codes); exp != got { + for i, c := range res.Codes { + fmt.Printf("%d. 
%#x\n", i, c) + } + return fmt.Errorf("expected %d bytecodes, got %d", exp, got) + } + // Cross reference the requested bytecodes with the response to find gaps + // that the serving node is missing + var ( + bytecodes = res.Codes + hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash = make([]byte, 32) + codes = make([][]byte, len(req.Hashes)) + ) + + for i, j := 0, 0; i < len(bytecodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + + for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) { + j++ + } + if j < len(req.Hashes) { + codes[j] = bytecodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + return errors.New("unexpected bytecode") + } + + return nil +} + +func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetTrieNodes{ + ID: uint64(rand.Int63()), + Root: tc.root, + Paths: tc.paths, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + if tc.expReject { + return nil + } + return fmt.Errorf("trienodes request failed: %v", err) + } + var res *snap.TrieNodesPacket + if r, ok := resp.(*TrieNodes); !ok { + return fmt.Errorf("trienodes response wrong: %T %v", resp, resp) + } else { + res = (*snap.TrieNodesPacket)(r) + } + + // Check the correctness + + // Cross reference the requested trienodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + trienodes := res.Nodes + if got, want := len(trienodes), len(tc.expHashes); got != want { + return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want) + } + for i, trienode := range trienodes { + hasher.Reset() + hasher.Write(trienode) + hasher.Read(hash) + if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) { + fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want) + err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want) + } + } + return err +} diff --git a/cmd/devp2p/internal/ethtest/snapTypes.go b/cmd/devp2p/internal/ethtest/snapTypes.go new file mode 100644 index 000000000000..bb8638c3d803 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snapTypes.go @@ -0,0 +1,36 @@ +package ethtest + +import "github.com/ethereum/go-ethereum/eth/protocols/snap" + +// GetAccountRange represents an account range query. 
+type GetAccountRange snap.GetAccountRangePacket + +func (g GetAccountRange) Code() int { return 33 } + +type AccountRange snap.AccountRangePacket + +func (g AccountRange) Code() int { return 34 } + +type GetStorageRanges snap.GetStorageRangesPacket + +func (g GetStorageRanges) Code() int { return 35 } + +type StorageRanges snap.StorageRangesPacket + +func (g StorageRanges) Code() int { return 36 } + +type GetByteCodes snap.GetByteCodesPacket + +func (g GetByteCodes) Code() int { return 37 } + +type ByteCodes snap.ByteCodesPacket + +func (g ByteCodes) Code() int { return 38 } + +type GetTrieNodes snap.GetTrieNodesPacket + +func (g GetTrieNodes) Code() int { return 39 } + +type TrieNodes snap.TrieNodesPacket + +func (g TrieNodes) Code() int { return 40 } diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go new file mode 100644 index 000000000000..dee59bc57987 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -0,0 +1,793 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// Suite represents a structure used to test a node's conformance +// to the eth protocol. +type Suite struct { + Dest *enode.Node + + chain *Chain + fullChain *Chain +} + +// NewSuite creates and returns a new eth-test suite that can +// be used to test the given node against the given blockchain +// data. 
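The Code() mappings above (33 through 40) are what lets the connection route snap messages, and the Suite type is the entry point the devp2p test command drives against a live node. Below is a rough sketch of how such a suite might be wired up. Note the assumptions: ethtest and utesting are internal packages, so this only compiles from inside the go-ethereum module; the enode URL and file paths are placeholders; and utesting.RunTests is assumed to take a test list and an output writer. NewSuite itself follows next.

```go
package main

import (
    "fmt"
    "os"

    "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
    "github.com/ethereum/go-ethereum/internal/utesting"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
    // Placeholder enode URL of the node under test.
    dest, err := enode.ParseV4("enode://<node-id>@127.0.0.1:30303")
    if err != nil {
        fmt.Println("bad node:", err)
        return
    }
    // Chain and genesis files are placeholders for pre-generated test data.
    suite, err := ethtest.NewSuite(dest, "testdata/chain.rlp", "testdata/genesis.json")
    if err != nil {
        fmt.Println("suite setup failed:", err)
        return
    }
    utesting.RunTests(suite.AllEthTests(), os.Stdout)
}
```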
+func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, error) { + chain, err := loadChain(chainfile, genesisfile) + if err != nil { + return nil, err + } + return &Suite{ + Dest: dest, + chain: chain.Shorten(1000), + fullChain: chain, + }, nil +} + +func (s *Suite) AllEthTests() []utesting.Test { + return []utesting.Test{ + // status + {Name: "TestStatus65", Fn: s.TestStatus65}, + {Name: "TestStatus66", Fn: s.TestStatus66}, + // get block headers + {Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, + // get block bodies + {Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, + // broadcast + {Name: "TestBroadcast65", Fn: s.TestBroadcast65}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65}, + {Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, + // malicious handshakes + status + {Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65}, + {Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, + // test transactions + {Name: "TestTransaction65", Fn: s.TestTransaction65}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, + } +} + +func (s *Suite) EthTests() []utesting.Test { + return []utesting.Test{ + {Name: "TestStatus65", Fn: s.TestStatus65}, + {Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65}, + {Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65}, + {Name: "TestBroadcast65", Fn: s.TestBroadcast65}, + {Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65}, + {Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65}, + {Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65}, + {Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65}, + {Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65}, + {Name: "TestTransaction65", Fn: s.TestTransaction65}, + {Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65}, + } +} + +func (s *Suite) Eth66Tests() []utesting.Test { + return []utesting.Test{ + // only proceed with eth66 test suite if node supports eth 66 protocol + {Name: "TestStatus66", Fn: s.TestStatus66}, + {Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, + {Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, + {Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, + {Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, + {Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66}, + {Name: "TestBroadcast66", Fn: s.TestBroadcast66}, + {Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66}, + {Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66}, + {Name: 
"TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66}, + {Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66}, + {Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66}, + {Name: "TestTransaction66", Fn: s.TestTransaction66}, + {Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66}, + {Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66}, + {Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66}, + } +} + +func (s *Suite) SnapTests() []utesting.Test { + return []utesting.Test{ + {Name: "TestSnapStatus", Fn: s.TestSnapStatus}, + {Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange}, + {Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes}, + {Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes}, + {Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges}, + } +} + +var ( + eth66 = true // indicates whether suite should negotiate eth66 connection + eth65 = false // indicates whether suite should negotiate eth65 connection or below. +) + +// TestStatus65 attempts to connect to the given node and exchange +// a status message with it. +func (s *Suite) TestStatus65(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } +} + +// TestStatus66 attempts to connect to the given node and exchange +// a status message with it on the eth66 protocol. +func (s *Suite) TestStatus66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } +} + +// TestGetBlockHeaders65 tests whether the given node can respond to +// a `GetBlockHeaders` request accurately. +func (s *Suite) TestGetBlockHeaders65(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("handshake(s) failed: %v", err) + } + // write request + req := &GetBlockHeaders{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + } + headers, err := conn.headersRequest(req, s.chain, eth65, 0) + if err != nil { + t.Fatalf("GetBlockHeaders request failed: %v", err) + } + // check for correct headers + expected, err := s.chain.GetHeaders(*req) + if err != nil { + t.Fatalf("failed to get headers for given request: %v", err) + } + if !headersMatch(expected, headers) { + t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers) + } +} + +// TestGetBlockHeaders66 tests whether the given node can respond to +// an eth66 `GetBlockHeaders` request and that the response is accurate. 
+func (s *Suite) TestGetBlockHeaders66(t *utesting.T) {
+    conn, err := s.dial66()
+    if err != nil {
+        t.Fatalf("dial failed: %v", err)
+    }
+    defer conn.Close()
+    if err = conn.peer(s.chain, nil); err != nil {
+        t.Fatalf("peering failed: %v", err)
+    }
+    // write request
+    req := &GetBlockHeaders{
+        Origin: eth.HashOrNumber{
+            Hash: s.chain.blocks[1].Hash(),
+        },
+        Amount:  2,
+        Skip:    1,
+        Reverse: false,
+    }
+    headers, err := conn.headersRequest(req, s.chain, eth66, 33)
+    if err != nil {
+        t.Fatalf("could not get block headers: %v", err)
+    }
+    // check for correct headers
+    expected, err := s.chain.GetHeaders(*req)
+    if err != nil {
+        t.Fatalf("failed to get headers for given request: %v", err)
+    }
+    if !headersMatch(expected, headers) {
+        t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
+    }
+}
+
+// TestSimultaneousRequests66 sends two simultaneous `GetBlockHeaders` requests from
+// the same connection with different request IDs and checks to make sure the node
+// responds with the correct headers per request.
+func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
+    // create a connection
+    conn, err := s.dial66()
+    if err != nil {
+        t.Fatalf("dial failed: %v", err)
+    }
+    defer conn.Close()
+    if err := conn.peer(s.chain, nil); err != nil {
+        t.Fatalf("peering failed: %v", err)
+    }
+    // create two requests
+    req1 := &eth.GetBlockHeadersPacket66{
+        RequestId: uint64(111),
+        GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+            Origin: eth.HashOrNumber{
+                Hash: s.chain.blocks[1].Hash(),
+            },
+            Amount:  2,
+            Skip:    1,
+            Reverse: false,
+        },
+    }
+    req2 := &eth.GetBlockHeadersPacket66{
+        RequestId: uint64(222),
+        GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+            Origin: eth.HashOrNumber{
+                Hash: s.chain.blocks[1].Hash(),
+            },
+            Amount:  4,
+            Skip:    1,
+            Reverse: false,
+        },
+    }
+    // write the first request
+    if err := conn.Write66(req1, GetBlockHeaders{}.Code()); err != nil {
+        t.Fatalf("failed to write to connection: %v", err)
+    }
+    // write the second request
+    if err := conn.Write66(req2, GetBlockHeaders{}.Code()); err != nil {
+        t.Fatalf("failed to write to connection: %v", err)
+    }
+    // wait for responses
+    msg := conn.waitForResponse(s.chain, timeout, req1.RequestId)
+    headers1, ok := msg.(BlockHeaders)
+    if !ok {
+        t.Fatalf("unexpected %s", pretty.Sdump(msg))
+    }
+    msg = conn.waitForResponse(s.chain, timeout, req2.RequestId)
+    headers2, ok := msg.(BlockHeaders)
+    if !ok {
+        t.Fatalf("unexpected %s", pretty.Sdump(msg))
+    }
+    // check received headers for accuracy
+    expected1, err := s.chain.GetHeaders(GetBlockHeaders(*req1.GetBlockHeadersPacket))
+    if err != nil {
+        t.Fatalf("failed to get expected headers for request 1: %v", err)
+    }
+    expected2, err := s.chain.GetHeaders(GetBlockHeaders(*req2.GetBlockHeadersPacket))
+    if err != nil {
+        t.Fatalf("failed to get expected headers for request 2: %v", err)
+    }
+    if !headersMatch(expected1, headers1) {
+        t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
+    }
+    if !headersMatch(expected2, headers2) {
+        t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
+    }
+}
+
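The test above works because eth/66 replies can be paired back to their requests by RequestId even when they arrive out of order. A toy, self-contained sketch of that client-side demultiplexing with stand-in types (not the real packets); TestSameRequestID66 follows next.

```go
package main

import "fmt"

// reply is a stand-in for an eth/66 response carrying the id of the request it
// answers (illustrative only).
type reply struct {
    RequestId uint64
    Payload   string
}

func main() {
    // Two outstanding header requests, ids 111 and 222, as in the test above.
    pending := map[uint64]string{111: "", 222: ""}
    // Replies may arrive in either order; they are paired back to requests by id.
    for _, r := range []reply{{222, "four headers"}, {111, "two headers"}} {
        if _, outstanding := pending[r.RequestId]; outstanding {
            pending[r.RequestId] = r.Payload
        }
    }
    fmt.Println(pending[111], "|", pending[222]) // two headers | four headers
}
```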
+// TestSameRequestID66 sends two requests with the same request ID to a
+// single node.
+func (s *Suite) TestSameRequestID66(t *utesting.T) {
+    conn, err := s.dial66()
+    if err != nil {
+        t.Fatalf("dial failed: %v", err)
+    }
+    defer conn.Close()
+    if err := conn.peer(s.chain, nil); err != nil {
+        t.Fatalf("peering failed: %v", err)
+    }
+    // create requests
+    reqID := uint64(1234)
+    request1 := &eth.GetBlockHeadersPacket66{
+        RequestId: reqID,
+        GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+            Origin: eth.HashOrNumber{
+                Number: 1,
+            },
+            Amount: 2,
+        },
+    }
+    request2 := &eth.GetBlockHeadersPacket66{
+        RequestId: reqID,
+        GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+            Origin: eth.HashOrNumber{
+                Number: 33,
+            },
+            Amount: 2,
+        },
+    }
+    // write the requests
+    if err = conn.Write66(request1, GetBlockHeaders{}.Code()); err != nil {
+        t.Fatalf("failed to write to connection: %v", err)
+    }
+    if err = conn.Write66(request2, GetBlockHeaders{}.Code()); err != nil {
+        t.Fatalf("failed to write to connection: %v", err)
+    }
+    // wait for responses
+    msg := conn.waitForResponse(s.chain, timeout, reqID)
+    headers1, ok := msg.(BlockHeaders)
+    if !ok {
+        t.Fatalf("unexpected %s", pretty.Sdump(msg))
+    }
+    msg = conn.waitForResponse(s.chain, timeout, reqID)
+    headers2, ok := msg.(BlockHeaders)
+    if !ok {
+        t.Fatalf("unexpected %s", pretty.Sdump(msg))
+    }
+    // check if headers match
+    expected1, err := s.chain.GetHeaders(GetBlockHeaders(*request1.GetBlockHeadersPacket))
+    if err != nil {
+        t.Fatalf("failed to get expected block headers: %v", err)
+    }
+    expected2, err := s.chain.GetHeaders(GetBlockHeaders(*request2.GetBlockHeadersPacket))
+    if err != nil {
+        t.Fatalf("failed to get expected block headers: %v", err)
+    }
+    if !headersMatch(expected1, headers1) {
+        t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
+    }
+    if !headersMatch(expected2, headers2) {
+        t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
+    }
+}
+
+// TestZeroRequestID66 checks that a message with a request ID of zero is still handled
+// by the node.
+func (s *Suite) TestZeroRequestID66(t *utesting.T) {
+    conn, err := s.dial66()
+    if err != nil {
+        t.Fatalf("dial failed: %v", err)
+    }
+    defer conn.Close()
+    if err := conn.peer(s.chain, nil); err != nil {
+        t.Fatalf("peering failed: %v", err)
+    }
+    req := &GetBlockHeaders{
+        Origin: eth.HashOrNumber{
+            Number: 0,
+        },
+        Amount: 2,
+    }
+    headers, err := conn.headersRequest(req, s.chain, eth66, 0)
+    if err != nil {
+        t.Fatalf("failed to get block headers: %v", err)
+    }
+    expected, err := s.chain.GetHeaders(*req)
+    if err != nil {
+        t.Fatalf("failed to get expected block headers: %v", err)
+    }
+    if !headersMatch(expected, headers) {
+        t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
+    }
+}
+
+// TestGetBlockBodies65 tests whether the given node can respond to
+// a `GetBlockBodies` request and that the response is accurate.
+func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
+    conn, err := s.dial()
+    if err != nil {
+        t.Fatalf("dial failed: %v", err)
+    }
+    defer conn.Close()
+    if err := conn.peer(s.chain, nil); err != nil {
+        t.Fatalf("peering failed: %v", err)
+    }
+    // create block bodies request
+    req := &GetBlockBodies{
+        s.chain.blocks[54].Hash(),
+        s.chain.blocks[75].Hash(),
+    }
+    if err := conn.Write(req); err != nil {
+        t.Fatalf("could not write to connection: %v", err)
+    }
+    // wait for response
+    switch msg := conn.readAndServe(s.chain, timeout).(type) {
+    case *BlockBodies:
+        t.Logf("received %d block bodies", len(*msg))
+        if len(*msg) != len(*req) {
+            t.Fatalf("wrong bodies in response: expected %d bodies, "+
+                "got %d", len(*req), len(*msg))
+        }
+    default:
+        t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+    }
+}
+
+// TestGetBlockBodies66 tests whether the given node can respond to
+// a `GetBlockBodies` request and that the response is accurate over
+// the eth66 protocol.
+func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
+    conn, err := s.dial66()
+    if err != nil {
+        t.Fatalf("dial failed: %v", err)
+    }
+    defer conn.Close()
+    if err := conn.peer(s.chain, nil); err != nil {
+        t.Fatalf("peering failed: %v", err)
+    }
+    // create block bodies request
+    req := &eth.GetBlockBodiesPacket66{
+        RequestId: uint64(55),
+        GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
+            s.chain.blocks[54].Hash(),
+            s.chain.blocks[75].Hash(),
+        },
+    }
+    if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil {
+        t.Fatalf("could not write to connection: %v", err)
+    }
+    // wait for block bodies response
+    msg := conn.waitForResponse(s.chain, timeout, req.RequestId)
+    blockBodies, ok := msg.(BlockBodies)
+    if !ok {
+        t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+    }
+    t.Logf("received %d block bodies", len(blockBodies))
+    if len(blockBodies) != len(req.GetBlockBodiesPacket) {
+        t.Fatalf("wrong bodies in response: expected %d bodies, "+
+            "got %d", len(req.GetBlockBodiesPacket), len(blockBodies))
+    }
+}
+
+// TestBroadcast65 tests whether a block announcement is correctly
+// propagated to the given node's peer(s).
+func (s *Suite) TestBroadcast65(t *utesting.T) {
+    if err := s.sendNextBlock(eth65); err != nil {
+        t.Fatalf("block broadcast failed: %v", err)
+    }
+}
+
+// TestBroadcast66 tests whether a block announcement is correctly
+// propagated to the given node's peer(s) on the eth66 protocol.
+func (s *Suite) TestBroadcast66(t *utesting.T) {
+    if err := s.sendNextBlock(eth66); err != nil {
+        t.Fatalf("block broadcast failed: %v", err)
+    }
+}
+
+// TestLargeAnnounce65 tests the announcement mechanism with a large block.
+func (s *Suite) TestLargeAnnounce65(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TotalDifficultyAt(nextBlock), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + } + + for i, blockAnnouncement := range blocks { + t.Logf("Testing malicious announcement: %v\n", i) + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err = conn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + conn.Close() + } + // Test the last block as a valid block + if err := s.sendNextBlock(eth65); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) + } +} + +// TestLargeAnnounce66 tests the announcement mechanism with a large +// block over the eth66 protocol. +func (s *Suite) TestLargeAnnounce66(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TotalDifficultyAt(nextBlock), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + } + + for i, blockAnnouncement := range blocks[0:3] { + t.Logf("Testing malicious announcement: %v\n", i) + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + if err := conn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + conn.Close() + } + // Test the last block as a valid block + if err := s.sendNextBlock(eth66); err != nil { + t.Fatalf("failed to broadcast next block: %v", err) + } +} + +// TestOldAnnounce65 tests the announcement mechanism with an old block. +func (s *Suite) TestOldAnnounce65(t *utesting.T) { + if err := s.oldAnnounce(eth65); err != nil { + t.Fatal(err) + } +} + +// TestOldAnnounce66 tests the announcement mechanism with an old block, +// over the eth66 protocol. +func (s *Suite) TestOldAnnounce66(t *utesting.T) { + if err := s.oldAnnounce(eth66); err != nil { + t.Fatal(err) + } +} + +// TestBlockHashAnnounce65 sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. +func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) { + if err := s.hashAnnounce(eth65); err != nil { + t.Fatalf("block hash announcement failed: %v", err) + } +} + +// TestBlockHashAnnounce66 sends a new block hash announcement and expects +// the node to perform a `GetBlockHeaders` request. +func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) { + if err := s.hashAnnounce(eth66); err != nil { + t.Fatalf("block hash announcement failed: %v", err) + } +} + +// TestMaliciousHandshake65 tries to send malicious data during the handshake. 
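The TestLargeAnnounce tests above probe two failure modes: an oversized block paired with a plausible total difficulty, and a legitimate block paired with an absurd TD. A minimal sketch of the second kind of announcement, written against the `NewBlock` wrapper defined later in this patch (the helper name and the chosen TD value are illustrative, not part of the change):

```go
package ethtest

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// inflatedAnnouncement is a hypothetical helper: it wraps a real block in a
// NewBlock announcement but lies about the total difficulty, which a healthy
// node is expected to reject by dropping the peer.
func inflatedAnnouncement(block *types.Block) *NewBlock {
	return &NewBlock{
		Block: block,
		TD:    new(big.Int).Lsh(big.NewInt(1), 255), // absurdly large TD
	}
}
```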
+func (s *Suite) TestMaliciousHandshake65(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth65); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousHandshake66 tries to send malicious data during the handshake. +func (s *Suite) TestMaliciousHandshake66(t *utesting.T) { + if err := s.maliciousHandshakes(t, eth66); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousStatus65 sends a status package with a large total difficulty. +func (s *Suite) TestMaliciousStatus65(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousStatus66 sends a status package with a large total +// difficulty over the eth66 protocol. +func (s *Suite) TestMaliciousStatus66(t *utesting.T) { + conn, err := s.dial66() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + + if err := s.maliciousStatus(conn); err != nil { + t.Fatal(err) + } +} + +// TestTransaction65 sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction65(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth65); err != nil { + t.Fatal(err) + } +} + +// TestTransaction66 sends a valid transaction to the node and +// checks if the transaction gets propagated. +func (s *Suite) TestTransaction66(t *utesting.T) { + if err := s.sendSuccessfulTxs(t, eth66); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousTx65 sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx65(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth65); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousTx66 sends several invalid transactions and tests whether +// the node will propagate them. +func (s *Suite) TestMaliciousTx66(t *utesting.T) { + if err := s.sendMaliciousTxs(t, eth66); err != nil { + t.Fatal(err) + } +} + +// TestLargeTxRequest66 tests whether a node can fulfill a large GetPooledTransactions +// request. 
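The malicious-status tests above delegate to a `maliciousStatus` helper that is not part of this hunk; the gist is a `Status` (an `eth.StatusPacket`) whose TD field is implausibly large while the rest of the handshake data stays valid. A rough sketch of such a packet, with placeholder head, genesis, and fork-ID values and field names assumed from the upstream `eth` package:

```go
package ethtest

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/forkid"
)

// bogusStatus is a hypothetical illustration of a status message with an
// inflated total difficulty; head, genesis and fork ID would normally come
// from the loaded test chain.
func bogusStatus(head, genesis common.Hash, fork forkid.ID) *Status {
	return &Status{
		ProtocolVersion: 66,
		NetworkID:       19763, // chain ID of the bundled test chain
		TD:              new(big.Int).Lsh(big.NewInt(1), 255),
		Head:            head,
		Genesis:         genesis,
		ForkID:          fork,
	}
}
```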
+func (s *Suite) TestLargeTxRequest66(t *utesting.T) {
+	// send the next block to ensure the node is no longer syncing and
+	// is able to accept txs
+	if err := s.sendNextBlock(eth66); err != nil {
+		t.Fatalf("failed to send next block: %v", err)
+	}
+	// send 2000 transactions to the node
+	hashMap, txs, err := generateTxs(s, 2000)
+	if err != nil {
+		t.Fatalf("failed to generate transactions: %v", err)
+	}
+	if err = sendMultipleSuccessfulTxs(t, s, txs); err != nil {
+		t.Fatalf("failed to send multiple txs: %v", err)
+	}
+	// set up connection to receive to ensure node is peered with the receiving connection
+	// before tx request is sent
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err = conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create and send pooled tx request
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
+	}
+	getTxReq := &eth.GetPooledTransactionsPacket66{
+		RequestId: 1234,
+		GetPooledTransactionsPacket: hashes,
+	}
+	if err = conn.Write66(getTxReq, GetPooledTransactions{}.Code()); err != nil {
+		t.Fatalf("could not write to conn: %v", err)
+	}
+	// check that all received transactions match those that were sent to node
+	switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
+	case PooledTransactions:
+		for _, gotTx := range msg {
+			if _, exists := hashMap[gotTx.Hash()]; !exists {
+				t.Fatalf("unexpected tx received: %v", gotTx.Hash())
+			}
+		}
+	default:
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+}
+
+// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions
+// request upon receiving a NewPooledTransactionHashes announcement.
+func (s *Suite) TestNewPooledTxs66(t *utesting.T) {
+	// send the next block to ensure the node is no longer syncing and
+	// is able to accept txs
+	if err := s.sendNextBlock(eth66); err != nil {
+		t.Fatalf("failed to send next block: %v", err)
+	}
+
+	// generate 50 txs
+	hashMap, _, err := generateTxs(s, 50)
+	if err != nil {
+		t.Fatalf("failed to generate transactions: %v", err)
+	}
+
+	// create new pooled tx hashes announcement
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
+	}
+	announce := NewPooledTransactionHashes(hashes)
+
+	// send announcement
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err = conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	if err = conn.Write(announce); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+
+	// wait for GetPooledTxs request
+	for {
+		_, msg := conn.readAndServe66(s.chain, timeout)
+		switch msg := msg.(type) {
+		case GetPooledTransactions:
+			if len(msg) != len(hashes) {
+				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg))
+			}
+			return
+		// ignore propagated txs from previous tests
+		case *NewPooledTransactionHashes:
+			continue
+		// ignore block announcements from previous tests
+		case *NewBlockHashes:
+			continue
+		case *NewBlock:
+			continue
+		default:
+			t.Fatalf("unexpected %s", pretty.Sdump(msg))
+		}
+	}
+}
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
new file mode 100644
index 000000000000..24b1a1960a5b
--- /dev/null
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -0,0 +1,128 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package ethtest
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/ethconfig"
+	"github.com/ethereum/go-ethereum/internal/utesting"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+var (
+	genesisFile   = "./testdata/genesis.json"
+	halfchainFile = "./testdata/halfchain.rlp"
+	fullchainFile = "./testdata/chain.rlp"
+)
+
+func TestEthSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.AllEthTests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
+func TestSnapSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.SnapTests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
+// runGeth creates and starts a geth node
+func runGeth() (*node.Node, error) {
+	stack, err := node.New(&node.Config{
+		P2P: p2p.Config{
+			ListenAddr:  "127.0.0.1:0",
+			NoDiscovery: true,
+			MaxPeers:    10, // in case a test requires multiple connections, can be changed in the future
+			NoDial:      true,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = setupGeth(stack)
+	if err != nil {
+		stack.Close()
+		return nil, err
+	}
+	if err = stack.Start(); err != nil {
+		stack.Close()
+		return nil, err
+	}
+	return stack, nil
+}
+
+func setupGeth(stack *node.Node) error {
+	chain, err := loadChain(halfchainFile, genesisFile)
+	if err != nil {
+		return err
+	}
+
+	backend, err := eth.New(stack, &ethconfig.Config{
+		Genesis:                 &chain.genesis,
+		NetworkId:               chain.genesis.Config.ChainID.Uint64(), // 19763
+		DatabaseCache:           10,
+		TrieCleanCache:          10,
+		TrieCleanCacheJournal:   "",
+		TrieCleanCacheRejournal: 60 * time.Minute,
+		TrieDirtyCache:          16,
+		TrieTimeout:             60 * time.Minute,
+		SnapshotCache:           10,
+	})
+	if err != nil {
+		return err
+	}
+
+	_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
+	return err
+}
diff --git a/cmd/devp2p/internal/ethtest/testdata/chain.rlp b/cmd/devp2p/internal/ethtest/testdata/chain.rlp
new file mode 100644
index 000000000000..5ebc2f3bb788
Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/chain.rlp
differ diff --git a/cmd/devp2p/internal/ethtest/testdata/genesis.json b/cmd/devp2p/internal/ethtest/testdata/genesis.json new file mode 100644 index 000000000000..d8b5d225024e --- /dev/null +++ b/cmd/devp2p/internal/ethtest/testdata/genesis.json @@ -0,0 +1,26 @@ +{ + "config": { + "chainId": 19763, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "ethash": {} + }, + "nonce": "0xdeadbeefdeadbeef", + "timestamp": "0x0", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "0x80000000", + "difficulty": "0x20000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "71562b71999873db5b286df957af199ec94617f7": { + "balance": "0xffffffffffffffffffffffffff" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp new file mode 100644 index 000000000000..1a820734e10c Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go new file mode 100644 index 000000000000..d2dbe0a7d69b --- /dev/null +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -0,0 +1,419 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
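The transaction.go file that follows funds every test transaction from a single hard-coded faucet key and signs with `types.LatestSigner` (see `signWithFaucet` further down). The isolated sketch below shows that signing flow in miniature; the key, recipient, and chain config here are stand-ins, not the suite's actual values.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Throwaway key standing in for the suite's faucet key.
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	tx := types.NewTransaction(0, to, big.NewInt(1), 21000, big.NewInt(1), nil)

	// LatestSigner picks the newest signer enabled by the chain config,
	// mirroring what signWithFaucet does in the file below.
	signer := types.LatestSigner(params.TestChainConfig)
	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed tx hash:", signed.Hash())
}
```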
+ +package ethtest + +import ( + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/params" +) + +//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + +func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error { + tests := []*types.Transaction{ + getNextTxFromChain(s), + unknownTx(s), + } + for i, tx := range tests { + if tx == nil { + return fmt.Errorf("could not find tx to send") + } + t.Logf("Testing tx propagation %d: sending tx %v %v %v\n", i, tx.Hash().String(), tx.GasPrice(), tx.Gas()) + // get previous tx if exists for reference in case of old tx propagation + var prevTx *types.Transaction + if i != 0 { + prevTx = tests[i-1] + } + // write tx to connection + if err := sendSuccessfulTx(s, tx, prevTx, isEth66); err != nil { + return fmt.Errorf("send successful tx test failed: %v", err) + } + } + return nil +} + +func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction, isEth66 bool) error { + sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // Send the transaction + if err = sendConn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + // peer receiving connection to node + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // update last nonce seen + nonce = tx.Nonce() + // Wait for the transaction announcement + for { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { + case *Transactions: + recTxs := *msg + // if you receive an old tx propagation, read from connection again + if len(recTxs) == 1 && prevTx != nil { + if recTxs[0] == prevTx { + continue + } + } + for _, gotTx := range recTxs { + if gotTx.Hash() == tx.Hash() { + // Ok + return nil + } + } + return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash()) + case *NewPooledTransactionHashes: + txHashes := *msg + // if you receive an old tx propagation, read from connection again + if len(txHashes) == 1 && prevTx != nil { + if txHashes[0] == prevTx.Hash() { + continue + } + } + for _, gotHash := range txHashes { + if gotHash == tx.Hash() { + // Ok + return nil + } + } + return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) + default: + return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) + } + } +} + +func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error { + badTxs := []*types.Transaction{ + getOldTxFromChain(s), + invalidNonceTx(s), + hugeAmount(s), + hugeGasPrice(s), + hugeData(s), + } + // setup receiving connection before sending malicious txs + var ( + recvConn *Conn + err error + ) + if isEth66 { + recvConn, err = s.dial66() + } else { + recvConn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer recvConn.Close() + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + for i, tx := range badTxs { + t.Logf("Testing malicious tx 
propagation: %v\n", i) + if err = sendMaliciousTx(s, tx, isEth66); err != nil { + return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err) + } + } + // check to make sure bad txs aren't propagated + return checkMaliciousTxPropagation(s, badTxs, recvConn) +} + +func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error { + // setup connection + var ( + conn *Conn + err error + ) + if isEth66 { + conn, err = s.dial66() + } else { + conn, err = s.dial() + } + if err != nil { + return fmt.Errorf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // write malicious tx + if err = conn.Write(&Transactions{tx}); err != nil { + return fmt.Errorf("failed to write to connection: %v", err) + } + return nil +} + +var nonce = uint64(99) + +// sendMultipleSuccessfulTxs sends the given transactions to the node and +// expects the node to accept and propagate them. +func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction) error { + txMsg := Transactions(txs) + t.Logf("sending %d txs\n", len(txs)) + + sendConn, recvConn, err := s.createSendAndRecvConns(true) + if err != nil { + return err + } + defer sendConn.Close() + defer recvConn.Close() + if err = sendConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + if err = recvConn.peer(s.chain, nil); err != nil { + return fmt.Errorf("peering failed: %v", err) + } + // Send the transactions + if err = sendConn.Write(&txMsg); err != nil { + return fmt.Errorf("failed to write message to connection: %v", err) + } + // update nonce + nonce = txs[len(txs)-1].Nonce() + // Wait for the transaction announcement(s) and make sure all sent txs are being propagated + recvHashes := make([]common.Hash, 0) + // all txs should be announced within 3 announcements + for i := 0; i < 3; i++ { + switch msg := recvConn.readAndServe(s.chain, timeout).(type) { + case *Transactions: + for _, tx := range *msg { + recvHashes = append(recvHashes, tx.Hash()) + } + case *NewPooledTransactionHashes: + recvHashes = append(recvHashes, *msg...) + default: + if !strings.Contains(pretty.Sdump(msg), "i/o timeout") { + return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) + } + } + // break once all 2000 txs have been received + if len(recvHashes) == 2000 { + break + } + if len(recvHashes) > 0 { + _, missingTxs := compareReceivedTxs(recvHashes, txs) + if len(missingTxs) > 0 { + continue + } else { + t.Logf("successfully received all %d txs", len(txs)) + return nil + } + } + } + _, missingTxs := compareReceivedTxs(recvHashes, txs) + if len(missingTxs) > 0 { + for _, missing := range missingTxs { + t.Logf("missing tx: %v", missing.Hash()) + } + return fmt.Errorf("missing %d txs", len(missingTxs)) + } + return nil +} + +// checkMaliciousTxPropagation checks whether the given malicious transactions were +// propagated by the node. 
+func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn) error { + switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { + case *Transactions: + // check to see if any of the failing txs were in the announcement + recvTxs := make([]common.Hash, len(*msg)) + for i, recvTx := range *msg { + recvTxs[i] = recvTx.Hash() + } + badTxs, _ := compareReceivedTxs(recvTxs, txs) + if len(badTxs) > 0 { + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) + } + case *NewPooledTransactionHashes: + badTxs, _ := compareReceivedTxs(*msg, txs) + if len(badTxs) > 0 { + return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) + } + case *Error: + // Transaction should not be announced -> wait for timeout + return nil + default: + return fmt.Errorf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) + } + return nil +} + +// compareReceivedTxs compares the received set of txs against the given set of txs, +// returning both the set received txs that were present within the given txs, and +// the set of txs that were missing from the set of received txs +func compareReceivedTxs(recvTxs []common.Hash, txs []*types.Transaction) (present []*types.Transaction, missing []*types.Transaction) { + // create a map of the hashes received from node + recvHashes := make(map[common.Hash]common.Hash) + for _, hash := range recvTxs { + recvHashes[hash] = hash + } + + // collect present txs and missing txs separately + present = make([]*types.Transaction, 0) + missing = make([]*types.Transaction, 0) + for _, tx := range txs { + if _, exists := recvHashes[tx.Hash()]; exists { + present = append(present, tx) + } else { + missing = append(missing, tx) + } + } + return present, missing +} + +func unknownTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func getNextTxFromChain(s *Suite) *types.Transaction { + // Get a new transaction + for _, blocks := range s.fullChain.blocks[s.chain.Len():] { + txs := blocks.Transactions() + if txs.Len() != 0 { + return txs[0] + } + } + return nil +} + +func generateTxs(s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction, error) { + txHashMap := make(map[common.Hash]common.Hash, numTxs) + txs := make([]*types.Transaction, numTxs) + + nextTx := getNextTxFromChain(s) + if nextTx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } + gas := nextTx.Gas() + + nonce = nonce + 1 + // generate txs + for i := 0; i < numTxs; i++ { + tx := generateTx(s.chain.chainConfig, nonce, gas) + if tx == nil { + return nil, nil, fmt.Errorf("failed to get the next transaction") + } + txHashMap[tx.Hash()] = tx.Hash() + txs[i] = tx + nonce = nonce + 1 + } + return txHashMap, txs, nil +} + +func generateTx(chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction { + var to common.Address + tx := types.NewTransaction(nonce, to, big.NewInt(1), gas, big.NewInt(1), []byte{}) + return signWithFaucet(chainConfig, tx) +} + +func getOldTxFromChain(s *Suite) *types.Transaction { + for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] { + txs := blocks.Transactions() + if txs.Len() != 0 { + return txs[0] + } + } + return nil +} + +func invalidNonceTx(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == 
nil { + return nil + } + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func hugeAmount(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + amount := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func hugeGasPrice(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + gasPrice := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data()) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func hugeData(s *Suite) *types.Transaction { + tx := getNextTxFromChain(s) + if tx == nil { + return nil + } + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2)) + return signWithFaucet(s.chain.chainConfig, txNew) +} + +func signWithFaucet(chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction { + signer := types.LatestSigner(chainConfig) + signedTx, err := types.SignTx(tx, signer, faucetKey) + if err != nil { + return nil + } + return signedTx +} diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go new file mode 100644 index 000000000000..09bb218d5183 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/types.go @@ -0,0 +1,321 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "crypto/ecdsa" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" + "github.com/ethereum/go-ethereum/rlp" +) + +type Message interface { + Code() int +} + +type Error struct { + err error +} + +func (e *Error) Unwrap() error { return e.err } +func (e *Error) Error() string { return e.err.Error() } +func (e *Error) Code() int { return -1 } +func (e *Error) String() string { return e.Error() } + +func errorf(format string, args ...interface{}) *Error { + return &Error{fmt.Errorf(format, args...)} +} + +// Hello is the RLP structure of the protocol handshake. +type Hello struct { + Version uint64 + Name string + Caps []p2p.Cap + ListenPort uint64 + ID []byte // secp256k1 public key + + // Ignore additional fields (for forward compatibility). 
+ Rest []rlp.RawValue `rlp:"tail"` +} + +func (h Hello) Code() int { return 0x00 } + +// Disconnect is the RLP structure for a disconnect message. +type Disconnect struct { + Reason p2p.DiscReason +} + +func (d Disconnect) Code() int { return 0x01 } + +type Ping struct{} + +func (p Ping) Code() int { return 0x02 } + +type Pong struct{} + +func (p Pong) Code() int { return 0x03 } + +// Status is the network packet for the status message for eth/64 and later. +type Status eth.StatusPacket + +func (s Status) Code() int { return 16 } + +// NewBlockHashes is the network packet for the block announcements. +type NewBlockHashes eth.NewBlockHashesPacket + +func (nbh NewBlockHashes) Code() int { return 17 } + +type Transactions eth.TransactionsPacket + +func (t Transactions) Code() int { return 18 } + +// GetBlockHeaders represents a block header query. +type GetBlockHeaders eth.GetBlockHeadersPacket + +func (g GetBlockHeaders) Code() int { return 19 } + +type BlockHeaders eth.BlockHeadersPacket + +func (bh BlockHeaders) Code() int { return 20 } + +// GetBlockBodies represents a GetBlockBodies request +type GetBlockBodies eth.GetBlockBodiesPacket + +func (gbb GetBlockBodies) Code() int { return 21 } + +// BlockBodies is the network packet for block content distribution. +type BlockBodies eth.BlockBodiesPacket + +func (bb BlockBodies) Code() int { return 22 } + +// NewBlock is the network packet for the block propagation message. +type NewBlock eth.NewBlockPacket + +func (nb NewBlock) Code() int { return 23 } + +// NewPooledTransactionHashes is the network packet for the tx hash propagation message. +type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket + +func (nb NewPooledTransactionHashes) Code() int { return 24 } + +type GetPooledTransactions eth.GetPooledTransactionsPacket + +func (gpt GetPooledTransactions) Code() int { return 25 } + +type PooledTransactions eth.PooledTransactionsPacket + +func (pt PooledTransactions) Code() int { return 26 } + +// Conn represents an individual connection with a peer +type Conn struct { + *rlpx.Conn + ourKey *ecdsa.PrivateKey + negotiatedProtoVersion uint + negotiatedSnapProtoVersion uint + ourHighestProtoVersion uint + ourHighestSnapProtoVersion uint + caps []p2p.Cap +} + +// Read reads an eth packet from the connection. 
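Every wrapper type above exists so the test code can handle wire messages as plain Go values behind one `Message` interface: callers read a packet and type-switch on the concrete wrapper, exactly as the suite does via `readAndServe`. A small sketch of that pattern against the `Conn` type defined in this file (the helper name is illustrative, not part of the patch):

```go
package ethtest

import "fmt"

// expectBlockHeaders is a hypothetical helper showing the read-and-dispatch
// pattern used throughout the suite: read one message off the wire and
// assert its concrete wrapper type.
func expectBlockHeaders(c *Conn) (*BlockHeaders, error) {
	switch msg := c.Read().(type) {
	case *BlockHeaders:
		return msg, nil
	case *Error:
		return nil, fmt.Errorf("read failed: %v", msg)
	default:
		return nil, fmt.Errorf("expected BlockHeaders, got message code %d", msg.Code())
	}
}
```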
+func (c *Conn) Read() Message { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return errorf("could not read from connection: %v", err) + } + + var msg Message + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + msg = new(GetBlockHeaders) + case (BlockHeaders{}).Code(): + msg = new(BlockHeaders) + case (GetBlockBodies{}).Code(): + msg = new(GetBlockBodies) + case (BlockBodies{}).Code(): + msg = new(BlockBodies) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + case (GetPooledTransactions{}.Code()): + msg = new(GetPooledTransactions) + case (PooledTransactions{}.Code()): + msg = new(PooledTransactions) + default: + return errorf("invalid message code: %d", code) + } + // if message is devp2p, decode here + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return errorf("could not rlp decode message: %v", err) + } + return msg +} + +// Read66 reads an eth66 packet from the connection. +func (c *Conn) Read66() (uint64, Message) { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return 0, errorf("could not read from connection: %v", err) + } + + var msg Message + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + ethMsg := new(eth.GetBlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) + case (BlockHeaders{}).Code(): + ethMsg := new(eth.BlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) + case (GetBlockBodies{}).Code(): + ethMsg := new(eth.GetBlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) + case (BlockBodies{}).Code(): + ethMsg := new(eth.BlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + case (GetPooledTransactions{}.Code()): + ethMsg := new(eth.GetPooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) + case (PooledTransactions{}.Code()): + ethMsg := new(eth.PooledTransactionsPacket66) + if err := rlp.DecodeBytes(rawData, 
ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) + default: + msg = errorf("invalid message code: %d", code) + } + + if msg != nil { + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return 0, msg + } + return 0, errorf("invalid message: %s", string(rawData)) +} + +// Write writes a eth packet to the connection. +func (c *Conn) Write(msg Message) error { + payload, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + _, err = c.Conn.Write(uint64(msg.Code()), payload) + return err +} + +// Write66 writes an eth66 packet to the connection. +func (c *Conn) Write66(req eth.Packet, code int) error { + payload, err := rlp.EncodeToBytes(req) + if err != nil { + return err + } + _, err = c.Conn.Write(uint64(code), payload) + return err +} + +// ReadSnap reads a snap/1 response with the given id from the connection. +func (c *Conn) ReadSnap(id uint64) (Message, error) { + respId := id + 1 + start := time.Now() + for respId != id && time.Since(start) < timeout { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return nil, fmt.Errorf("could not read from connection: %v", err) + } + var snpMsg interface{} + switch int(code) { + case (GetAccountRange{}).Code(): + snpMsg = new(GetAccountRange) + case (AccountRange{}).Code(): + snpMsg = new(AccountRange) + case (GetStorageRanges{}).Code(): + snpMsg = new(GetStorageRanges) + case (StorageRanges{}).Code(): + snpMsg = new(StorageRanges) + case (GetByteCodes{}).Code(): + snpMsg = new(GetByteCodes) + case (ByteCodes{}).Code(): + snpMsg = new(ByteCodes) + case (GetTrieNodes{}).Code(): + snpMsg = new(GetTrieNodes) + case (TrieNodes{}).Code(): + snpMsg = new(TrieNodes) + default: + //return nil, fmt.Errorf("invalid message code: %d", code) + continue + } + if err := rlp.DecodeBytes(rawData, snpMsg); err != nil { + return nil, fmt.Errorf("could not rlp decode message: %v", err) + } + return snpMsg.(Message), nil + + } + return nil, fmt.Errorf("request timed out") +} diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go new file mode 100644 index 000000000000..5f340ed94c24 --- /dev/null +++ b/cmd/devp2p/internal/v4test/discv4tests.go @@ -0,0 +1,500 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package v4test + +import ( + "bytes" + "crypto/rand" + "fmt" + "net" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p/discover/v4wire" +) + +const ( + expiration = 20 * time.Second + wrongPacket = 66 + macSize = 256 / 8 +) + +var ( + // Remote node under test + Remote string + // IP where the first tester is listening, port will be assigned + Listen1 string = "127.0.0.1" + // IP where the second tester is listening, port will be assigned + // Before running the test, you may have to `sudo ifconfig lo0 add 127.0.0.2` (on MacOS at least) + Listen2 string = "127.0.0.2" +) + +type pingWithJunk struct { + Version uint + From, To v4wire.Endpoint + Expiration uint64 + JunkData1 uint + JunkData2 []byte +} + +func (req *pingWithJunk) Name() string { return "PING/v4" } +func (req *pingWithJunk) Kind() byte { return v4wire.PingPacket } + +type pingWrongType struct { + Version uint + From, To v4wire.Endpoint + Expiration uint64 +} + +func (req *pingWrongType) Name() string { return "WRONG/v4" } +func (req *pingWrongType) Kind() byte { return wrongPacket } + +func futureExpiration() uint64 { + return uint64(time.Now().Add(expiration).Unix()) +} + +// This test just sends a PING packet and expects a response. +func BasicPing(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + if err := te.checkPingPong(pingHash); err != nil { + t.Fatal(err) + } +} + +// checkPingPong verifies that the remote side sends both a PONG with the +// correct hash, and a PING. +// The two packets do not have to be in any particular order. +func (te *testenv) checkPingPong(pingHash []byte) error { + var ( + pings int + pongs int + ) + for i := 0; i < 2; i++ { + reply, _, err := te.read(te.l1) + if err != nil { + return err + } + switch reply.Kind() { + case v4wire.PongPacket: + if err := te.checkPong(reply, pingHash); err != nil { + return err + } + pongs++ + case v4wire.PingPacket: + pings++ + default: + return fmt.Errorf("expected PING or PONG, got %v %v", reply.Name(), reply) + } + } + if pongs == 1 && pings == 1 { + return nil + } + return fmt.Errorf("expected 1 PING (got %d) and 1 PONG (got %d)", pings, pongs) +} + +// checkPong verifies that reply is a valid PONG matching the given ping hash, +// and a PING. The two packets do not have to be in any particular order. +func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error { + if reply == nil { + return fmt.Errorf("expected PONG reply, got nil") + } + if reply.Kind() != v4wire.PongPacket { + return fmt.Errorf("expected PONG reply, got %v %v", reply.Name(), reply) + } + pong := reply.(*v4wire.Pong) + if !bytes.Equal(pong.ReplyTok, pingHash) { + return fmt.Errorf("PONG reply token mismatch: got %x, want %x", pong.ReplyTok, pingHash) + } + if want := te.localEndpoint(te.l1); !want.IP.Equal(pong.To.IP) || want.UDP != pong.To.UDP { + return fmt.Errorf("PONG 'to' endpoint mismatch: got %+v, want %+v", pong.To, want) + } + if v4wire.Expired(pong.Expiration) { + return fmt.Errorf("PONG is expired (%v)", pong.Expiration) + } + return nil +} + +// This test sends a PING packet with wrong 'to' field and expects a PONG response. 
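The `te.send` call used by every test above boils down to sign, encode, and fire a UDP datagram; its definition lives in framework.go further down in this patch. For reference, a standalone equivalent using only the public v4wire API (the helper name is illustrative):

```go
package v4test

import (
	"crypto/ecdsa"
	"net"

	"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
)

// sendPacket is a hypothetical standalone version of te.send: it signs and
// encodes a discv4 packet with the tester's key, writes it to the target
// endpoint, and returns the packet hash that a PONG must echo in ReplyTok.
func sendPacket(key *ecdsa.PrivateKey, c net.PacketConn, to *net.UDPAddr, req v4wire.Packet) ([]byte, error) {
	packet, hash, err := v4wire.Encode(key, req)
	if err != nil {
		return nil, err
	}
	if _, err := c.WriteTo(packet, to); err != nil {
		return nil, err
	}
	return hash, nil
}
```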
+func PingWrongTo(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: wrongEndpoint, + Expiration: futureExpiration(), + }) + if err := te.checkPingPong(pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with wrong 'from' field and expects a PONG response. +func PingWrongFrom(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: wrongEndpoint, + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + if err := te.checkPingPong(pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with additional data at the end and expects a PONG +// response. The remote node should respond because EIP-8 mandates ignoring additional +// trailing data. +func PingExtraData(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + pingHash := te.send(te.l1, &pingWithJunk{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + JunkData1: 42, + JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, + }) + + if err := te.checkPingPong(pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with additional data and wrong 'from' field +// and expects a PONG response. +func PingExtraDataWrongFrom(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + req := pingWithJunk{ + Version: 4, + From: wrongEndpoint, + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + JunkData1: 42, + JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, + } + pingHash := te.send(te.l1, &req) + if err := te.checkPingPong(pingHash); err != nil { + t.Fatal(err) + } +} + +// This test sends a PING packet with an expiration in the past. +// The remote node should not respond. +func PingPastExpiration(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: -futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if reply != nil { + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) + } +} + +// This test sends an invalid packet. The remote node should not respond. +func WrongPacketType(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + te.send(te.l1, &pingWrongType{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + reply, _, _ := te.read(te.l1) + if reply != nil { + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) + } +} + +// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by +// the bonding process. After bonding, it pings the target with a different from endpoint. 
+func BondThenPingWithWrongFrom(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} + pingHash := te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: wrongEndpoint, + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + if reply, _, err := te.read(te.l1); err != nil { + t.Fatal(err) + } else if err := te.checkPong(reply, pingHash); err != nil { + t.Fatal(err) + } +} + +// This test just sends FINDNODE. The remote node should not reply +// because the endpoint proof has not completed. +func FindnodeWithoutEndpointProof(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + req := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(req.Target[:]) + te.send(te.l1, &req) + + for { + reply, _, _ := te.read(te.l1) + if reply == nil { + // No response, all good + break + } + if reply.Kind() == v4wire.PingPacket { + continue // A ping is ok, just ignore it + } + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) + } +} + +// BasicFindnode sends a FINDNODE request after performing the endpoint +// proof. The remote node should respond. +func BasicFindnode(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + findnode := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l1, &findnode) + + reply, _, err := te.read(te.l1) + if err != nil { + t.Fatal("read find nodes", err) + } + if reply.Kind() != v4wire.NeighborsPacket { + t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply) + } +} + +// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends +// FINDNODE to read the remote table. The remote node should not return the node contained +// in the unsolicited NEIGHBORS packet. +func UnsolicitedNeighbors(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + // Send unsolicited NEIGHBORS response. + fakeKey, _ := crypto.GenerateKey() + encFakeKey := v4wire.EncodePubkey(&fakeKey.PublicKey) + neighbors := v4wire.Neighbors{ + Expiration: futureExpiration(), + Nodes: []v4wire.Node{{ + ID: encFakeKey, + IP: net.IP{1, 2, 3, 4}, + UDP: 30303, + TCP: 30303, + }}, + } + te.send(te.l1, &neighbors) + + // Check if the remote node included the fake node. + te.send(te.l1, &v4wire.Findnode{ + Expiration: futureExpiration(), + Target: encFakeKey, + }) + + reply, _, err := te.read(te.l1) + if err != nil { + t.Fatal("read find nodes", err) + } + if reply.Kind() != v4wire.NeighborsPacket { + t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply) + } + nodes := reply.(*v4wire.Neighbors).Nodes + if contains(nodes, encFakeKey) { + t.Fatal("neighbors response contains node from earlier unsolicited neighbors response") + } +} + +// This test sends FINDNODE with an expiration timestamp in the past. +// The remote node should not respond. +func FindnodePastExpiration(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + bond(t, te) + + findnode := v4wire.Findnode{Expiration: -futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l1, &findnode) + + for { + reply, _, _ := te.read(te.l1) + if reply == nil { + return + } else if reply.Kind() == v4wire.NeighborsPacket { + t.Fatal("Unexpected NEIGHBORS response for expired FINDNODE request") + } + } +} + +// bond performs the endpoint proof with the remote node. 
+func bond(t *utesting.T, te *testenv) { + te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + var gotPing, gotPong bool + for !gotPing || !gotPong { + req, hash, err := te.read(te.l1) + if err != nil { + t.Fatal(err) + } + switch req.(type) { + case *v4wire.Ping: + te.send(te.l1, &v4wire.Pong{ + To: te.remoteEndpoint(), + ReplyTok: hash, + Expiration: futureExpiration(), + }) + gotPing = true + case *v4wire.Pong: + // TODO: maybe verify pong data here + gotPong = true + } + } +} + +// This test attempts to perform a traffic amplification attack against a +// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker +// attempts to complete the endpoint proof non-interactively by sending a PONG +// with mismatching reply token from the 'victim' endpoint. The attack works if +// the remote node does not verify the PONG reply token field correctly. The +// attacker could then perform traffic amplification by sending many FINDNODE +// requests to the discovery node, which would reply to the 'victim' address. +func FindnodeAmplificationInvalidPongHash(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + // Send PING to start endpoint verification. + te.send(te.l1, &v4wire.Ping{ + Version: 4, + From: te.localEndpoint(te.l1), + To: te.remoteEndpoint(), + Expiration: futureExpiration(), + }) + + var gotPing, gotPong bool + for !gotPing || !gotPong { + req, _, err := te.read(te.l1) + if err != nil { + t.Fatal(err) + } + switch req.(type) { + case *v4wire.Ping: + // Send PONG from this node ID, but with invalid ReplyTok. + te.send(te.l1, &v4wire.Pong{ + To: te.remoteEndpoint(), + ReplyTok: make([]byte, macSize), + Expiration: futureExpiration(), + }) + gotPing = true + case *v4wire.Pong: + gotPong = true + } + } + + // Now send FINDNODE. The remote node should not respond because our + // PONG did not reference the PING hash. + findnode := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l1, &findnode) + + // If we receive a NEIGHBORS response, the attack worked and the test fails. + reply, _, _ := te.read(te.l1) + if reply != nil && reply.Kind() == v4wire.NeighborsPacket { + t.Error("Got neighbors") + } +} + +// This test attempts to perform a traffic amplification attack using FINDNODE. +// The attack works if the remote node does not verify the IP address of FINDNODE +// against the endpoint verification proof done by PING/PONG. +func FindnodeAmplificationWrongIP(t *utesting.T) { + te := newTestEnv(Remote, Listen1, Listen2) + defer te.close() + + // Do the endpoint proof from the l1 IP. + bond(t, te) + + // Now send FINDNODE from the same node ID, but different IP address. + // The remote node should not respond. + findnode := v4wire.Findnode{Expiration: futureExpiration()} + rand.Read(findnode.Target[:]) + te.send(te.l2, &findnode) + + // If we receive a NEIGHBORS response, the attack worked and the test fails. 
+ reply, _, _ := te.read(te.l2) + if reply != nil { + t.Error("Got NEIGHORS response for FINDNODE from wrong IP") + } +} + +var AllTests = []utesting.Test{ + {Name: "Ping/Basic", Fn: BasicPing}, + {Name: "Ping/WrongTo", Fn: PingWrongTo}, + {Name: "Ping/WrongFrom", Fn: PingWrongFrom}, + {Name: "Ping/ExtraData", Fn: PingExtraData}, + {Name: "Ping/ExtraDataWrongFrom", Fn: PingExtraDataWrongFrom}, + {Name: "Ping/PastExpiration", Fn: PingPastExpiration}, + {Name: "Ping/WrongPacketType", Fn: WrongPacketType}, + {Name: "Ping/BondThenPingWithWrongFrom", Fn: BondThenPingWithWrongFrom}, + {Name: "Findnode/WithoutEndpointProof", Fn: FindnodeWithoutEndpointProof}, + {Name: "Findnode/BasicFindnode", Fn: BasicFindnode}, + {Name: "Findnode/UnsolicitedNeighbors", Fn: UnsolicitedNeighbors}, + {Name: "Findnode/PastExpiration", Fn: FindnodePastExpiration}, + {Name: "Amplification/InvalidPongHash", Fn: FindnodeAmplificationInvalidPongHash}, + {Name: "Amplification/WrongIP", Fn: FindnodeAmplificationWrongIP}, +} diff --git a/cmd/devp2p/internal/v4test/framework.go b/cmd/devp2p/internal/v4test/framework.go new file mode 100644 index 000000000000..92865941810b --- /dev/null +++ b/cmd/devp2p/internal/v4test/framework.go @@ -0,0 +1,123 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
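With `AllTests` defined above and the framework file below supplying the sockets, wiring the discv4 suite into the utesting harness takes only a few lines; the devp2p command is expected to do something equivalent. A sketch, reusing the `RunTAP` runner already shown in suite_test.go earlier in this patch (the driver function name is illustrative):

```go
package v4test

import (
	"os"

	"github.com/ethereum/go-ethereum/internal/utesting"
)

// runV4Suite is a hypothetical driver: point the package-level Remote at the
// node under test and execute every discv4 test, reporting TAP to stdout.
func runV4Suite(remote string) bool {
	Remote = remote
	results := utesting.RunTAP(AllTests, os.Stdout)
	for _, res := range results {
		if res.Failed {
			return false
		}
	}
	return true
}
```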
+ +package v4test + +import ( + "crypto/ecdsa" + "fmt" + "net" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/discover/v4wire" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +const waitTime = 300 * time.Millisecond + +type testenv struct { + l1, l2 net.PacketConn + key *ecdsa.PrivateKey + remote *enode.Node + remoteAddr *net.UDPAddr +} + +func newTestEnv(remote string, listen1, listen2 string) *testenv { + l1, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", listen1)) + if err != nil { + panic(err) + } + l2, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", listen2)) + if err != nil { + panic(err) + } + key, err := crypto.GenerateKey() + if err != nil { + panic(err) + } + node, err := enode.Parse(enode.ValidSchemes, remote) + if err != nil { + panic(err) + } + if node.IP() == nil || node.UDP() == 0 { + var ip net.IP + var tcpPort, udpPort int + if ip = node.IP(); ip == nil { + ip = net.ParseIP("127.0.0.1") + } + if tcpPort = node.TCP(); tcpPort == 0 { + tcpPort = 30303 + } + if udpPort = node.TCP(); udpPort == 0 { + udpPort = 30303 + } + node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort) + } + addr := &net.UDPAddr{IP: node.IP(), Port: node.UDP()} + return &testenv{l1, l2, key, node, addr} +} + +func (te *testenv) close() { + te.l1.Close() + te.l2.Close() +} + +func (te *testenv) send(c net.PacketConn, req v4wire.Packet) []byte { + packet, hash, err := v4wire.Encode(te.key, req) + if err != nil { + panic(fmt.Errorf("can't encode %v packet: %v", req.Name(), err)) + } + if _, err := c.WriteTo(packet, te.remoteAddr); err != nil { + panic(fmt.Errorf("can't send %v: %v", req.Name(), err)) + } + return hash +} + +func (te *testenv) read(c net.PacketConn) (v4wire.Packet, []byte, error) { + buf := make([]byte, 2048) + if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil { + return nil, nil, err + } + n, _, err := c.ReadFrom(buf) + if err != nil { + return nil, nil, err + } + p, _, hash, err := v4wire.Decode(buf[:n]) + return p, hash, err +} + +func (te *testenv) localEndpoint(c net.PacketConn) v4wire.Endpoint { + addr := c.LocalAddr().(*net.UDPAddr) + return v4wire.Endpoint{ + IP: addr.IP.To4(), + UDP: uint16(addr.Port), + TCP: 0, + } +} + +func (te *testenv) remoteEndpoint() v4wire.Endpoint { + return v4wire.NewEndpoint(te.remoteAddr, 0) +} + +func contains(ns []v4wire.Node, key v4wire.Pubkey) bool { + for _, n := range ns { + if n.ID == key { + return true + } + } + return false +} diff --git a/cmd/devp2p/internal/v5test/discv5tests.go b/cmd/devp2p/internal/v5test/discv5tests.go new file mode 100644 index 000000000000..7866498f7376 --- /dev/null +++ b/cmd/devp2p/internal/v5test/discv5tests.go @@ -0,0 +1,377 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package v5test + +import ( + "bytes" + "net" + "sync" + "time" + + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p/discover/v5wire" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/netutil" +) + +// Suite is the discv5 test suite. +type Suite struct { + Dest *enode.Node + Listen1, Listen2 string // listening addresses +} + +func (s *Suite) listen1(log logger) (*conn, net.PacketConn) { + c := newConn(s.Dest, log) + l := c.listen(s.Listen1) + return c, l +} + +func (s *Suite) listen2(log logger) (*conn, net.PacketConn, net.PacketConn) { + c := newConn(s.Dest, log) + l1, l2 := c.listen(s.Listen1), c.listen(s.Listen2) + return c, l1, l2 +} + +func (s *Suite) AllTests() []utesting.Test { + return []utesting.Test{ + {Name: "Ping", Fn: s.TestPing}, + {Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID}, + {Name: "PingMultiIP", Fn: s.TestPingMultiIP}, + {Name: "PingHandshakeInterrupted", Fn: s.TestPingHandshakeInterrupted}, + {Name: "TalkRequest", Fn: s.TestTalkRequest}, + {Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance}, + {Name: "FindnodeResults", Fn: s.TestFindnodeResults}, + } +} + +// This test sends PING and expects a PONG response. +func (s *Suite) TestPing(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + ping := &v5wire.Ping{ReqID: conn.nextReqID()} + switch resp := conn.reqresp(l1, ping).(type) { + case *v5wire.Pong: + checkPong(t, resp, ping, l1) + default: + t.Fatal("expected PONG, got", resp.Name()) + } +} + +func checkPong(t *utesting.T, pong *v5wire.Pong, ping *v5wire.Ping, c net.PacketConn) { + if !bytes.Equal(pong.ReqID, ping.ReqID) { + t.Fatalf("wrong request ID %x in PONG, want %x", pong.ReqID, ping.ReqID) + } + if !pong.ToIP.Equal(laddr(c).IP) { + t.Fatalf("wrong destination IP %v in PONG, want %v", pong.ToIP, laddr(c).IP) + } + if int(pong.ToPort) != laddr(c).Port { + t.Fatalf("wrong destination port %v in PONG, want %v", pong.ToPort, laddr(c).Port) + } +} + +// This test sends PING with a 9-byte request ID, which isn't allowed by the spec. +// The remote node should not respond. +func (s *Suite) TestPingLargeRequestID(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + ping := &v5wire.Ping{ReqID: make([]byte, 9)} + switch resp := conn.reqresp(l1, ping).(type) { + case *v5wire.Pong: + t.Errorf("PONG response with unknown request ID %x", resp.ReqID) + case *readError: + if resp.err == v5wire.ErrInvalidReqID { + t.Error("response with oversized request ID") + } else if !netutil.IsTimeout(resp.err) { + t.Error(resp) + } + } +} + +// In this test, a session is established from one IP as usual. The session is then reused +// on another IP, which shouldn't work. The remote node should respond with WHOAREYOU for +// the attempt from a different IP. +func (s *Suite) TestPingMultiIP(t *utesting.T) { + conn, l1, l2 := s.listen2(t) + defer conn.close() + + // Create the session on l1. + ping := &v5wire.Ping{ReqID: conn.nextReqID()} + resp := conn.reqresp(l1, ping) + if resp.Kind() != v5wire.PongMsg { + t.Fatal("expected PONG, got", resp) + } + checkPong(t, resp.(*v5wire.Pong), ping, l1) + + // Send on l2. This reuses the session because there is only one codec. 
+ ping2 := &v5wire.Ping{ReqID: conn.nextReqID()} + conn.write(l2, ping2, nil) + switch resp := conn.read(l2).(type) { + case *v5wire.Pong: + t.Fatalf("remote responded to PING from %v for session on IP %v", laddr(l2).IP, laddr(l1).IP) + case *v5wire.Whoareyou: + t.Logf("got WHOAREYOU for new session as expected") + resp.Node = s.Dest + conn.write(l2, ping2, resp) + default: + t.Fatal("expected WHOAREYOU, got", resp) + } + + // Catch the PONG on l2. + switch resp := conn.read(l2).(type) { + case *v5wire.Pong: + checkPong(t, resp, ping2, l2) + default: + t.Fatal("expected PONG, got", resp) + } + + // Try on l1 again. + ping3 := &v5wire.Ping{ReqID: conn.nextReqID()} + conn.write(l1, ping3, nil) + switch resp := conn.read(l1).(type) { + case *v5wire.Pong: + t.Fatalf("remote responded to PING from %v for session on IP %v", laddr(l1).IP, laddr(l2).IP) + case *v5wire.Whoareyou: + t.Logf("got WHOAREYOU for new session as expected") + default: + t.Fatal("expected WHOAREYOU, got", resp) + } +} + +// This test starts a handshake, but doesn't finish it and sends a second ordinary message +// packet instead of a handshake message packet. The remote node should respond with +// another WHOAREYOU challenge for the second packet. +func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + // First PING triggers challenge. + ping := &v5wire.Ping{ReqID: conn.nextReqID()} + conn.write(l1, ping, nil) + switch resp := conn.read(l1).(type) { + case *v5wire.Whoareyou: + t.Logf("got WHOAREYOU for PING") + default: + t.Fatal("expected WHOAREYOU, got", resp) + } + + // Send second PING. + ping2 := &v5wire.Ping{ReqID: conn.nextReqID()} + switch resp := conn.reqresp(l1, ping2).(type) { + case *v5wire.Pong: + checkPong(t, resp, ping2, l1) + default: + t.Fatal("expected WHOAREYOU, got", resp) + } +} + +// This test sends TALKREQ and expects an empty TALKRESP response. +func (s *Suite) TestTalkRequest(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + // Non-empty request ID. + id := conn.nextReqID() + resp := conn.reqresp(l1, &v5wire.TalkRequest{ReqID: id, Protocol: "test-protocol"}) + switch resp := resp.(type) { + case *v5wire.TalkResponse: + if !bytes.Equal(resp.ReqID, id) { + t.Fatalf("wrong request ID %x in TALKRESP, want %x", resp.ReqID, id) + } + if len(resp.Message) > 0 { + t.Fatalf("non-empty message %x in TALKRESP", resp.Message) + } + default: + t.Fatal("expected TALKRESP, got", resp.Name()) + } + + // Empty request ID. + resp = conn.reqresp(l1, &v5wire.TalkRequest{Protocol: "test-protocol"}) + switch resp := resp.(type) { + case *v5wire.TalkResponse: + if len(resp.ReqID) > 0 { + t.Fatalf("wrong request ID %x in TALKRESP, want empty byte array", resp.ReqID) + } + if len(resp.Message) > 0 { + t.Fatalf("non-empty message %x in TALKRESP", resp.Message) + } + default: + t.Fatal("expected TALKRESP, got", resp.Name()) + } +} + +// This test checks that the remote node returns itself for FINDNODE with distance zero. +func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) { + conn, l1 := s.listen1(t) + defer conn.close() + + nodes, err := conn.findnode(l1, []uint{0}) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 1 { + t.Fatalf("remote returned more than one node for FINDNODE [0]") + } + if nodes[0].ID() != conn.remote.ID() { + t.Errorf("ID of response node is %v, want %v", nodes[0].ID(), conn.remote.ID()) + } +} + +// In this test, multiple nodes ping the node under test. 
After waiting for them to be +// accepted into the remote table, the test checks that they are returned by FINDNODE. +func (s *Suite) TestFindnodeResults(t *utesting.T) { + // Create bystanders. + nodes := make([]*bystander, 5) + added := make(chan enode.ID, len(nodes)) + for i := range nodes { + nodes[i] = newBystander(t, s, added) + defer nodes[i].close() + } + + // Get them added to the remote table. + timeout := 60 * time.Second + timeoutCh := time.After(timeout) + for count := 0; count < len(nodes); { + select { + case id := <-added: + t.Logf("bystander node %v added to remote table", id) + count++ + case <-timeoutCh: + t.Errorf("remote added %d bystander nodes in %v, need %d to continue", count, timeout, len(nodes)) + t.Logf("this can happen if the node has a non-empty table from previous runs") + return + } + } + t.Logf("all %d bystander nodes were added", len(nodes)) + + // Collect our nodes by distance. + var dists []uint + expect := make(map[enode.ID]*enode.Node) + for _, bn := range nodes { + n := bn.conn.localNode.Node() + expect[n.ID()] = n + d := uint(enode.LogDist(n.ID(), s.Dest.ID())) + if !containsUint(dists, d) { + dists = append(dists, d) + } + } + + // Send FINDNODE for all distances. + conn, l1 := s.listen1(t) + defer conn.close() + foundNodes, err := conn.findnode(l1, dists) + if err != nil { + t.Fatal(err) + } + t.Logf("remote returned %d nodes for distance list %v", len(foundNodes), dists) + for _, n := range foundNodes { + delete(expect, n.ID()) + } + if len(expect) > 0 { + t.Errorf("missing %d nodes in FINDNODE result", len(expect)) + t.Logf("this can happen if the test is run multiple times in quick succession") + t.Logf("and the remote node hasn't removed dead nodes from previous runs yet") + } else { + t.Logf("all %d expected nodes were returned", len(nodes)) + } +} + +// A bystander is a node whose only purpose is filling a spot in the remote table. +type bystander struct { + dest *enode.Node + conn *conn + l net.PacketConn + + addedCh chan enode.ID + done sync.WaitGroup +} + +func newBystander(t *utesting.T, s *Suite, added chan enode.ID) *bystander { + conn, l := s.listen1(t) + conn.setEndpoint(l) // bystander nodes need IP/port to get pinged + bn := &bystander{ + conn: conn, + l: l, + dest: s.Dest, + addedCh: added, + } + bn.done.Add(1) + go bn.loop() + return bn +} + +// id returns the node ID of the bystander. +func (bn *bystander) id() enode.ID { + return bn.conn.localNode.ID() +} + +// close shuts down loop. +func (bn *bystander) close() { + bn.conn.close() + bn.done.Wait() +} + +// loop answers packets from the remote node until quit. +func (bn *bystander) loop() { + defer bn.done.Done() + + var ( + lastPing time.Time + wasAdded bool + ) + for { + // Ping the remote node. + if !wasAdded && time.Since(lastPing) > 10*time.Second { + bn.conn.reqresp(bn.l, &v5wire.Ping{ + ReqID: bn.conn.nextReqID(), + ENRSeq: bn.dest.Seq(), + }) + lastPing = time.Now() + } + // Answer packets. 
+ switch p := bn.conn.read(bn.l).(type) { + case *v5wire.Ping: + bn.conn.write(bn.l, &v5wire.Pong{ + ReqID: p.ReqID, + ENRSeq: bn.conn.localNode.Seq(), + ToIP: bn.dest.IP(), + ToPort: uint16(bn.dest.UDP()), + }, nil) + wasAdded = true + bn.notifyAdded() + case *v5wire.Findnode: + bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, Total: 1}, nil) + wasAdded = true + bn.notifyAdded() + case *v5wire.TalkRequest: + bn.conn.write(bn.l, &v5wire.TalkResponse{ReqID: p.ReqID}, nil) + case *readError: + if !netutil.IsTemporaryError(p.err) { + bn.conn.logf("shutting down: %v", p.err) + return + } + } + } +} + +func (bn *bystander) notifyAdded() { + if bn.addedCh != nil { + bn.addedCh <- bn.id() + bn.addedCh = nil + } +} diff --git a/cmd/devp2p/internal/v5test/framework.go b/cmd/devp2p/internal/v5test/framework.go new file mode 100644 index 000000000000..9eac37520f7b --- /dev/null +++ b/cmd/devp2p/internal/v5test/framework.go @@ -0,0 +1,263 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package v5test + +import ( + "bytes" + "crypto/ecdsa" + "encoding/binary" + "fmt" + "net" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/discover/v5wire" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" +) + +// readError represents an error during packet reading. +// This exists to facilitate type-switching on the result of conn.read. +type readError struct { + err error +} + +func (p *readError) Kind() byte { return 99 } +func (p *readError) Name() string { return fmt.Sprintf("error: %v", p.err) } +func (p *readError) Error() string { return p.err.Error() } +func (p *readError) Unwrap() error { return p.err } +func (p *readError) RequestID() []byte { return nil } +func (p *readError) SetRequestID([]byte) {} + +// readErrorf creates a readError with the given text. +func readErrorf(format string, args ...interface{}) *readError { + return &readError{fmt.Errorf(format, args...)} +} + +// This is the response timeout used in tests. +const waitTime = 300 * time.Millisecond + +// conn is a connection to the node under test. +type conn struct { + localNode *enode.LocalNode + localKey *ecdsa.PrivateKey + remote *enode.Node + remoteAddr *net.UDPAddr + listeners []net.PacketConn + + log logger + codec *v5wire.Codec + lastRequest v5wire.Packet + lastChallenge *v5wire.Whoareyou + idCounter uint32 +} + +type logger interface { + Logf(string, ...interface{}) +} + +// newConn sets up a connection to the given node. 
+func newConn(dest *enode.Node, log logger) *conn { + key, err := crypto.GenerateKey() + if err != nil { + panic(err) + } + db, err := enode.OpenDB("") + if err != nil { + panic(err) + } + ln := enode.NewLocalNode(db, key) + + return &conn{ + localKey: key, + localNode: ln, + remote: dest, + remoteAddr: &net.UDPAddr{IP: dest.IP(), Port: dest.UDP()}, + codec: v5wire.NewCodec(ln, key, mclock.System{}), + log: log, + } +} + +func (tc *conn) setEndpoint(c net.PacketConn) { + tc.localNode.SetStaticIP(laddr(c).IP) + tc.localNode.SetFallbackUDP(laddr(c).Port) +} + +func (tc *conn) listen(ip string) net.PacketConn { + l, err := net.ListenPacket("udp", fmt.Sprintf("%v:0", ip)) + if err != nil { + panic(err) + } + tc.listeners = append(tc.listeners, l) + return l +} + +// close shuts down all listeners and the local node. +func (tc *conn) close() { + for _, l := range tc.listeners { + l.Close() + } + tc.localNode.Database().Close() +} + +// nextReqID creates a request id. +func (tc *conn) nextReqID() []byte { + id := make([]byte, 4) + tc.idCounter++ + binary.BigEndian.PutUint32(id, tc.idCounter) + return id +} + +// reqresp performs a request/response interaction on the given connection. +// The request is retried if a handshake is requested. +func (tc *conn) reqresp(c net.PacketConn, req v5wire.Packet) v5wire.Packet { + reqnonce := tc.write(c, req, nil) + switch resp := tc.read(c).(type) { + case *v5wire.Whoareyou: + if resp.Nonce != reqnonce { + return readErrorf("wrong nonce %x in WHOAREYOU (want %x)", resp.Nonce[:], reqnonce[:]) + } + resp.Node = tc.remote + tc.write(c, req, resp) + return tc.read(c) + default: + return resp + } +} + +// findnode sends a FINDNODE request and waits for its responses. +func (tc *conn) findnode(c net.PacketConn, dists []uint) ([]*enode.Node, error) { + var ( + findnode = &v5wire.Findnode{ReqID: tc.nextReqID(), Distances: dists} + reqnonce = tc.write(c, findnode, nil) + first = true + total uint8 + results []*enode.Node + ) + for n := 1; n > 0; { + switch resp := tc.read(c).(type) { + case *v5wire.Whoareyou: + // Handle handshake. + if resp.Nonce == reqnonce { + resp.Node = tc.remote + tc.write(c, findnode, resp) + } else { + return nil, fmt.Errorf("unexpected WHOAREYOU (nonce %x), waiting for NODES", resp.Nonce[:]) + } + case *v5wire.Ping: + // Handle ping from remote. + tc.write(c, &v5wire.Pong{ + ReqID: resp.ReqID, + ENRSeq: tc.localNode.Seq(), + }, nil) + case *v5wire.Nodes: + // Got NODES! Check request ID. + if !bytes.Equal(resp.ReqID, findnode.ReqID) { + return nil, fmt.Errorf("NODES response has wrong request id %x", resp.ReqID) + } + // Check total count. It should be greater than one + // and needs to be the same across all responses. + if first { + if resp.Total == 0 || resp.Total > 6 { + return nil, fmt.Errorf("invalid NODES response 'total' %d (not in (0,7))", resp.Total) + } + total = resp.Total + n = int(total) - 1 + first = false + } else { + n-- + if resp.Total != total { + return nil, fmt.Errorf("invalid NODES response 'total' %d (!= %d)", resp.Total, total) + } + } + // Check nodes. + nodes, err := checkRecords(resp.Nodes) + if err != nil { + return nil, fmt.Errorf("invalid node in NODES response: %v", err) + } + results = append(results, nodes...) + default: + return nil, fmt.Errorf("expected NODES, got %v", resp) + } + } + return results, nil +} + +// write sends a packet on the given connection. 
+func (tc *conn) write(c net.PacketConn, p v5wire.Packet, challenge *v5wire.Whoareyou) v5wire.Nonce { + packet, nonce, err := tc.codec.Encode(tc.remote.ID(), tc.remoteAddr.String(), p, challenge) + if err != nil { + panic(fmt.Errorf("can't encode %v packet: %v", p.Name(), err)) + } + if _, err := c.WriteTo(packet, tc.remoteAddr); err != nil { + tc.logf("Can't send %s: %v", p.Name(), err) + } else { + tc.logf(">> %s", p.Name()) + } + return nonce +} + +// read waits for an incoming packet on the given connection. +func (tc *conn) read(c net.PacketConn) v5wire.Packet { + buf := make([]byte, 1280) + if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil { + return &readError{err} + } + n, fromAddr, err := c.ReadFrom(buf) + if err != nil { + return &readError{err} + } + _, _, p, err := tc.codec.Decode(buf[:n], fromAddr.String()) + if err != nil { + return &readError{err} + } + tc.logf("<< %s", p.Name()) + return p +} + +// logf prints to the test log. +func (tc *conn) logf(format string, args ...interface{}) { + if tc.log != nil { + tc.log.Logf("(%s) %s", tc.localNode.ID().TerminalString(), fmt.Sprintf(format, args...)) + } +} + +func laddr(c net.PacketConn) *net.UDPAddr { + return c.LocalAddr().(*net.UDPAddr) +} + +func checkRecords(records []*enr.Record) ([]*enode.Node, error) { + nodes := make([]*enode.Node, len(records)) + for i := range records { + n, err := enode.New(enode.ValidSchemes, records[i]) + if err != nil { + return nil, err + } + nodes[i] = n + } + return nodes, nil +} + +func containsUint(ints []uint, x uint) bool { + for i := range ints { + if ints[i] == x { + return true + } + } + return false +} diff --git a/cmd/devp2p/keycmd.go b/cmd/devp2p/keycmd.go new file mode 100644 index 000000000000..869b8c2a44f0 --- /dev/null +++ b/cmd/devp2p/keycmd.go @@ -0,0 +1,105 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
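The `conn` helper above hides the discv5 WHOAREYOU handshake behind `reqresp`, so callers only ever see the final response packet (or a `readError` they can type-switch on). A rough, hypothetical illustration of driving it outside the `utesting` suite, assuming `dest` is a node record with its IP and UDP port filled in:

```go
package v5test

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/discover/v5wire"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// pingOnce is a hypothetical helper: one listener, one PING/PONG exchange.
// reqresp transparently answers a WHOAREYOU challenge if the remote issues one.
func pingOnce(dest *enode.Node) error {
	c := newConn(dest, nil) // nil logger is tolerated; conn.logf checks for it
	defer c.close()
	l := c.listen("127.0.0.1")

	switch resp := c.reqresp(l, &v5wire.Ping{ReqID: c.nextReqID()}).(type) {
	case *v5wire.Pong:
		fmt.Printf("remote sees us as %v:%d\n", resp.ToIP, resp.ToPort)
		return nil
	case *readError:
		return resp.err
	default:
		return fmt.Errorf("unexpected %s response", resp.Name())
	}
}
```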
+ +package main + +import ( + "fmt" + "net" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/enode" + "gopkg.in/urfave/cli.v1" +) + +var ( + keyCommand = cli.Command{ + Name: "key", + Usage: "Operations on node keys", + Subcommands: []cli.Command{ + keyGenerateCommand, + keyToNodeCommand, + }, + } + keyGenerateCommand = cli.Command{ + Name: "generate", + Usage: "Generates node key files", + ArgsUsage: "keyfile", + Action: genkey, + } + keyToNodeCommand = cli.Command{ + Name: "to-enode", + Usage: "Creates an enode URL from a node key file", + ArgsUsage: "keyfile", + Action: keyToURL, + Flags: []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag}, + } +) + +var ( + hostFlag = cli.StringFlag{ + Name: "ip", + Usage: "IP address of the node", + Value: "127.0.0.1", + } + tcpPortFlag = cli.IntFlag{ + Name: "tcp", + Usage: "TCP port of the node", + Value: 30303, + } + udpPortFlag = cli.IntFlag{ + Name: "udp", + Usage: "UDP port of the node", + Value: 30303, + } +) + +func genkey(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("need key file as argument") + } + file := ctx.Args().Get(0) + + key, err := crypto.GenerateKey() + if err != nil { + return fmt.Errorf("could not generate key: %v", err) + } + return crypto.SaveECDSA(file, key) +} + +func keyToURL(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("need key file as argument") + } + + var ( + file = ctx.Args().Get(0) + host = ctx.String(hostFlag.Name) + tcp = ctx.Int(tcpPortFlag.Name) + udp = ctx.Int(udpPortFlag.Name) + ) + key, err := crypto.LoadECDSA(file) + if err != nil { + return err + } + ip := net.ParseIP(host) + if ip == nil { + return fmt.Errorf("invalid IP address %q", host) + } + node := enode.NewV4(&key.PublicKey, ip, tcp, udp) + fmt.Println(node.URLv4()) + return nil +} diff --git a/cmd/devp2p/main.go b/cmd/devp2p/main.go index 6faa65093737..4a4e905a424e 100644 --- a/cmd/devp2p/main.go +++ b/cmd/devp2p/main.go @@ -45,7 +45,7 @@ func init() { // Set up the CLI app. app.Flags = append(app.Flags, debug.Flags...) app.Before = func(ctx *cli.Context) error { - return debug.Setup(ctx, "") + return debug.Setup(ctx) } app.After = func(ctx *cli.Context) error { debug.Exit() @@ -58,9 +58,12 @@ func init() { // Add subcommands. app.Commands = []cli.Command{ enrdumpCommand, + keyCommand, discv4Command, + discv5Command, dnsCommand, nodesetCommand, + rlpxCommand, } } @@ -78,7 +81,7 @@ func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool { // getNodeArg handles the common case of a single node descriptor argument. func getNodeArg(ctx *cli.Context) *enode.Node { - if ctx.NArg() != 1 { + if ctx.NArg() < 1 { exit("missing node as command-line argument") } n, err := parseNode(ctx.Args()[0]) diff --git a/cmd/devp2p/nodeset.go b/cmd/devp2p/nodeset.go index 2d86c3f65aba..1d78e34c7361 100644 --- a/cmd/devp2p/nodeset.go +++ b/cmd/devp2p/nodeset.go @@ -71,6 +71,7 @@ func writeNodesJSON(file string, nodes nodeSet) { } } +// nodes returns the node records contained in the set. func (ns nodeSet) nodes() []*enode.Node { result := make([]*enode.Node, 0, len(ns)) for _, n := range ns { @@ -83,12 +84,37 @@ func (ns nodeSet) nodes() []*enode.Node { return result } +// add ensures the given nodes are present in the set. func (ns nodeSet) add(nodes ...*enode.Node) { for _, n := range nodes { - ns[n.ID()] = nodeJSON{Seq: n.Seq(), N: n} + v := ns[n.ID()] + v.N = n + v.Seq = n.Seq() + ns[n.ID()] = v } } +// topN returns the top n nodes by score as a new set. 
+func (ns nodeSet) topN(n int) nodeSet { + if n >= len(ns) { + return ns + } + + byscore := make([]nodeJSON, 0, len(ns)) + for _, v := range ns { + byscore = append(byscore, v) + } + sort.Slice(byscore, func(i, j int) bool { + return byscore[i].Score >= byscore[j].Score + }) + result := make(nodeSet, n) + for _, v := range byscore[:n] { + result[v.N.ID()] = v + } + return result +} + +// verify performs integrity checks on the node set. func (ns nodeSet) verify() error { for id, n := range ns { if n.N.ID() != id { diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index de8e6d45ee6c..d65d6314c8e1 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -17,8 +17,12 @@ package main import ( + "errors" "fmt" "net" + "sort" + "strconv" + "strings" "time" "github.com/ethereum/go-ethereum/core/forkid" @@ -60,25 +64,64 @@ func nodesetInfo(ctx *cli.Context) error { ns := loadNodesJSON(ctx.Args().First()) fmt.Printf("Set contains %d nodes.\n", len(ns)) + showAttributeCounts(ns) return nil } +// showAttributeCounts prints the distribution of ENR attributes in a node set. +func showAttributeCounts(ns nodeSet) { + attrcount := make(map[string]int) + var attrlist []interface{} + for _, n := range ns { + r := n.N.Record() + attrlist = r.AppendElements(attrlist[:0])[1:] + for i := 0; i < len(attrlist); i += 2 { + key := attrlist[i].(string) + attrcount[key]++ + } + } + + var keys []string + var maxlength int + for key := range attrcount { + keys = append(keys, key) + if len(key) > maxlength { + maxlength = len(key) + } + } + sort.Strings(keys) + fmt.Println("ENR attribute counts:") + for _, key := range keys { + fmt.Printf("%s%s: %d\n", strings.Repeat(" ", maxlength-len(key)+1), key, attrcount[key]) + } +} + func nodesetFilter(ctx *cli.Context) error { if ctx.NArg() < 1 { return fmt.Errorf("need nodes file as argument") } - ns := loadNodesJSON(ctx.Args().First()) + // Parse -limit. + limit, err := parseFilterLimit(ctx.Args().Tail()) + if err != nil { + return err + } + // Parse the filters. filter, err := andFilter(ctx.Args().Tail()) if err != nil { return err } + // Load nodes and apply filters. + ns := loadNodesJSON(ctx.Args().First()) result := make(nodeSet) for id, n := range ns { if filter(n) { result[id] = n } } + if limit >= 0 { + result = result.topN(limit) + } writeNodesJSON("-", result) return nil } @@ -91,12 +134,15 @@ type nodeFilterC struct { } var filterFlags = map[string]nodeFilterC{ + "-limit": {1, trueFilter}, // needed to skip over -limit "-ip": {1, ipFilter}, "-min-age": {1, minAgeFilter}, "-eth-network": {1, ethFilter}, "-les-server": {0, lesFilter}, + "-snap": {0, snapFilter}, } +// parseFilters parses nodeFilters from args. func parseFilters(args []string) ([]nodeFilter, error) { var filters []nodeFilter for len(args) > 0 { @@ -104,19 +150,39 @@ func parseFilters(args []string) ([]nodeFilter, error) { if !ok { return nil, fmt.Errorf("invalid filter %q", args[0]) } - if len(args) < fc.narg { - return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)) + if len(args)-1 < fc.narg { + return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)-1) } - filter, err := fc.fn(args[1:]) + filter, err := fc.fn(args[1 : 1+fc.narg]) if err != nil { return nil, fmt.Errorf("%s: %v", args[0], err) } filters = append(filters, filter) - args = args[fc.narg+1:] + args = args[1+fc.narg:] } return filters, nil } +// parseFilterLimit parses the -limit option in args. It returns -1 if there is no limit. 
+func parseFilterLimit(args []string) (int, error) { + limit := -1 + for i, arg := range args { + if arg == "-limit" { + if i == len(args)-1 { + return -1, errors.New("-limit requires an argument") + } + n, err := strconv.Atoi(args[i+1]) + if err != nil { + return -1, fmt.Errorf("invalid -limit %q", args[i+1]) + } + limit = n + } + } + return limit, nil +} + +// andFilter parses node filters in args and and returns a single filter that requires all +// of them to match. func andFilter(args []string) (nodeFilter, error) { checks, err := parseFilters(args) if err != nil { @@ -133,6 +199,10 @@ func andFilter(args []string) (nodeFilter, error) { return f, nil } +func trueFilter(args []string) (nodeFilter, error) { + return func(n nodeJSON) bool { return true }, nil +} + func ipFilter(args []string) (nodeFilter, error) { _, cidr, err := net.ParseCIDR(args[0]) if err != nil { @@ -164,7 +234,9 @@ func ethFilter(args []string) (nodeFilter, error) { case "goerli": filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) case "ropsten": - filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash) + filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash) + case "sepolia": + filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash) default: return nil, fmt.Errorf("unknown network %q", args[0]) } @@ -172,7 +244,7 @@ func ethFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) bool { var eth struct { ForkID forkid.ID - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } if n.N.Load(enr.WithEntry("eth", ð)) != nil { return false @@ -185,9 +257,19 @@ func ethFilter(args []string) (nodeFilter, error) { func lesFilter(args []string) (nodeFilter, error) { f := func(n nodeJSON) bool { var les struct { - _ []rlp.RawValue `rlp:"tail"` + Tail []rlp.RawValue `rlp:"tail"` } return n.N.Load(enr.WithEntry("les", &les)) == nil } return f, nil } + +func snapFilter(args []string) (nodeFilter, error) { + f := func(n nodeJSON) bool { + var snap struct { + Tail []rlp.RawValue `rlp:"tail"` + } + return n.N.Load(enr.WithEntry("snap", &snap)) == nil + } + return f, nil +} diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go new file mode 100644 index 000000000000..6557a239da77 --- /dev/null +++ b/cmd/devp2p/rlpxcmd.go @@ -0,0 +1,131 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
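Every entry in `filterFlags` above follows the same shape: a constructor that consumes its declared number of arguments and returns a `nodeFilter` closure over `nodeJSON`. As a hypothetical illustration (not part of this change), an extra filter that keeps only nodes advertising a TCP port could sit next to `snapFilter` in nodesetcmd.go, reusing the `enr` package that file already imports:

```go
// tcpFilter is a hypothetical example following the same pattern as ipFilter and
// snapFilter: keep only nodes whose record carries a non-zero "tcp" entry.
// It would be registered as "-has-tcp": {0, tcpFilter} in filterFlags.
func tcpFilter(args []string) (nodeFilter, error) {
	f := func(n nodeJSON) bool {
		var port enr.TCP
		return n.N.Load(&port) == nil && port != 0
	}
	return f, nil
}
```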
+ +package main + +import ( + "fmt" + "net" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" + "github.com/ethereum/go-ethereum/rlp" + "gopkg.in/urfave/cli.v1" +) + +var ( + rlpxCommand = cli.Command{ + Name: "rlpx", + Usage: "RLPx Commands", + Subcommands: []cli.Command{ + rlpxPingCommand, + rlpxEthTestCommand, + rlpxSnapTestCommand, + }, + } + rlpxPingCommand = cli.Command{ + Name: "ping", + Usage: "ping ", + Action: rlpxPing, + } + rlpxEthTestCommand = cli.Command{ + Name: "eth-test", + Usage: "Runs tests against a node", + ArgsUsage: " ", + Action: rlpxEthTest, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + }, + } + rlpxSnapTestCommand = cli.Command{ + Name: "snap-test", + Usage: "Runs tests against a node", + ArgsUsage: " ", + Action: rlpxSnapTest, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + }, + } +) + +func rlpxPing(ctx *cli.Context) error { + n := getNodeArg(ctx) + fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", n.IP(), n.TCP())) + if err != nil { + return err + } + conn := rlpx.NewConn(fd, n.Pubkey()) + ourKey, _ := crypto.GenerateKey() + _, err = conn.Handshake(ourKey) + if err != nil { + return err + } + code, data, _, err := conn.Read() + if err != nil { + return err + } + switch code { + case 0: + var h ethtest.Hello + if err := rlp.DecodeBytes(data, &h); err != nil { + return fmt.Errorf("invalid handshake: %v", err) + } + fmt.Printf("%+v\n", h) + case 1: + var msg []p2p.DiscReason + if rlp.DecodeBytes(data, &msg); len(msg) == 0 { + return fmt.Errorf("invalid disconnect message") + } + return fmt.Errorf("received disconnect message: %v", msg[0]) + default: + return fmt.Errorf("invalid message code %d, expected handshake (code zero)", code) + } + return nil +} + +// rlpxEthTest runs the eth protocol test suite. +func rlpxEthTest(ctx *cli.Context) error { + if ctx.NArg() < 3 { + exit("missing path to chain.rlp as command-line argument") + } + suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + if err != nil { + exit(err) + } + // check if given node supports eth66, and if so, run eth66 protocol tests as well + is66Failed, _ := utesting.Run(utesting.Test{Name: "Is_66", Fn: suite.Is_66}) + if is66Failed { + return runTests(ctx, suite.EthTests()) + } + return runTests(ctx, suite.AllEthTests()) +} + +// rlpxSnapTest runs the snap protocol test suite. +func rlpxSnapTest(ctx *cli.Context) error { + if ctx.NArg() < 3 { + exit("missing path to chain.rlp as command-line argument") + } + suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + if err != nil { + exit(err) + } + return runTests(ctx, suite.SnapTests()) +} diff --git a/cmd/devp2p/runtest.go b/cmd/devp2p/runtest.go new file mode 100644 index 000000000000..4168f8555bfb --- /dev/null +++ b/cmd/devp2p/runtest.go @@ -0,0 +1,69 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "os" + + "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/log" + "gopkg.in/urfave/cli.v1" +) + +var ( + testPatternFlag = cli.StringFlag{ + Name: "run", + Usage: "Pattern of test suite(s) to run", + } + testTAPFlag = cli.BoolFlag{ + Name: "tap", + Usage: "Output TAP", + } + // These two are specific to the discovery tests. + testListen1Flag = cli.StringFlag{ + Name: "listen1", + Usage: "IP address of the first tester", + Value: v4test.Listen1, + } + testListen2Flag = cli.StringFlag{ + Name: "listen2", + Usage: "IP address of the second tester", + Value: v4test.Listen2, + } +) + +func runTests(ctx *cli.Context, tests []utesting.Test) error { + // Filter test cases. + if ctx.IsSet(testPatternFlag.Name) { + tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) + } + // Disable logging unless explicitly enabled. + if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") { + log.Root().SetHandler(log.DiscardHandler()) + } + // Run the tests. + var run = utesting.RunTests + if ctx.Bool(testTAPFlag.Name) { + run = utesting.RunTAP + } + results := run(tests, os.Stdout) + if utesting.CountFailures(results) > 0 { + os.Exit(1) + } + return nil +} diff --git a/cmd/ethkey/changepassword.go b/cmd/ethkey/changepassword.go index 32fde4ed6daf..b9402c2f96da 100644 --- a/cmd/ethkey/changepassword.go +++ b/cmd/ethkey/changepassword.go @@ -51,7 +51,7 @@ Change the password of a keyfile.`, } // Decrypt key with passphrase. - passphrase := getPassphrase(ctx) + passphrase := getPassphrase(ctx, false) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) @@ -67,7 +67,7 @@ Change the password of a keyfile.`, } newPhrase = strings.TrimRight(string(content), "\r\n") } else { - newPhrase = promptPassphrase(true) + newPhrase = utils.GetPassPhrase("", true) } // Encrypt the key with the new passphrase. diff --git a/cmd/ethkey/generate.go b/cmd/ethkey/generate.go index fe9a0c15192e..629d23da5ba5 100644 --- a/cmd/ethkey/generate.go +++ b/cmd/ethkey/generate.go @@ -26,7 +26,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "gopkg.in/urfave/cli.v1" ) @@ -52,6 +52,10 @@ If you want to encrypt an existing private key, it can be specified by setting Name: "privatekey", Usage: "file containing a raw private key to encrypt", }, + cli.BoolFlag{ + Name: "lightkdf", + Usage: "use less secure scrypt parameters", + }, }, Action: func(ctx *cli.Context) error { // Check if keyfile path given and make sure it doesn't already exist. @@ -82,16 +86,23 @@ If you want to encrypt an existing private key, it can be specified by setting } // Create the keyfile object with a random UUID. - id := uuid.NewRandom() + UUID, err := uuid.NewRandom() + if err != nil { + utils.Fatalf("Failed to generate random uuid: %v", err) + } key := &keystore.Key{ - Id: id, + Id: UUID, Address: crypto.PubkeyToAddress(privateKey.PublicKey), PrivateKey: privateKey, } // Encrypt key with passphrase. 
- passphrase := promptPassphrase(true) - keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP) + passphrase := getPassphrase(ctx, true) + scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP + if ctx.Bool("lightkdf") { + scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP + } + keyjson, err := keystore.EncryptKey(key, passphrase, scryptN, scryptP) if err != nil { utils.Fatalf("Error encrypting key: %v", err) } diff --git a/cmd/ethkey/inspect.go b/cmd/ethkey/inspect.go index ba03d4d93692..b646e43aa576 100644 --- a/cmd/ethkey/inspect.go +++ b/cmd/ethkey/inspect.go @@ -60,7 +60,7 @@ make sure to use this feature with great caution!`, } // Decrypt key with passphrase. - passphrase := getPassphrase(ctx) + passphrase := getPassphrase(ctx, false) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) diff --git a/cmd/ethkey/main.go b/cmd/ethkey/main.go index dbc49605888a..6db39174c461 100644 --- a/cmd/ethkey/main.go +++ b/cmd/ethkey/main.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/internal/flags" "gopkg.in/urfave/cli.v1" ) @@ -35,7 +35,7 @@ var gitDate = "" var app *cli.App func init() { - app = utils.NewApp(gitCommit, gitDate, "an Ethereum key manager") + app = flags.NewApp(gitCommit, gitDate, "an Ethereum key manager") app.Commands = []cli.Command{ commandGenerate, commandInspect, @@ -43,7 +43,7 @@ func init() { commandSignMessage, commandVerifyMessage, } - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } // Commonly used command line flags. diff --git a/cmd/ethkey/message.go b/cmd/ethkey/message.go index 5caea69ff653..69c8cf092392 100644 --- a/cmd/ethkey/message.go +++ b/cmd/ethkey/message.go @@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag. } // Decrypt key with passphrase. - passphrase := getPassphrase(ctx) + passphrase := getPassphrase(ctx, false) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) diff --git a/cmd/ethkey/message_test.go b/cmd/ethkey/message_test.go index e9e8eeeafb88..9d242ac00244 100644 --- a/cmd/ethkey/message_test.go +++ b/cmd/ethkey/message_test.go @@ -34,7 +34,7 @@ func TestMessageSignVerify(t *testing.T) { message := "test message" // Create the key. - generate := runEthkey(t, "generate", keyfile) + generate := runEthkey(t, "generate", "--lightkdf", keyfile) generate.Expect(` !! Unsupported terminal, password will be echoed. Password: {{.InputLine "foobar"}} diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go index c6cf5c25a3ba..70baae92f460 100644 --- a/cmd/ethkey/utils.go +++ b/cmd/ethkey/utils.go @@ -23,36 +23,14 @@ import ( "strings" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" "gopkg.in/urfave/cli.v1" ) -// promptPassphrase prompts the user for a passphrase. Set confirmation to true -// to require the user to confirm the passphrase. 
-func promptPassphrase(confirmation bool) string { - passphrase, err := console.Stdin.PromptPassword("Password: ") - if err != nil { - utils.Fatalf("Failed to read password: %v", err) - } - - if confirmation { - confirm, err := console.Stdin.PromptPassword("Repeat password: ") - if err != nil { - utils.Fatalf("Failed to read password confirmation: %v", err) - } - if passphrase != confirm { - utils.Fatalf("Passwords do not match") - } - } - - return passphrase -} - // getPassphrase obtains a passphrase given by the user. It first checks the // --passfile command line flag and ultimately prompts the user for a // passphrase. -func getPassphrase(ctx *cli.Context) string { +func getPassphrase(ctx *cli.Context, confirmation bool) string { // Look for the --passwordfile flag. passphraseFile := ctx.String(passphraseFlag.Name) if passphraseFile != "" { @@ -65,13 +43,13 @@ func getPassphrase(ctx *cli.Context) string { } // Otherwise prompt the user for the passphrase. - return promptPassphrase(false) + return utils.GetPassPhrase("", confirmation) } // signHash is a helper function that calculates a hash for the given message // that can be safely used to calculate a signature from. // -// The hash is calulcated as +// The hash is calculated as // keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. diff --git a/cmd/evm/README.md b/cmd/evm/README.md new file mode 100644 index 000000000000..1a029ab7091a --- /dev/null +++ b/cmd/evm/README.md @@ -0,0 +1,338 @@ +## EVM state transition tool + +The `evm t8n` tool is a stateless state transition utility. It is a utility +which can + +1. Take a prestate, including +- Accounts, +- Block context information, +- Previous blockshashes (*optional) +2. Apply a set of transactions, +3. Apply a mining-reward (*optional), +4. And generate a post-state, including +- State root, transaction root, receipt root, +- Information about rejected transactions, +- Optionally: a full or partial post-state dump + +## Specification + +The idea is to specify the behaviour of this binary very _strict_, so that other +node implementors can build replicas based on their own state-machines, and the +state generators can swap between a `geth`-based implementation and a `parityvm`-based +implementation. + +### Command line params + +Command line params that has to be supported are +``` + + --trace Output full trace logs to files .jsonl + --trace.nomemory Disable full memory dump in traces + --trace.nostack Disable stack output in traces + --trace.noreturndata Disable return data output in traces + --output.basedir value Specifies where output files are placed. Will be created if it does not exist. + --output.alloc alloc Determines where to put the alloc of the post-state. + `stdout` - into the stdout output + `stderr` - into the stderr output + --output.result result Determines where to put the result (stateroot, txroot etc) of the post-state. + `stdout` - into the stdout output + `stderr` - into the stderr output + --output.body value If set, the RLP of the transactions (block body) will be written to this file. + --input.txs stdin stdin or file name of where to find the transactions to apply. If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions.The '.rlp' format is identical to the output.body format. (default: "txs.json") + --state.fork value Name of ruleset to use. 
+ --state.chainid value ChainID to use (default: 1) + --state.reward value Mining reward. Set to -1 to disable (default: 0) + +``` + +### Error codes and output + +All logging should happen against the `stderr`. +There are a few (not many) errors that can occur, those are defined below. + +#### EVM-based errors (`2` to `9`) + +- Other EVM error. Exit code `2` +- Failed configuration: when a non-supported or invalid fork was specified. Exit code `3`. +- Block history is not supplied, but needed for a `BLOCKHASH` operation. If `BLOCKHASH` + is invoked targeting a block which history has not been provided for, the program will + exit with code `4`. + +#### IO errors (`10`-`20`) + +- Invalid input json: the supplied data could not be marshalled. + The program will exit with code `10` +- IO problems: failure to load or save files, the program will exit with code `11` + +## Examples +### Basic usage + +Invoking it with the provided example files +``` +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json +``` +Two resulting files: + +`alloc.json`: +```json +{ + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xa410" + } +} +``` +`result.json`: +```json +{ + "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "rejected": [ + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } + ] +} +``` + +We can make them spit out the data to e.g. 
`stdout` like this: +``` +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout +``` +Output: +```json +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xa410" + } + }, + "result": { + "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "rejected": [ + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } + ] + } +} +``` + +## About Ommers + +Mining rewards and ommer rewards might need to be added. This is how those are applied: + +- `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`. +- For each ommer (mined by `0xbb`), with blocknumber `N-delta` + - (where `delta` is the difference between the current block and the ommer) + - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` + - The account `0xaa` (block miner) is awarded `block_reward / 32` + +To make `state_t8n` apply these, the following inputs are required: + +- `state.reward` + - For ethash, it is `5000000000000000000` `wei`, + - If this is not defined, mining rewards are not applied, + - A value of `0` is valid, and causes accounts to be 'touched'. +- For each ommer, the tool needs to be given an `address` and a `delta`. This + is done via the `env`. + +Note: the tool does not verify that e.g. the normal uncle rules apply, +and allows e.g two uncles at the same height, or the uncle-distance. 
This means that +the tool allows for negative uncle reward (distance > 8) + +Example: +`./testdata/5/env.json`: +```json +{ + "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "1", + "currentTimestamp": "1000", + "ommers": [ + {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" }, + {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" } + ] +} +``` +When applying this, using a reward of `0x80` +Output: +```json +{ + "alloc": { + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { + "balance": "0x88" + }, + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { + "balance": "0x70" + }, + "0xcccccccccccccccccccccccccccccccccccccccc": { + "balance": "0x60" + } + } +} +``` +### Future EIPS + +It is also possible to experiment with future eips that are not yet defined in a hard fork. +Example, putting EIP-1344 into Frontier: +``` +./evm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=/testdata/1/env.json +``` + +### Block history + +The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`. +If a required blockhash is not provided, the exit code should be `4`: +Example where blockhashes are provided: +``` +./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace +INFO [07-27|11:53:40.960] Trie dumping started root=b7341d..857ea1 +INFO [07-27|11:53:40.960] Trie dumping complete accounts=3 elapsed="103.298µs" +INFO [07-27|11:53:40.960] Wrote file file=alloc.json +INFO [07-27|11:53:40.960] Wrote file file=result.json + +``` + +``` +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 +``` +``` +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":156276} +``` + +In this example, the caller has not provided the required blockhash: +``` +./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace +ERROR(4): getHash(3) invoked, blockhash for that block not provided +``` +Error code: 4 + +### Chaining + +Another thing that can be done, is to chain invocations: +``` +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json +INFO [07-27|11:53:41.049] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.050] Trie dumping started root=84208a..ae4e13 +INFO [07-27|11:53:41.050] Trie dumping complete accounts=3 elapsed="59.412µs" +INFO [07-27|11:53:41.050] Wrote file file=result.json +INFO [07-27|11:53:41.051] rejected tx index=0 hash=0557ba..18d673 
from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.051] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [07-27|11:53:41.052] Trie dumping started root=84208a..ae4e13 +INFO [07-27|11:53:41.052] Trie dumping complete accounts=3 elapsed="45.734µs" +INFO [07-27|11:53:41.052] Wrote file file=alloc.json +INFO [07-27|11:53:41.052] Wrote file file=result.json + +``` +What happened here, is that we first applied two identical transactions, so the second one was rejected. +Then, taking the poststate alloc as the input for the next state, we tried again to include +the same two transactions: this time, both failed due to too low nonce. + +In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the +actual blocknumber (exposed to the EVM) would not increase. + +### Transactions in RLP form + +It is possible to provide already-signed transactions as input to, using an `input.txs` which ends with the `rlp` suffix. +The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible +to use the evm to go from `json` input to `rlp` input. + +The following command takes **json** the transactions in `./testdata/13/txs.json` and signs them. After execution, they are output to `signed_txs.rlp`.: +``` +./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp +INFO [07-27|11:53:41.124] Trie dumping started root=e4b924..6aef61 +INFO [07-27|11:53:41.124] Trie dumping complete accounts=3 elapsed="94.284µs" +INFO [07-27|11:53:41.125] Wrote file file=alloc.json +INFO [07-27|11:53:41.125] Wrote file file=alloc_jsontx.json +INFO [07-27|11:53:41.125] Wrote file file=signed_txs.rlp + +``` + +The `output.body` is the rlp-list of transactions, encoded in hex and placed in a string a'la `json` encoding rules: +``` +cat signed_txs.rlp +"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +``` + +We can use `rlpdump` to check what the contents are: +``` +rlpdump -hex $(cat signed_txs.rlp | jq -r ) +[ + 02f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904, + 02f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9, +] +``` +Now, we can now use those (or any other already signed transactions), as input, like so: +``` +./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json +INFO [07-27|11:53:41.253] Trie dumping started root=e4b924..6aef61 +INFO 
[07-27|11:53:41.253] Trie dumping complete accounts=3 elapsed="128.445µs" +INFO [07-27|11:53:41.253] Wrote file file=alloc.json +INFO [07-27|11:53:41.255] Wrote file file=alloc_rlptx.json + +``` + +You might have noticed that the results from these two invocations were stored in two separate files. +And we can now finally check that they match. +``` +cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot +"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61" +"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61" +``` diff --git a/cmd/evm/compiler.go b/cmd/evm/compiler.go index c019a2fe70b7..40ad9313c514 100644 --- a/cmd/evm/compiler.go +++ b/cmd/evm/compiler.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var compileCommand = cli.Command{ diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go index 69f611e39b11..f9719497fe10 100644 --- a/cmd/evm/disasm.go +++ b/cmd/evm/disasm.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/core/asm" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var disasmCommand = cli.Command{ @@ -34,17 +34,22 @@ var disasmCommand = cli.Command{ } func disasmCmd(ctx *cli.Context) error { - if len(ctx.Args().First()) == 0 { - return errors.New("filename required") + var in string + switch { + case len(ctx.Args().First()) > 0: + fn := ctx.Args().First() + input, err := ioutil.ReadFile(fn) + if err != nil { + return err + } + in = string(input) + case ctx.GlobalIsSet(InputFlag.Name): + in = ctx.GlobalString(InputFlag.Name) + default: + return errors.New("missing filename or --input value") } - fn := ctx.Args().First() - in, err := ioutil.ReadFile(fn) - if err != nil { - return err - } - - code := strings.TrimSpace(string(in)) + code := strings.TrimSpace(in) fmt.Printf("%v\n", code) return asm.PrintDisassembled(code) } diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go new file mode 100644 index 000000000000..d4edd33bdeb7 --- /dev/null +++ b/cmd/evm/internal/t8ntool/block.go @@ -0,0 +1,380 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
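As the README section above notes, the `output.body` / RLP-input format is simply an RLP list of signed transactions rendered as a hex string, and the block-builder input code later in this change decodes it with a single `rlp.DecodeBytes` call. A standalone, hypothetical sketch of that decoding (the placeholder argument in `main` is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeSignedTxs turns a signed_txs.rlp-style hex string into the signed
// transactions it encodes. Typed (EIP-2718) transactions decode as well,
// because types.Transaction accepts both list items and byte-string envelopes.
func decodeSignedTxs(hexRLP string) ([]*types.Transaction, error) {
	var txs []*types.Transaction
	if err := rlp.DecodeBytes(common.FromHex(hexRLP), &txs); err != nil {
		return nil, err
	}
	return txs, nil
}

func main() {
	// In practice the argument would be the quoted hex string from signed_txs.rlp.
	txs, err := decodeSignedTxs("0x")
	fmt.Println(len(txs), err)
}
```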
+ +package t8ntool + +import ( + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/clique" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "gopkg.in/urfave/cli.v1" +) + +//go:generate gencodec -type header -field-override headerMarshaling -out gen_header.go +type header struct { + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *big.Int `json:"difficulty"` + Number *big.Int `json:"number" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed"` + Time uint64 `json:"timestamp" gencodec:"required"` + Extra []byte `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` +} + +type headerMarshaling struct { + Difficulty *math.HexOrDecimal256 + Number *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Time math.HexOrDecimal64 + Extra hexutil.Bytes + BaseFee *math.HexOrDecimal256 +} + +type bbInput struct { + Header *header `json:"header,omitempty"` + OmmersRlp []string `json:"ommers,omitempty"` + TxRlp string `json:"txs,omitempty"` + Clique *cliqueInput `json:"clique,omitempty"` + + Ethash bool `json:"-"` + EthashDir string `json:"-"` + PowMode ethash.Mode `json:"-"` + Txs []*types.Transaction `json:"-"` + Ommers []*types.Header `json:"-"` +} + +type cliqueInput struct { + Key *ecdsa.PrivateKey + Voted *common.Address + Authorize *bool + Vanity common.Hash +} + +// UnmarshalJSON implements json.Unmarshaler interface. +func (c *cliqueInput) UnmarshalJSON(input []byte) error { + var x struct { + Key *common.Hash `json:"secretKey"` + Voted *common.Address `json:"voted"` + Authorize *bool `json:"authorize"` + Vanity common.Hash `json:"vanity"` + } + if err := json.Unmarshal(input, &x); err != nil { + return err + } + if x.Key == nil { + return errors.New("missing required field 'secretKey' for cliqueInput") + } + if ecdsaKey, err := crypto.ToECDSA(x.Key[:]); err != nil { + return err + } else { + c.Key = ecdsaKey + } + c.Voted = x.Voted + c.Authorize = x.Authorize + c.Vanity = x.Vanity + return nil +} + +// ToBlock converts i into a *types.Block +func (i *bbInput) ToBlock() *types.Block { + header := &types.Header{ + ParentHash: i.Header.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: common.Address{}, + Root: i.Header.Root, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + Bloom: i.Header.Bloom, + Difficulty: common.Big0, + Number: i.Header.Number, + GasLimit: i.Header.GasLimit, + GasUsed: i.Header.GasUsed, + Time: i.Header.Time, + Extra: i.Header.Extra, + MixDigest: i.Header.MixDigest, + BaseFee: i.Header.BaseFee, + } + + // Fill optional values. 
+ if i.Header.OmmerHash != nil { + header.UncleHash = *i.Header.OmmerHash + } else if len(i.Ommers) != 0 { + // Calculate the ommer hash if none is provided and there are ommers to hash + header.UncleHash = types.CalcUncleHash(i.Ommers) + } + if i.Header.Coinbase != nil { + header.Coinbase = *i.Header.Coinbase + } + if i.Header.TxHash != nil { + header.TxHash = *i.Header.TxHash + } + if i.Header.ReceiptHash != nil { + header.ReceiptHash = *i.Header.ReceiptHash + } + if i.Header.Nonce != nil { + header.Nonce = *i.Header.Nonce + } + if header.Difficulty != nil { + header.Difficulty = i.Header.Difficulty + } + return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers) +} + +// SealBlock seals the given block using the configured engine. +func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) { + switch { + case i.Ethash: + return i.sealEthash(block) + case i.Clique != nil: + return i.sealClique(block) + default: + return block, nil + } +} + +// sealEthash seals the given block using ethash. +func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) { + if i.Header.Nonce != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with ethash will overwrite provided nonce")) + } + ethashConfig := ethash.Config{ + PowMode: i.PowMode, + DatasetDir: i.EthashDir, + CacheDir: i.EthashDir, + DatasetsInMem: 1, + DatasetsOnDisk: 2, + CachesInMem: 2, + CachesOnDisk: 3, + } + engine := ethash.New(ethashConfig, nil, true) + defer engine.Close() + // Use a buffered chan for results. + // If the testmode is used, the sealer will return quickly, and complain + // "Sealing result is not read by miner" if it cannot write the result. + results := make(chan *types.Block, 1) + if err := engine.Seal(nil, block, results, nil); err != nil { + panic(fmt.Sprintf("failed to seal block: %v", err)) + } + found := <-results + return block.WithSeal(found.Header()), nil +} + +// sealClique seals the given block using clique. +func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) { + // If any clique value overwrites an explicit header value, fail + // to avoid silently building a block with unexpected values. + if i.Header.Extra != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique will overwrite provided extra data")) + } + header := block.Header() + if i.Clique.Voted != nil { + if i.Header.Coinbase != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided coinbase")) + } + header.Coinbase = *i.Clique.Voted + } + if i.Clique.Authorize != nil { + if i.Header.Nonce != nil { + return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided nonce")) + } + if *i.Clique.Authorize { + header.Nonce = [8]byte{} + } else { + header.Nonce = [8]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + } + } + // Extra is fixed 32 byte vanity and 65 byte signature + header.Extra = make([]byte, 32+65) + copy(header.Extra[0:32], i.Clique.Vanity.Bytes()[:]) + + // Sign the seal hash and fill in the rest of the extra data + h := clique.SealHash(header) + sighash, err := crypto.Sign(h[:], i.Clique.Key) + if err != nil { + return nil, err + } + copy(header.Extra[32:], sighash) + block = block.WithSeal(header) + return block, nil +} + +// BuildBlock constructs a block from the given inputs. 
+func BuildBlock(ctx *cli.Context) error { + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + + baseDir, err := createBasedir(ctx) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) + } + inputData, err := readInput(ctx) + if err != nil { + return err + } + block := inputData.ToBlock() + block, err = inputData.SealBlock(block) + if err != nil { + return err + } + return dispatchBlock(ctx, baseDir, block) +} + +func readInput(ctx *cli.Context) (*bbInput, error) { + var ( + headerStr = ctx.String(InputHeaderFlag.Name) + ommersStr = ctx.String(InputOmmersFlag.Name) + txsStr = ctx.String(InputTxsRlpFlag.Name) + cliqueStr = ctx.String(SealCliqueFlag.Name) + ethashOn = ctx.Bool(SealEthashFlag.Name) + ethashDir = ctx.String(SealEthashDirFlag.Name) + ethashMode = ctx.String(SealEthashModeFlag.Name) + inputData = &bbInput{} + ) + if ethashOn && cliqueStr != "" { + return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen")) + } + if ethashOn { + inputData.Ethash = ethashOn + inputData.EthashDir = ethashDir + switch ethashMode { + case "normal": + inputData.PowMode = ethash.ModeNormal + case "test": + inputData.PowMode = ethash.ModeTest + case "fake": + inputData.PowMode = ethash.ModeFake + default: + return nil, NewError(ErrorConfig, fmt.Errorf("unknown pow mode: %s, supported modes: test, fake, normal", ethashMode)) + } + } + if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(inputData); err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + } + if cliqueStr != stdinSelector && cliqueStr != "" { + var clique cliqueInput + if err := readFile(cliqueStr, "clique", &clique); err != nil { + return nil, err + } + inputData.Clique = &clique + } + if headerStr != stdinSelector { + var env header + if err := readFile(headerStr, "header", &env); err != nil { + return nil, err + } + inputData.Header = &env + } + if ommersStr != stdinSelector && ommersStr != "" { + var ommers []string + if err := readFile(ommersStr, "ommers", &ommers); err != nil { + return nil, err + } + inputData.OmmersRlp = ommers + } + if txsStr != stdinSelector { + var txs string + if err := readFile(txsStr, "txs", &txs); err != nil { + return nil, err + } + inputData.TxRlp = txs + } + // Deserialize rlp txs and ommers + var ( + ommers = []*types.Header{} + txs = []*types.Transaction{} + ) + if inputData.TxRlp != "" { + if err := rlp.DecodeBytes(common.FromHex(inputData.TxRlp), &txs); err != nil { + return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode transaction from rlp data: %v", err)) + } + inputData.Txs = txs + } + for _, str := range inputData.OmmersRlp { + type extblock struct { + Header *types.Header + Txs []*types.Transaction + Ommers []*types.Header + } + var ommer *extblock + if err := rlp.DecodeBytes(common.FromHex(str), &ommer); err != nil { + return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode ommer from rlp data: %v", err)) + } + ommers = append(ommers, ommer.Header) + } + inputData.Ommers = ommers + + return inputData, nil +} + +// dispatchOutput writes the output data to either stderr or stdout, or to the specified +// files +func dispatchBlock(ctx *cli.Context, 
baseDir string, block *types.Block) error { + raw, _ := rlp.EncodeToBytes(block) + + type blockInfo struct { + Rlp hexutil.Bytes `json:"rlp"` + Hash common.Hash `json:"hash"` + } + var enc blockInfo + enc.Rlp = raw + enc.Hash = block.Hash() + + b, err := json.MarshalIndent(enc, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + switch dest := ctx.String(OutputBlockFlag.Name); dest { + case "stdout": + os.Stdout.Write(b) + os.Stdout.WriteString("\n") + case "stderr": + os.Stderr.Write(b) + os.Stderr.WriteString("\n") + default: + if err := saveFile(baseDir, dest, enc); err != nil { + return err + } + } + return nil +} diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go new file mode 100644 index 000000000000..874685f15ea0 --- /dev/null +++ b/cmd/evm/internal/t8ntool/execution.go @@ -0,0 +1,313 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package t8ntool + +import ( + "fmt" + "math/big" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +type Prestate struct { + Env stEnv `json:"env"` + Pre core.GenesisAlloc `json:"pre"` +} + +// ExecutionResult contains the execution status after running a state test, any +// error that might have occurred and a dump of the final state if requested. 
+type ExecutionResult struct { + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected []*rejectedTx `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` +} + +type ommer struct { + Delta uint64 `json:"delta"` + Address common.Address `json:"address"` +} + +//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go +type stEnv struct { + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty"` + Random *big.Int `json:"currentRandom"` + ParentDifficulty *big.Int `json:"parentDifficulty"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` +} + +type stEnvMarshaling struct { + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + ParentDifficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ParentTimestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 +} + +type rejectedTx struct { + Index int `json:"index"` + Err string `json:"error"` +} + +// Apply applies a set of transactions to a pre-state +func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, + txs types.Transactions, miningReward int64, + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) { + + // Capture errors for BLOCKHASH operation, if we haven't been supplied the + // required blockhashes + var hashError error + getHash := func(num uint64) common.Hash { + if pre.Env.BlockHashes == nil { + hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) + return common.Hash{} + } + h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)] + if !ok { + hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) + } + return h + } + var ( + statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre) + signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number)) + gaspool = new(core.GasPool) + blockHash = common.Hash{0x13, 0x37} + rejectedTxs []*rejectedTx + includedTxs types.Transactions + gasUsed = uint64(0) + receipts = make(types.Receipts, 0) + txIndex = 0 + ) + gaspool.AddGas(pre.Env.GasLimit) + vmContext := vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: pre.Env.Coinbase, + BlockNumber: new(big.Int).SetUint64(pre.Env.Number), + Time: new(big.Int).SetUint64(pre.Env.Timestamp), + Difficulty: pre.Env.Difficulty, + GasLimit: pre.Env.GasLimit, + GetHash: getHash, + } + // If currentBaseFee is defined, add it to the vmContext. + if pre.Env.BaseFee != nil { + vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee) + } + // If random is defined, add it to the vmContext. 
+ if pre.Env.Random != nil { + rnd := common.BigToHash(pre.Env.Random) + vmContext.Random = &rnd + } + // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's + // done in StateProcessor.Process(block, ...), right before transactions are applied. + if chainConfig.DAOForkSupport && + chainConfig.DAOForkBlock != nil && + chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { + misc.ApplyDAOHardFork(statedb) + } + + for i, tx := range txs { + msg, err := tx.AsMessage(signer, pre.Env.BaseFee) + if err != nil { + log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) + continue + } + tracer, err := getTracerFn(txIndex, tx.Hash()) + if err != nil { + return nil, nil, err + } + vmConfig.Tracer = tracer + vmConfig.Debug = (tracer != nil) + statedb.Prepare(tx.Hash(), txIndex) + txContext := core.NewEVMTxContext(msg) + snapshot := statedb.Snapshot() + evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) + + // (ret []byte, usedGas uint64, failed bool, err error) + msgResult, err := core.ApplyMessage(evm, msg, gaspool) + if err != nil { + statedb.RevertToSnapshot(snapshot) + log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) + continue + } + includedTxs = append(includedTxs, tx) + if hashError != nil { + return nil, nil, NewError(ErrorMissingBlockhash, hashError) + } + gasUsed += msgResult.UsedGas + + // Receipt: + { + var root []byte + if chainConfig.IsByzantium(vmContext.BlockNumber) { + statedb.Finalise(true) + } else { + root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes() + } + + // Create a new receipt for the transaction, storing the intermediate root and + // gas used by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: gasUsed} + if msgResult.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + receipt.TxHash = tx.Hash() + receipt.GasUsed = msgResult.UsedGas + + // If the transaction created a contract, store the creation address in the receipt. + if msg.To() == nil { + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) + } + + // Set the receipt logs and create the bloom filter. + receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + // These three are non-consensus fields: + //receipt.BlockHash + //receipt.BlockNumber + receipt.TransactionIndex = uint(txIndex) + receipts = append(receipts, receipt) + } + + txIndex++ + } + statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)) + // Add mining reward? + if miningReward > 0 { + // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases + // where + // - the coinbase suicided, or + // - there are only 'bad' transactions, which aren't executed. 
In those cases, + // the coinbase gets no txfee, so isn't created, and thus needs to be touched + var ( + blockReward = big.NewInt(miningReward) + minerReward = new(big.Int).Set(blockReward) + perOmmer = new(big.Int).Div(blockReward, big.NewInt(32)) + ) + for _, ommer := range pre.Env.Ommers { + // Add 1/32th for each ommer included + minerReward.Add(minerReward, perOmmer) + // Add (8-delta)/8 + reward := big.NewInt(8) + reward.Sub(reward, big.NewInt(0).SetUint64(ommer.Delta)) + reward.Mul(reward, blockReward) + reward.Div(reward, big.NewInt(8)) + statedb.AddBalance(ommer.Address, reward) + } + statedb.AddBalance(pre.Env.Coinbase, minerReward) + } + // Commit block + root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber)) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not commit state: %v", err) + return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) + } + execRs := &ExecutionResult{ + StateRoot: root, + TxRoot: types.DeriveSha(includedTxs, trie.NewStackTrie(nil)), + ReceiptRoot: types.DeriveSha(receipts, trie.NewStackTrie(nil)), + Bloom: types.CreateBloom(receipts), + LogsHash: rlpHash(statedb.Logs()), + Receipts: receipts, + Rejected: rejectedTxs, + Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty), + GasUsed: (math.HexOrDecimal64)(gasUsed), + } + return statedb, execRs, nil +} + +func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB { + sdb := state.NewDatabase(db) + statedb, _ := state.New(common.Hash{}, sdb, nil) + for addr, a := range accounts { + statedb.SetCode(addr, a.Code) + statedb.SetNonce(addr, a.Nonce) + statedb.SetBalance(addr, a.Balance) + for k, v := range a.Storage { + statedb.SetState(addr, k, v) + } + } + // Commit and re-open to start with a clean state. + root, _ := statedb.Commit(false) + statedb, _ = state.New(root, sdb, nil) + return statedb +} + +func rlpHash(x interface{}) (h common.Hash) { + hw := sha3.NewLegacyKeccak256() + rlp.Encode(hw, x) + hw.Sum(h[:0]) + return h +} + +// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case +// the caller does not provide an explicit difficulty, but instead provides only +// parent timestamp + difficulty. +// Note: this method only works for ethash engine. +func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64, + parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int { + uncleHash := parentUncleHash + if uncleHash == (common.Hash{}) { + uncleHash = types.EmptyUncleHash + } + parent := &types.Header{ + ParentHash: common.Hash{}, + UncleHash: uncleHash, + Difficulty: parentDifficulty, + Number: new(big.Int).SetUint64(number - 1), + Time: parentTime, + } + return ethash.CalcDifficulty(config, currentTime, parent) +} diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go new file mode 100644 index 000000000000..b6054ea562ba --- /dev/null +++ b/cmd/evm/internal/t8ntool/flags.go @@ -0,0 +1,162 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package t8ntool + +import ( + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" +) + +var ( + TraceFlag = cli.BoolFlag{ + Name: "trace", + Usage: "Output full trace logs to files .jsonl", + } + TraceDisableMemoryFlag = cli.BoolTFlag{ + Name: "trace.nomemory", + Usage: "Disable full memory dump in traces (deprecated)", + } + TraceEnableMemoryFlag = cli.BoolFlag{ + Name: "trace.memory", + Usage: "Enable full memory dump in traces", + } + TraceDisableStackFlag = cli.BoolFlag{ + Name: "trace.nostack", + Usage: "Disable stack output in traces", + } + TraceDisableReturnDataFlag = cli.BoolTFlag{ + Name: "trace.noreturndata", + Usage: "Disable return data output in traces (deprecated)", + } + TraceEnableReturnDataFlag = cli.BoolFlag{ + Name: "trace.returndata", + Usage: "Enable return data output in traces", + } + OutputBasedir = cli.StringFlag{ + Name: "output.basedir", + Usage: "Specifies where output files are placed. Will be created if it does not exist.", + Value: "", + } + OutputBodyFlag = cli.StringFlag{ + Name: "output.body", + Usage: "If set, the RLP of the transactions (block body) will be written to this file.", + Value: "", + } + OutputAllocFlag = cli.StringFlag{ + Name: "output.alloc", + Usage: "Determines where to put the `alloc` of the post-state.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "alloc.json", + } + OutputResultFlag = cli.StringFlag{ + Name: "output.result", + Usage: "Determines where to put the `result` (stateroot, txroot etc) of the post-state.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "result.json", + } + OutputBlockFlag = cli.StringFlag{ + Name: "output.block", + Usage: "Determines where to put the `block` after building.\n" + + "\t`stdout` - into the stdout output\n" + + "\t`stderr` - into the stderr output\n" + + "\t - into the file ", + Value: "block.json", + } + InputAllocFlag = cli.StringFlag{ + Name: "input.alloc", + Usage: "`stdin` or file name of where to find the prestate alloc to use.", + Value: "alloc.json", + } + InputEnvFlag = cli.StringFlag{ + Name: "input.env", + Usage: "`stdin` or file name of where to find the prestate env to use.", + Value: "env.json", + } + InputTxsFlag = cli.StringFlag{ + Name: "input.txs", + Usage: "`stdin` or file name of where to find the transactions to apply. " + + "If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." 
+ + "The '.rlp' format is identical to the output.body format.", + Value: "txs.json", + } + InputHeaderFlag = cli.StringFlag{ + Name: "input.header", + Usage: "`stdin` or file name of where to find the block header to use.", + Value: "header.json", + } + InputOmmersFlag = cli.StringFlag{ + Name: "input.ommers", + Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.", + } + InputTxsRlpFlag = cli.StringFlag{ + Name: "input.txs", + Usage: "`stdin` or file name of where to find the transactions list in RLP form.", + Value: "txs.rlp", + } + SealCliqueFlag = cli.StringFlag{ + Name: "seal.clique", + Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.", + } + SealEthashFlag = cli.BoolFlag{ + Name: "seal.ethash", + Usage: "Seal block with ethash.", + } + SealEthashDirFlag = cli.StringFlag{ + Name: "seal.ethash.dir", + Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.", + } + SealEthashModeFlag = cli.StringFlag{ + Name: "seal.ethash.mode", + Usage: "Defines the type and amount of PoW verification an ethash engine makes.", + Value: "normal", + } + RewardFlag = cli.Int64Flag{ + Name: "state.reward", + Usage: "Mining reward. Set to -1 to disable", + Value: 0, + } + ChainIDFlag = cli.Int64Flag{ + Name: "state.chainid", + Usage: "ChainID to use", + Value: 1, + } + ForknameFlag = cli.StringFlag{ + Name: "state.fork", + Usage: fmt.Sprintf("Name of ruleset to use."+ + "\n\tAvailable forknames:"+ + "\n\t %v"+ + "\n\tAvailable extra eips:"+ + "\n\t %v"+ + "\n\tSyntax (+ExtraEip)", + strings.Join(tests.AvailableForks(), "\n\t "), + strings.Join(vm.ActivateableEips(), ", ")), + Value: "Istanbul", + } + VerbosityFlag = cli.IntFlag{ + Name: "verbosity", + Usage: "sets the verbosity level", + Value: 3, + } +) diff --git a/cmd/evm/internal/t8ntool/gen_header.go b/cmd/evm/internal/t8ntool/gen_header.go new file mode 100644 index 000000000000..196e49dd716f --- /dev/null +++ b/cmd/evm/internal/t8ntool/gen_header.go @@ -0,0 +1,135 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package t8ntool + +import ( + "encoding/json" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/types" +) + +var _ = (*headerMarshaling)(nil) + +// MarshalJSON marshals as JSON. 
+func (h header) MarshalJSON() ([]byte, error) { + type header struct { + ParentHash common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData"` + MixDigest common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + } + var enc header + enc.ParentHash = h.ParentHash + enc.OmmerHash = h.OmmerHash + enc.Coinbase = h.Coinbase + enc.Root = h.Root + enc.TxHash = h.TxHash + enc.ReceiptHash = h.ReceiptHash + enc.Bloom = h.Bloom + enc.Difficulty = (*math.HexOrDecimal256)(h.Difficulty) + enc.Number = (*math.HexOrDecimal256)(h.Number) + enc.GasLimit = math.HexOrDecimal64(h.GasLimit) + enc.GasUsed = math.HexOrDecimal64(h.GasUsed) + enc.Time = math.HexOrDecimal64(h.Time) + enc.Extra = h.Extra + enc.MixDigest = h.MixDigest + enc.Nonce = h.Nonce + enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (h *header) UnmarshalJSON(input []byte) error { + type header struct { + ParentHash *common.Hash `json:"parentHash"` + OmmerHash *common.Hash `json:"sha3Uncles"` + Coinbase *common.Address `json:"miner"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot"` + ReceiptHash *common.Hash `json:"receiptsRoot"` + Bloom *types.Bloom `json:"logsBloom"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *types.BlockNonce `json:"nonce"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` + } + var dec header + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.ParentHash != nil { + h.ParentHash = *dec.ParentHash + } + if dec.OmmerHash != nil { + h.OmmerHash = dec.OmmerHash + } + if dec.Coinbase != nil { + h.Coinbase = dec.Coinbase + } + if dec.Root == nil { + return errors.New("missing required field 'stateRoot' for header") + } + h.Root = *dec.Root + if dec.TxHash != nil { + h.TxHash = dec.TxHash + } + if dec.ReceiptHash != nil { + h.ReceiptHash = dec.ReceiptHash + } + if dec.Bloom != nil { + h.Bloom = *dec.Bloom + } + if dec.Difficulty != nil { + h.Difficulty = (*big.Int)(dec.Difficulty) + } + if dec.Number == nil { + return errors.New("missing required field 'number' for header") + } + h.Number = (*big.Int)(dec.Number) + if dec.GasLimit == nil { + return errors.New("missing required field 'gasLimit' for header") + } + h.GasLimit = uint64(*dec.GasLimit) + if dec.GasUsed != nil { + h.GasUsed = uint64(*dec.GasUsed) + } + if dec.Time == nil { + return errors.New("missing required field 'timestamp' for header") + } + h.Time = 
uint64(*dec.Time) + if dec.Extra != nil { + h.Extra = *dec.Extra + } + if dec.MixDigest != nil { + h.MixDigest = *dec.MixDigest + } + if dec.Nonce != nil { + h.Nonce = dec.Nonce + } + if dec.BaseFee != nil { + h.BaseFee = (*big.Int)(dec.BaseFee) + } + return nil +} diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go new file mode 100644 index 000000000000..a6d774cdabcf --- /dev/null +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -0,0 +1,109 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package t8ntool + +import ( + "encoding/json" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" +) + +var _ = (*stEnvMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (s stEnv) MarshalJSON() ([]byte, error) { + type stEnv struct { + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` + } + var enc stEnv + enc.Coinbase = common.UnprefixedAddress(s.Coinbase) + enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) + enc.Random = (*math.HexOrDecimal256)(s.Random) + enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) + enc.GasLimit = math.HexOrDecimal64(s.GasLimit) + enc.Number = math.HexOrDecimal64(s.Number) + enc.Timestamp = math.HexOrDecimal64(s.Timestamp) + enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp) + enc.BlockHashes = s.BlockHashes + enc.Ommers = s.Ommers + enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) + enc.ParentUncleHash = s.ParentUncleHash + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (s *stEnv) UnmarshalJSON(input []byte) error { + type stEnv struct { + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *common.Hash `json:"parentUncleHash"` + } + var dec stEnv + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Coinbase == nil { + return errors.New("missing required field 'currentCoinbase' for stEnv") + } + s.Coinbase = common.Address(*dec.Coinbase) + if dec.Difficulty != nil { + s.Difficulty = (*big.Int)(dec.Difficulty) + } + if dec.Random != nil { + s.Random = (*big.Int)(dec.Random) + } + if dec.ParentDifficulty != nil { + s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) + } + if dec.GasLimit == nil { + return errors.New("missing required field 'currentGasLimit' for stEnv") + } + s.GasLimit = uint64(*dec.GasLimit) + if dec.Number == nil { + return errors.New("missing required field 'currentNumber' for stEnv") + } + s.Number = uint64(*dec.Number) + if dec.Timestamp == nil { + return errors.New("missing required field 'currentTimestamp' for stEnv") + } + s.Timestamp = uint64(*dec.Timestamp) + if dec.ParentTimestamp != nil { + s.ParentTimestamp = uint64(*dec.ParentTimestamp) + } + if dec.BlockHashes != nil { + s.BlockHashes = dec.BlockHashes + } + if dec.Ommers != nil { + s.Ommers = dec.Ommers + } + if dec.BaseFee != nil { + s.BaseFee = (*big.Int)(dec.BaseFee) + } + if dec.ParentUncleHash != nil { + s.ParentUncleHash = *dec.ParentUncleHash + } + return nil +} diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go new file mode 100644 index 000000000000..6f1c964ada02 --- /dev/null +++ b/cmd/evm/internal/t8ntool/transaction.go @@ -0,0 +1,179 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package t8ntool + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" +) + +type result struct { + Error error + Address common.Address + Hash common.Hash + IntrinsicGas uint64 +} + +// MarshalJSON marshals as JSON with a hash. +func (r *result) MarshalJSON() ([]byte, error) { + type xx struct { + Error string `json:"error,omitempty"` + Address *common.Address `json:"address,omitempty"` + Hash *common.Hash `json:"hash,omitempty"` + IntrinsicGas hexutil.Uint64 `json:"intrinsicGas,omitempty"` + } + var out xx + if r.Error != nil { + out.Error = r.Error.Error() + } + if r.Address != (common.Address{}) { + out.Address = &r.Address + } + if r.Hash != (common.Hash{}) { + out.Hash = &r.Hash + } + out.IntrinsicGas = hexutil.Uint64(r.IntrinsicGas) + return json.Marshal(out) +} + +func Transaction(ctx *cli.Context) error { + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + + var ( + err error + ) + // We need to load the transactions. May be either in stdin input or in files. + // Check if anything needs to be read from stdin + var ( + txStr = ctx.String(InputTxsFlag.Name) + inputData = &input{} + chainConfig *params.ChainConfig + ) + // Construct the chainconfig + if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { + return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err)) + } else { + chainConfig = cConf + } + // Set the chain id + chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) + var body hexutil.Bytes + if txStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(inputData); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + // Decode the body of already signed transactions + body = common.FromHex(inputData.TxRlp) + } else { + // Read input from file + inFile, err := os.Open(txStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if strings.HasSuffix(txStr, ".rlp") { + if err := decoder.Decode(&body); err != nil { + return err + } + } else { + return NewError(ErrorIO, errors.New("only rlp supported")) + } + } + signer := types.MakeSigner(chainConfig, new(big.Int)) + // We now have the transactions in 'body', which is supposed to be an + // rlp list of transactions + it, err := rlp.NewListIterator([]byte(body)) + if err != nil { + return err + } + var results []result + for it.Next() { + if err := it.Err(); err != nil { + return NewError(ErrorIO, err) + } + var tx types.Transaction + err := rlp.DecodeBytes(it.Value(), &tx) + if err != nil { + results = append(results, result{Error: err}) + continue + } + r := result{Hash: tx.Hash()} + if sender, err := types.Sender(signer, &tx); err != nil { + r.Error = err + results = append(results, r) + continue + } else { + r.Address = sender + } + // Check intrinsic gas + if gas, err := core.IntrinsicGas(tx.Data(), 
tx.AccessList(), tx.To() == nil, + chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil { + r.Error = err + results = append(results, r) + continue + } else { + r.IntrinsicGas = gas + if tx.Gas() < gas { + r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas) + results = append(results, r) + continue + } + } + // Validate <256bit fields + switch { + case tx.Nonce()+1 < tx.Nonce(): + r.Error = errors.New("nonce exceeds 2^64-1") + case tx.Value().BitLen() > 256: + r.Error = errors.New("value exceeds 256 bits") + case tx.GasPrice().BitLen() > 256: + r.Error = errors.New("gasPrice exceeds 256 bits") + case tx.GasTipCap().BitLen() > 256: + r.Error = errors.New("maxPriorityFeePerGas exceeds 256 bits") + case tx.GasFeeCap().BitLen() > 256: + r.Error = errors.New("maxFeePerGas exceeds 256 bits") + case tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0: + r.Error = errors.New("maxFeePerGas < maxPriorityFeePerGas") + case new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256: + r.Error = errors.New("gas * gasPrice exceeds 256 bits") + case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256: + r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits") + } + results = append(results, r) + } + out, err := json.MarshalIndent(results, "", " ") + fmt.Println(string(out)) + return err +} diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go new file mode 100644 index 000000000000..097f9ce65c55 --- /dev/null +++ b/cmd/evm/internal/t8ntool/transition.go @@ -0,0 +1,447 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package t8ntool + +import ( + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/big" + "os" + "path" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers/logger" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/tests" + "gopkg.in/urfave/cli.v1" +) + +const ( + ErrorEVM = 2 + ErrorConfig = 3 + ErrorMissingBlockhash = 4 + + ErrorJson = 10 + ErrorIO = 11 + ErrorRlp = 12 + + stdinSelector = "stdin" +) + +type NumberedError struct { + errorCode int + err error +} + +func NewError(errorCode int, err error) *NumberedError { + return &NumberedError{errorCode, err} +} + +func (n *NumberedError) Error() string { + return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) +} + +func (n *NumberedError) ExitCode() int { + return n.errorCode +} + +// compile-time conformance test +var ( + _ cli.ExitCoder = (*NumberedError)(nil) +) + +type input struct { + Alloc core.GenesisAlloc `json:"alloc,omitempty"` + Env *stEnv `json:"env,omitempty"` + Txs []*txWithKey `json:"txs,omitempty"` + TxRlp string `json:"txsRlp,omitempty"` +} + +func Transition(ctx *cli.Context) error { + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + + var ( + err error + tracer vm.EVMLogger + ) + var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) + + baseDir, err := createBasedir(ctx) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err)) + } + if ctx.Bool(TraceFlag.Name) { + if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) { + return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name)) + } + if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) { + return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name)) + } + if ctx.IsSet(TraceDisableMemoryFlag.Name) { + log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name)) + } + if ctx.IsSet(TraceDisableReturnDataFlag.Name) { + log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name)) + } + // Configure the EVM logger + logConfig := &logger.Config{ + DisableStack: ctx.Bool(TraceDisableStackFlag.Name), + EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name), + EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name), + Debug: true, + } + var prevFile *os.File + // This one closes the last file + defer func() { + if prevFile != nil { + prevFile.Close() + } + }() + getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { + if prevFile != nil { + prevFile.Close() + } + traceFile, err := os.Create(path.Join(baseDir, 
fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String()))) + if err != nil { + return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) + } + prevFile = traceFile + return logger.NewJSONLogger(logConfig, traceFile), nil + } + } else { + getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) { + return nil, nil + } + } + // We need to load three things: alloc, env and transactions. May be either in + // stdin input or in files. + // Check if anything needs to be read from stdin + var ( + prestate Prestate + txs types.Transactions // txs to apply + allocStr = ctx.String(InputAllocFlag.Name) + + envStr = ctx.String(InputEnvFlag.Name) + txStr = ctx.String(InputTxsFlag.Name) + inputData = &input{} + ) + // Figure out the prestate alloc + if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { + decoder := json.NewDecoder(os.Stdin) + if err := decoder.Decode(inputData); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) + } + } + if allocStr != stdinSelector { + if err := readFile(allocStr, "alloc", &inputData.Alloc); err != nil { + return err + } + } + prestate.Pre = inputData.Alloc + + // Set the block environment + if envStr != stdinSelector { + var env stEnv + if err := readFile(envStr, "env", &env); err != nil { + return err + } + inputData.Env = &env + } + prestate.Env = *inputData.Env + + vmConfig := vm.Config{ + Tracer: tracer, + Debug: (tracer != nil), + } + // Construct the chainconfig + var chainConfig *params.ChainConfig + if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil { + return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err)) + } else { + chainConfig = cConf + vmConfig.ExtraEips = extraEips + } + // Set the chain id + chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) + + var txsWithKeys []*txWithKey + if txStr != stdinSelector { + inFile, err := os.Open(txStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if strings.HasSuffix(txStr, ".rlp") { + var body hexutil.Bytes + if err := decoder.Decode(&body); err != nil { + return err + } + var txs types.Transactions + if err := rlp.DecodeBytes(body, &txs); err != nil { + return err + } + for _, tx := range txs { + txsWithKeys = append(txsWithKeys, &txWithKey{ + key: nil, + tx: tx, + }) + } + } else { + if err := decoder.Decode(&txsWithKeys); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) + } + } + } else { + if len(inputData.TxRlp) > 0 { + // Decode the body of already signed transactions + body := common.FromHex(inputData.TxRlp) + var txs types.Transactions + if err := rlp.DecodeBytes(body, &txs); err != nil { + return err + } + for _, tx := range txs { + txsWithKeys = append(txsWithKeys, &txWithKey{ + key: nil, + tx: tx, + }) + } + } else { + // JSON encoded transactions + txsWithKeys = inputData.Txs + } + } + // We may have to sign the transactions. 
+ signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number))) + + if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) + } + // Sanity check, to not `panic` in state_transition + if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { + if prestate.Env.BaseFee == nil { + return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) + } + } + // Sanity check, to not `panic` in state_transition + if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { + return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules")) + } + if env := prestate.Env; env.Difficulty == nil { + // If difficulty was not provided by caller, we need to calculate it. + switch { + case env.ParentDifficulty == nil: + return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) + case env.Number == 0: + return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0")) + case env.Timestamp <= env.ParentTimestamp: + return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", + env.Timestamp, env.ParentTimestamp)) + } + prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, + env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) + } + // Run the test and aggregate the result + s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + if err != nil { + return err + } + body, _ := rlp.EncodeToBytes(txs) + // Dump the excution result + collector := make(Alloc) + s.DumpToCollector(collector, nil) + return dispatchOutput(ctx, baseDir, result, collector, body) +} + +// txWithKey is a helper-struct, to allow us to use the types.Transaction along with +// a `secretKey`-field, for input +type txWithKey struct { + key *ecdsa.PrivateKey + tx *types.Transaction + protected bool +} + +func (t *txWithKey) UnmarshalJSON(input []byte) error { + // Read the metadata, if present + type txMetadata struct { + Key *common.Hash `json:"secretKey"` + Protected *bool `json:"protected"` + } + var data txMetadata + if err := json.Unmarshal(input, &data); err != nil { + return err + } + if data.Key != nil { + k := data.Key.Hex()[2:] + if ecdsaKey, err := crypto.HexToECDSA(k); err != nil { + return err + } else { + t.key = ecdsaKey + } + } + if data.Protected != nil { + t.protected = *data.Protected + } else { + t.protected = true + } + // Now, read the transaction itself + var tx types.Transaction + if err := json.Unmarshal(input, &tx); err != nil { + return err + } + t.tx = &tx + return nil +} + +// signUnsignedTransactions converts the input txs to canonical transactions. +// +// The transactions can have two forms, either +// 1. unsigned or +// 2. signed +// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set. +// If so, we sign it here and now, with the given `secretKey` +// If the condition above is not met, then it's considered a signed transaction. 
+// +// To manage this, we read the transactions twice, first trying to read the secretKeys, +// and secondly to read them with the standard tx json format +func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) { + var signedTxs []*types.Transaction + for i, txWithKey := range txs { + tx := txWithKey.tx + key := txWithKey.key + v, r, s := tx.RawSignatureValues() + if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 { + // This transaction needs to be signed + var ( + signed *types.Transaction + err error + ) + if txWithKey.protected { + signed, err = types.SignTx(tx, signer, key) + } else { + signed, err = types.SignTx(tx, types.FrontierSigner{}, key) + } + if err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) + } + signedTxs = append(signedTxs, signed) + } else { + // Already signed + signedTxs = append(signedTxs, tx) + } + } + return signedTxs, nil +} + +type Alloc map[common.Address]core.GenesisAccount + +func (g Alloc) OnRoot(common.Hash) {} + +func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) { + balance, _ := new(big.Int).SetString(dumpAccount.Balance, 10) + var storage map[common.Hash]common.Hash + if dumpAccount.Storage != nil { + storage = make(map[common.Hash]common.Hash) + for k, v := range dumpAccount.Storage { + storage[k] = common.HexToHash(v) + } + } + genesisAccount := core.GenesisAccount{ + Code: dumpAccount.Code, + Storage: storage, + Balance: balance, + Nonce: dumpAccount.Nonce, + } + g[addr] = genesisAccount +} + +// saveFile marshalls the object to the given file +func saveFile(baseDir, filename string, data interface{}) error { + b, err := json.MarshalIndent(data, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + location := path.Join(baseDir, filename) + if err = ioutil.WriteFile(location, b, 0644); err != nil { + return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) + } + log.Info("Wrote file", "file", location) + return nil +} + +// dispatchOutput writes the output data to either stderr or stdout, or to the specified +// files +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { + stdOutObject := make(map[string]interface{}) + stdErrObject := make(map[string]interface{}) + dispatch := func(baseDir, fName, name string, obj interface{}) error { + switch fName { + case "stdout": + stdOutObject[name] = obj + case "stderr": + stdErrObject[name] = obj + case "": + // don't save + default: // save to file + if err := saveFile(baseDir, fName, obj); err != nil { + return err + } + } + return nil + } + if err := dispatch(baseDir, ctx.String(OutputAllocFlag.Name), "alloc", alloc); err != nil { + return err + } + if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil { + return err + } + if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil { + return err + } + if len(stdOutObject) > 0 { + b, err := json.MarshalIndent(stdOutObject, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + os.Stdout.Write(b) + os.Stdout.WriteString("\n") + } + if len(stdErrObject) > 0 { + b, err := json.MarshalIndent(stdErrObject, "", " ") + if err != nil { + return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) + } + os.Stderr.Write(b) + os.Stderr.WriteString("\n") + } + return nil +} 
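
The `dispatch` helper at the end of the hunk above encodes the output convention shared by the `--output.alloc`, `--output.result` and `--output.body` flags: `stdout` and `stderr` select a stream, an empty value drops the output, and anything else is treated as a file name under the base directory. The following is a minimal standalone Go sketch of that convention, added here purely as an illustration; the `writeTo` helper and the example values are invented for this sketch and are not part of the patch.

```
// Minimal standalone sketch (not part of this patch) of the t8n output-dispatch
// convention: "stdout"/"stderr" pick a stream, "" drops the output, anything
// else is treated as a file name under baseDir. The helper name writeTo is
// invented for illustration only.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// writeTo marshals obj as indented JSON and routes it according to dest.
func writeTo(baseDir, dest string, obj interface{}) error {
	b, err := json.MarshalIndent(obj, "", "  ")
	if err != nil {
		return fmt.Errorf("failed marshalling output: %v", err)
	}
	switch dest {
	case "stdout":
		os.Stdout.Write(append(b, '\n'))
	case "stderr":
		os.Stderr.Write(append(b, '\n'))
	case "":
		// Empty destination: drop the output silently.
	default:
		// Anything else is interpreted as a file name under baseDir.
		return os.WriteFile(filepath.Join(baseDir, dest), b, 0644)
	}
	return nil
}

func main() {
	// Hypothetical result object, standing in for the tool's ExecutionResult.
	result := map[string]string{"stateRoot": "0x..."}
	if err := writeTo(".", "stdout", result); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

One detail worth noting about the actual patch: `dispatchOutput` first collects everything aimed at a stream into `stdOutObject`/`stdErrObject` maps and marshals each map once at the end, so even when several outputs target the same stream the emitted JSON stays a single well-formed object.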
diff --git a/cmd/evm/internal/t8ntool/utils.go b/cmd/evm/internal/t8ntool/utils.go new file mode 100644 index 000000000000..1c54f09bf417 --- /dev/null +++ b/cmd/evm/internal/t8ntool/utils.go @@ -0,0 +1,54 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package t8ntool + +import ( + "encoding/json" + "fmt" + "os" + + "gopkg.in/urfave/cli.v1" +) + +// readFile reads the json-data in the provided path and marshals into dest. +func readFile(path, desc string, dest interface{}) error { + inFile, err := os.Open(path) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading %s file: %v", desc, err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if err := decoder.Decode(dest); err != nil { + return NewError(ErrorJson, fmt.Errorf("failed unmarshaling %s file: %v", desc, err)) + } + return nil +} + +// createBasedir makes sure the basedir exists, if user specified one. +func createBasedir(ctx *cli.Context) (string, error) { + baseDir := "" + if ctx.IsSet(OutputBasedir.Name) { + if base := ctx.String(OutputBasedir.Name); len(base) > 0 { + err := os.MkdirAll(base, 0755) // //rw-r--r-- + if err != nil { + return "", err + } + baseDir = base + } + } + return baseDir, nil +} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 72cb1ab852b1..2f404d48e903 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -22,7 +22,9 @@ import ( "math/big" "os" + "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/internal/flags" "gopkg.in/urfave/cli.v1" ) @@ -30,7 +32,7 @@ var gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags) var gitDate = "" var ( - app = utils.NewApp(gitCommit, gitDate, "the evm command line interface") + app = flags.NewApp(gitCommit, gitDate, "the evm command line interface") DebugFlag = cli.BoolFlag{ Name: "debug", @@ -111,7 +113,7 @@ var ( Name: "receiver", Usage: "The transaction receiver (execution context)", } - DisableMemoryFlag = cli.BoolFlag{ + DisableMemoryFlag = cli.BoolTFlag{ Name: "nomemory", Usage: "disable memory output", } @@ -119,13 +121,73 @@ var ( Name: "nostack", Usage: "disable stack output", } - EVMInterpreterFlag = cli.StringFlag{ - Name: "vm.evm", - Usage: "External EVM configuration (default = built-in interpreter)", - Value: "", + DisableStorageFlag = cli.BoolFlag{ + Name: "nostorage", + Usage: "disable storage output", + } + DisableReturnDataFlag = cli.BoolTFlag{ + Name: "noreturndata", + Usage: "enable return data output", } ) +var stateTransitionCommand = cli.Command{ + Name: "transition", + Aliases: []string{"t8n"}, + Usage: "executes a full state transition", + Action: t8ntool.Transition, + Flags: []cli.Flag{ + t8ntool.TraceFlag, + t8ntool.TraceDisableMemoryFlag, + t8ntool.TraceEnableMemoryFlag, + t8ntool.TraceDisableStackFlag, + t8ntool.TraceDisableReturnDataFlag, + 
t8ntool.TraceEnableReturnDataFlag, + t8ntool.OutputBasedir, + t8ntool.OutputAllocFlag, + t8ntool.OutputResultFlag, + t8ntool.OutputBodyFlag, + t8ntool.InputAllocFlag, + t8ntool.InputEnvFlag, + t8ntool.InputTxsFlag, + t8ntool.ForknameFlag, + t8ntool.ChainIDFlag, + t8ntool.RewardFlag, + t8ntool.VerbosityFlag, + }, +} +var transactionCommand = cli.Command{ + Name: "transaction", + Aliases: []string{"t9n"}, + Usage: "performs transaction validation", + Action: t8ntool.Transaction, + Flags: []cli.Flag{ + t8ntool.InputTxsFlag, + t8ntool.ChainIDFlag, + t8ntool.ForknameFlag, + t8ntool.VerbosityFlag, + }, +} + +var blockBuilderCommand = cli.Command{ + Name: "block-builder", + Aliases: []string{"b11r"}, + Usage: "builds a block", + Action: t8ntool.BuildBlock, + Flags: []cli.Flag{ + t8ntool.OutputBasedir, + t8ntool.OutputBlockFlag, + t8ntool.InputHeaderFlag, + t8ntool.InputOmmersFlag, + t8ntool.InputTxsRlpFlag, + t8ntool.SealCliqueFlag, + t8ntool.SealEthashFlag, + t8ntool.SealEthashDirFlag, + t8ntool.SealEthashModeFlag, + t8ntool.VerbosityFlag, + }, +} + func init() { app.Flags = []cli.Flag{ BenchFlag, @@ -149,20 +211,28 @@ func init() { ReceiverFlag, DisableMemoryFlag, DisableStackFlag, - EVMInterpreterFlag, + DisableStorageFlag, + DisableReturnDataFlag, } app.Commands = []cli.Command{ compileCommand, disasmCommand, runCommand, stateTestCommand, + stateTransitionCommand, + transactionCommand, + blockBuilderCommand, } - cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } func main() { if err := app.Run(os.Args); err != nil { + code := 1 + if ec, ok := err.(*t8ntool.NumberedError); ok { + code = ec.ExitCode() + } fmt.Fprintln(os.Stderr, err) - os.Exit(1) + os.Exit(code) } } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index da301ff5ee5e..889de43e0add 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -36,9 +36,10 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm/runtime" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var runCommand = cli.Command{ @@ -70,14 +71,13 @@ func readGenesis(genesisPath string) *core.Genesis { return genesis } -func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, uint64, time.Duration, error) { - var ( - output []byte - gasLeft uint64 - execTime time.Duration - err error - ) +type execStats struct { + time time.Duration // The execution time. + allocs int64 // The number of heap allocations during execution. + bytesAllocated int64 // The cumulative number of bytes allocated during execution. +} +func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) { if bench { result := testing.Benchmark(func(b *testing.B) { for i := 0; i < b.N; i++ { @@ -87,29 +87,38 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, uin // Get the average execution time from the benchmarking result. // There are other useful stats here that could be reported. 
- execTime = time.Duration(result.NsPerOp()) + stats.time = time.Duration(result.NsPerOp()) + stats.allocs = result.AllocsPerOp() + stats.bytesAllocated = result.AllocedBytesPerOp() } else { + var memStatsBefore, memStatsAfter goruntime.MemStats + goruntime.ReadMemStats(&memStatsBefore) startTime := time.Now() output, gasLeft, err = execFunc() - execTime = time.Since(startTime) + stats.time = time.Since(startTime) + goruntime.ReadMemStats(&memStatsAfter) + stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs) + stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc) } - return output, gasLeft, execTime, err + return output, gasLeft, stats, err } func runCmd(ctx *cli.Context) error { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) log.Root().SetHandler(glogger) - logconfig := &vm.LogConfig{ - DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), - DisableStack: ctx.GlobalBool(DisableStackFlag.Name), - Debug: ctx.GlobalBool(DebugFlag.Name), + logconfig := &logger.Config{ + EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), + DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), + EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name), + Debug: ctx.GlobalBool(DebugFlag.Name), } var ( - tracer vm.Tracer - debugLogger *vm.StructLogger + tracer vm.EVMLogger + debugLogger *logger.StructLogger statedb *state.StateDB chainConfig *params.ChainConfig sender = common.BytesToAddress([]byte("sender")) @@ -117,22 +126,22 @@ func runCmd(ctx *cli.Context) error { genesisConfig *core.Genesis ) if ctx.GlobalBool(MachineFlag.Name) { - tracer = vm.NewJSONLogger(logconfig, os.Stdout) + tracer = logger.NewJSONLogger(logconfig, os.Stdout) } else if ctx.GlobalBool(DebugFlag.Name) { - debugLogger = vm.NewStructLogger(logconfig) + debugLogger = logger.NewStructLogger(logconfig) tracer = debugLogger } else { - debugLogger = vm.NewStructLogger(logconfig) + debugLogger = logger.NewStructLogger(logconfig) } if ctx.GlobalString(GenesisFlag.Name) != "" { gen := readGenesis(ctx.GlobalString(GenesisFlag.Name)) genesisConfig = gen db := rawdb.NewMemoryDatabase() genesis := gen.ToBlock(db) - statedb, _ = state.New(genesis.Root(), state.NewDatabase(db)) + statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil) chainConfig = gen.Config } else { - statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) + statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) genesisConfig = new(core.Genesis) } if ctx.GlobalString(SenderFlag.Name) != "" { @@ -203,9 +212,8 @@ func runCmd(ctx *cli.Context) error { Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), EVMConfig: vm.Config{ - Tracer: tracer, - Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), - EVMInterpreter: ctx.GlobalString(EVMInterpreterFlag.Name), + Tracer: tracer, + Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), }, } @@ -256,12 +264,13 @@ func runCmd(ctx *cli.Context) error { } } - output, leftOverGas, execTime, err := timedExec(ctx.GlobalBool(BenchFlag.Name), execFunc) + bench := ctx.GlobalBool(BenchFlag.Name) + output, leftOverGas, stats, err := timedExec(bench, execFunc) if ctx.GlobalBool(DumpFlag.Name) { statedb.Commit(true) statedb.IntermediateRoot(true) - fmt.Println(string(statedb.Dump(false, false, 
true))) + fmt.Println(string(statedb.Dump(nil))) } if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" { @@ -280,23 +289,18 @@ func runCmd(ctx *cli.Context) error { if ctx.GlobalBool(DebugFlag.Name) { if debugLogger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") - vm.WriteTrace(os.Stderr, debugLogger.StructLogs()) + logger.WriteTrace(os.Stderr, debugLogger.StructLogs()) } fmt.Fprintln(os.Stderr, "#### LOGS ####") - vm.WriteLogs(os.Stderr, statedb.Logs()) + logger.WriteLogs(os.Stderr, statedb.Logs()) } - if ctx.GlobalBool(StatDumpFlag.Name) { - var mem goruntime.MemStats - goruntime.ReadMemStats(&mem) - fmt.Fprintf(os.Stderr, `evm execution time: %v -heap objects: %d -allocations: %d -total allocations: %d -GC calls: %d -Gas used: %d - -`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas) + if bench || ctx.GlobalBool(StatDumpFlag.Name) { + fmt.Fprintf(os.Stderr, `EVM gas used: %d +execution time: %v +allocations: %d +allocated bytes: %d +`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated) } if tracer == nil { fmt.Printf("0x%x\n", output) diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index cef2aedb5e68..90596d9b3c60 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -25,10 +25,11 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var stateTestCommand = cli.Command{ @@ -58,24 +59,26 @@ func stateTestCmd(ctx *cli.Context) error { log.Root().SetHandler(glogger) // Configure the EVM logger - config := &vm.LogConfig{ - DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), - DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + config := &logger.Config{ + EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), + DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), + EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name), } var ( - tracer vm.Tracer - debugger *vm.StructLogger + tracer vm.EVMLogger + debugger *logger.StructLogger ) switch { case ctx.GlobalBool(MachineFlag.Name): - tracer = vm.NewJSONLogger(config, os.Stderr) + tracer = logger.NewJSONLogger(config, os.Stderr) case ctx.GlobalBool(DebugFlag.Name): - debugger = vm.NewStructLogger(config) + debugger = logger.NewStructLogger(config) tracer = debugger default: - debugger = vm.NewStructLogger(config) + debugger = logger.NewStructLogger(config) } // Load the test content from the input file src, err := ioutil.ReadFile(ctx.Args().First()) @@ -96,16 +99,16 @@ func stateTestCmd(ctx *cli.Context) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - state, err := test.Run(st, cfg) + _, s, err := test.Run(st, cfg, false) // print state root for evmlab tracing - if ctx.GlobalBool(MachineFlag.Name) && state != nil { - fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false)) + if ctx.GlobalBool(MachineFlag.Name) && s != nil { + fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false)) } if err != nil { // Test failed, mark as so and dump any state to aid debugging result.Pass, result.Error = false, err.Error() - if ctx.GlobalBool(DumpFlag.Name) && state != nil { - dump := 
state.RawDump(false, false, true) + if ctx.GlobalBool(DumpFlag.Name) && s != nil { + dump := s.RawDump(nil) result.State = &dump } } @@ -116,7 +119,7 @@ func stateTestCmd(ctx *cli.Context) error { if ctx.GlobalBool(DebugFlag.Name) { if debugger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") - vm.WriteTrace(os.Stderr, debugger.StructLogs()) + logger.WriteTrace(os.Stderr, debugger.StructLogs()) } } } diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go new file mode 100644 index 000000000000..3f0bd3185f1e --- /dev/null +++ b/cmd/evm/t8n_test.go @@ -0,0 +1,477 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" + "github.com/ethereum/go-ethereum/internal/cmdtest" +) + +func TestMain(m *testing.M) { + // Run the app if we've been exec'd as "ethkey-test" in runEthkey. + reexec.Register("evm-test", func() { + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + }) + // check if we have been reexec'd + if reexec.Init() { + return + } + os.Exit(m.Run()) +} + +type testT8n struct { + *cmdtest.TestCmd +} + +type t8nInput struct { + inAlloc string + inTxs string + inEnv string + stFork string + stReward string +} + +func (args *t8nInput) get(base string) []string { + var out []string + if opt := args.inAlloc; opt != "" { + out = append(out, "--input.alloc") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inTxs; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inEnv; opt != "" { + out = append(out, "--input.env") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.stFork; opt != "" { + out = append(out, "--state.fork", opt) + } + if opt := args.stReward; opt != "" { + out = append(out, "--state.reward", opt) + } + return out +} + +type t8nOutput struct { + alloc bool + result bool + body bool +} + +func (args *t8nOutput) get() (out []string) { + if args.body { + out = append(out, "--output.body", "stdout") + } else { + out = append(out, "--output.body", "") // empty means ignore + } + if args.result { + out = append(out, "--output.result", "stdout") + } else { + out = append(out, "--output.result", "") + } + if args.alloc { + out = append(out, "--output.alloc", "stdout") + } else { + out = append(out, "--output.alloc", "") + } + return out +} + +func TestT8n(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input t8nInput + output t8nOutput + expExitCode int + expOut string + }{ + { // Test exit (3) on bad config + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Frontier+1346", "", + }, + output: t8nOutput{alloc: true, result: true}, + expExitCode: 3, + }, + { + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Byzantium", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // blockhash test + base: "./testdata/3", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // missing blockhash test + base: "./testdata/4", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", "", + }, + output: t8nOutput{alloc: true, result: true}, + expExitCode: 4, + }, + { // Uncle test + base: "./testdata/5", 
+ input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Byzantium", "0x80", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // Sign json transactions + base: "./testdata/13", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{body: true}, + expOut: "exp.json", + }, + { // Already signed transactions + base: "./testdata/13", + input: t8nInput{ + "alloc.json", "signed_txs.rlp", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp2.json", + }, + { // Difficulty calculation - no uncles + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp.json", + }, + { // Difficulty calculation - with uncles + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.uncles.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp2.json", + }, + { // Difficulty calculation - with ommers + Berlin + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.uncles.json", "Berlin", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_berlin.json", + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_london.json", + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "ArrowGlacier", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_arrowglacier.json", + }, + { // Sign unprotected (pre-EIP155) transaction + base: "./testdata/23", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", "", + }, + output: t8nOutput{result: true}, + expOut: "exp.json", + }, + } { + + args := []string{"t8n"} + args = append(args, tc.output.get()...) + args = append(args, tc.input.get(tc.base)...) + var qArgs []string // quoted args for debugging purposes + for _, arg := range args { + if len(arg) == 0 { + qArgs = append(qArgs, `""`) + } else { + qArgs = append(qArgs, arg) + } + } + tt.Logf("args: %v\n", strings.Join(qArgs, " ")) + tt.Run("evm-test", args...) 
+ // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +type t9nInput struct { + inTxs string + stFork string +} + +func (args *t9nInput) get(base string) []string { + var out []string + if opt := args.inTxs; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.stFork; opt != "" { + out = append(out, "--state.fork", opt) + } + return out +} + +func TestT9n(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input t9nInput + expExitCode int + expOut string + }{ + { // London txs on homestead + base: "./testdata/15", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "Homestead", + }, + expOut: "exp.json", + }, + { // London txs on London + base: "./testdata/15", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "London", + }, + expOut: "exp2.json", + }, + { // An RLP list (a blockheader really) + base: "./testdata/15", + input: t9nInput{ + inTxs: "blockheader.rlp", + stFork: "London", + }, + expOut: "exp3.json", + }, + { // Transactions with too low gas + base: "./testdata/16", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "London", + }, + expOut: "exp.json", + }, + { // Transactions with value exceeding 256 bits + base: "./testdata/17", + input: t9nInput{ + inTxs: "signed_txs.rlp", + stFork: "London", + }, + expOut: "exp.json", + }, + { // Invalid RLP + base: "./testdata/18", + input: t9nInput{ + inTxs: "invalid.rlp", + stFork: "London", + }, + expExitCode: t8ntool.ErrorIO, + }, + } { + + args := []string{"t9n"} + args = append(args, tc.input.get(tc.base)...) + + tt.Run("evm-test", args...) + tt.Logf("args:\n go run . 
%v\n", strings.Join(args, " ")) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Logf(string(have)) + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +type b11rInput struct { + inEnv string + inOmmersRlp string + inTxsRlp string + inClique string + ethash bool + ethashMode string + ethashDir string +} + +func (args *b11rInput) get(base string) []string { + var out []string + if opt := args.inEnv; opt != "" { + out = append(out, "--input.header") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inOmmersRlp; opt != "" { + out = append(out, "--input.ommers") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inTxsRlp; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inClique; opt != "" { + out = append(out, "--seal.clique") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if args.ethash { + out = append(out, "--seal.ethash") + } + if opt := args.ethashMode; opt != "" { + out = append(out, "--seal.ethash.mode") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.ethashDir; opt != "" { + out = append(out, "--seal.ethash.dir") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + out = append(out, "--output.block") + out = append(out, "stdout") + return out +} + +func TestB11r(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input b11rInput + expExitCode int + expOut string + }{ + { // unsealed block + base: "./testdata/20", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + }, + expOut: "exp.json", + }, + { // ethash test seal + base: "./testdata/21", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + }, + expOut: "exp.json", + }, + { // clique test seal + base: "./testdata/21", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + inClique: "clique.json", + }, + expOut: "exp-clique.json", + }, + { // block with ommers + base: "./testdata/22", + input: b11rInput{ + inEnv: "header.json", + inOmmersRlp: "ommers.json", + inTxsRlp: "txs.rlp", + }, + expOut: "exp.json", + }, + } { + + args := []string{"b11r"} + args = append(args, tc.input.get(tc.base)...) + + tt.Run("evm-test", args...) + tt.Logf("args:\n go run . 
%v\n", strings.Join(args, " ")) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Logf(string(have)) + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +// cmpJson compares the JSON in two byte slices. +func cmpJson(a, b []byte) (bool, error) { + var j, j2 interface{} + if err := json.Unmarshal(a, &j); err != nil { + return false, err + } + if err := json.Unmarshal(b, &j2); err != nil { + return false, err + } + return reflect.DeepEqual(j2, j), nil +} diff --git a/cmd/evm/testdata/1/alloc.json b/cmd/evm/testdata/1/alloc.json new file mode 100644 index 000000000000..cef1a25ff013 --- /dev/null +++ b/cmd/evm/testdata/1/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/1/env.json b/cmd/evm/testdata/1/env.json new file mode 100644 index 000000000000..dd60abd205ac --- /dev/null +++ b/cmd/evm/testdata/1/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "1", + "currentTimestamp": "1000" +} \ No newline at end of file diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json new file mode 100644 index 000000000000..7d3805012d05 --- /dev/null +++ b/cmd/evm/testdata/1/exp.json @@ -0,0 +1,44 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xa410" + } + }, + "result": { + "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "rejected": [ + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x5208" + } +} diff --git a/cmd/evm/testdata/1/txs.json b/cmd/evm/testdata/1/txs.json new file mode 100644 index 000000000000..50b31ff31bcb --- /dev/null +++ b/cmd/evm/testdata/1/txs.json @@ -0,0 +1,26 @@ +[ + { + "gas": "0x5208", + "gasPrice": "0x2", + "hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "input": "0x", + "nonce": "0x0", + "r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb", + "s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600", + "to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "v": "0x1b", + "value": "0x1" + }, + { + "gas": "0x5208", + "gasPrice": "0x2", + "hash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "input": "0x", + "nonce": "0x0", + "r": "0x9500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdb", + "s": "0x7235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600", + "to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192", + "v": "0x1b", + "value": "0x1" + } +] diff --git a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json new file mode 100644 index 000000000000..6e98e7513c45 --- /dev/null +++ b/cmd/evm/testdata/10/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json new file mode 100644 index 000000000000..3a82d46a774b --- /dev/null +++ b/cmd/evm/testdata/10/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md new file mode 100644 index 000000000000..c34be80bb71c --- /dev/null +++ b/cmd/evm/testdata/10/readme.md @@ -0,0 +1,79 
@@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. + +``` +[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 +INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" +``` +Output: +```json +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x10000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x2" + } + ], + "rejected": [ + 3 + ] + } +} +``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json new file mode 100644 index 000000000000..f7c9baa26da9 --- /dev/null +++ b/cmd/evm/testdata/10/txs.json @@ -0,0 +1,70 @@ +[ + { + "input" : "0x", + "gas" : "0x10000001", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", + "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", + "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x3", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", + "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x4", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", + "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json new file mode 100644 index 000000000000..86938230fa75 --- 
/dev/null +++ b/cmd/evm/testdata/11/alloc.json @@ -0,0 +1,25 @@ +{ + "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x61ffff5060046000f3", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x00", + "code" : "0x6001600055", + "nonce" : "0x00", + "storage" : { + } + } +} + diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json new file mode 100644 index 000000000000..37dedf09475a --- /dev/null +++ b/cmd/evm/testdata/11/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "blockHashes" : { + "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" + } +} + diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md new file mode 100644 index 000000000000..d499f8e99fae --- /dev/null +++ b/cmd/evm/testdata/11/readme.md @@ -0,0 +1,13 @@ +## Test missing basefee + +In this test, the `currentBaseFee` is missing from the env portion. +On a live blockchain, the basefee is present in the header, and verified as part of header validation. + +In `evm t8n`, we don't have blocks, so it needs to be added in the `env`instead. + +When it's missing, an error is expected. + +``` +dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null +ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section +``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json new file mode 100644 index 000000000000..c54b0a1f5b4d --- /dev/null +++ b/cmd/evm/testdata/11/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x38600060013960015160005560006000f3", + "gas" : "0x61a80", + "gasPrice" : "0x1", + "nonce" : "0x0", + "value" : "0x186a0", + "v" : "0x1c", + "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", + "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] + diff --git a/cmd/evm/testdata/12/alloc.json b/cmd/evm/testdata/12/alloc.json new file mode 100644 index 000000000000..3ed96894fbca --- /dev/null +++ b/cmd/evm/testdata/12/alloc.json @@ -0,0 +1,11 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "84000000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + } +} + diff --git a/cmd/evm/testdata/12/env.json b/cmd/evm/testdata/12/env.json new file mode 100644 index 000000000000..8ae5465369cc --- /dev/null +++ b/cmd/evm/testdata/12/env.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "currentBaseFee" : "0x20" +} + diff --git a/cmd/evm/testdata/12/readme.md b/cmd/evm/testdata/12/readme.md new file mode 100644 index 
000000000000..b0177ecc24b6 --- /dev/null +++ b/cmd/evm/testdata/12/readme.md @@ -0,0 +1,40 @@ +## Test 1559 balance + gasCap + +This test contains an EIP-1559 consensus issue which happened on Ropsten, where +`geth` did not properly account for the value transfer while doing the check on `max_fee_per_gas * gas_limit`. + +Before the issue was fixed, this invocation allowed the transaction to pass into a block: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +``` + +With the fix applied, the result is: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" +INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5 +INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs" +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ] + } +} +``` + +The transaction is rejected. 
\ No newline at end of file diff --git a/cmd/evm/testdata/12/txs.json b/cmd/evm/testdata/12/txs.json new file mode 100644 index 000000000000..cd683f271c72 --- /dev/null +++ b/cmd/evm/testdata/12/txs.json @@ -0,0 +1,20 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x20", + "accessList" : [ + ] + } +] + diff --git a/cmd/evm/testdata/13/alloc.json b/cmd/evm/testdata/13/alloc.json new file mode 100644 index 000000000000..6e98e7513c45 --- /dev/null +++ b/cmd/evm/testdata/13/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/13/env.json b/cmd/evm/testdata/13/env.json new file mode 100644 index 000000000000..3a82d46a774b --- /dev/null +++ b/cmd/evm/testdata/13/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/13/exp.json b/cmd/evm/testdata/13/exp.json new file mode 100644 index 000000000000..2b049dfb2907 --- /dev/null +++ b/cmd/evm/testdata/13/exp.json @@ -0,0 +1,3 @@ +{ + "body": "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +} diff --git a/cmd/evm/testdata/13/exp2.json b/cmd/evm/testdata/13/exp2.json new file mode 100644 index 000000000000..ba8c9f865b7e --- /dev/null +++ b/cmd/evm/testdata/13/exp2.json @@ -0,0 +1,39 @@ +{ + "result": { + "stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61", + "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d", + "receiptsRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x84d0", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x84d0", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x109a0", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x84d0", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x109a0" + } +} diff --git a/cmd/evm/testdata/13/readme.md b/cmd/evm/testdata/13/readme.md new file mode 100644 index 000000000000..64f52fc9a910 --- /dev/null +++ b/cmd/evm/testdata/13/readme.md @@ -0,0 +1,4 @@ +## Input transactions in RLP form + +This testdata folder is used to examplify how transaction input can be provided in rlp form. +Please see the README in `evm` folder for how this is performed. 
\ No newline at end of file diff --git a/cmd/evm/testdata/13/signed_txs.rlp b/cmd/evm/testdata/13/signed_txs.rlp new file mode 100644 index 000000000000..9d1157ea45d9 --- /dev/null +++ b/cmd/evm/testdata/13/signed_txs.rlp @@ -0,0 +1 @@ +"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" \ No newline at end of file diff --git a/cmd/evm/testdata/13/txs.json b/cmd/evm/testdata/13/txs.json new file mode 100644 index 000000000000..c45ef1e13d1f --- /dev/null +++ b/cmd/evm/testdata/13/txs.json @@ -0,0 +1,34 @@ +[ + { + "input" : "0x", + "gas" : "0x84d0", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [] + }, + { + "input" : "0x", + "gas" : "0x84d0", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/14/alloc.json b/cmd/evm/testdata/14/alloc.json new file mode 100644 index 000000000000..cef1a25ff013 --- /dev/null +++ b/cmd/evm/testdata/14/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/14/env.json b/cmd/evm/testdata/14/env.json new file mode 100644 index 000000000000..0bf1c5cf48a8 --- /dev/null +++ b/cmd/evm/testdata/14/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "12800000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/14/env.uncles.json b/cmd/evm/testdata/14/env.uncles.json new file mode 100644 index 000000000000..83811b95ec1b --- /dev/null +++ b/cmd/evm/testdata/14/env.uncles.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "12800000", + "currentTimestamp": "100035", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000", + "parentUncleHash" : "0x000000000000000000000000000000000000000000000000000000000000beef" +} diff --git a/cmd/evm/testdata/14/exp.json b/cmd/evm/testdata/14/exp.json new file mode 100644 index 000000000000..9bf5635f5ba3 --- /dev/null +++ b/cmd/evm/testdata/14/exp.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000020000000", + "receipts": [], + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/14/exp2.json b/cmd/evm/testdata/14/exp2.json new file mode 100644 index 000000000000..9c9025381f16 --- /dev/null +++ b/cmd/evm/testdata/14/exp2.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff8020000000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/14/exp_berlin.json b/cmd/evm/testdata/14/exp_berlin.json new file mode 100644 index 000000000000..c2bf9531197b --- /dev/null +++ b/cmd/evm/testdata/14/exp_berlin.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff9000000000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/14/readme.md b/cmd/evm/testdata/14/readme.md new file mode 100644 index 000000000000..9d0dc9569c67 --- /dev/null +++ b/cmd/evm/testdata/14/readme.md @@ -0,0 +1,41 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by 
the caller. + +Calculating it (with an empty set of txs) using `London` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=London +INFO [08-30|20:43:09.352] Trie dumping started root=6f0588..7f4bdc +INFO [08-30|20:43:09.352] Trie dumping complete accounts=2 elapsed="82.533µs" +INFO [08-30|20:43:09.352] Wrote file file=alloc.json +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x2000020000000" + } +} +``` +Same thing, but this time providing a non-empty (and non-`emptyKeccak`) unclehash, which leads to a slightly different result: +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.uncles.json --output.result=stdout --state.fork=London +INFO [08-30|20:44:33.102] Trie dumping started root=6f0588..7f4bdc +INFO [08-30|20:44:33.102] Trie dumping complete accounts=2 elapsed="72.91µs" +INFO [08-30|20:44:33.102] Wrote file file=alloc.json +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff8020000000" + } +} +``` + diff --git a/cmd/evm/testdata/14/txs.json b/cmd/evm/testdata/14/txs.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/14/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/15/blockheader.rlp b/cmd/evm/testdata/15/blockheader.rlp new file mode 100644 index 000000000000..1124e8e2da92 --- /dev/null +++ b/cmd/evm/testdata/15/blockheader.rlp @@ -0,0 +1 @@ 
+"0xf901f0a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b0101020383010203a00000000000000000000000000000000000000000000000000000000000000000880000000000000000" \ No newline at end of file diff --git a/cmd/evm/testdata/15/exp.json b/cmd/evm/testdata/15/exp.json new file mode 100644 index 000000000000..1893fdfc08c0 --- /dev/null +++ b/cmd/evm/testdata/15/exp.json @@ -0,0 +1,10 @@ +[ + { + "error": "transaction type not supported", + "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476" + }, + { + "error": "transaction type not supported", + "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/15/exp2.json b/cmd/evm/testdata/15/exp2.json new file mode 100644 index 000000000000..dd5e8a358ce5 --- /dev/null +++ b/cmd/evm/testdata/15/exp2.json @@ -0,0 +1,12 @@ +[ + { + "address": "0xd02d72e067e77158444ef2020ff2d325f929b363", + "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476", + "intrinsicGas": "0x5208" + }, + { + "address": "0xd02d72e067e77158444ef2020ff2d325f929b363", + "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a", + "intrinsicGas": "0x5208" + } +] diff --git a/cmd/evm/testdata/15/exp3.json b/cmd/evm/testdata/15/exp3.json new file mode 100644 index 000000000000..6c46d267cf37 --- /dev/null +++ b/cmd/evm/testdata/15/exp3.json @@ -0,0 +1,47 @@ +[ + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected List" + }, + { + "error": "rlp: expected input list for types.AccessListTx" + }, + { + "error": "transaction type not supported" + }, + { + "error": "transaction type not supported" + } +] diff --git a/cmd/evm/testdata/15/signed_txs.rlp b/cmd/evm/testdata/15/signed_txs.rlp new file mode 100644 index 000000000000..9d1157ea45d9 --- /dev/null +++ b/cmd/evm/testdata/15/signed_txs.rlp @@ -0,0 +1 @@ 
+"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" \ No newline at end of file diff --git a/cmd/evm/testdata/15/signed_txs.rlp.json b/cmd/evm/testdata/15/signed_txs.rlp.json new file mode 100644 index 000000000000..187f40f24ac1 --- /dev/null +++ b/cmd/evm/testdata/15/signed_txs.rlp.json @@ -0,0 +1,4 @@ +{ + "txsRlp" : "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" +} + diff --git a/cmd/evm/testdata/16/exp.json b/cmd/evm/testdata/16/exp.json new file mode 100644 index 000000000000..137ade65135e --- /dev/null +++ b/cmd/evm/testdata/16/exp.json @@ -0,0 +1,13 @@ +[ + { + "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6", + "intrinsicGas": "0x5208" + }, + { + "error": "intrinsic gas too low: have 82, want 21000", + "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b", + "intrinsicGas": "0x5208" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/16/signed_txs.rlp b/cmd/evm/testdata/16/signed_txs.rlp new file mode 100644 index 000000000000..952ced213014 --- /dev/null +++ b/cmd/evm/testdata/16/signed_txs.rlp @@ -0,0 +1 @@ +"0xf8cab86401f8610180018252089411111111111111111111111111111111111111112080c001a0937f65ef1deece46c473b99962678fb7c38425cf303d1e8fa9717eb4b9d012b5a01940c5a5647c4940217ffde1051a5fd92ec8551e275c1787f81f50a2ad84de43b86201f85f018001529411111111111111111111111111111111111111112080c001a0241c3aec732205542a87fef8c76346741e85480bce5a42d05a9a73dac892f84ca04f52e2dfce57f3a02ed10e085e1a154edf38a726da34127c85fc53b4921759c8" \ No newline at end of file diff --git a/cmd/evm/testdata/16/unsigned_txs.json b/cmd/evm/testdata/16/unsigned_txs.json new file mode 100644 index 000000000000..f619589406e4 --- /dev/null +++ b/cmd/evm/testdata/16/unsigned_txs.json @@ -0,0 +1,34 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x1", + "gasPrice": "0x1", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x52", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x1", + "gasPrice": "0x1", + "accessList" : [ + ] + } +] diff --git a/cmd/evm/testdata/17/exp.json b/cmd/evm/testdata/17/exp.json new file mode 100644 index 000000000000..485906041b54 --- /dev/null +++ b/cmd/evm/testdata/17/exp.json @@ -0,0 +1,22 @@ + [ + { + 
"error": "value exceeds 256 bits", + "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82", + "intrinsicGas": "0x5208" + }, + { + "error": "gasPrice exceeds 256 bits", + "address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964", + "hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5", + "intrinsicGas": "0x5208" + }, + { + "error": "invalid transaction v, r, s values", + "hash": "0xf06691c2a803ab7f3c81d06a0c0a896f80f311105c599fc59a9fdbc669356d35" + }, + { + "error": "invalid transaction v, r, s values", + "hash": "0x84703b697ad5b0db25e4f1f98fb6b1adce85b9edb2232eeba9cedd8c6601694b" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/17/rlpdata.txt b/cmd/evm/testdata/17/rlpdata.txt new file mode 100644 index 000000000000..874461fd76bc --- /dev/null +++ b/cmd/evm/testdata/17/rlpdata.txt @@ -0,0 +1,46 @@ +[ + [ + "", + "d", + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 010000000000000000000000000000000000000000000000000000000000000001, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28, + ], + [ + "", + 010000000000000000000000000000000000000000000000000000000000000001, + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 11, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28, + ], + [ + "", + 11, + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 11, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daa, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28, + ], + [ + "", + 11, + 5208, + d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0, + 11, + "", + 1b, + c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d, + 6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb, + ], +] diff --git a/cmd/evm/testdata/17/signed_txs.rlp b/cmd/evm/testdata/17/signed_txs.rlp new file mode 100644 index 000000000000..0e351fb03c14 --- /dev/null +++ b/cmd/evm/testdata/17/signed_txs.rlp @@ -0,0 +1 @@ +"0xf901c8f880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f88080a101000000000000000000000000000000000000000000000000000000000000000182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba1c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daaa06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da16180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb" \ No newline at end of file diff --git a/cmd/evm/testdata/18/README.md b/cmd/evm/testdata/18/README.md new file mode 100644 index 000000000000..360a9bba015a --- /dev/null +++ b/cmd/evm/testdata/18/README.md @@ -0,0 +1,9 @@ +# Invalid rlp + +This folder contains a sample of invalid RLP, and it's expected +that the t9n handles this properly: + +``` +$ go run . 
t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London +ERROR(11): rlp: value size exceeds available input length +``` \ No newline at end of file diff --git a/cmd/evm/testdata/18/invalid.rlp b/cmd/evm/testdata/18/invalid.rlp new file mode 100644 index 000000000000..7ff2824caf0b --- /dev/null +++ b/cmd/evm/testdata/18/invalid.rlp @@ -0,0 +1 @@ +"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3" \ No newline at end of file diff --git a/cmd/evm/testdata/19/alloc.json b/cmd/evm/testdata/19/alloc.json new file mode 100644 index 000000000000..cef1a25ff013 --- /dev/null +++ b/cmd/evm/testdata/19/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/env.json b/cmd/evm/testdata/19/env.json new file mode 100644 index 000000000000..0c64392aff5b --- /dev/null +++ b/cmd/evm/testdata/19/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "13000000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/19/exp_arrowglacier.json b/cmd/evm/testdata/19/exp_arrowglacier.json new file mode 100644 index 000000000000..9cf56ffafc33 --- /dev/null +++ b/cmd/evm/testdata/19/exp_arrowglacier.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000000200000", + "receipts": [], + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/exp_london.json b/cmd/evm/testdata/19/exp_london.json new file mode 100644 index 000000000000..a06bc8ca69f0 --- /dev/null +++ b/cmd/evm/testdata/19/exp_london.json @@ -0,0 +1,12 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000080000000", + "receipts": [], + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/readme.md b/cmd/evm/testdata/19/readme.md new file mode 100644 index 000000000000..5fae183f4886 --- /dev/null +++ b/cmd/evm/testdata/19/readme.md @@ -0,0 +1,9 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, +this time on `ArrowGlacier` (Eip 4345). + +Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier +``` \ No newline at end of file diff --git a/cmd/evm/testdata/19/txs.json b/cmd/evm/testdata/19/txs.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/19/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/2/alloc.json b/cmd/evm/testdata/2/alloc.json new file mode 100644 index 000000000000..a9720afc9367 --- /dev/null +++ b/cmd/evm/testdata/2/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x6001600053600160006001f0ff00", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/2/env.json b/cmd/evm/testdata/2/env.json new file mode 100644 index 000000000000..ebadd3f06ac7 --- /dev/null +++ b/cmd/evm/testdata/2/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8" +} \ No newline at end of file diff --git a/cmd/evm/testdata/2/readme.md b/cmd/evm/testdata/2/readme.md new file mode 100644 index 000000000000..c116f0e79274 --- /dev/null +++ b/cmd/evm/testdata/2/readme.md @@ -0,0 +1 @@ +These files examplify a selfdestruct to the `0`-address. 
\ No newline at end of file diff --git a/cmd/evm/testdata/2/txs.json b/cmd/evm/testdata/2/txs.json new file mode 100644 index 000000000000..304445858884 --- /dev/null +++ b/cmd/evm/testdata/2/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x1b", + "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b", + "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28", + "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/20/exp.json b/cmd/evm/testdata/20/exp.json new file mode 100644 index 000000000000..7bec6cefd695 --- /dev/null +++ b/cmd/evm/testdata/20/exp.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf902d9f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8f8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600c0", + "hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899" +} diff --git a/cmd/evm/testdata/20/header.json b/cmd/evm/testdata/20/header.json new file mode 100644 index 000000000000..fb9b7fc5639c --- /dev/null +++ b/cmd/evm/testdata/20/header.json @@ -0,0 +1,14 @@ +{ + "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e", + "miner": "0xe997a23b159e2e2a5ce72333262972374b15425c", + "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x1000", + "number": "0xc3be", + "gasLimit": "0x50785", + "gasUsed": "0x0", + "timestamp": "0x55c5277e", + "extraData": 
"0x476574682f76312e302e312f6c696e75782f676f312e342e32", + "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf", + "nonce": "0x97435673d874f7c8" +} diff --git a/cmd/evm/testdata/20/ommers.json b/cmd/evm/testdata/20/ommers.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/20/ommers.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/20/readme.md b/cmd/evm/testdata/20/readme.md new file mode 100644 index 000000000000..2c448a96e6e7 --- /dev/null +++ b/cmd/evm/testdata/20/readme.md @@ -0,0 +1,11 @@ +# Block building + +This test shows how `b11r` can be used to assemble an unsealed block. + +```console +$ go run . b11r --input.header=testdata/20/header.json --input.txs=testdata/20/txs.rlp --input.ommers=testdata/20/ommers.json --output.block=stdout +{ + "rlp": "0xf90216f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8c0c0", + "hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899" +} +``` diff --git a/cmd/evm/testdata/20/txs.rlp b/cmd/evm/testdata/20/txs.rlp new file mode 100644 index 000000000000..3599ff06542b --- /dev/null +++ b/cmd/evm/testdata/20/txs.rlp @@ -0,0 +1 @@ +"0xf8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600" \ No newline at end of file diff --git a/cmd/evm/testdata/21/clique.json b/cmd/evm/testdata/21/clique.json new file mode 100644 index 000000000000..84fa259a0d65 --- /dev/null +++ b/cmd/evm/testdata/21/clique.json @@ -0,0 +1,6 @@ +{ + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "voted": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "authorize": false, + "vanity": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +} diff --git a/cmd/evm/testdata/21/exp-clique.json b/cmd/evm/testdata/21/exp-clique.json new file mode 100644 index 000000000000..c990ba8aa6ba --- /dev/null +++ b/cmd/evm/testdata/21/exp-clique.json @@ -0,0 +1,4 @@ +{ + "rlp": 
"0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0", + "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7" +} diff --git a/cmd/evm/testdata/21/exp.json b/cmd/evm/testdata/21/exp.json new file mode 100644 index 000000000000..b3e5e7a83118 --- /dev/null +++ b/cmd/evm/testdata/21/exp.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0", + "hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb" +} diff --git a/cmd/evm/testdata/21/header.json b/cmd/evm/testdata/21/header.json new file mode 100644 index 000000000000..62abe3cc2cc5 --- /dev/null +++ b/cmd/evm/testdata/21/header.json @@ -0,0 +1,11 @@ +{ + "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e", + "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": 
"0x1000", + "number": "0xc3be", + "gasLimit": "0x50785", + "gasUsed": "0x0", + "timestamp": "0x55c5277e", + "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf" +} diff --git a/cmd/evm/testdata/21/ommers.json b/cmd/evm/testdata/21/ommers.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/21/ommers.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/21/readme.md b/cmd/evm/testdata/21/readme.md new file mode 100644 index 000000000000..b70f106ffcd6 --- /dev/null +++ b/cmd/evm/testdata/21/readme.md @@ -0,0 +1,23 @@ +# Sealed block building + +This test shows how `b11r` can be used to assemble a sealed block. + +## Ethash + +```console +$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.ethash --seal.ethash.mode=test --output.block=stdout +{ + "rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0", + "hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb" +} +``` + +## Clique + +```console +$ go run . 
b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.clique=testdata/21/clique.json --output.block=stdout +{ + "rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0", + "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7" +} +``` diff --git a/cmd/evm/testdata/21/txs.rlp b/cmd/evm/testdata/21/txs.rlp new file mode 100644 index 000000000000..e815397b333b --- /dev/null +++ b/cmd/evm/testdata/21/txs.rlp @@ -0,0 +1 @@ +"c0" diff --git a/cmd/evm/testdata/22/exp-clique.json b/cmd/evm/testdata/22/exp-clique.json new file mode 100644 index 000000000000..c990ba8aa6ba --- /dev/null +++ b/cmd/evm/testdata/22/exp-clique.json @@ -0,0 +1,4 @@ +{ + "rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0", + "hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7" +} diff --git a/cmd/evm/testdata/22/exp.json b/cmd/evm/testdata/22/exp.json new file mode 100644 index 000000000000..14fd81997d56 --- /dev/null +++ b/cmd/evm/testdata/22/exp.json @@ -0,0 +1,4 @@ +{ + "rlp": 
"0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000", + "hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755" +} diff --git a/cmd/evm/testdata/22/header.json b/cmd/evm/testdata/22/header.json new file mode 100644 index 000000000000..62abe3cc2cc5 --- /dev/null +++ b/cmd/evm/testdata/22/header.json @@ -0,0 +1,11 @@ +{ + "parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e", + "stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x1000", + "number": "0xc3be", + "gasLimit": "0x50785", + "gasUsed": "0x0", + "timestamp": "0x55c5277e", + "mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf" +} diff --git a/cmd/evm/testdata/22/ommers.json b/cmd/evm/testdata/22/ommers.json new file mode 100644 index 000000000000..997015b3cedf --- /dev/null +++ b/cmd/evm/testdata/22/ommers.json @@ -0,0 +1 @@ +["0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0","0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0"] diff --git a/cmd/evm/testdata/22/readme.md b/cmd/evm/testdata/22/readme.md new file mode 100644 index 000000000000..2cac8a2434a6 --- /dev/null +++ b/cmd/evm/testdata/22/readme.md @@ -0,0 +1,11 @@ +# Building blocks with ommers + +This test shows how `b11r` can chain together ommer assembles into a canonical block. + +```console +$ echo "{ \"ommers\": [`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`,`go run . 
b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`]}" | go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --input.ommers=stdin --output.block=stdout +{ + "rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000", + "hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755" +} +``` diff --git a/cmd/evm/testdata/22/txs.rlp b/cmd/evm/testdata/22/txs.rlp new file mode 100644 index 000000000000..e815397b333b --- /dev/null +++ 
b/cmd/evm/testdata/22/txs.rlp @@ -0,0 +1 @@ +"c0" diff --git a/cmd/evm/testdata/23/alloc.json b/cmd/evm/testdata/23/alloc.json new file mode 100644 index 000000000000..239b3553f988 --- /dev/null +++ b/cmd/evm/testdata/23/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x6001", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} diff --git a/cmd/evm/testdata/23/env.json b/cmd/evm/testdata/23/env.json new file mode 100644 index 000000000000..1b46321512a0 --- /dev/null +++ b/cmd/evm/testdata/23/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x05", + "currentTimestamp" : "0x03e8" +} diff --git a/cmd/evm/testdata/23/exp.json b/cmd/evm/testdata/23/exp.json new file mode 100644 index 000000000000..e51f37d9c7c9 --- /dev/null +++ b/cmd/evm/testdata/23/exp.json @@ -0,0 +1,25 @@ +{ + "result": { + "stateRoot": "0x65334305e4accfa18352deb24f007b837b5036425b0712cf0e65a43bfa95154d", + "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", + "receiptsRoot": "0xf951f9396af203499cc7d379715a9110323de73967c5700e2f424725446a3c76", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x520b", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x520b", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x520b" + } +} diff --git a/cmd/evm/testdata/23/readme.md b/cmd/evm/testdata/23/readme.md new file mode 100644 index 000000000000..85fe8db66c95 --- /dev/null +++ b/cmd/evm/testdata/23/readme.md @@ -0,0 +1 @@ +These files examplify how to sign a transaction using the pre-EIP155 scheme. 
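As a rough sketch of how this fixture might be exercised, the command below follows the flag conventions from the other testdata readmes; the `--state.fork=Berlin` choice is an assumption, since the test is only about the signer honouring `"protected": false` and producing a pre-EIP155 (chain-id-less) signature:

```
./evm t8n --input.alloc=./testdata/23/alloc.json --input.txs=./testdata/23/txs.json --input.env=./testdata/23/env.json --output.result=stdout --state.fork=Berlin
```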
diff --git a/cmd/evm/testdata/23/txs.json b/cmd/evm/testdata/23/txs.json new file mode 100644 index 000000000000..22f3840f84b9 --- /dev/null +++ b/cmd/evm/testdata/23/txs.json @@ -0,0 +1,15 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "protected": false + } +] diff --git a/cmd/evm/testdata/3/alloc.json b/cmd/evm/testdata/3/alloc.json new file mode 100644 index 000000000000..dca318ee5462 --- /dev/null +++ b/cmd/evm/testdata/3/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x600140", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/3/env.json b/cmd/evm/testdata/3/env.json new file mode 100644 index 000000000000..e283eff46151 --- /dev/null +++ b/cmd/evm/testdata/3/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x05", + "currentTimestamp" : "0x03e8", + "blockHashes" : { "1" : "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"} +} \ No newline at end of file diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json new file mode 100644 index 000000000000..71b3d2f559cf --- /dev/null +++ b/cmd/evm/testdata/3/exp.json @@ -0,0 +1,38 @@ +{ + "alloc": { + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { + "code": "0x600140", + "balance": "0xde0b6b3a76586a0" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x521f" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xde0b6b3a7622741", + "nonce": "0x1" + } + }, + "result": { + "stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1", + "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", + "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x521f", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x521f", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x521f" + } +} diff --git a/cmd/evm/testdata/3/readme.md b/cmd/evm/testdata/3/readme.md new file mode 100644 index 000000000000..499f03d7aa7f --- /dev/null +++ b/cmd/evm/testdata/3/readme.md @@ -0,0 +1,2 @@ +These files examplify a transition where a transaction (excuted on block 5) requests +the blockhash for block `1`. diff --git a/cmd/evm/testdata/3/txs.json b/cmd/evm/testdata/3/txs.json new file mode 100644 index 000000000000..304445858884 --- /dev/null +++ b/cmd/evm/testdata/3/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x1b", + "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b", + "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28", + "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/4/alloc.json b/cmd/evm/testdata/4/alloc.json new file mode 100644 index 000000000000..fadf2bdc4ece --- /dev/null +++ b/cmd/evm/testdata/4/alloc.json @@ -0,0 +1,16 @@ +{ + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x600340", + "nonce" : "0x00", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/4/env.json b/cmd/evm/testdata/4/env.json new file mode 100644 index 000000000000..e283eff46151 --- /dev/null +++ b/cmd/evm/testdata/4/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentGasLimit" : "0x3b9aca00", + "currentNumber" : "0x05", + "currentTimestamp" : "0x03e8", + "blockHashes" : { "1" : "0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"} +} \ No newline at end of file diff --git a/cmd/evm/testdata/4/readme.md b/cmd/evm/testdata/4/readme.md new file mode 100644 index 000000000000..08840d37bd9c --- /dev/null +++ b/cmd/evm/testdata/4/readme.md @@ -0,0 +1,3 @@ +These files examplify a transition where a transaction (excuted on block 5) requests +the blockhash for block `4`, but where the hash for that block is missing. +It's expected that executing these should cause `exit` with errorcode `4`. 
diff --git a/cmd/evm/testdata/4/txs.json b/cmd/evm/testdata/4/txs.json new file mode 100644 index 000000000000..304445858884 --- /dev/null +++ b/cmd/evm/testdata/4/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x", + "gas" : "0x5f5e100", + "gasPrice" : "0x1", + "nonce" : "0x0", + "to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "value" : "0x186a0", + "v" : "0x1b", + "r" : "0x88544c93a564b4c28d2ffac2074a0c55fdd4658fe0d215596ed2e32e3ef7f56b", + "s" : "0x7fb4075d54190f825d7c47bb820284757b34fd6293904a93cddb1d3aa961ac28", + "hash" : "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81" + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/5/alloc.json b/cmd/evm/testdata/5/alloc.json new file mode 100644 index 000000000000..9e26dfeeb6e6 --- /dev/null +++ b/cmd/evm/testdata/5/alloc.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/cmd/evm/testdata/5/env.json b/cmd/evm/testdata/5/env.json new file mode 100644 index 000000000000..1085f63e629a --- /dev/null +++ b/cmd/evm/testdata/5/env.json @@ -0,0 +1,11 @@ +{ + "currentCoinbase": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "1", + "currentTimestamp": "1000", + "ommers": [ + {"delta": 1, "address": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" }, + {"delta": 2, "address": "0xcccccccccccccccccccccccccccccccccccccccc" } + ] +} \ No newline at end of file diff --git a/cmd/evm/testdata/5/exp.json b/cmd/evm/testdata/5/exp.json new file mode 100644 index 000000000000..7d715672c50c --- /dev/null +++ b/cmd/evm/testdata/5/exp.json @@ -0,0 +1,23 @@ +{ + "alloc": { + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { + "balance": "0x88" + }, + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { + "balance": "0x70" + }, + "0xcccccccccccccccccccccccccccccccccccccccc": { + "balance": "0x60" + } + }, + "result": { + "stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/5/readme.md b/cmd/evm/testdata/5/readme.md new file mode 100644 index 000000000000..e2b608face9c --- /dev/null +++ b/cmd/evm/testdata/5/readme.md @@ -0,0 +1 @@ +These files examplify a transition where there are no transcations, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2). 
\ No newline at end of file diff --git a/cmd/evm/testdata/5/txs.json b/cmd/evm/testdata/5/txs.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/5/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/7/alloc.json b/cmd/evm/testdata/7/alloc.json new file mode 100644 index 000000000000..cef1a25ff013 --- /dev/null +++ b/cmd/evm/testdata/7/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/7/env.json b/cmd/evm/testdata/7/env.json new file mode 100644 index 000000000000..8fd9bc041b8c --- /dev/null +++ b/cmd/evm/testdata/7/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentDifficulty": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff020000", + "currentGasLimit": "0x750a163df65e8a", + "currentNumber": "5", + "currentTimestamp": "1000" +} \ No newline at end of file diff --git a/cmd/evm/testdata/7/readme.md b/cmd/evm/testdata/7/readme.md new file mode 100644 index 000000000000..c9826e0ba67e --- /dev/null +++ b/cmd/evm/testdata/7/readme.md @@ -0,0 +1,7 @@ +This is a test for HomesteadToDao, checking if the +DAO-transition works + +Example: +``` +./statet8n --input.alloc=./testdata/7/alloc.json --input.txs=./testdata/7/txs.json --input.env=./testdata/7/env.json --output.alloc=stdout --state.fork=HomesteadToDaoAt5 +``` \ No newline at end of file diff --git a/cmd/evm/testdata/7/txs.json b/cmd/evm/testdata/7/txs.json new file mode 100644 index 000000000000..fe51488c7066 --- /dev/null +++ b/cmd/evm/testdata/7/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/8/alloc.json b/cmd/evm/testdata/8/alloc.json new file mode 100644 index 000000000000..1d1b5f86c6e7 --- /dev/null +++ b/cmd/evm/testdata/8/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x5854505854", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000", + "nonce": "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/8/env.json b/cmd/evm/testdata/8/env.json new file mode 100644 index 000000000000..8b9193472461 --- /dev/null +++ b/cmd/evm/testdata/8/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x1000000000", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} \ No newline at end of file diff --git a/cmd/evm/testdata/8/readme.md b/cmd/evm/testdata/8/readme.md new file mode 100644 index 000000000000..e021cd7e2ee4 --- /dev/null +++ b/cmd/evm/testdata/8/readme.md @@ -0,0 +1,63 @@ +## EIP-2930 testing + +This test contains testcases for EIP-2930, which uses transactions with access lists. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x5854505854`: `PC ;SLOAD; POP; PC; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(3)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are three transactions, each invokes the contract above. + +1. ACL-transaction, which contains some non-used slots +2. Regular transaction +3. 
ACL-transaction, which contains the slots `1` and `3` in `0x000000000000000000000000000000000000aaaa` + +## Execution + +Running it yields: +``` +dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x47c86","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x49cf6","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x494be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} + +``` + +Simlarly, we can provide the input transactions via `stdin` instead of as file: + +``` +dir=./testdata/8 \ + && cat $dir/txs.json | jq "{txs: .}" \ + | ./evm t8n --state.fork=Berlin \ + --input.alloc=$dir/alloc.json \ + --input.txs=stdin \ + --input.env=$dir/env.json \ + --trace \ + && cat trace-* | grep SLOAD + +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x47c86","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x49cf6","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x494be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/8 && ./evm t8n --state.fork=Istanbul --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json +INFO [01-21|23:21:51.265] rejected tx index=0 hash=d2818d..6ab3da error="tx type not supported" +INFO [01-21|23:21:51.265] rejected tx index=1 hash=26ea00..81c01b from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [01-21|23:21:51.265] rejected tx index=2 hash=698d01..369cee error="tx type not supported" +``` +Number `1` and `3` are not applicable, and therefore number `2` has wrong nonce. 
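(As a side note on the third transaction above: per `txs.json`, its access list covers storage slots `0` and `3` of the `0xaaaa` contract, which is why its traced `SLOAD`s cost `0x64` (warm) rather than `0x834` (cold). A minimal Go sketch of how such a transaction could be built, assuming go-ethereum's typed-transaction API (`types.AccessListTx`, introduced with Berlin):)

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Third (access-list) transaction from cmd/evm/testdata/8/txs.json,
	// left unsigned here; the secretKey field is handled by `evm t8n` itself.
	to := common.HexToAddress("0x000000000000000000000000000000000000aaaa")
	tx := types.NewTx(&types.AccessListTx{
		ChainID:  big.NewInt(1),
		Nonce:    2,
		GasPrice: big.NewInt(1),
		Gas:      0x4ef00,
		To:       &to,
		Value:    big.NewInt(1),
		AccessList: types.AccessList{{
			Address: to,
			StorageKeys: []common.Hash{
				common.BigToHash(big.NewInt(0)), // slot 0 (warm SLOAD)
				common.BigToHash(big.NewInt(3)), // slot 3 (warm SLOAD)
			},
		}},
	})
	fmt.Println("tx type:", tx.Type()) // 1 (EIP-2930)
}
```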
\ No newline at end of file diff --git a/cmd/evm/testdata/8/txs.json b/cmd/evm/testdata/8/txs.json new file mode 100644 index 000000000000..35142ba234b0 --- /dev/null +++ b/cmd/evm/testdata/8/txs.json @@ -0,0 +1,58 @@ +[ + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x1", + "type" : "0x1", + "accessList": [ + {"address": "0x0000000000000000000000000000000000000000", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x2", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "chainId": "0x1", + "input": "0x", + "nonce": "0x2", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x1", + "type" : "0x1", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json new file mode 100644 index 000000000000..c14e38e84515 --- /dev/null +++ b/cmd/evm/testdata/9/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x58585454", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000", + "nonce": "0x00" + } +} diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json new file mode 100644 index 000000000000..ec5164b9952e --- /dev/null +++ b/cmd/evm/testdata/9/env.json @@ -0,0 +1,8 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasTarget": "0x1000000000", + "currentBaseFee": "0x3B9ACA00", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md new file mode 100644 index 000000000000..88f0f12aaaa5 --- /dev/null +++ b/cmd/evm/testdata/9/readme.md @@ -0,0 +1,75 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are two transactions, each invokes the contract above. + +1. EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` +2. 
Legacy transaction + +## Execution + +Running it yields: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa +ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +We can also get the post-alloc: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0xbfc02677a000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff104fcfea7800", + "nonce": "0x2" + } + } +} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported +``` + +It fails, due to the `evm t8n` cannot sign them in with the given signer. We can bypass that, however, +by feeding it presigned transactions, located in `txs_signed.json`. + +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json +INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" +INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [05-07|12:28:42.073] Wrote file file=alloc.json +INFO [05-07|12:28:42.073] Wrote file file=result.json +``` + +Number `0` is not applicable, and therefore number `1` has wrong nonce, and both are rejected. 
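(For comparison with the JSON form in `txs.json`, the first transaction maps roughly onto go-ethereum's typed-transaction API as sketched below, assuming `types.DynamicFeeTx` as introduced with London; `maxPriorityFeePerGas` corresponds to `GasTipCap` and `maxFeePerGas` to `GasFeeCap`.)

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// First (EIP-1559) transaction from cmd/evm/testdata/9/txs.json, unsigned.
	to := common.HexToAddress("0x000000000000000000000000000000000000aaaa")
	tx := types.NewTx(&types.DynamicFeeTx{
		ChainID:   big.NewInt(1),
		Nonce:     0,
		GasTipCap: big.NewInt(2),           // maxPriorityFeePerGas
		GasFeeCap: big.NewInt(0x12A05F200), // maxFeePerGas
		Gas:       0x4ef00,
		To:        &to,
		Value:     big.NewInt(0),
		AccessList: types.AccessList{{
			Address:     to,
			StorageKeys: []common.Hash{common.BigToHash(big.NewInt(0))}, // slot 0
		}},
	})
	fmt.Println("tx type:", tx.Type()) // 2 (EIP-1559)
}
```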
+ diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json new file mode 100644 index 000000000000..740abce079d8 --- /dev/null +++ b/cmd/evm/testdata/9/txs.json @@ -0,0 +1,37 @@ +[ + { + "gas": "0x4ef00", + "maxPriorityFeePerGas": "0x2", + "maxFeePerGas": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "type" : "0x2", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh new file mode 100644 index 000000000000..250238d1694a --- /dev/null +++ b/cmd/evm/transition-test.sh @@ -0,0 +1,214 @@ +#!/bin/bash +ticks="\`\`\`" + +function showjson(){ + echo "\`$1\`:" + echo "${ticks}json" + cat $1 + echo "" + echo "$ticks" +} +function demo(){ + echo "$ticks" + echo "$1" + $1 + echo "" + echo "$ticks" + echo "" +} +function tick(){ + echo "$ticks" +} + +cat << EOF +## EVM state transition tool + +The \`evm t8n\` tool is a stateless state transition utility. It is a utility +which can + +1. Take a prestate, including + - Accounts, + - Block context information, + - Previous blockshashes (*optional) +2. Apply a set of transactions, +3. Apply a mining-reward (*optional), +4. And generate a post-state, including + - State root, transaction root, receipt root, + - Information about rejected transactions, + - Optionally: a full or partial post-state dump + +## Specification + +The idea is to specify the behaviour of this binary very _strict_, so that other +node implementors can build replicas based on their own state-machines, and the +state generators can swap between a \`geth\`-based implementation and a \`parityvm\`-based +implementation. + +### Command line params + +Command line params that has to be supported are +$(tick) + +` ./evm t8n -h | grep "trace\|output\|state\."` + +$(tick) + +### Error codes and output + +All logging should happen against the \`stderr\`. +There are a few (not many) errors that can occur, those are defined below. + +#### EVM-based errors (\`2\` to \`9\`) + +- Other EVM error. Exit code \`2\` +- Failed configuration: when a non-supported or invalid fork was specified. Exit code \`3\`. +- Block history is not supplied, but needed for a \`BLOCKHASH\` operation. If \`BLOCKHASH\` + is invoked targeting a block which history has not been provided for, the program will + exit with code \`4\`. + +#### IO errors (\`10\`-\`20\`) + +- Invalid input json: the supplied data could not be marshalled. + The program will exit with code \`10\` +- IO problems: failure to load or save files, the program will exit with code \`11\` + +EOF + +# This should exit with 3 +./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --state.fork=Frontier+1346 2>/dev/null +if [ $? 
!= 3 ]; then + echo "Failed, exitcode should be 3" +fi +cat << EOF +## Examples +### Basic usage + +Invoking it with the provided example files +EOF +cmd="./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json" +tick;echo "$cmd"; tick +$cmd 2>/dev/null +echo "Two resulting files:" +echo "" +showjson alloc.json +showjson result.json +echo "" + +echo "We can make them spit out the data to e.g. \`stdout\` like this:" +cmd="./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout" +tick;echo "$cmd"; tick +output=`$cmd 2>/dev/null` +echo "Output:" +echo "${ticks}json" +echo "$output" +echo "$ticks" + +cat << EOF + +## About Ommers + +Mining rewards and ommer rewards might need to be added. This is how those are applied: + +- \`block_reward\` is the block mining reward for the miner (\`0xaa\`), of a block at height \`N\`. +- For each ommer (mined by \`0xbb\`), with blocknumber \`N-delta\` + - (where \`delta\` is the difference between the current block and the ommer) + - The account \`0xbb\` (ommer miner) is awarded \`(8-delta)/8 * block_reward\` + - The account \`0xaa\` (block miner) is awarded \`block_reward / 32\` + +To make \`evm t8n\` apply these, the following inputs are required: + +- \`state.reward\` + - For ethash, it is \`5000000000000000000\` \`wei\`, + - If this is not defined, mining rewards are not applied, + - A value of \`0\` is valid, and causes accounts to be 'touched'. +- For each ommer, the tool needs to be given an \`address\` and a \`delta\`. This + is done via the \`env\`. + +Note: the tool does not verify that e.g. the normal uncle rules apply, +and allows e.g. two uncles at the same height, and does not check the uncle distance. This means that +the tool allows for a negative uncle reward (distance > 8) + +Example: +EOF + +showjson ./testdata/5/env.json + +echo "When applying this, using a reward of \`0x80\`" +cmd="./evm t8n --input.alloc=./testdata/5/alloc.json --input.txs=./testdata/5/txs.json --input.env=./testdata/5/env.json --output.alloc=stdout --state.reward=0x80" +output=`$cmd 2>/dev/null` +echo "Output:" +echo "${ticks}json" +echo "$output" +echo "$ticks" + +echo "### Future EIPs" +echo "" +echo "It is also possible to experiment with future EIPs that are not yet defined in a hard fork." +echo "For example, putting EIP-1344 into Frontier: " +cmd="./evm t8n --state.fork=Frontier+1344 --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json" +tick;echo "$cmd"; tick +echo "" + +echo "### Block history" +echo "" +echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`."
+echo "If a required blockhash is not provided, the exit code should be \`4\`:" +echo "Example where blockhashes are provided: " +demo "./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" +cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" +tick && echo $cmd && tick +echo "$ticks" +cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 +echo "$ticks" +echo "" + +echo "In this example, the caller has not provided the required blockhash:" +cmd="./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace" +tick && echo $cmd && $cmd +errc=$? +tick +echo "Error code: $errc" +echo "" + +echo "### Chaining" +echo "" +echo "Another thing that can be done, is to chain invocations:" +cmd1="./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout" +cmd2="./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json" +echo "$ticks" +echo "$cmd1 | $cmd2" +output=$($cmd1 | $cmd2 ) +echo $output +echo "$ticks" +echo "What happened here, is that we first applied two identical transactions, so the second one was rejected. " +echo "Then, taking the poststate alloc as the input for the next state, we tried again to include" +echo "the same two transactions: this time, both failed due to too low nonce." +echo "" +echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the" +echo "actual blocknumber (exposed to the EVM) would not increase." +echo "" + +echo "### Transactions in RLP form" +echo "" +echo "It is possible to provide already-signed transactions as input to, using an \`input.txs\` which ends with the \`rlp\` suffix." +echo "The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible" +echo "to use the evm to go from \`json\` input to \`rlp\` input." +echo "" +echo "The following command takes **json** the transactions in \`./testdata/13/txs.json\` and signs them. After execution, they are output to \`signed_txs.rlp\`.:" +demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp" +echo "The \`output.body\` is the rlp-list of transactions, encoded in hex and placed in a string a'la \`json\` encoding rules:" +demo "cat signed_txs.rlp" +echo "We can use \`rlpdump\` to check what the contents are: " +echo "$ticks" +echo "rlpdump -hex \$(cat signed_txs.rlp | jq -r )" +rlpdump -hex $(cat signed_txs.rlp | jq -r ) +echo "$ticks" +echo "Now, we can now use those (or any other already signed transactions), as input, like so: " +demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json" + +echo "You might have noticed that the results from these two invocations were stored in two separate files. " +echo "And we can now finally check that they match." 
+echo "$ticks" +echo "cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot" +cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot +echo "$ticks" diff --git a/cmd/faucet/README.md b/cmd/faucet/README.md new file mode 100644 index 000000000000..364689a78277 --- /dev/null +++ b/cmd/faucet/README.md @@ -0,0 +1,50 @@ +# Faucet + +The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks. + +Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and share the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user requesting again for a pre-configured amount of time, proportional to the amount of Ether requested. + +## Operation + +The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files. + +First thing's first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network infos. Each of the following flags must be set: + +- `--genesis` is a path to a file containin the network `genesis.json` +- `--network` is the devp2p network id used during connection +- `--bootnodes` is a list of `enode://` ids to join the network through + +The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable). + +## Funding + +To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via: + +- `--account.json` is a path to the Ethereum account's JSON key file +- `--account.pass` is a path to a text file with the decryption passphrase + +The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via: + +- `--faucet.amount` is the number of Ethers to send by default +- `--faucet.minutes` is the time to wait before allowing a rerequest +- `--faucet.tiers` is the funding tiers to support (x3 time, x2.5 funds) + +## Sybil protection + +To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers. + +Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via: + +- `--captcha.token` is the API token for ReCaptcha +- `--captcha.secret` is the API secret for ReCaptcha + +Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data: + +- `--twitter.token` is the Bearer token for `v2` API access +- `--twitter.token.v1` is the Bearer token for `v1` API access + +Sybil protection via Facebook uses the website to directly download post data thus does not currently require an API configuration. + +## Miscellaneous + +Beside the above - mostly essential - CLI flags, there are a number that can be used to fine tune the `faucet`'s operation. Please see `faucet --help` for a full list. 
\ No newline at end of file diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 77938efabd90..2d9d7a1e99f2 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . -// faucet is a Ether faucet backed by a light client. +// faucet is an Ether faucet backed by a light client. package main //go:generate go-bindata -nometadata -o website.go faucet.html @@ -43,18 +43,18 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/params" @@ -82,6 +82,12 @@ var ( noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication") logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet") + + twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API") + twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API") + + goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config") + rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config") ) var ( @@ -141,28 +147,24 @@ func main() { log.Crit("Failed to render the faucet template", "err", err) } // Load and parse the genesis block requested by the user - blob, err := ioutil.ReadFile(*genesisFlag) + genesis, err := getGenesis(genesisFlag, *goerliFlag, *rinkebyFlag) if err != nil { - log.Crit("Failed to read genesis block contents", "genesis", *genesisFlag, "err", err) - } - genesis := new(core.Genesis) - if err = json.Unmarshal(blob, genesis); err != nil { - log.Crit("Failed to parse genesis block json", "err", err) + log.Crit("Failed to parse genesis config", "err", err) } // Convert the bootnodes to internal enode representations - var enodes []*discv5.Node + var enodes []*enode.Node for _, boot := range strings.Split(*bootFlag, ",") { - if url, err := discv5.ParseNode(boot); err == nil { + if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil { enodes = append(enodes, url) } else { log.Error("Failed to parse bootnode URL", "url", boot, "err", err) } } // Load up the account key and decrypt its password - if blob, err = ioutil.ReadFile(*accPassFlag); err != nil { + blob, err := ioutil.ReadFile(*accPassFlag) + if err != nil { log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err) } - // Delete trailing newline in password pass := strings.TrimSuffix(string(blob), "\n") ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP) @@ -170,11 +172,12 @@ func main() { log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", 
err) } acc, err := ks.Import(blob, pass, pass) - if err != nil { + if err != nil && err != keystore.ErrAccountAlreadyExists { log.Crit("Failed to import faucet signer account", "err", err) } - ks.Unlock(acc, pass) - + if err := ks.Unlock(acc, pass); err != nil { + log.Crit("Failed to unlock faucet signer account", "err", err) + } // Assemble and start the faucet light service faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes()) if err != nil { @@ -209,7 +212,7 @@ type faucet struct { nonce uint64 // Current pending nonce of the faucet price *big.Int // Current gas price to issue funds with - conns []*websocket.Conn // Currently live websocket connections + conns []*wsConn // Currently live websocket connections timeouts map[string]time.Time // History of users and their funding timeouts reqs []*request // Currently pending funding requests update chan struct{} // Channel to signal request updates @@ -217,7 +220,14 @@ type faucet struct { lock sync.RWMutex // Lock protecting the faucet's internals } -func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { +// wsConn wraps a websocket connection with a write mutex as the underlying +// websocket library does not synchronize access to the stream. +type wsConn struct { + conn *websocket.Conn + wlock sync.Mutex +} + +func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { // Assemble the raw devp2p protocol stack stack, err := node.New(&node.Config{ Name: "geth", @@ -235,23 +245,22 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u if err != nil { return nil, err } + // Assemble the Ethereum light client protocol - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - cfg := eth.DefaultConfig - cfg.SyncMode = downloader.LightSync - cfg.NetworkId = network - cfg.Genesis = genesis - return les.New(ctx, &cfg) - }); err != nil { - return nil, err + cfg := ethconfig.Defaults + cfg.SyncMode = downloader.LightSync + cfg.NetworkId = network + cfg.Genesis = genesis + utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash()) + + lesBackend, err := les.New(stack, &cfg) + if err != nil { + return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err) } + // Assemble the ethstats monitoring and reporting service' if stats != "" { - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - var serv *les.LightEthereum - ctx.Service(&serv) - return ethstats.New(stats, nil, serv) - }); err != nil { + if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil { return nil, err } } @@ -268,7 +277,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u // Attach to the client and retrieve and interesting metadatas api, err := stack.Attach() if err != nil { - stack.Stop() + stack.Close() return nil, err } client := ethclient.NewClient(api) @@ -318,13 +327,14 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { defer conn.Close() f.lock.Lock() - f.conns = append(f.conns, conn) + wsconn := &wsConn{conn: conn} + f.conns = append(f.conns, wsconn) f.lock.Unlock() defer func() { f.lock.Lock() for i, c := range f.conns { - if c == conn { + if c.conn == conn { f.conns = append(f.conns[:i], f.conns[i+1:]...) 
break } @@ -352,7 +362,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if head == nil || balance == nil { // Report the faucet offline until initial stats are ready //lint:ignore ST1005 This error is to be displayed in the browser - if err = sendError(conn, errors.New("Faucet offline")); err != nil { + if err = sendError(wsconn, errors.New("Faucet offline")); err != nil { log.Warn("Failed to send faucet error to client", "err", err) return } @@ -360,16 +370,19 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } } // Send over the initial stats and the latest header - if err = send(conn, map[string]interface{}{ + f.lock.RLock() + reqs := f.reqs + f.lock.RUnlock() + if err = send(wsconn, map[string]interface{}{ "funds": new(big.Int).Div(balance, ether), "funded": nonce, "peers": f.stack.Server().PeerCount(), - "requests": f.reqs, + "requests": reqs, }, 3*time.Second); err != nil { log.Warn("Failed to send initial stats to client", "err", err) return } - if err = send(conn, head, 3*time.Second); err != nil { + if err = send(wsconn, head, 3*time.Second); err != nil { log.Warn("Failed to send initial header to client", "err", err) return } @@ -384,9 +397,8 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if err = conn.ReadJSON(&msg); err != nil { return } - if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") && - !strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { - if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil { + if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { + if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil { log.Warn("Failed to send URL error to client", "err", err) return } @@ -394,7 +406,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } if msg.Tier >= uint(*tiersFlag) { //lint:ignore ST1005 This error is to be displayed in the browser - if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil { + if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil { log.Warn("Failed to send tier error to client", "err", err) return } @@ -410,7 +422,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form) if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send captcha post error to client", "err", err) return } @@ -423,7 +435,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { err = json.NewDecoder(res.Body).Decode(&result) res.Body.Close() if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send captcha decode error to client", "err", err) return } @@ -432,7 +444,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if !result.Success { log.Warn("Captcha verification failed", "err", string(result.Errors)) //lint:ignore ST1005 it's funny and the robot won't mind - if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil { + if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil { 
log.Warn("Failed to send captcha failure to client", "err", err) return } @@ -441,36 +453,26 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } // Retrieve the Ethereum address to fund, the requesting user and a profile picture var ( + id string username string avatar string address common.Address ) switch { - case strings.HasPrefix(msg.URL, "https://gist.github.com/"): - if err = sendError(conn, errors.New("GitHub authentication discontinued at the official request of GitHub")); err != nil { - log.Warn("Failed to send GitHub deprecation to client", "err", err) - return - } - continue - case strings.HasPrefix(msg.URL, "https://plus.google.com/"): - //lint:ignore ST1005 Google is a company name and should be capitalized. - if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil { - log.Warn("Failed to send Google+ deprecation to client", "err", err) - return - } - continue case strings.HasPrefix(msg.URL, "https://twitter.com/"): - username, avatar, address, err = authTwitter(msg.URL) + id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag) case strings.HasPrefix(msg.URL, "https://www.facebook.com/"): username, avatar, address, err = authFacebook(msg.URL) + id = username case *noauthFlag: username, avatar, address, err = authNoAuth(msg.URL) + id = username default: //lint:ignore ST1005 This error is to be displayed in the browser err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues") } if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send prefix error to client", "err", err) return } @@ -484,7 +486,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { fund bool timeout time.Time ) - if timeout = f.timeouts[username]; time.Now().After(timeout) { + if timeout = f.timeouts[id]; time.Now().After(timeout) { // User wasn't funded recently, create the funding transaction amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether) amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil)) @@ -494,7 +496,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID) if err != nil { f.lock.Unlock() - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send transaction creation error to client", "err", err) return } @@ -503,7 +505,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { // Submit the transaction and mark as funded if successful if err := f.client.SendTransaction(context.Background(), signed); err != nil { f.lock.Unlock() - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send transaction transmission error to client", "err", err) return } @@ -518,20 +520,20 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute grace := timeout / 288 // 24h timeout => 5m grace - f.timeouts[username] = time.Now().Add(timeout - grace) + f.timeouts[id] = time.Now().Add(timeout - grace) fund = true } f.lock.Unlock() // Send an error if too frequent funding, othewise a success if !fund { - if err = sendError(conn, fmt.Errorf("%s left until next allowance", 
common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple + if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple log.Warn("Failed to send funding error to client", "err", err) return } continue } - if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { + if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { log.Warn("Failed to send funding success to client", "err", err) return } @@ -624,12 +626,12 @@ func (f *faucet) loop() { "requests": f.reqs, }, time.Second); err != nil { log.Warn("Failed to send stats to client", "err", err) - conn.Close() + conn.conn.Close() continue } if err := send(conn, head, time.Second); err != nil { log.Warn("Failed to send header to client", "err", err) - conn.Close() + conn.conn.Close() } } f.lock.RUnlock() @@ -651,7 +653,7 @@ func (f *faucet) loop() { for _, conn := range f.conns { if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil { log.Warn("Failed to send requests to client", "err", err) - conn.Close() + conn.conn.Close() } } f.lock.RUnlock() @@ -661,41 +663,63 @@ func (f *faucet) loop() { // sends transmits a data packet to the remote end of the websocket, but also // setting a write deadline to prevent waiting forever on the node. -func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error { +func send(conn *wsConn, value interface{}, timeout time.Duration) error { if timeout == 0 { timeout = 60 * time.Second } - conn.SetWriteDeadline(time.Now().Add(timeout)) - return conn.WriteJSON(value) + conn.wlock.Lock() + defer conn.wlock.Unlock() + conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + return conn.conn.WriteJSON(value) } // sendError transmits an error to the remote end of the websocket, also setting // the write deadline to 1 second to prevent waiting forever. -func sendError(conn *websocket.Conn, err error) error { +func sendError(conn *wsConn, err error) error { return send(conn, map[string]string{"error": err.Error()}, time.Second) } // sendSuccess transmits a success message to the remote end of the websocket, also // setting the write deadline to 1 second to prevent waiting forever. -func sendSuccess(conn *websocket.Conn, msg string) error { +func sendSuccess(conn *wsConn, msg string) error { return send(conn, map[string]string{"success": msg}, time.Second) } // authTwitter tries to authenticate a faucet request using Twitter posts, returning -// the username, avatar URL and Ethereum address to fund on success. -func authTwitter(url string) (string, string, common.Address, error) { +// the uniqueness identifier (user id/username), username, avatar URL and Ethereum address to fund on success. +func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense parts := strings.Split(url, "/") if len(parts) < 4 || parts[len(parts)-2] != "status" { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("Invalid Twitter status URL") - } - // Twitter's API isn't really friendly with direct links. Still, we don't - // want to do ask read permissions from users, so just load the public posts and - // scrape it for the Ethereum address and profile URL. 
+ return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL") + } + // Strip any query parameters from the tweet id and ensure it's numeric + tweetID := strings.Split(parts[len(parts)-1], "?")[0] + if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) { + return "", "", "", common.Address{}, errors.New("Invalid Tweet URL") + } + // Twitter's API isn't really friendly with direct links. + // It is restricted to 300 queries / 15 minute with an app api key. + // Anything more will require read only authorization from the users and that we want to avoid. + + // If Twitter bearer token is provided, use the API, selecting the version + // the user would prefer (currently there's a limit of 1 v2 app / developer + // but unlimited v1.1 apps). + switch { + case tokenV1 != "": + return authTwitterWithTokenV1(tweetID, tokenV1) + case tokenV2 != "": + return authTwitterWithTokenV2(tweetID, tokenV2) + } + // Twiter API token isn't provided so we just load the public posts + // and scrape it for the Ethereum address and profile URL. We need to load + // the mobile page though since the main page loads tweet contents via JS. + url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1) + res, err := http.Get(url) if err != nil { - return "", "", common.Address{}, err + return "", "", "", common.Address{}, err } defer res.Body.Close() @@ -703,31 +727,115 @@ func authTwitter(url string) (string, string, common.Address, error) { parts = strings.Split(res.Request.URL.String(), "/") if len(parts) < 4 || parts[len(parts)-2] != "status" { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("Invalid Twitter status URL") + return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL") } username := parts[len(parts)-3] body, err := ioutil.ReadAll(res.Body) if err != nil { - return "", "", common.Address{}, err + return "", "", "", common.Address{}, err } address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body))) if address == (common.Address{}) { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("No Ethereum address found to fund") + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") } var avatar string - if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { + if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 { avatar = parts[1] } - return username + "@twitter", avatar, address, nil + return username + "@twitter", username, avatar, address, nil +} + +// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1 +// API, returning the user id, username, avatar URL and Ethereum address to fund on +// success. 
+func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) { + // Query the tweet details from Twitter + url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", "", "", common.Address{}, err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", "", common.Address{}, err + } + defer res.Body.Close() + + var result struct { + Text string `json:"text"` + User struct { + ID string `json:"id_str"` + Username string `json:"screen_name"` + Avatar string `json:"profile_image_url"` + } `json:"user"` + } + err = json.NewDecoder(res.Body).Decode(&result) + if err != nil { + return "", "", "", common.Address{}, err + } + address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text)) + if address == (common.Address{}) { + //lint:ignore ST1005 This error is to be displayed in the browser + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") + } + return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil +} + +// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2 +// API, returning the user id, username, avatar URL and Ethereum address to fund on +// success. +func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) { + // Query the tweet details from Twitter + url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", "", "", common.Address{}, err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", "", common.Address{}, err + } + defer res.Body.Close() + + var result struct { + Data struct { + AuthorID string `json:"author_id"` + Text string `json:"text"` + } `json:"data"` + Includes struct { + Users []struct { + ID string `json:"id"` + Username string `json:"username"` + Avatar string `json:"profile_image_url"` + } `json:"users"` + } `json:"includes"` + } + + err = json.NewDecoder(res.Body).Decode(&result) + if err != nil { + return "", "", "", common.Address{}, err + } + + address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text)) + if address == (common.Address{}) { + //lint:ignore ST1005 This error is to be displayed in the browser + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") + } + return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil } // authFacebook tries to authenticate a faucet request using Facebook posts, // returning the username, avatar URL and Ethereum address to fund on success. 
func authFacebook(url string) (string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense - parts := strings.Split(url, "/") + parts := strings.Split(strings.Split(url, "?")[0], "/") + if parts[len(parts)-1] == "" { + parts = parts[0 : len(parts)-1] + } if len(parts) < 4 || parts[len(parts)-2] != "posts" { //lint:ignore ST1005 This error is to be displayed in the browser return "", "", common.Address{}, errors.New("Invalid Facebook post URL") @@ -737,7 +845,13 @@ func authFacebook(url string) (string, string, common.Address, error) { // Facebook's Graph API isn't really friendly with direct links. Still, we don't // want to do ask read permissions from users, so just load the public posts and // scrape it for the Ethereum address and profile URL. - res, err := http.Get(url) + // + // Facebook recently changed their desktop webpage to use AJAX for loading post + // content, so switch over to the mobile site for now. Will probably end up having + // to use the API eventually. + crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1) + + res, err := http.Get(crawl) if err != nil { return "", "", common.Address{}, err } @@ -753,7 +867,7 @@ func authFacebook(url string) (string, string, common.Address, error) { return "", "", common.Address{}, errors.New("No Ethereum address found to fund") } var avatar string - if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { + if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 { avatar = parts[1] } return username + "@facebook", avatar, address, nil @@ -770,3 +884,19 @@ func authNoAuth(url string) (string, string, common.Address, error) { } return address.Hex() + "@noauth", "", address, nil } + +// getGenesis returns a genesis based on input args +func getGenesis(genesisFlag *string, goerliFlag bool, rinkebyFlag bool) (*core.Genesis, error) { + switch { + case genesisFlag != nil: + var genesis core.Genesis + err := common.LoadJSON(*genesisFlag, &genesis) + return &genesis, err + case goerliFlag: + return core.DefaultGoerliGenesisBlock(), nil + case rinkebyFlag: + return core.DefaultRinkebyGenesisBlock(), nil + default: + return nil, fmt.Errorf("no genesis flag provided") + } +} diff --git a/cmd/faucet/faucet.html b/cmd/faucet/faucet.html index 314b19e1232d..dad5ad84f210 100644 --- a/cmd/faucet/faucet.html +++ b/cmd/faucet/faucet.html @@ -49,7 +49,7 @@

{{
- +
-