From a957bab51c3a0e90a605d98149ee7f3918b0da1f Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Thu, 6 Sep 2018 00:50:46 +0200 Subject: [PATCH 1/7] use patched go-ethereum from fork instead of patching it on-the-fly --- Gopkg.lock | 7 +++++-- Gopkg.toml | 4 +--- Makefile | 1 - 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 64b257834b..66d5c2d120 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -88,7 +88,7 @@ revision = "935e0e8a636ca4ba70b713f3e38a19e1b77739e8" [[projects]] - digest = "1:d670c508dc01984c721d0d968936412e3edcd8ca58caf82fcfd0df9044013a0f" + digest = "1:34fa5a5d444765a1360983211916c4d2e65a3be75186277142d6aef5c21192db" name = "github.com/ethereum/go-ethereum" packages = [ ".", @@ -130,6 +130,7 @@ "eth/gasprice", "eth/tracers", "eth/tracers/internal/tracers", + "ethapi", "ethdb", "event", "internal/debug", @@ -156,7 +157,8 @@ "whisper/whisperv6", ] pruneopts = "T" - revision = "316fc7ecfc10d06603f1358c1f4c1020ec36dd2a" + revision = "13115e12870cb2ebc1c8dfe55bc976d94efbeb90" + source = "github.com/status-im/go-ethereum" version = "v1.8.14" [[projects]] @@ -1041,6 +1043,7 @@ "github.com/ethereum/go-ethereum/crypto/sha3", "github.com/ethereum/go-ethereum/eth", "github.com/ethereum/go-ethereum/eth/downloader", + "github.com/ethereum/go-ethereum/ethapi", "github.com/ethereum/go-ethereum/event", "github.com/ethereum/go-ethereum/les", "github.com/ethereum/go-ethereum/log", diff --git a/Gopkg.toml b/Gopkg.toml index bbb21ab202..37d84d6a36 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -1,6 +1,3 @@ -# These packages are introduced by patching go-ethereum, so there isn't any point in forcing dep to find them. 
-ignored = [ "github.com/ethereum/go-ethereum/ethapi" ] - [prune] unused-packages = true go-tests = true @@ -28,6 +25,7 @@ ignored = [ "github.com/ethereum/go-ethereum/ethapi" ] [[constraint]] name = "github.com/ethereum/go-ethereum" version = "=v1.8.14" + source = "github.com/status-im/go-ethereum" # * * * * * `go-ethereum` dependencies * * * * * # Pinned down SHAs from `go-ethereum/vendor/vendor.json` diff --git a/Makefile b/Makefile index 5b4a42dd04..026a2dd0c5 100644 --- a/Makefile +++ b/Makefile @@ -262,7 +262,6 @@ vendor-check: ##@dependencies Require all new patches and disallow other changes dep-ensure: ##@dependencies Dep ensure and apply all patches @dep ensure - ./_assets/patches/patcher dep-install: ##@dependencies Install vendoring tool go get -u github.com/golang/dep/cmd/dep From c224666399b8e32804e9758cbc65ce0fef08c9f3 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Wed, 12 Sep 2018 16:02:37 +0200 Subject: [PATCH 2/7] change the way patches are applied --- Makefile | 19 ++------ _assets/ci/isolate-vendor-check.sh | 54 --------------------- _assets/ci/update-geth.sh | 27 ----------- _assets/patches/geth/README.md | 40 +++++++-------- _assets/patches/update-fork-with-patches.sh | 26 ++++++++++ 5 files changed, 51 insertions(+), 115 deletions(-) delete mode 100755 _assets/ci/isolate-vendor-check.sh delete mode 100755 _assets/ci/update-geth.sh create mode 100755 _assets/patches/update-fork-with-patches.sh diff --git a/Makefile b/Makefile index 026a2dd0c5..8334975f57 100644 --- a/Makefile +++ b/Makefile @@ -256,26 +256,17 @@ clean: ##@other Cleanup deep-clean: clean rm -Rdf .ethereumtest/StatusChain -vendor-check: ##@dependencies Require all new patches and disallow other changes - ./_assets/patches/patcher -c - ./_assets/ci/isolate-vendor-check.sh - dep-ensure: ##@dependencies Dep ensure and apply all patches @dep ensure dep-install: ##@dependencies Install vendoring tool go get -u github.com/golang/dep/cmd/dep -update-geth: ##@dependencies Update geth 
(use GETH_BRANCH to optionally set the geth branch name) - ./_assets/ci/update-geth.sh $(GETH_BRANCH) - @echo "**************************************************************" - @echo "NOTE: Don't forget to:" - @echo "- update the goleveldb dependency revision in Gopkg.toml to match the version used in go-ethereum" - @echo "- reconcile any changes to interfaces in transactions/fake (such as PublicTransactionPoolAPI), which are copies from internal geth interfaces" - @echo "**************************************************************" - -patch: ##@patching Revert and apply all patches +patch-geth-vendor: ##@patching Apply all patches on ethereum in vendor/ ./_assets/patches/patcher -patch-revert: ##@patching Revert all patches only +patch-geth-vendor-revert: ##@patching Revert all patches from ethereum in vendor/ ./_assets/patches/patcher -r + +patch-geth-fork: ##@patching Apply patches to Status' go-ethereum fork + ./_assets/patches/update-fork-with-patches.sh diff --git a/_assets/ci/isolate-vendor-check.sh b/_assets/ci/isolate-vendor-check.sh deleted file mode 100755 index 0d0d6744d4..0000000000 --- a/_assets/ci/isolate-vendor-check.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# This is a hack to isolate vendor check in a -# separate clean state. Without this, validate-vendor.sh -# doesn't like our workflow with patches. -# -# How it works: -# 1) Stashes all changes and checks out to a temporary branch. -# 2) Reverts all patches and commits changes. -# 3) Runs "dep ensure" and validate-vendor.sh. Saves exit code and message. -# 4) Commits any changes. -# 5) Goes back to previous branch and removes the temporary branch. -# 6) Applies stashed changes. -# 7) Prints the message and exits with the exit code. - -timestamp() { - date +"%s" -} - -tempBranchName="isolated-vendor-check-$(timestamp)" - -# Stash current changes first, apply later before exiting. 
-hasChanges=0 -changes=($(git status --porcelain)) -if [ "$changes" ]; then - git stash - hasChanges=1 -fi - -branchName="$(git rev-parse --abbrev-ref HEAD)" - -git checkout -b $tempBranchName - -# Revert all patches. -$(pwd)/_assets/patches/patcher -r -git add . -git commit -m "vendor check - auto" - -# Do vendor check. -dep ensure -msg=$("$(pwd)/_assets/ci/validate-vendor.sh") -failed=$? -git add . -git commit -m "vendor check - auto" - -# Go back to previous branch, clean and apply stashed. -git checkout "$branchName" -git branch -D $tempBranchName -if [ $hasChanges -eq 1 ]; then - git stash apply > /dev/null 2>&1 -fi - -echo $msg -exit $failed diff --git a/_assets/ci/update-geth.sh b/_assets/ci/update-geth.sh deleted file mode 100755 index 088ca6de76..0000000000 --- a/_assets/ci/update-geth.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# This script updates the go-ethereum dependency, optionally updating the branch if GETH_BRANCH is provided. -# If any changes were made, they will be committed. - -# Exit early if any errors are encountered -set -e -if [ ! -z "$GETH_BRANCH" ]; then - # escape slashes - GETH_BRANCH=$(echo $GETH_BRANCH | sed 's@\/@\\\/@g') - # Update go-ethereum contraint branch - sed -i 'N;N;s@\(\[\[constraint]]\n name = "github.com\/ethereum\/go-ethereum"\n branch =\)\(.*\)@\1 '"\"${GETH_BRANCH}\""'@g' Gopkg.toml -fi -dep ensure -v -update github.com/ethereum/go-ethereum -if ! make dep-ensure; then - echo "Please fix patches and rerun. (dep-ensure failed)" - exit 1 -fi - - -git add Gopkg.lock Gopkg.toml vendor/ _assets/patches/ -if $(git diff --cached --quiet); then - echo "No changes to commit. Geth up to date." - exit 0 -fi -git commit --quiet -m "Updating Geth" -echo "Geth updated." 
diff --git a/_assets/patches/geth/README.md b/_assets/patches/geth/README.md index 206544873e..87bc8fa4e9 100644 --- a/_assets/patches/geth/README.md +++ b/_assets/patches/geth/README.md @@ -1,11 +1,9 @@ -# Status Patches for geth (go-ethereum) ---- +Status Patches for geth (go-ethereum) +===================================== -Status-go uses [go-ethereum](https://github.com/ethereum/go-ethereum) (**upstream**) as its dependency. As any other Go dependency `go-ethereum` code is vendored and stored in `vendor/` folder. +Status-go uses Status' fork of [go-ethereum](https://github.com/status-im/go-ethereum) as its dependency. As any other Go dependency `go-ethereum` code is vendored and stored in `vendor/` folder. -However, there are a few changes has been made to the upstream, that are specific to Status and should not be merged to the upstream. We keep those changes as a set of patches, that can be applied upon each next release of `go-ethereum`. Patched version of `go-ethereum` is available in vendor folder. - -We try to minimize number and amount of changes in those patches as much as possible, and whereas possible, to contribute changes into the upstream. +The reason why we use a fork is because we introduced a couple of differences that make it work better on mobile devices but not necessarily are suitable for all cases. # Creating patches @@ -16,23 +14,25 @@ Instructions for creating a patch from the command line: 1. Create a patch `git diff --relative=vendor/github.com/ethereum/go-ethereum > _assets/patches/geth/0000-name-of-the-patch.patch` 1. Commit changes. -# Updating patches - -1. Tweak the patch file. -1. Run `make dep-ensure` to re-apply patches. +# Testing patches -# Removing patches +To test a newly created patch, run: -1. Remove the patch file -1. Remove the link from [this README] (./README.md) -1. Run `make dep-ensure` to re-apply patches. 
+``` +$ git apply _assets/patches/geth/0000-name-of-the-patch.patch --directory vendor/github.com/ethereum/go-ethereum +``` -# Updating +And run `make statusgo` to compile it and `make test` to run unit tests. -When a new stable release of `go-ethereum` comes out, we need to upgrade our vendored copy. We use `dep` for vendoring, so for upgrading: +# Updating fork with a patch -- Change target branch for `go-ethereum` in `Gopkg.toml`. -- `dep ensure -update github.com/ethereum/go-ethereum` -- `make dep-ensure` +To make the patch available for everyone, it needs to be applied and pushed to remote git repository. -This will ensure that dependency is upgraded and fully patched. Upon success, you can do `make vendor-check` after committing all the changes, in order to ensure that all changes are valid. +1. Clone [github.com/status-im/go-ethereum](https://github.com/status-im/go-ethereum) to `$GOPATH` and pull all changes, +1. From `github.com/status-im/status-go` run `GETH_VERSION=v1.8.14 ./_assets/patches/update-fork-with-patches.sh`, +1. Go to `github.com/status-im/go-ethereum` and verify the latest commit and tag `v1.8.14`, +1. If all is good push changes to the upstream: +``` +$ git push origin patched/v1.8.14 +$ git push -f v1.8.14 +``` diff --git a/_assets/patches/update-fork-with-patches.sh b/_assets/patches/update-fork-with-patches.sh new file mode 100755 index 0000000000..4bafaf67b7 --- /dev/null +++ b/_assets/patches/update-fork-with-patches.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -ex + +if [ -z "$GETH_VERSION" ]; then + echo "GETH_VERSION is undefined" + exit 1 +fi + +pushd $GOPATH/src/github.com/status-im/go-ethereum +git fetch +git checkout -b patched/$GETH_VERSION origin/patched/$GETH_VERSION || git checkout patched/$GETH_VERSION +git pull +popd + +cp -R ./_assets/patches $GOPATH/src/github.com/status-im/go-ethereum + +pushd $GOPATH/src/github.com/status-im/go-ethereum +./patches/patcher -b . 
+rm -r ./patches + +git commit -am "add new patches to $GETH_VERSION" +git tag -d $GETH_VERSION +git tag -a $GETH_VERSION -m "Patched release $GETH_VERSION" + +echo "Now, go to $GOPATH/src/github.com/status-im/go-ethereum and check if the latest comment in patched/$GETH_VERSION branch is correct and if tag $GETH_VERSION is updated. If they both are ok, push changes rememberting to push the tag with -f option as it already exists." From 85ff2d5e994d3fe4212ef66a0bc0975913f0d77e Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Wed, 12 Sep 2018 16:07:36 +0200 Subject: [PATCH 3/7] clean up --- .travis.yml | 4 +--- README.md | 2 +- _assets/patches/geth/README.md | 21 +++++++++------------ 3 files changed, 11 insertions(+), 16 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5b329c2a4d..017f86cf94 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,12 +8,10 @@ dist: trusty install: make setup jobs: include: - - stage: Lint & Vendor Check - sudo: required + - stage: Lint before_script: make dep-ensure script: - make lint - - make vendor-check - stage: Test unit and integration script: - make test-unit diff --git a/README.md b/README.md index a1f84a1454..0c9b5c9fc9 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ status-go is an underlying part of [Status](https://status.im/) - a browser, messenger, and gateway to a decentralized world. -It's written in Go and requires Go 1.8 or above. +It's written in Go and requires Go 1.10 or above. It uses Makefile to do most common actions. See `make help` output for available commands. diff --git a/_assets/patches/geth/README.md b/_assets/patches/geth/README.md index 87bc8fa4e9..e59c963180 100644 --- a/_assets/patches/geth/README.md +++ b/_assets/patches/geth/README.md @@ -14,25 +14,22 @@ Instructions for creating a patch from the command line: 1. Create a patch `git diff --relative=vendor/github.com/ethereum/go-ethereum > _assets/patches/geth/0000-name-of-the-patch.patch` 1. Commit changes. 
-# Testing patches - -To test a newly created patch, run: - -``` -$ git apply _assets/patches/geth/0000-name-of-the-patch.patch --directory vendor/github.com/ethereum/go-ethereum -``` - -And run `make statusgo` to compile it and `make test` to run unit tests. - # Updating fork with a patch To make the patch available for everyone, it needs to be applied and pushed to remote git repository. 1. Clone [github.com/status-im/go-ethereum](https://github.com/status-im/go-ethereum) to `$GOPATH` and pull all changes, 1. From `github.com/status-im/status-go` run `GETH_VERSION=v1.8.14 ./_assets/patches/update-fork-with-patches.sh`, -1. Go to `github.com/status-im/go-ethereum` and verify the latest commit and tag `v1.8.14`, -1. If all is good push changes to the upstream: +1. Go to `github.com/status-im/go-ethereum` and verify if the latest commit and tag `v1.8.14` are correct. If so, push changes to the upstream: ``` $ git push origin patched/v1.8.14 $ git push -f v1.8.14 ``` + +# Testing patches + +Assumming that your patch is included in the fork and the updated tag is pushed: + +1. Make sure that the `vendor/` directory is clean: `make dep-ensure`, +1. Run `make statusgo` to compile `statusd`, +1. Run `make test` to run unit tests. 
From bba926682b5a7eaa913905a901de5794fd8dbac0 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Thu, 13 Sep 2018 09:46:53 +0200 Subject: [PATCH 4/7] update comments --- _assets/patches/update-fork-with-patches.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/_assets/patches/update-fork-with-patches.sh b/_assets/patches/update-fork-with-patches.sh index 4bafaf67b7..abadb53cd5 100755 --- a/_assets/patches/update-fork-with-patches.sh +++ b/_assets/patches/update-fork-with-patches.sh @@ -20,6 +20,8 @@ pushd $GOPATH/src/github.com/status-im/go-ethereum rm -r ./patches git commit -am "add new patches to $GETH_VERSION" + +# remove already existing tag as we will replace it with a patched commit git tag -d $GETH_VERSION git tag -a $GETH_VERSION -m "Patched release $GETH_VERSION" From 352d48196aac4f500de7ca1f85da92ef4b4c18d9 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Thu, 27 Sep 2018 15:47:35 +0200 Subject: [PATCH 5/7] remove getch patches --- .../patches/geth/0000-accounts-hd-keys.patch | 358 ------------------ .../geth/0014-whisperv6-notifications.patch | 208 ---------- .../patches/geth/0016-fix-leveldb-issue.patch | 131 ------- ...0021-backends-simulated-chain-signer.patch | 14 - .../geth/0022-node-attach-public.patch | 98 ----- .../geth/0023-extract-personal-sign-api.patch | 61 --- .../geth/0025-whisper-confirmations.patch | 89 ----- .../geth/0027-whisper-time-source.patch | 131 ------- _assets/patches/geth/0028-p2p-watchdog.patch | 119 ------ .../geth/0029-node-api-gauge-metric.patch | 28 -- ...nd-mailserver-request-completed-code.patch | 110 ------ ...-mailserver-response-contains-cursor.patch | 134 ------- .../geth/0035-add_goroutines_metrics.patch | 23 -- .../patches/geth/0037-whisper-metrics.patch | 88 ----- _assets/patches/geth/README.md | 35 -- _assets/patches/update-fork-with-patches.sh | 28 -- 16 files changed, 1655 deletions(-) delete mode 100644 _assets/patches/geth/0000-accounts-hd-keys.patch delete mode 100644 
_assets/patches/geth/0014-whisperv6-notifications.patch delete mode 100644 _assets/patches/geth/0016-fix-leveldb-issue.patch delete mode 100644 _assets/patches/geth/0021-backends-simulated-chain-signer.patch delete mode 100644 _assets/patches/geth/0022-node-attach-public.patch delete mode 100644 _assets/patches/geth/0023-extract-personal-sign-api.patch delete mode 100644 _assets/patches/geth/0025-whisper-confirmations.patch delete mode 100644 _assets/patches/geth/0027-whisper-time-source.patch delete mode 100644 _assets/patches/geth/0028-p2p-watchdog.patch delete mode 100644 _assets/patches/geth/0029-node-api-gauge-metric.patch delete mode 100644 _assets/patches/geth/0032-send-mailserver-request-completed-code.patch delete mode 100644 _assets/patches/geth/0033-mailserver-response-contains-cursor.patch delete mode 100644 _assets/patches/geth/0035-add_goroutines_metrics.patch delete mode 100644 _assets/patches/geth/0037-whisper-metrics.patch delete mode 100644 _assets/patches/geth/README.md delete mode 100755 _assets/patches/update-fork-with-patches.sh diff --git a/_assets/patches/geth/0000-accounts-hd-keys.patch b/_assets/patches/geth/0000-accounts-hd-keys.patch deleted file mode 100644 index 68bfa82200..0000000000 --- a/_assets/patches/geth/0000-accounts-hd-keys.patch +++ /dev/null @@ -1,358 +0,0 @@ -diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go -index 211fa863..65c83f3b 100644 ---- a/accounts/keystore/key.go -+++ b/accounts/keystore/key.go -@@ -33,6 +33,7 @@ import ( - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" -+ "github.com/status-im/status-go/extkeys" - ) - - const ( -@@ -46,6 +47,10 @@ type Key struct { - // we only store privkey as pubkey/address can be derived from it - // privkey in this struct is always in plaintext - PrivateKey *ecdsa.PrivateKey -+ // extended key is the root node for new hardened children i.e. 
sub-accounts -+ ExtendedKey *extkeys.ExtendedKey -+ // next index to be used for sub-account child derivation -+ SubAccountIndex uint32 - } - - type keyStore interface { -@@ -65,10 +70,12 @@ type plainKeyJSON struct { - } - - type encryptedKeyJSONV3 struct { -- Address string `json:"address"` -- Crypto cryptoJSON `json:"crypto"` -- Id string `json:"id"` -- Version int `json:"version"` -+ Address string `json:"address"` -+ Crypto cryptoJSON `json:"crypto"` -+ Id string `json:"id"` -+ Version int `json:"version"` -+ ExtendedKey cryptoJSON `json:"extendedkey"` -+ SubAccountIndex uint32 `json:"subaccountindex"` - } - - type encryptedKeyJSONV1 struct { -@@ -137,6 +144,40 @@ func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { - return key - } - -+func newKeyFromExtendedKey(extKey *extkeys.ExtendedKey) (*Key, error) { -+ var ( -+ extChild1, extChild2 *extkeys.ExtendedKey -+ err error -+ ) -+ -+ if extKey.Depth == 0 { // we are dealing with master key -+ // CKD#1 - main account -+ extChild1, err = extKey.BIP44Child(extkeys.CoinTypeETH, 0) -+ if err != nil { -+ return &Key{}, err -+ } -+ -+ // CKD#2 - sub-accounts root -+ extChild2, err = extKey.BIP44Child(extkeys.CoinTypeETH, 1) -+ if err != nil { -+ return &Key{}, err -+ } -+ } else { // we are dealing with non-master key, so it is safe to persist and extend from it -+ extChild1 = extKey -+ extChild2 = extKey -+ } -+ -+ privateKeyECDSA := extChild1.ToECDSA() -+ id := uuid.NewRandom() -+ key := &Key{ -+ Id: id, -+ Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), -+ PrivateKey: privateKeyECDSA, -+ ExtendedKey: extChild2, -+ } -+ return key, nil -+} -+ - // NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit - // into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we - // retry until the first byte is 0. 
-diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go -index 6b04acd0..ac2ab008 100644 ---- a/accounts/keystore/keystore.go -+++ b/accounts/keystore/keystore.go -@@ -38,6 +38,7 @@ import ( - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/event" -+ "github.com/status-im/status-go/extkeys" - ) - - var ( -@@ -228,6 +229,11 @@ func (ks *KeyStore) Accounts() []accounts.Account { - return ks.cache.accounts() - } - -+// AccountDecryptedKey returns decrypted key for account (provided that password is correct). -+func (ks *KeyStore) AccountDecryptedKey(a accounts.Account, auth string) (accounts.Account, *Key, error) { -+ return ks.getDecryptedKey(a, auth) -+} -+ - // Delete deletes the key matched by account if the passphrase is correct. - // If the account contains no filename, the address must match a unique key. - func (ks *KeyStore) Delete(a accounts.Account, passphrase string) error { -@@ -453,6 +459,34 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco - return ks.importKey(key, passphrase) - } - -+// ImportExtendedKey stores ECDSA key (obtained from extended key) along with CKD#2 (root for sub-accounts) -+// If key file is not found, it is created. Key is encrypted with the given passphrase. 
-+func (ks *KeyStore) ImportExtendedKey(extKey *extkeys.ExtendedKey, passphrase string) (accounts.Account, error) { -+ key, err := newKeyFromExtendedKey(extKey) -+ if err != nil { -+ zeroKey(key.PrivateKey) -+ return accounts.Account{}, err -+ } -+ -+ // if account is already imported, return cached version -+ if ks.cache.hasAddress(key.Address) { -+ a := accounts.Account{ -+ Address: key.Address, -+ } -+ ks.cache.maybeReload() -+ ks.cache.mu.Lock() -+ a, err := ks.cache.find(a) -+ ks.cache.mu.Unlock() -+ if err != nil { -+ zeroKey(key.PrivateKey) -+ return a, err -+ } -+ return a, nil -+ } -+ -+ return ks.importKey(key, passphrase) -+} -+ - func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, error) { - a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.storage.JoinPath(keyFileName(key.Address))}} - if err := ks.storage.StoreKey(a.URL.Path, key, passphrase); err != nil { -@@ -463,6 +497,15 @@ func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, er - return a, nil - } - -+func (ks *KeyStore) IncSubAccountIndex(a accounts.Account, passphrase string) error { -+ a, key, err := ks.getDecryptedKey(a, passphrase) -+ if err != nil { -+ return err -+ } -+ key.SubAccountIndex++ -+ return ks.storage.StoreKey(a.URL.Path, key, passphrase) -+} -+ - // Update changes the passphrase of an existing account. - func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error { - a, key, err := ks.getDecryptedKey(a, passphrase) -@@ -486,6 +529,9 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account - - // zeroKey zeroes a private key in memory. 
- func zeroKey(k *ecdsa.PrivateKey) { -+ if k == nil { -+ return -+ } - b := k.D.Bits() - for i := range b { - b[i] = 0 -diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go -index 59738abe..2b6ef252 100644 ---- a/accounts/keystore/keystore_passphrase.go -+++ b/accounts/keystore/keystore_passphrase.go -@@ -41,6 +41,7 @@ import ( - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" -+ "github.com/status-im/status-go/extkeys" - "golang.org/x/crypto/pbkdf2" - "golang.org/x/crypto/scrypt" - ) -@@ -157,15 +158,68 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { - KDFParams: scryptParamsJSON, - MAC: hex.EncodeToString(mac), - } -+ encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP) -+ if err != nil { -+ return nil, err -+ } - encryptedKeyJSONV3 := encryptedKeyJSONV3{ - hex.EncodeToString(key.Address[:]), - cryptoStruct, - key.Id.String(), - version, -+ encryptedExtendedKey, -+ key.SubAccountIndex, - } - return json.Marshal(encryptedKeyJSONV3) - } - -+func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) { -+ if extKey == nil { -+ return cryptoJSON{}, nil -+ } -+ authArray := []byte(auth) -+ salt := make([]byte, 32) -+ if _, err := io.ReadFull(rand.Reader, salt); err != nil { -+ panic("reading from crypto/rand failed: " + err.Error()) -+ } -+ derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) -+ if err != nil { -+ return cryptoJSON{}, err -+ } -+ encryptKey := derivedKey[:16] -+ keyBytes := []byte(extKey.String()) -+ -+ iv := make([]byte, aes.BlockSize) // 16 -+ if _, err := io.ReadFull(rand.Reader, iv); err != nil { -+ panic("reading from crypto/rand failed: " + err.Error()) -+ } -+ cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) -+ if err != nil { -+ return cryptoJSON{}, err -+ } -+ mac := 
crypto.Keccak256(derivedKey[16:32], cipherText) -+ -+ scryptParamsJSON := make(map[string]interface{}, 5) -+ scryptParamsJSON["n"] = scryptN -+ scryptParamsJSON["r"] = scryptR -+ scryptParamsJSON["p"] = scryptP -+ scryptParamsJSON["dklen"] = scryptDKLen -+ scryptParamsJSON["salt"] = hex.EncodeToString(salt) -+ -+ cipherParamsJSON := cipherparamsJSON{ -+ IV: hex.EncodeToString(iv), -+ } -+ -+ return cryptoJSON{ -+ Cipher: "aes-128-ctr", -+ CipherText: hex.EncodeToString(cipherText), -+ CipherParams: cipherParamsJSON, -+ KDF: "scrypt", -+ KDFParams: scryptParamsJSON, -+ MAC: hex.EncodeToString(mac), -+ }, nil -+} -+ - // DecryptKey decrypts a key from a json blob, returning the private key itself. - func DecryptKey(keyjson []byte, auth string) (*Key, error) { - // Parse the json into a simple map to fetch the key version -@@ -177,20 +231,43 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { - var ( - keyBytes, keyId []byte - err error -+ extKeyBytes []byte -+ extKey *extkeys.ExtendedKey - ) -+ -+ subAccountIndex, ok := m["subaccountindex"].(float64) -+ if !ok { -+ subAccountIndex = 0 -+ } -+ - if version, ok := m["version"].(string); ok && version == "1" { - k := new(encryptedKeyJSONV1) - if err := json.Unmarshal(keyjson, k); err != nil { - return nil, err - } - keyBytes, keyId, err = decryptKeyV1(k, auth) -+ if err != nil { -+ return nil, err -+ } -+ -+ extKey, err = extkeys.NewKeyFromString(extkeys.EmptyExtendedKeyString) - } else { - k := new(encryptedKeyJSONV3) - if err := json.Unmarshal(keyjson, k); err != nil { - return nil, err - } - keyBytes, keyId, err = decryptKeyV3(k, auth) -+ if err != nil { -+ return nil, err -+ } -+ -+ extKeyBytes, err = decryptExtendedKey(k, auth) -+ if err != nil { -+ return nil, err -+ } -+ extKey, err = extkeys.NewKeyFromString(string(extKeyBytes)) - } -+ - // Handle any decryption errors and return the key - if err != nil { - return nil, err -@@ -198,9 +275,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, 
error) { - key := crypto.ToECDSAUnsafe(keyBytes) - - return &Key{ -- Id: uuid.UUID(keyId), -- Address: crypto.PubkeyToAddress(key.PublicKey), -- PrivateKey: key, -+ Id: uuid.UUID(keyId), -+ Address: crypto.PubkeyToAddress(key.PublicKey), -+ PrivateKey: key, -+ ExtendedKey: extKey, -+ SubAccountIndex: uint32(subAccountIndex), - }, nil - } - -@@ -280,6 +359,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt - return plainText, keyId, err - } - -+func decryptExtendedKey(keyProtected *encryptedKeyJSONV3, auth string) (plainText []byte, err error) { -+ if len(keyProtected.ExtendedKey.CipherText) == 0 { -+ return []byte(extkeys.EmptyExtendedKeyString), nil -+ } -+ -+ if keyProtected.Version != version { -+ return nil, fmt.Errorf("Version not supported: %v", keyProtected.Version) -+ } -+ -+ if keyProtected.ExtendedKey.Cipher != "aes-128-ctr" { -+ return nil, fmt.Errorf("Cipher not supported: %v", keyProtected.ExtendedKey.Cipher) -+ } -+ -+ mac, err := hex.DecodeString(keyProtected.ExtendedKey.MAC) -+ if err != nil { -+ return nil, err -+ } -+ -+ iv, err := hex.DecodeString(keyProtected.ExtendedKey.CipherParams.IV) -+ if err != nil { -+ return nil, err -+ } -+ -+ cipherText, err := hex.DecodeString(keyProtected.ExtendedKey.CipherText) -+ if err != nil { -+ return nil, err -+ } -+ -+ derivedKey, err := getKDFKey(keyProtected.ExtendedKey, auth) -+ if err != nil { -+ return nil, err -+ } -+ -+ calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) -+ if !bytes.Equal(calculatedMAC, mac) { -+ return nil, ErrDecrypt -+ } -+ -+ plainText, err = aesCTRXOR(derivedKey[:16], cipherText, iv) -+ if err != nil { -+ return nil, err -+ } -+ return plainText, err -+} -+ - func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) { - authArray := []byte(auth) - salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) diff --git a/_assets/patches/geth/0014-whisperv6-notifications.patch 
b/_assets/patches/geth/0014-whisperv6-notifications.patch deleted file mode 100644 index ef326ad3cd..0000000000 --- a/_assets/patches/geth/0014-whisperv6-notifications.patch +++ /dev/null @@ -1,208 +0,0 @@ -diff --git i/whisper/whisperv6/api.go w/whisper/whisperv6/api.go -index c60bc46a1..2de99f293 100644 ---- i/whisper/whisperv6/api.go -+++ w/whisper/whisperv6/api.go -@@ -317,6 +317,16 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (hexutil. - return result, err - } - -+// UninstallFilter is alias for Unsubscribe -+func (api *PublicWhisperAPI) UninstallFilter(id string) { -+ api.w.Unsubscribe(id) -+} -+ -+// Unsubscribe disables and removes an existing filter. -+func (api *PublicWhisperAPI) Unsubscribe(id string) { -+ api.w.Unsubscribe(id) -+} -+ - //go:generate gencodec -type Criteria -field-override criteriaOverride -out gen_criteria_json.go - - // Criteria holds various filter options for inbound messages. -diff --git i/whisper/whisperv6/whisper.go w/whisper/whisperv6/whisper.go -index 880cced09..702556079 100644 ---- i/whisper/whisperv6/whisper.go -+++ w/whisper/whisperv6/whisper.go -@@ -382,9 +382,9 @@ func (whisper *Whisper) NewKeyPair() (string, error) { - return "", fmt.Errorf("failed to generate valid key") - } - -- id, err := GenerateRandomID() -+ id, err := toDeterministicID(common.ToHex(crypto.FromECDSAPub(&key.PublicKey)), keyIDSize) - if err != nil { -- return "", fmt.Errorf("failed to generate ID: %s", err) -+ return "", err - } - - whisper.keyMu.Lock() -@@ -399,11 +399,16 @@ func (whisper *Whisper) NewKeyPair() (string, error) { - - // DeleteKeyPair deletes the specified key if it exists. 
- func (whisper *Whisper) DeleteKeyPair(key string) bool { -+ deterministicID, err := toDeterministicID(key, keyIDSize) -+ if err != nil { -+ return false -+ } -+ - whisper.keyMu.Lock() - defer whisper.keyMu.Unlock() - -- if whisper.privateKeys[key] != nil { -- delete(whisper.privateKeys, key) -+ if whisper.privateKeys[deterministicID] != nil { -+ delete(whisper.privateKeys, deterministicID) - return true - } - return false -@@ -411,31 +416,73 @@ func (whisper *Whisper) DeleteKeyPair(key string) bool { - - // AddKeyPair imports a asymmetric private key and returns it identifier. - func (whisper *Whisper) AddKeyPair(key *ecdsa.PrivateKey) (string, error) { -- id, err := GenerateRandomID() -+ id, err := makeDeterministicID(common.ToHex(crypto.FromECDSAPub(&key.PublicKey)), keyIDSize) - if err != nil { -- return "", fmt.Errorf("failed to generate ID: %s", err) -+ return "", err -+ } -+ if whisper.HasKeyPair(id) { -+ return id, nil // no need to re-inject - } - - whisper.keyMu.Lock() - whisper.privateKeys[id] = key - whisper.keyMu.Unlock() -+ log.Info("Whisper identity added", "id", id, "pubkey", common.ToHex(crypto.FromECDSAPub(&key.PublicKey))) - - return id, nil - } - -+// SelectKeyPair adds cryptographic identity, and makes sure -+// that it is the only private key known to the node. 
-+func (whisper *Whisper) SelectKeyPair(key *ecdsa.PrivateKey) error { -+ id, err := makeDeterministicID(common.ToHex(crypto.FromECDSAPub(&key.PublicKey)), keyIDSize) -+ if err != nil { -+ return err -+ } -+ -+ whisper.keyMu.Lock() -+ defer whisper.keyMu.Unlock() -+ -+ whisper.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store -+ whisper.privateKeys[id] = key -+ -+ log.Info("Whisper identity selected", "id", id, "key", common.ToHex(crypto.FromECDSAPub(&key.PublicKey))) -+ return nil -+} -+ -+// DeleteKeyPairs removes all cryptographic identities known to the node -+func (whisper *Whisper) DeleteKeyPairs() error { -+ whisper.keyMu.Lock() -+ defer whisper.keyMu.Unlock() -+ -+ whisper.privateKeys = make(map[string]*ecdsa.PrivateKey) -+ -+ return nil -+} -+ - // HasKeyPair checks if the whisper node is configured with the private key - // of the specified public pair. - func (whisper *Whisper) HasKeyPair(id string) bool { -+ deterministicID, err := toDeterministicID(id, keyIDSize) -+ if err != nil { -+ return false -+ } -+ - whisper.keyMu.RLock() - defer whisper.keyMu.RUnlock() -- return whisper.privateKeys[id] != nil -+ return whisper.privateKeys[deterministicID] != nil - } - - // GetPrivateKey retrieves the private key of the specified identity. - func (whisper *Whisper) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) { -+ deterministicID, err := toDeterministicID(id, keyIDSize) -+ if err != nil { -+ return nil, err -+ } -+ - whisper.keyMu.RLock() - defer whisper.keyMu.RUnlock() -- key := whisper.privateKeys[id] -+ key := whisper.privateKeys[deterministicID] - if key == nil { - return nil, fmt.Errorf("invalid id") - } -@@ -467,6 +514,23 @@ func (whisper *Whisper) GenerateSymKey() (string, error) { - return id, nil - } - -+// AddSymKey stores the key with a given id. 
-+func (whisper *Whisper) AddSymKey(id string, key []byte) (string, error) { -+ deterministicID, err := toDeterministicID(id, keyIDSize) -+ if err != nil { -+ return "", err -+ } -+ -+ whisper.keyMu.Lock() -+ defer whisper.keyMu.Unlock() -+ -+ if whisper.symKeys[deterministicID] != nil { -+ return "", fmt.Errorf("key already exists: %v", id) -+ } -+ whisper.symKeys[deterministicID] = key -+ return deterministicID, nil -+} -+ - // AddSymKeyDirect stores the key, and returns its id. - func (whisper *Whisper) AddSymKeyDirect(key []byte) (string, error) { - if len(key) != aesKeyLength { -@@ -1013,6 +1077,33 @@ func GenerateRandomID() (id string, err error) { - return id, err - } - -+// makeDeterministicID generates a deterministic ID, based on a given input -+func makeDeterministicID(input string, keyLen int) (id string, err error) { -+ buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New) -+ if !validateDataIntegrity(buf, keyIDSize) { -+ return "", fmt.Errorf("error in GenerateDeterministicID: failed to generate key") -+ } -+ id = common.Bytes2Hex(buf) -+ return id, err -+} -+ -+// toDeterministicID reviews incoming id, and transforms it to format -+// expected internally be private key store. Originally, public keys -+// were used as keys, now random keys are being used. And in order to -+// make it easier to consume, we now allow both random IDs and public -+// keys to be passed. -+func toDeterministicID(id string, expectedLen int) (string, error) { -+ if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled -+ var err error -+ id, err = makeDeterministicID(id, expectedLen) -+ if err != nil { -+ return "", err -+ } -+ } -+ -+ return id, nil -+} -+ - func isFullNode(bloom []byte) bool { - if bloom == nil { - return true -@@ -1048,3 +1139,15 @@ func addBloom(a, b []byte) []byte { - } - return c - } -+ -+// SelectedKeyPairID returns the id of currently selected key pair. 
-+// It helps distinguish between different users w/o exposing the user identity itself. -+func (whisper *Whisper) SelectedKeyPairID() string { -+ whisper.keyMu.RLock() -+ defer whisper.keyMu.RUnlock() -+ -+ for id := range whisper.privateKeys { -+ return id -+ } -+ return "" -+} diff --git a/_assets/patches/geth/0016-fix-leveldb-issue.patch b/_assets/patches/geth/0016-fix-leveldb-issue.patch deleted file mode 100644 index 76e34b674a..0000000000 --- a/_assets/patches/geth/0016-fix-leveldb-issue.patch +++ /dev/null @@ -1,131 +0,0 @@ -diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go -index fbde9c6c..c0013a11 100644 ---- a/eth/downloader/downloader.go -+++ b/eth/downloader/downloader.go -@@ -143,6 +143,8 @@ type Downloader struct { - quitCh chan struct{} // Quit channel to signal termination - quitLock sync.RWMutex // Lock to prevent double closes - -+ downloads sync.WaitGroup // Keeps track of the currently active downloads -+ - // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run - bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch -@@ -403,7 +405,9 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode - // specified peer and head hash. - func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { - d.mux.Post(StartEvent{}) -+ d.downloads.Add(1) - defer func() { -+ d.downloads.Done() - // reset on error - if err != nil { - d.mux.Post(FailedEvent{err}) -@@ -471,14 +475,22 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I - } else if d.mode == FullSync { - fetchers = append(fetchers, d.processFullSyncContent) - } -- return d.spawnSync(fetchers) -+ return d.spawnSync(errCancelHeaderFetch, fetchers) - } - - // spawnSync runs d.process and all given fetcher functions to completion in - // separate goroutines, returning the first error that appears. 
--func (d *Downloader) spawnSync(fetchers []func() error) error { -+func (d *Downloader) spawnSync(errCancel error, fetchers []func() error) error { -+ d.cancelLock.Lock() -+ select { -+ case <-d.cancelCh: -+ d.cancelLock.Unlock() -+ return errCancel -+ default: -+ } - errc := make(chan error, len(fetchers)) - d.cancelWg.Add(len(fetchers)) -+ d.cancelLock.Unlock() - for _, fn := range fetchers { - fn := fn - go func() { defer d.cancelWg.Done(); errc <- fn() }() -@@ -539,6 +551,10 @@ func (d *Downloader) Terminate() { - - // Cancel any pending download requests - d.Cancel() -+ -+ // Wait, so external dependencies aren't destroyed -+ // until the download processing is done. -+ d.downloads.Wait() - } - - // fetchHeight retrieves the head header of the remote peer to aid in estimating -diff --git a/eth/handler.go b/eth/handler.go -index f89f68c9..5522b0d9 100644 ---- a/eth/handler.go -+++ b/eth/handler.go -@@ -230,6 +230,9 @@ func (pm *ProtocolManager) Stop() { - // Quit fetcher, txsyncLoop. - close(pm.quitSync) - -+ // Stop downloader and make sure that all the running downloads are complete. -+ pm.downloader.Terminate() -+ - // Disconnect existing sessions. - // This also closes the gate for any new registrations on the peer set. 
- // sessions which are already established but not added to pm.peers yet -diff --git a/eth/sync.go b/eth/sync.go -index e49e4008..4367434a 100644 ---- a/eth/sync.go -+++ b/eth/sync.go -@@ -135,7 +135,6 @@ func (pm *ProtocolManager) syncer() { - // Start and ensure cleanup of sync mechanisms - pm.fetcher.Start() - defer pm.fetcher.Stop() -- defer pm.downloader.Terminate() - - // Wait for different events to fire synchronisation operations - forceSync := time.NewTicker(forceSyncCycle) -diff --git a/les/backend.go b/les/backend.go -index 00025ba6..38c36da6 100644 ---- a/les/backend.go -+++ b/les/backend.go -@@ -20,7 +20,6 @@ package les - import ( - "fmt" - "sync" -- "time" - - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/common" -@@ -253,7 +252,6 @@ func (s *LightEthereum) Stop() error { - - s.eventMux.Stop() - -- time.Sleep(time.Millisecond * 200) - s.chainDb.Close() - close(s.shutdownChan) - -diff --git a/les/handler.go b/les/handler.go -index ca40eaab..cc15d68c 100644 ---- a/les/handler.go -+++ b/les/handler.go -@@ -194,6 +194,9 @@ func (pm *ProtocolManager) Stop() { - pm.clientPool.stop() - } - -+ // Stop downloader and make sure that all the running downloads are complete. -+ pm.downloader.Terminate() -+ - // Disconnect existing sessions. - // This also closes the gate for any new registrations on the peer set. 
- // sessions which are already established but not added to pm.peers yet -diff --git a/les/sync.go b/les/sync.go -index 1ac64558..eb155377 100644 ---- a/les/sync.go -+++ b/les/sync.go -@@ -31,7 +31,6 @@ func (pm *ProtocolManager) syncer() { - // Start and ensure cleanup of sync mechanisms - //pm.fetcher.Start() - //defer pm.fetcher.Stop() -- defer pm.downloader.Terminate() - - // Wait for different events to fire synchronisation operations - //forceSync := time.Tick(forceSyncCycle) diff --git a/_assets/patches/geth/0021-backends-simulated-chain-signer.patch b/_assets/patches/geth/0021-backends-simulated-chain-signer.patch deleted file mode 100644 index 2b6bfe51d4..0000000000 --- a/_assets/patches/geth/0021-backends-simulated-chain-signer.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git i/accounts/abi/bind/backends/simulated.go w/accounts/abi/bind/backends/simulated.go -index bd342a8cb..2ce30e2fa 100644 ---- i/accounts/abi/bind/backends/simulated.go -+++ w/accounts/abi/bind/backends/simulated.go -@@ -295,8 +295,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM - func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { - b.mu.Lock() - defer b.mu.Unlock() -- -- sender, err := types.Sender(types.HomesteadSigner{}, tx) -+ sender, err := types.Sender(types.NewEIP155Signer(tx.ChainId()), tx) - if err != nil { - panic(fmt.Errorf("invalid transaction: %v", err)) - } diff --git a/_assets/patches/geth/0022-node-attach-public.patch b/_assets/patches/geth/0022-node-attach-public.patch deleted file mode 100644 index 6ec61ef7af..0000000000 --- a/_assets/patches/geth/0022-node-attach-public.patch +++ /dev/null @@ -1,98 +0,0 @@ -diff --git a/node/node.go b/node/node.go -index ada38372..5ea58e13 100644 ---- a/node/node.go -+++ b/node/node.go -@@ -51,8 +51,9 @@ type Node struct { - serviceFuncs []ServiceConstructor // Service constructors (in dependency order) - services map[reflect.Type]Service // Currently 
running services - -- rpcAPIs []rpc.API // List of APIs currently provided by the node -- inprocHandler *rpc.Server // In-process RPC request handler to process the API requests -+ rpcAPIs []rpc.API // List of APIs currently provided by the node -+ inprocHandler *rpc.Server // In-process RPC request handler to process the API requests -+ inprocPublicHandler *rpc.Server // In-process RPC request handler to process the public API requests - - ipcEndpoint string // IPC endpoint to listen at (empty = IPC disabled) - ipcListener net.Listener // IPC RPC listener socket to serve API requests -@@ -259,18 +260,25 @@ func (n *Node) startRPC(services map[reflect.Type]Service) error { - if err := n.startInProc(apis); err != nil { - return err - } -+ if err := n.startPublicInProc(apis, n.config.HTTPModules); err != nil { -+ n.stopInProc() -+ return err -+ } - if err := n.startIPC(apis); err != nil { -+ n.stopPublicInProc() - n.stopInProc() - return err - } - if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors, n.config.HTTPVirtualHosts, n.config.HTTPTimeouts); err != nil { - n.stopIPC() -+ n.stopPublicInProc() - n.stopInProc() - return err - } - if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins, n.config.WSExposeAll); err != nil { - n.stopHTTP() - n.stopIPC() -+ n.stopPublicInProc() - n.stopInProc() - return err - } -@@ -301,6 +309,36 @@ func (n *Node) stopInProc() { - } - } - -+// startPublicInProc initializes an in-process RPC endpoint for public APIs. 
-+func (n *Node) startPublicInProc(apis []rpc.API, modules []string) error { -+ // Generate the whitelist based on the allowed modules -+ whitelist := make(map[string]bool) -+ for _, module := range modules { -+ whitelist[module] = true -+ } -+ -+ // Register all the public APIs exposed by the services -+ handler := rpc.NewServer() -+ for _, api := range apis { -+ if whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) { -+ if err := handler.RegisterName(api.Namespace, api.Service); err != nil { -+ return err -+ } -+ n.log.Debug("InProc public registered", "service", api.Service, "namespace", api.Namespace) -+ } -+ } -+ n.inprocPublicHandler = handler -+ return nil -+} -+ -+// stopPublicInProc terminates the in-process RPC endpoint for public APIs. -+func (n *Node) stopPublicInProc() { -+ if n.inprocPublicHandler != nil { -+ n.inprocPublicHandler.Stop() -+ n.inprocPublicHandler = nil -+ } -+} -+ - // startIPC initializes and starts the IPC RPC endpoint. - func (n *Node) startIPC(apis []rpc.API) error { - if n.ipcEndpoint == "" { -@@ -487,6 +525,18 @@ func (n *Node) Attach() (*rpc.Client, error) { - return rpc.DialInProc(n.inprocHandler), nil - } - -+// AttachPublic creates an RPC client attached to an in-process Public API handler. -+func (n *Node) AttachPublic() (*rpc.Client, error) { -+ n.lock.RLock() -+ defer n.lock.RUnlock() -+ -+ if n.server == nil { -+ return nil, ErrNodeStopped -+ } -+ -+ return rpc.DialInProc(n.inprocPublicHandler), nil -+} -+ - // RPCHandler returns the in-process RPC request handler. 
- func (n *Node) RPCHandler() (*rpc.Server, error) { - n.lock.RLock() diff --git a/_assets/patches/geth/0023-extract-personal-sign-api.patch b/_assets/patches/geth/0023-extract-personal-sign-api.patch deleted file mode 100644 index f3d3cf3f97..0000000000 --- a/_assets/patches/geth/0023-extract-personal-sign-api.patch +++ /dev/null @@ -1,61 +0,0 @@ -diff --git a/ethapi/private_account.go b/ethapi/private_account.go -new file mode 100644 -index 00000000..8d51fd31 ---- /dev/null -+++ b/ethapi/private_account.go -@@ -0,0 +1,26 @@ -+package ethapi -+ -+import ( -+ "context" -+ -+ "github.com/ethereum/go-ethereum/accounts" -+ "github.com/ethereum/go-ethereum/common" -+ "github.com/ethereum/go-ethereum/common/hexutil" -+ "github.com/ethereum/go-ethereum/internal/ethapi" -+) -+ -+type LimitedPersonalAPI struct { -+ privateAPI *ethapi.PrivateAccountAPI -+} -+ -+func NewLimitedPersonalAPI(am *accounts.Manager) *LimitedPersonalAPI { -+ return &LimitedPersonalAPI{ethapi.NewSubsetOfPrivateAccountAPI(am)} -+} -+ -+func (s *LimitedPersonalAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) { -+ return s.privateAPI.Sign(ctx, data, addr, passwd) -+} -+ -+func (s *LimitedPersonalAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) { -+ return s.privateAPI.EcRecover(ctx, data, sig) -+} -diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go -index 31408633..3cee8753 100644 ---- a/internal/ethapi/api.go -+++ b/internal/ethapi/api.go -@@ -214,6 +214,14 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI { - } - } - -+func NewSubsetOfPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI { -+ return &PrivateAccountAPI{ -+ am: am, -+ nonceLock: nil, -+ b: nil, -+ } -+} -+ - // ListAccounts will return a list of addresses for accounts this node manages. 
- func (s *PrivateAccountAPI) ListAccounts() []common.Address { - addresses := make([]common.Address, 0) // return [] instead of nil if empty -@@ -426,7 +434,7 @@ func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr c - // Look up the wallet containing the requested signer - account := accounts.Account{Address: addr} - -- wallet, err := s.b.AccountManager().Find(account) -+ wallet, err := s.am.Find(account) - if err != nil { - return nil, err - } - diff --git a/_assets/patches/geth/0025-whisper-confirmations.patch b/_assets/patches/geth/0025-whisper-confirmations.patch deleted file mode 100644 index a7f880d26d..0000000000 --- a/_assets/patches/geth/0025-whisper-confirmations.patch +++ /dev/null @@ -1,89 +0,0 @@ -diff --git a/whisper/whisperv6/events.go b/whisper/whisperv6/events.go -new file mode 100644 -index 00000000..e03ec9de ---- /dev/null -+++ b/whisper/whisperv6/events.go -@@ -0,0 +1,23 @@ -+package whisperv6 -+ -+import ( -+ "github.com/ethereum/go-ethereum/common" -+ "github.com/ethereum/go-ethereum/p2p/discover" -+) -+ -+// EventType used to define known envelope events. -+type EventType string -+ -+const ( -+ // EventEnvelopeSent fires when envelope was sent to a peer. -+ EventEnvelopeSent EventType = "envelope.sent" -+ // EventEnvelopeExpired fires when envelop expired -+ EventEnvelopeExpired EventType = "envelope.expired" -+) -+ -+// EnvelopeEvent used for envelopes events. 
-+type EnvelopeEvent struct { -+ Event EventType -+ Hash common.Hash -+ Peer discover.NodeID -+} -diff --git a/whisper/whisperv6/peer.go b/whisper/whisperv6/peer.go -index 79cc2127..018d8f82 100644 ---- a/whisper/whisperv6/peer.go -+++ b/whisper/whisperv6/peer.go -@@ -204,6 +204,11 @@ func (peer *Peer) broadcast() error { - // mark envelopes only if they were successfully sent - for _, e := range bundle { - peer.mark(e) -+ peer.host.envelopeFeed.Send(EnvelopeEvent{ -+ Event: EventEnvelopeSent, -+ Hash: e.Hash(), -+ Peer: peer.peer.ID(), // specifically discover.NodeID because it can be pretty printed -+ }) - } - - log.Trace("broadcast", "num. messages", len(bundle)) -diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go -index 414aa788..3c3c66ad 100644 ---- a/whisper/whisperv6/whisper.go -+++ b/whisper/whisperv6/whisper.go -@@ -29,6 +29,7 @@ import ( - mapset "github.com/deckarep/golang-set" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -+ "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/rlp" -@@ -89,5 +90,7 @@ type Whisper struct { - - mailServer MailServer // MailServer interface -+ -+ envelopeFeed event.Feed - } - - // New creates a Whisper client ready to communicate through the Ethereum P2P network. -@@ -133,6 +136,12 @@ func New(cfg *Config) *Whisper { - return whisper - } - -+// SubscribeEnvelopeEvents subscribes to envelopes feed. -+// In order to prevent blocking whisper producers events must be amply buffered. -+func (whisper *Whisper) SubscribeEnvelopeEvents(events chan<- EnvelopeEvent) event.Subscription { -+ return whisper.envelopeFeed.Subscribe(events) -+} -+ - // MinPow returns the PoW value required by this node. 
- func (whisper *Whisper) MinPow() float64 { - val, exist := whisper.settings.Load(minPowIdx) -@@ -986,6 +995,10 @@ func (whisper *Whisper) expire() { - hashSet.Each(func(v interface{}) bool { - sz := whisper.envelopes[v.(common.Hash)].size() - delete(whisper.envelopes, v.(common.Hash)) -+ whisper.envelopeFeed.Send(EnvelopeEvent{ -+ Hash: v.(common.Hash), -+ Event: EventEnvelopeExpired, -+ }) - whisper.stats.messagesCleared++ - whisper.stats.memoryCleared += sz - whisper.stats.memoryUsed -= sz diff --git a/_assets/patches/geth/0027-whisper-time-source.patch b/_assets/patches/geth/0027-whisper-time-source.patch deleted file mode 100644 index 4fb8f62ad5..0000000000 --- a/_assets/patches/geth/0027-whisper-time-source.patch +++ /dev/null @@ -1,131 +0,0 @@ -diff --git a/whisper/whisperv6/api.go b/whisper/whisperv6/api.go -index 2de99f293..e0c3284b6 100644 ---- a/whisper/whisperv6/api.go -+++ b/whisper/whisperv6/api.go -@@ -285,7 +285,7 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (hexutil. - } - - var result []byte -- env, err := whisperMsg.Wrap(params) -+ env, err := whisperMsg.Wrap(params, api.w.GetCurrentTime()) - if err != nil { - return nil, err - } -diff --git a/whisper/whisperv6/config.go b/whisper/whisperv6/config.go -index 61419de00..3c28263e5 100644 ---- a/whisper/whisperv6/config.go -+++ b/whisper/whisperv6/config.go -@@ -16,14 +16,18 @@ - - package whisperv6 - -+import "time" -+ - // Config represents the configuration state of a whisper node. - type Config struct { - MaxMessageSize uint32 `toml:",omitempty"` - MinimumAcceptedPOW float64 `toml:",omitempty"` -+ TimeSource func() time.Time - } - - // DefaultConfig represents (shocker!) the default configuration. 
- var DefaultConfig = Config{ - MaxMessageSize: DefaultMaxMessageSize, - MinimumAcceptedPOW: DefaultMinimumPoW, -+ TimeSource: time.Now, - } -diff --git a/whisper/whisperv6/envelope.go b/whisper/whisperv6/envelope.go -index c42d1fa8a..3b65fdba0 100644 ---- a/whisper/whisperv6/envelope.go -+++ b/whisper/whisperv6/envelope.go -@@ -62,9 +62,9 @@ func (e *Envelope) rlpWithoutNonce() []byte { - - // NewEnvelope wraps a Whisper message with expiration and destination data - // included into an envelope for network forwarding. --func NewEnvelope(ttl uint32, topic TopicType, msg *sentMessage) *Envelope { -+func NewEnvelope(ttl uint32, topic TopicType, msg *sentMessage, now time.Time) *Envelope { - env := Envelope{ -- Expiry: uint32(time.Now().Add(time.Second * time.Duration(ttl)).Unix()), -+ Expiry: uint32(now.Add(time.Second * time.Duration(ttl)).Unix()), - TTL: ttl, - Topic: topic, - Data: msg.Raw, -diff --git a/whisper/whisperv6/message.go b/whisper/whisperv6/message.go -index 2d4e86244..a12b445e2 100644 ---- a/whisper/whisperv6/message.go -+++ b/whisper/whisperv6/message.go -@@ -27,6 +27,7 @@ import ( - "errors" - mrand "math/rand" - "strconv" -+ "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -@@ -234,7 +235,7 @@ func generateSecureRandomData(length int) ([]byte, error) { - } - - // Wrap bundles the message into an Envelope to transmit over the network. 
--func (msg *sentMessage) Wrap(options *MessageParams) (envelope *Envelope, err error) { -+func (msg *sentMessage) Wrap(options *MessageParams, now time.Time) (envelope *Envelope, err error) { - if options.TTL == 0 { - options.TTL = DefaultTTL - } -@@ -254,7 +255,7 @@ func (msg *sentMessage) Wrap(options *MessageParams) (envelope *Envelope, err er - return nil, err - } - -- envelope = NewEnvelope(options.TTL, options.Topic, msg) -+ envelope = NewEnvelope(options.TTL, options.Topic, msg, now) - if err = envelope.Seal(options); err != nil { - return nil, err - } -diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go -index 8d56ece64..697f0ecb8 100644 ---- a/whisper/whisperv6/whisper.go -+++ b/whisper/whisperv6/whisper.go -@@ -93,4 +92,6 @@ type Whisper struct { - envelopeFeed event.Feed -+ -+ timeSource func() time.Time // source of time for whisper - } - - // New creates a Whisper client ready to communicate through the Ethereum P2P network. -@@ -110,6 +112,7 @@ func New(cfg *Config) *Whisper { - p2pMsgQueue: make(chan *Envelope, messageQueueLimit), - quit: make(chan struct{}), - syncAllowance: DefaultSyncAllowance, -+ timeSource: cfg.TimeSource, - } - - whisper.filters = NewFilters(whisper) -@@ -215,6 +218,11 @@ func (whisper *Whisper) APIs() []rpc.API { - } - } - -+// GetCurrentTime returns current time. -+func (whisper *Whisper) GetCurrentTime() time.Time { -+ return whisper.timeSource() -+} -+ - // RegisterServer registers MailServer interface. - // MailServer will process all the incoming messages with p2pRequestCode. - func (whisper *Whisper) RegisterServer(server MailServer) { -@@ -829,7 +837,7 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { - // appropriate time-stamp. In case of error, connection should be dropped. - // param isP2P indicates whether the message is peer-to-peer (should not be forwarded). 
- func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { -- now := uint32(time.Now().Unix()) -+ now := uint32(whisper.timeSource().Unix()) - sent := envelope.Expiry - envelope.TTL - - if sent > now { -@@ -988,7 +996,7 @@ func (whisper *Whisper) expire() { - whisper.statsMu.Lock() - defer whisper.statsMu.Unlock() - whisper.stats.reset() -- now := uint32(time.Now().Unix()) -+ now := uint32(whisper.timeSource().Unix()) - for expiry, hashSet := range whisper.expirations { - if expiry < now { - // Dump all expired messages and remove timestamp diff --git a/_assets/patches/geth/0028-p2p-watchdog.patch b/_assets/patches/geth/0028-p2p-watchdog.patch deleted file mode 100644 index 080dcf9e33..0000000000 --- a/_assets/patches/geth/0028-p2p-watchdog.patch +++ /dev/null @@ -1,119 +0,0 @@ -diff --git c/p2p/peer.go w/p2p/peer.go -index 73e33418e..322268b28 100644 ---- c/p2p/peer.go -+++ w/p2p/peer.go -@@ -22,6 +22,7 @@ import ( - "net" - "sort" - "sync" -+ "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" -@@ -38,7 +39,10 @@ const ( - - snappyProtocolVersion = 5 - -- pingInterval = 15 * time.Second -+ pingInterval = 1 * time.Second -+ // watchdogInterval intentionally lower than ping interval. -+ // this way we reduce potential flaky window size. -+ watchdogInterval = 200 * time.Millisecond - ) - - const ( -@@ -100,6 +104,7 @@ type Peer struct { - log log.Logger - created mclock.AbsTime - -+ flaky int32 - wg sync.WaitGroup - protoErr chan error - closed chan struct{} -@@ -118,6 +123,11 @@ func NewPeer(id discover.NodeID, name string, caps []Cap) *Peer { - return peer - } - -+// IsFlaky returns true if there was no incoming traffic recently. -+func (p *Peer) IsFlaky() bool { -+ return atomic.LoadInt32(&p.flaky) == 1 -+} -+ - // ID returns the node's public key. 
- func (p *Peer) ID() discover.NodeID { - return p.rw.id -@@ -188,8 +198,10 @@ func (p *Peer) run() (remoteRequested bool, err error) { - readErr = make(chan error, 1) - reason DiscReason // sent to the peer - ) -- p.wg.Add(2) -- go p.readLoop(readErr) -+ p.wg.Add(3) -+ reads := make(chan struct{}, 10) // channel for reads -+ go p.readLoop(readErr, reads) -+ go p.watchdogLoop(reads) - go p.pingLoop() - - // Start all protocol handlers. -@@ -248,7 +260,24 @@ func (p *Peer) pingLoop() { - } - } - --func (p *Peer) readLoop(errc chan<- error) { -+func (p *Peer) watchdogLoop(reads <-chan struct{}) { -+ defer p.wg.Done() -+ hb := time.NewTimer(watchdogInterval) -+ defer hb.Stop() -+ for { -+ select { -+ case <-reads: -+ atomic.StoreInt32(&p.flaky, 0) -+ case <-hb.C: -+ atomic.StoreInt32(&p.flaky, 1) -+ case <-p.closed: -+ return -+ } -+ hb.Reset(watchdogInterval) -+ } -+} -+ -+func (p *Peer) readLoop(errc chan<- error, reads chan<- struct{}) { - defer p.wg.Done() - for { - msg, err := p.rw.ReadMsg() -@@ -261,6 +290,7 @@ func (p *Peer) readLoop(errc chan<- error) { - errc <- err - return - } -+ reads <- struct{}{} - } - } - -diff --git c/p2p/server.go w/p2p/server.go -index c41d1dc15..04c6f7147 100644 ---- c/p2p/server.go -+++ w/p2p/server.go -@@ -45,7 +45,7 @@ const ( - - // Maximum time allowed for reading a complete message. - // This is effectively the amount of time a connection can be idle. -- frameReadTimeout = 30 * time.Second -+ frameReadTimeout = 10 * time.Second - - // Maximum amount of time allowed for writing a complete message. - frameWriteTimeout = 20 * time.Second -diff --git c/whisper/whisperv6/peer.go w/whisper/whisperv6/peer.go -index 427127290..c30e92d1c 100644 ---- c/whisper/whisperv6/peer.go -+++ w/whisper/whisperv6/peer.go -@@ -187,6 +187,10 @@ func (peer *Peer) expire() { - // broadcast iterates over the collection of envelopes and transmits yet unknown - // ones over the network. 
- func (peer *Peer) broadcast() error { -+ if peer.peer.IsFlaky() { -+ log.Trace("Waiting for a peer to restore communication", "ID", peer.peer.ID()) -+ return nil -+ } - envelopes := peer.host.Envelopes() - bundle := make([]*Envelope, 0, len(envelopes)) - for _, envelope := range envelopes { diff --git a/_assets/patches/geth/0029-node-api-gauge-metric.patch b/_assets/patches/geth/0029-node-api-gauge-metric.patch deleted file mode 100644 index 0c21a5a75b..0000000000 --- a/_assets/patches/geth/0029-node-api-gauge-metric.patch +++ /dev/null @@ -1,28 +0,0 @@ -diff --git a/node/api.go b/node/api.go -index a3b8bc0b..a151147c 100644 ---- a/node/api.go -+++ b/node/api.go -@@ -313,6 +313,11 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) { - "Overall": float64(metric.Count()), - } - -+ case metrics.Gauge: -+ root[name] = map[string]interface{}{ -+ "Value": float64(metric.Value()), -+ } -+ - case metrics.Meter: - root[name] = map[string]interface{}{ - "AvgRate01Min": metric.Rate1(), -@@ -348,6 +353,11 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) { - "Overall": float64(metric.Count()), - } - -+ case metrics.Gauge: -+ root[name] = map[string]interface{}{ -+ "Value": float64(metric.Value()), -+ } -+ - case metrics.Meter: - root[name] = map[string]interface{}{ - "Avg01Min": format(metric.Rate1()*60, metric.Rate1()), diff --git a/_assets/patches/geth/0032-send-mailserver-request-completed-code.patch b/_assets/patches/geth/0032-send-mailserver-request-completed-code.patch deleted file mode 100644 index 78774ac661..0000000000 --- a/_assets/patches/geth/0032-send-mailserver-request-completed-code.patch +++ /dev/null @@ -1,110 +0,0 @@ -diff --git a/whisper/whisperv6/doc.go b/whisper/whisperv6/doc.go -index 4bbf554..2fcc9e6 100644 ---- a/whisper/whisperv6/doc.go -+++ b/whisper/whisperv6/doc.go -@@ -44,13 +44,14 @@ const ( - ProtocolName = "shh" // Nickname of the protocol in geth - - // whisper protocol message codes, 
according to EIP-627 -- statusCode = 0 // used by whisper protocol -- messagesCode = 1 // normal whisper message -- powRequirementCode = 2 // PoW requirement -- bloomFilterExCode = 3 // bloom filter exchange -- p2pRequestCode = 126 // peer-to-peer message, used by Dapp protocol -- p2pMessageCode = 127 // peer-to-peer message (to be consumed by the peer, but not forwarded any further) -- NumberOfMessageCodes = 128 -+ statusCode = 0 // used by whisper protocol -+ messagesCode = 1 // normal whisper message -+ powRequirementCode = 2 // PoW requirement -+ bloomFilterExCode = 3 // bloom filter exchange -+ p2pRequestCompleteCode = 125 // peer-to-peer message, used by Dapp protocol -+ p2pRequestCode = 126 // peer-to-peer message, used by Dapp protocol -+ p2pMessageCode = 127 // peer-to-peer message (to be consumed by the peer, but not forwarded any further) -+ NumberOfMessageCodes = 128 - - SizeMask = byte(3) // mask used to extract the size of payload size field from the flags - signatureFlag = byte(4) -diff --git a/whisper/whisperv6/events.go b/whisper/whisperv6/events.go -index e03ec9d..1665539 100644 ---- a/whisper/whisperv6/events.go -+++ b/whisper/whisperv6/events.go -@@ -1,23 +1,27 @@ - package whisperv6 - - import ( -- "github.com/ethereum/go-ethereum/common" -- "github.com/ethereum/go-ethereum/p2p/discover" -+ "github.com/ethereum/go-ethereum/common" -+ "github.com/ethereum/go-ethereum/p2p/discover" - ) - - // EventType used to define known envelope events. - type EventType string - - const ( -- // EventEnvelopeSent fires when envelope was sent to a peer. -- EventEnvelopeSent EventType = "envelope.sent" -- // EventEnvelopeExpired fires when envelop expired -- EventEnvelopeExpired EventType = "envelope.expired" -+ // EventEnvelopeSent fires when envelope was sent to a peer. 
-+ EventEnvelopeSent EventType = "envelope.sent" -+ // EventEnvelopeExpired fires when envelop expired -+ EventEnvelopeExpired EventType = "envelope.expired" -+ // EventMailServerRequestCompleted fires after mailserver sends all the requested messages -+ EventMailServerRequestCompleted EventType = "mailserver.request.completed" -+ // EventMailServerRequestExpired fires after mailserver the request TTL ends -+ EventMailServerRequestExpired EventType = "mailserver.request.expired" - ) - - // EnvelopeEvent used for envelopes events. - type EnvelopeEvent struct { -- Event EventType -- Hash common.Hash -- Peer discover.NodeID -+ Event EventType -+ Hash common.Hash -+ Peer discover.NodeID - } -diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go -index 697f0ec..4a7b006 100644 ---- a/whisper/whisperv6/whisper.go -+++ b/whisper/whisperv6/whisper.go -@@ -378,6 +378,15 @@ func (whisper *Whisper) RequestHistoricMessages(peerID []byte, envelope *Envelop - return p2p.Send(p.ws, p2pRequestCode, envelope) - } - -+func (whisper *Whisper) SendHistoricMessageResponse(peer *Peer, requestID common.Hash) error { -+ size, r, err := rlp.EncodeToReader(requestID) -+ if err != nil { -+ return err -+ } -+ -+ return peer.ws.WriteMsg(p2p.Msg{Code: p2pRequestCompleteCode, Size: uint32(size), Payload: r}) -+} -+ - // SendP2PMessage sends a peer-to-peer message to a specific peer. 
- func (whisper *Whisper) SendP2PMessage(peerID []byte, envelope *Envelope) error { - p, err := whisper.getPeer(peerID) -@@ -821,8 +830,22 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { - log.Warn("failed to decode p2p request message, peer will be disconnected", "peer", p.peer.ID(), "err", err) - return errors.New("invalid p2p request") - } -+ - whisper.mailServer.DeliverMail(p, &request) - } -+ case p2pRequestCompleteCode: -+ if p.trusted { -+ var requestID common.Hash -+ if err := packet.Decode(&requestID); err != nil { -+ log.Warn("failed to decode response message, peer will be disconnected", "peer", p.peer.ID(), "err", err) -+ return errors.New("invalid request response message") -+ } -+ -+ whisper.envelopeFeed.Send(EnvelopeEvent{ -+ Hash: requestID, -+ Event: EventMailServerRequestCompleted, -+ }) -+ } - default: - // New message types might be implemented in the future versions of Whisper. - // For forward compatibility, just ignore. diff --git a/_assets/patches/geth/0033-mailserver-response-contains-cursor.patch b/_assets/patches/geth/0033-mailserver-response-contains-cursor.patch deleted file mode 100644 index d6e97c86d6..0000000000 --- a/_assets/patches/geth/0033-mailserver-response-contains-cursor.patch +++ /dev/null @@ -1,134 +0,0 @@ -diff --git a/whisper/whisperv6/events.go b/whisper/whisperv6/events.go -index 1665539..fe7570e 100644 ---- a/whisper/whisperv6/events.go -+++ b/whisper/whisperv6/events.go -@@ -13,10 +13,14 @@ const ( - EventEnvelopeSent EventType = "envelope.sent" - // EventEnvelopeExpired fires when envelop expired - EventEnvelopeExpired EventType = "envelope.expired" -+ // EventEnvelopeAvailable fires when envelop is available for filters -+ EventEnvelopeAvailable EventType = "envelope.available" - // EventMailServerRequestCompleted fires after mailserver sends all the requested messages - EventMailServerRequestCompleted EventType = "mailserver.request.completed" - // EventMailServerRequestExpired 
fires after mailserver the request TTL ends - EventMailServerRequestExpired EventType = "mailserver.request.expired" -+ // EventMailServerEnvelopeArchived fires after an envelope has been archived -+ EventMailServerEnvelopeArchived EventType = "mailserver.envelope.archived" - ) - - // EnvelopeEvent used for envelopes events. -@@ -24,4 +28,5 @@ type EnvelopeEvent struct { - Event EventType - Hash common.Hash - Peer discover.NodeID -+ Data interface{} - } -diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go -index 91d4482..6a937a2 100644 ---- a/whisper/whisperv6/whisper.go -+++ b/whisper/whisperv6/whisper.go -@@ -49,6 +49,12 @@ type Statistics struct { - totalMessagesCleared int - } - -+// MailServerResponse is the response payload sent by the mailserver -+type MailServerResponse struct { -+ LastEnvelopeHash common.Hash -+ Cursor []byte -+} -+ - const ( - maxMsgSizeIdx = iota // Maximal message length allowed by the whisper node - overflowIdx // Indicator of message queue overflow -@@ -378,8 +384,8 @@ func (whisper *Whisper) RequestHistoricMessages(peerID []byte, envelope *Envelop - return p2p.Send(p.ws, p2pRequestCode, envelope) - } - --func (whisper *Whisper) SendHistoricMessageResponse(peer *Peer, requestID common.Hash) error { -- size, r, err := rlp.EncodeToReader(requestID) -+func (whisper *Whisper) SendHistoricMessageResponse(peer *Peer, payload []byte) error { -+ size, r, err := rlp.EncodeToReader(payload) - if err != nil { - return err - } -@@ -835,15 +841,49 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { - } - case p2pRequestCompleteCode: - if p.trusted { -- var requestID common.Hash -- if err := packet.Decode(&requestID); err != nil { -+ var payload []byte -+ if err := packet.Decode(&payload); err != nil { - log.Warn("failed to decode response message, peer will be disconnected", "peer", p.peer.ID(), "err", err) - return errors.New("invalid request response message") - } - -+ // check if payload is -+ 
// - requestID or -+ // - requestID + lastEnvelopeHash or -+ // - requestID + lastEnvelopeHash + cursor -+ // requestID is the hash of the request envelope. -+ // lastEnvelopeHash is the last envelope sent by the mail server -+ // cursor is the db key, 36 bytes: 4 for the timestamp + 32 for the envelope hash. -+ // length := len(payload) -+ -+ if len(payload) < common.HashLength || len(payload) > common.HashLength*3+4 { -+ log.Warn("invalid response message, peer will be disconnected", "peer", p.peer.ID(), "err", err, "payload size", len(payload)) -+ return errors.New("invalid response size") -+ } -+ -+ var ( -+ requestID common.Hash -+ lastEnvelopeHash common.Hash -+ cursor []byte -+ ) -+ -+ requestID = common.BytesToHash(payload[:common.HashLength]) -+ -+ if len(payload) >= common.HashLength*2 { -+ lastEnvelopeHash = common.BytesToHash(payload[common.HashLength : common.HashLength*2]) -+ } -+ -+ if len(payload) >= common.HashLength*2+36 { -+ cursor = payload[common.HashLength*2 : common.HashLength*2+36] -+ } -+ - whisper.envelopeFeed.Send(EnvelopeEvent{ - Hash: requestID, - Event: EventMailServerRequestCompleted, -+ Data: &MailServerResponse{ -+ LastEnvelopeHash: lastEnvelopeHash, -+ Cursor: cursor, -+ }, - }) - } - default: -@@ -927,6 +967,10 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { - whisper.postEvent(envelope, isP2P) // notify the local node about the new message - if whisper.mailServer != nil { - whisper.mailServer.Archive(envelope) -+ whisper.envelopeFeed.Send(EnvelopeEvent{ -+ Hash: envelope.Hash(), -+ Event: EventMailServerEnvelopeArchived, -+ }) - } - } - return true, nil -@@ -985,9 +1029,17 @@ func (whisper *Whisper) processQueue() { - - case e = <-whisper.messageQueue: - whisper.filters.NotifyWatchers(e, false) -+ whisper.envelopeFeed.Send(EnvelopeEvent{ -+ Hash: e.Hash(), -+ Event: EventEnvelopeAvailable, -+ }) - - case e = <-whisper.p2pMsgQueue: - whisper.filters.NotifyWatchers(e, true) -+ 
whisper.envelopeFeed.Send(EnvelopeEvent{ -+ Hash: e.Hash(), -+ Event: EventEnvelopeAvailable, -+ }) - } - } - } diff --git a/_assets/patches/geth/0035-add_goroutines_metrics.patch b/_assets/patches/geth/0035-add_goroutines_metrics.patch deleted file mode 100644 index 7516a0e8d8..0000000000 --- a/_assets/patches/geth/0035-add_goroutines_metrics.patch +++ /dev/null @@ -1,23 +0,0 @@ -diff --git a/metrics/metrics.go b/metrics/metrics.go -index 2a2b804e..d4d703df 100644 ---- a/metrics/metrics.go -+++ b/metrics/metrics.go -@@ -56,6 +56,7 @@ func CollectProcessMetrics(refresh time.Duration) { - memFrees := GetOrRegisterMeter("system/memory/frees", DefaultRegistry) - memInuse := GetOrRegisterMeter("system/memory/inuse", DefaultRegistry) - memPauses := GetOrRegisterMeter("system/memory/pauses", DefaultRegistry) -+ goroutines := GetOrRegisterGauge("system/goroutines", DefaultRegistry) - - var diskReads, diskReadBytes, diskWrites, diskWriteBytes Meter - var diskReadBytesCounter, diskWriteBytesCounter Counter -@@ -89,6 +90,10 @@ func CollectProcessMetrics(refresh time.Duration) { - diskReadBytesCounter.Inc(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) - diskWriteBytesCounter.Inc(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) - } -+ -+ goroutines.Update(int64(runtime.NumGoroutine())) -+ - time.Sleep(refresh) - } -+ - } diff --git a/_assets/patches/geth/0037-whisper-metrics.patch b/_assets/patches/geth/0037-whisper-metrics.patch deleted file mode 100644 index ffbec149f7..0000000000 --- a/_assets/patches/geth/0037-whisper-metrics.patch +++ /dev/null @@ -1,88 +0,0 @@ -diff --git a/whisper/whisperv6/metrics.go b/whisper/whisperv6/metrics.go -new file mode 100644 -index 00000000..b0e899da ---- /dev/null -+++ b/whisper/whisperv6/metrics.go -@@ -0,0 +1,16 @@ -+package whisperv6 -+ -+import "github.com/ethereum/go-ethereum/metrics" -+ -+var ( -+ envelopeAddedCounter = metrics.NewRegisteredCounter("whisper/envelopeAdded", nil) -+ 
envelopeNewAddedCounter = metrics.NewRegisteredCounter("whisper/envelopeNewAdded", nil) -+ envelopeClearedCounter = metrics.NewRegisteredCounter("whisper/envelopeCleared", nil) -+ envelopeErrFromFutureCounter = metrics.NewRegisteredCounter("whisper/envelopeErrFromFuture", nil) -+ envelopeErrVeryOldCounter = metrics.NewRegisteredCounter("whisper/envelopeErrVeryOld", nil) -+ envelopeErrExpiredCounter = metrics.NewRegisteredCounter("whisper/envelopeErrExpired", nil) -+ envelopeErrOversizedCounter = metrics.NewRegisteredCounter("whisper/envelopeErrOversized", nil) -+ envelopeErrLowPowCounter = metrics.NewRegisteredCounter("whisper/envelopeErrLowPow", nil) -+ envelopeErrNoBloomMatchCounter = metrics.NewRegisteredCounter("whisper/envelopeErrNoBloomMatch", nil) -+ envelopeSizeMeter = metrics.NewRegisteredMeter("whisper/envelopeSize", nil) -+) -diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go -index 482457cb..002aaadf 100644 ---- a/whisper/whisperv6/whisper.go -+++ b/whisper/whisperv6/whisper.go -@@ -894,8 +894,11 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { - now := uint32(whisper.timeSource().Unix()) - sent := envelope.Expiry - envelope.TTL - -+ envelopeAddedCounter.Inc(1) -+ - if sent > now { - if sent-DefaultSyncAllowance > now { -+ envelopeErrFromFutureCounter.Inc(1) - return false, fmt.Errorf("envelope created in the future [%x]", envelope.Hash()) - } - // recalculate PoW, adjusted for the time difference, plus one second for latency -@@ -904,13 +907,16 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { - - if envelope.Expiry < now { - if envelope.Expiry+DefaultSyncAllowance*2 < now { -+ envelopeErrVeryOldCounter.Inc(1) - return false, fmt.Errorf("very old message") - } - log.Debug("expired envelope dropped", "hash", envelope.Hash().Hex()) -+ envelopeErrExpiredCounter.Inc(1) - return false, nil // drop envelope without error - } - - if uint32(envelope.size()) > 
whisper.MaxMessageSize() { -+ envelopeErrOversizedCounter.Inc(1) - return false, fmt.Errorf("huge messages are not allowed [%x]", envelope.Hash()) - } - -@@ -919,6 +925,7 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { - // in this case the previous value is retrieved by MinPowTolerance() - // for a short period of peer synchronization. - if envelope.PoW() < whisper.MinPowTolerance() { -+ envelopeErrLowPowCounter.Inc(1) - return false, fmt.Errorf("envelope with low PoW received: PoW=%f, hash=[%v]", envelope.PoW(), envelope.Hash().Hex()) - } - } -@@ -928,6 +935,7 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { - // in this case the previous value is retrieved by BloomFilterTolerance() - // for a short period of peer synchronization. - if !BloomFilterMatch(whisper.BloomFilterTolerance(), envelope.Bloom()) { -+ envelopeErrNoBloomMatchCounter.Inc(1) - return false, fmt.Errorf("envelope does not match bloom filter, hash=[%v], bloom: \n%x \n%x \n%x", - envelope.Hash().Hex(), whisper.BloomFilter(), envelope.Bloom(), envelope.Topic) - } -@@ -952,6 +960,8 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { - log.Trace("whisper envelope already cached", "hash", envelope.Hash().Hex()) - } else { - log.Trace("cached whisper envelope", "hash", envelope.Hash().Hex()) -+ envelopeNewAddedCounter.Inc(1) -+ envelopeSizeMeter.Mark(int64(envelope.size())) - whisper.statsMu.Lock() - whisper.stats.memoryUsed += envelope.size() - whisper.statsMu.Unlock() -@@ -1053,6 +1063,7 @@ func (whisper *Whisper) expire() { - hashSet.Each(func(v interface{}) bool { - sz := whisper.envelopes[v.(common.Hash)].size() - delete(whisper.envelopes, v.(common.Hash)) -+ envelopeClearedCounter.Inc(1) - whisper.envelopeFeed.Send(EnvelopeEvent{ - Hash: v.(common.Hash), - Event: EventEnvelopeExpired, diff --git a/_assets/patches/geth/README.md b/_assets/patches/geth/README.md deleted file mode 100644 index 
e59c963180..0000000000 --- a/_assets/patches/geth/README.md +++ /dev/null @@ -1,35 +0,0 @@ -Status Patches for geth (go-ethereum) -===================================== - -Status-go uses Status' fork of [go-ethereum](https://github.com/status-im/go-ethereum) as its dependency. As any other Go dependency `go-ethereum` code is vendored and stored in `vendor/` folder. - -The reason why we use a fork is because we introduced a couple of differences that make it work better on mobile devices but not necessarily are suitable for all cases. - -# Creating patches - -Instructions for creating a patch from the command line: - -1. Do changes in `vendor/github.com/ethereum/go-ethereum/`, -1. Go to the root `status-go` directory, -1. Create a patch `git diff --relative=vendor/github.com/ethereum/go-ethereum > _assets/patches/geth/0000-name-of-the-patch.patch` -1. Commit changes. - -# Updating fork with a patch - -To make the patch available for everyone, it needs to be applied and pushed to remote git repository. - -1. Clone [github.com/status-im/go-ethereum](https://github.com/status-im/go-ethereum) to `$GOPATH` and pull all changes, -1. From `github.com/status-im/status-go` run `GETH_VERSION=v1.8.14 ./_assets/patches/update-fork-with-patches.sh`, -1. Go to `github.com/status-im/go-ethereum` and verify if the latest commit and tag `v1.8.14` are correct. If so, push changes to the upstream: -``` -$ git push origin patched/v1.8.14 -$ git push -f v1.8.14 -``` - -# Testing patches - -Assumming that your patch is included in the fork and the updated tag is pushed: - -1. Make sure that the `vendor/` directory is clean: `make dep-ensure`, -1. Run `make statusgo` to compile `statusd`, -1. Run `make test` to run unit tests. 
diff --git a/_assets/patches/update-fork-with-patches.sh b/_assets/patches/update-fork-with-patches.sh deleted file mode 100755 index abadb53cd5..0000000000 --- a/_assets/patches/update-fork-with-patches.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -if [ -z "$GETH_VERSION" ]; then - echo "GETH_VERSION is undefined" - exit 1 -fi - -pushd $GOPATH/src/github.com/status-im/go-ethereum -git fetch -git checkout -b patched/$GETH_VERSION origin/patched/$GETH_VERSION || git checkout patched/$GETH_VERSION -git pull -popd - -cp -R ./_assets/patches $GOPATH/src/github.com/status-im/go-ethereum - -pushd $GOPATH/src/github.com/status-im/go-ethereum -./patches/patcher -b . -rm -r ./patches - -git commit -am "add new patches to $GETH_VERSION" - -# remove already existing tag as we will replace it with a patched commit -git tag -d $GETH_VERSION -git tag -a $GETH_VERSION -m "Patched release $GETH_VERSION" - -echo "Now, go to $GOPATH/src/github.com/status-im/go-ethereum and check if the latest comment in patched/$GETH_VERSION branch is correct and if tag $GETH_VERSION is updated. If they both are ok, push changes rememberting to push the tag with -f option as it already exists." 
From d6f59a2fe12945eeb2c1cf7fe245e10c199b3181 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Thu, 27 Sep 2018 15:51:34 +0200 Subject: [PATCH 6/7] update to 1.8.16 --- Gopkg.lock | 23 +- .../go-ethereum/.github/no-response.yml | 4 +- .../ethereum/go-ethereum/.travis.yml | 29 +- .../ethereum/go-ethereum/Dockerfile | 2 +- .../ethereum/go-ethereum/Dockerfile.alltools | 2 +- .../github.com/ethereum/go-ethereum/README.md | 2 +- .../patches/0000-accounts-hd-keys.patch | 358 ++++++++++++++++ .../0014-whisperv6-notifications.patch | 208 ++++++++++ .../patches/0016-fix-leveldb-issue.patch | 131 ++++++ ...0021-backends-simulated-chain-signer.patch | 14 + .../patches/0022-node-attach-public.patch | 98 +++++ .../0023-extract-personal-sign-api.patch | 61 +++ .../patches/0025-whisper-confirmations.patch | 89 ++++ .../patches/0027-whisper-time-source.patch | 135 ++++++ .../_assets/patches/0028-p2p-watchdog.patch | 119 ++++++ .../patches/0029-node-api-gauge-metric.patch | 28 ++ ...nd-mailserver-request-completed-code.patch | 110 +++++ ...-mailserver-response-contains-cursor.patch | 134 ++++++ .../patches/0035-add_goroutines_metrics.patch | 23 ++ .../patches/0037-whisper-metrics.patch | 88 ++++ .../go-ethereum/_assets/patches/README.md | 38 ++ .../go-ethereum/_assets/patches/patcher | 189 +++++++++ .../accounts/abi/bind/backends/simulated.go | 2 +- .../ethereum/go-ethereum/accounts/abi/type.go | 7 +- .../go-ethereum/accounts/abi/unpack.go | 28 +- .../go-ethereum/accounts/keystore/key.go | 18 +- .../go-ethereum/accounts/keystore/keystore.go | 2 +- .../accounts/keystore/keystore_passphrase.go | 27 +- .../ethereum/go-ethereum/appveyor.yml | 4 +- .../ethereum/go-ethereum/cmd/clef/main.go | 6 +- .../ethereum/go-ethereum/cmd/ethkey/README.md | 18 +- .../ethereum/go-ethereum/cmd/evm/disasm.go | 2 +- .../ethereum/go-ethereum/cmd/evm/runner.go | 25 +- .../ethereum/go-ethereum/cmd/faucet/faucet.go | 115 +++--- .../ethereum/go-ethereum/cmd/geth/chaincmd.go | 4 +- 
.../ethereum/go-ethereum/cmd/geth/config.go | 3 + .../ethereum/go-ethereum/cmd/geth/main.go | 3 + .../ethereum/go-ethereum/cmd/geth/usage.go | 2 + .../cmd/puppeth/wizard_dashboard.go | 2 +- .../go-ethereum/cmd/puppeth/wizard_network.go | 4 +- .../ethereum/go-ethereum/cmd/swarm/access.go | 48 ++- .../ethereum/go-ethereum/cmd/swarm/hash.go | 2 +- .../ethereum/go-ethereum/cmd/swarm/main.go | 25 ++ .../go-ethereum/cmd/swarm/swarm-smoke/main.go | 6 +- .../cmd/swarm/swarm-smoke/upload_and_sync.go | 18 +- .../ethereum/go-ethereum/cmd/utils/flags.go | 32 +- .../ethereum/go-ethereum/common/bytes.go | 2 +- .../ethereum/go-ethereum/common/format.go | 42 ++ .../ethereum/go-ethereum/common/types.go | 2 +- .../go-ethereum/consensus/clique/clique.go | 57 +-- .../go-ethereum/consensus/clique/snapshot.go | 16 +- .../go-ethereum/consensus/consensus.go | 2 +- .../go-ethereum/consensus/ethash/consensus.go | 142 ++++--- .../go-ethereum/consensus/ethash/ethash.go | 4 +- .../ethereum/go-ethereum/core/blockchain.go | 77 ++-- .../go-ethereum/core/chain_indexer.go | 2 +- .../ethereum/go-ethereum/core/chain_makers.go | 12 +- .../ethereum/go-ethereum/core/genesis.go | 2 +- .../ethereum/go-ethereum/core/headerchain.go | 14 +- .../go-ethereum/core/state/state_object.go | 44 +- .../go-ethereum/core/state/statedb.go | 38 +- .../go-ethereum/core/state_processor.go | 2 +- .../ethereum/go-ethereum/core/tx_pool.go | 10 +- .../ethereum/go-ethereum/core/types/bloom9.go | 4 +- .../ethereum/go-ethereum/core/vm/evm.go | 43 +- .../ethereum/go-ethereum/core/vm/gas_table.go | 77 +++- .../go-ethereum/core/vm/instructions.go | 2 +- .../ethereum/go-ethereum/core/vm/interface.go | 4 +- .../go-ethereum/core/vm/interpreter.go | 30 +- .../ethereum/go-ethereum/core/vm/noop.go | 70 ---- .../secp256k1/libsecp256k1/src/secp256k1.c | 2 - .../ethereum/go-ethereum/eth/api_tracer.go | 2 +- .../ethereum/go-ethereum/eth/backend.go | 66 ++- .../ethereum/go-ethereum/eth/config.go | 5 + .../go-ethereum/eth/downloader/api.go | 4 
+- .../go-ethereum/eth/downloader/queue.go | 34 +- .../go-ethereum/eth/fetcher/fetcher.go | 8 +- .../go-ethereum/eth/tracers/tracer.go | 8 +- .../ethereum/go-ethereum/ethdb/database.go | 15 +- .../go-ethereum/internal/build/util.go | 4 +- .../ethereum/go-ethereum/les/commons.go | 24 +- .../ethereum/go-ethereum/les/distributor.go | 4 +- .../ethereum/go-ethereum/les/odr_requests.go | 16 +- .../ethereum/go-ethereum/les/retrieve.go | 13 +- .../ethereum/go-ethereum/light/lightchain.go | 18 +- .../ethereum/go-ethereum/light/odr.go | 16 +- .../ethereum/go-ethereum/light/odr_util.go | 2 +- .../ethereum/go-ethereum/light/postprocess.go | 37 +- .../ethereum/go-ethereum/metrics/ewma.go | 3 - .../ethereum/go-ethereum/metrics/meter.go | 43 +- .../ethereum/go-ethereum/miner/miner.go | 4 +- .../ethereum/go-ethereum/miner/worker.go | 72 ++-- .../ethereum/go-ethereum/mobile/ethclient.go | 8 +- .../ethereum/go-ethereum/mobile/shhclient.go | 195 +++++++++ .../ethereum/go-ethereum/mobile/types.go | 93 +++++ .../go-ethereum/p2p/discover/table.go | 2 +- .../ethereum/go-ethereum/p2p/discv5/net.go | 2 +- .../ethereum/go-ethereum/p2p/discv5/table.go | 2 +- .../ethereum/go-ethereum/params/config.go | 68 +++- .../go-ethereum/params/protocol_params.go | 23 +- .../ethereum/go-ethereum/params/version.go | 2 +- .../ethereum/go-ethereum/rlp/typecache.go | 2 +- .../ethereum/go-ethereum/rpc/client.go | 1 + .../ethereum/go-ethereum/rpc/websocket.go | 27 +- .../ethereum/go-ethereum/swarm/api/act.go | 149 +++++-- .../ethereum/go-ethereum/swarm/api/api.go | 7 - .../ethereum/go-ethereum/swarm/api/config.go | 4 +- .../ethereum/go-ethereum/swarm/api/encrypt.go | 22 +- .../go-ethereum/swarm/api/http/server.go | 2 +- .../go-ethereum/swarm/api/manifest.go | 66 +-- .../go-ethereum/swarm/network/discovery.go | 76 ++-- .../go-ethereum/swarm/network/fetcher.go | 305 ++++++++++++++ .../go-ethereum/swarm/network/hive.go | 71 +--- .../go-ethereum/swarm/network/kademlia.go | 154 +++---- 
.../network/priorityqueue/priorityqueue.go | 38 +- .../go-ethereum/swarm/network/protocol.go | 51 +-- .../swarm/network/simulation/simulation.go | 18 +- .../swarm/network/stream/delivery.go | 231 +++++------ .../swarm/network/stream/messages.go | 87 ++-- .../go-ethereum/swarm/network/stream/peer.go | 53 ++- .../swarm/network/stream/stream.go | 48 +-- .../swarm/network/stream/syncer.go | 94 ++--- .../ethereum/go-ethereum/swarm/pot/doc.go | 4 +- .../ethereum/go-ethereum/swarm/pss/pss.go | 45 +- .../go-ethereum/swarm/storage/chunker.go | 102 +++-- .../go-ethereum/swarm/storage/chunkstore.go | 69 ---- .../go-ethereum/swarm/storage/dbapi.go | 54 --- .../swarm/storage/encryption/encryption.go | 126 ++++-- .../go-ethereum/swarm/storage/hasherstore.go | 194 +++++---- .../go-ethereum/swarm/storage/ldbstore.go | 343 ++++++++++------ .../go-ethereum/swarm/storage/localstore.go | 110 ++--- .../go-ethereum/swarm/storage/memstore.go | 85 +--- .../go-ethereum/swarm/storage/mru/handler.go | 47 ++- .../go-ethereum/swarm/storage/mru/lookup.go | 6 +- .../go-ethereum/swarm/storage/mru/metadata.go | 6 +- .../go-ethereum/swarm/storage/mru/request.go | 2 +- .../swarm/storage/mru/signedupdate.go | 9 +- .../go-ethereum/swarm/storage/mru/testutil.go | 21 +- .../swarm/storage/mru/updateheader.go | 4 +- .../go-ethereum/swarm/storage/netstore.go | 384 +++++++++++------- .../go-ethereum/swarm/storage/pyramid.go | 77 ++-- .../go-ethereum/swarm/storage/types.go | 191 +++++---- .../ethereum/go-ethereum/swarm/swarm.go | 52 +-- .../{storage/common.go => testutil/file.go} | 48 +-- .../go-ethereum/swarm/testutil/http.go | 4 +- .../go-ethereum/swarm/version/version.go | 2 +- .../go-ethereum/tests/block_test_util.go | 2 +- .../go-ethereum/tests/state_test_util.go | 13 +- .../ethereum/go-ethereum/trie/sync.go | 6 +- .../whisper/mailserver/mailserver.go | 2 +- .../go-ethereum/whisper/whisperv6/api.go | 8 +- .../go-ethereum/whisper/whisperv6/config.go | 14 +- .../go-ethereum/whisper/whisperv6/peer.go | 10 
+- .../go-ethereum/whisper/whisperv6/whisper.go | 44 +- vendor/gopkg.in/karalabe/cookiejar.v2/LICENSE | 25 -- .../cookiejar.v2/collections/prque/prque.go | 66 --- .../cookiejar.v2/collections/prque/sstack.go | 91 ----- 157 files changed, 5163 insertions(+), 2390 deletions(-) create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0014-whisperv6-notifications.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0016-fix-leveldb-issue.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0021-backends-simulated-chain-signer.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0022-node-attach-public.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0023-extract-personal-sign-api.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0025-whisper-confirmations.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0027-whisper-time-source.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0028-p2p-watchdog.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0029-node-api-gauge-metric.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0032-send-mailserver-request-completed-code.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0033-mailserver-response-contains-cursor.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0035-add_goroutines_metrics.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/0037-whisper-metrics.patch create mode 100644 vendor/github.com/ethereum/go-ethereum/_assets/patches/README.md create mode 100755 vendor/github.com/ethereum/go-ethereum/_assets/patches/patcher delete mode 100644 
vendor/github.com/ethereum/go-ethereum/core/vm/noop.go create mode 100644 vendor/github.com/ethereum/go-ethereum/mobile/shhclient.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/network/fetcher.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/storage/chunkstore.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/storage/dbapi.go rename vendor/github.com/ethereum/go-ethereum/swarm/{storage/common.go => testutil/file.go} (54%) delete mode 100755 vendor/gopkg.in/karalabe/cookiejar.v2/LICENSE delete mode 100755 vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go delete mode 100755 vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go diff --git a/Gopkg.lock b/Gopkg.lock index f2258c3236..c5ee0c4f8a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -88,11 +88,7 @@ revision = "935e0e8a636ca4ba70b713f3e38a19e1b77739e8" [[projects]] -<<<<<<< HEAD - digest = "1:34fa5a5d444765a1360983211916c4d2e65a3be75186277142d6aef5c21192db" -======= - digest = "1:ba11b65320bfa1a5e8e43b050833bca23490e508a67c45d7e430156cefc2ab7f" ->>>>>>> develop + digest = "1:cf53a6be6cf3d947c0c7fb678237a7b34e06e4102411ecb843eedacb46759995" name = "github.com/ethereum/go-ethereum" packages = [ ".", @@ -162,14 +158,9 @@ "whisper/whisperv6", ] pruneopts = "T" -<<<<<<< HEAD - revision = "13115e12870cb2ebc1c8dfe55bc976d94efbeb90" + revision = "56c2fa69e0224e8f939a092b3aba20b23bb95c26" source = "github.com/status-im/go-ethereum" - version = "v1.8.14" -======= - revision = "89451f7c382ad2185987ee369f16416f89c28a7d" - version = "v1.8.15" ->>>>>>> develop + version = "v1.8.16" [[projects]] digest = "1:5ac7ecd476a2355a5201229081df2e5f57333ecf703e1f69dde699ae34169c1b" @@ -1036,14 +1027,6 @@ revision = "48a433ba4bcadc5be9aa16d4bdcb383d3f57a741" version = "v9.9.3" -[[projects]] - branch = "v2" - digest = "1:0a2f4b974a413866afa1b41130d756643841fb9ad661b81c9a6dd9f1364ed19f" - name = "gopkg.in/karalabe/cookiejar.v2" - packages = ["collections/prque"] 
- pruneopts = "NUT" - revision = "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57" - [[projects]] branch = "v2" digest = "1:85815bf6f46dc1855ae4e236b496bfa463f04fce83f700d74357ef220476abb7" diff --git a/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml b/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml index a6227159d1..b6e96efdc6 100644 --- a/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml +++ b/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml @@ -7,5 +7,5 @@ closeComment: > This issue has been automatically closed because there has been no response to our request for more information from the original author. With only the information that is currently in the issue, we don't have enough information - to take action. Please reach out if you have or find the answers we need so - that we can investigate further. + to take action. Please reach out if you have more relevant information or + answers to our questions so that we can investigate further. diff --git a/vendor/github.com/ethereum/go-ethereum/.travis.yml b/vendor/github.com/ethereum/go-ethereum/.travis.yml index 3ae88aab6d..372f7a8270 100644 --- a/vendor/github.com/ethereum/go-ethereum/.travis.yml +++ b/vendor/github.com/ethereum/go-ethereum/.travis.yml @@ -14,11 +14,22 @@ matrix: - go run build/ci.go install - go run build/ci.go test -coverage $TEST_PACKAGES - # These are the latest Go versions. - os: linux dist: trusty sudo: required go: 1.10.x + script: + - sudo modprobe fuse + - sudo chmod 666 /dev/fuse + - sudo chown root:$USER /etc/fuse.conf + - go run build/ci.go install + - go run build/ci.go test -coverage $TEST_PACKAGES + + # These are the latest Go versions. 
+ - os: linux + dist: trusty + sudo: required + go: 1.11.x script: - sudo modprobe fuse - sudo chmod 666 /dev/fuse @@ -27,7 +38,7 @@ matrix: - go run build/ci.go test -coverage $TEST_PACKAGES - os: osx - go: 1.10.x + go: 1.11.x script: - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - go run build/ci.go install @@ -36,7 +47,7 @@ matrix: # This builder only tests code linters on latest version of Go - os: linux dist: trusty - go: 1.10.x + go: 1.11.x env: - lint git: @@ -47,7 +58,7 @@ matrix: # This builder does the Ubuntu PPA upload - os: linux dist: trusty - go: 1.10.x + go: 1.11.x env: - ubuntu-ppa git: @@ -66,7 +77,7 @@ matrix: - os: linux dist: trusty sudo: required - go: 1.10.x + go: 1.11.x env: - azure-linux git: @@ -100,7 +111,7 @@ matrix: dist: trusty services: - docker - go: 1.10.x + go: 1.11.x env: - azure-linux-mips git: @@ -144,7 +155,7 @@ matrix: git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz + - curl https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go @@ -161,7 +172,7 @@ matrix: # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads - os: osx - go: 1.10.x + go: 1.11.x env: - azure-osx - azure-ios @@ -190,7 +201,7 @@ matrix: # This builder does the Azure archive purges to avoid accumulating junk - os: linux dist: trusty - go: 1.10.x + go: 1.11.x env: - azure-purge git: diff --git a/vendor/github.com/ethereum/go-ethereum/Dockerfile b/vendor/github.com/ethereum/go-ethereum/Dockerfile index edf5a0602d..e87dd35d32 100644 --- a/vendor/github.com/ethereum/go-ethereum/Dockerfile +++ b/vendor/github.com/ethereum/go-ethereum/Dockerfile @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.10-alpine as builder +FROM golang:1.11-alpine as builder RUN apk add --no-cache make gcc 
musl-dev linux-headers diff --git a/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools b/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools index e54e107bf3..e984a1b092 100644 --- a/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools +++ b/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.10-alpine as builder +FROM golang:1.11-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers diff --git a/vendor/github.com/ethereum/go-ethereum/README.md b/vendor/github.com/ethereum/go-ethereum/README.md index c6bc91af17..f308fb1011 100644 --- a/vendor/github.com/ethereum/go-ethereum/README.md +++ b/vendor/github.com/ethereum/go-ethereum/README.md @@ -7,7 +7,7 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874 )](https://godoc.org/github.com/ethereum/go-ethereum) [![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) [![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) +[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) Automated builds are available for stable releases and the unstable master branch. Binary archives are published at https://geth.ethereum.org/downloads/. 
diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch new file mode 100644 index 0000000000..68bfa82200 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch @@ -0,0 +1,358 @@ +diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go +index 211fa863..65c83f3b 100644 +--- a/accounts/keystore/key.go ++++ b/accounts/keystore/key.go +@@ -33,6 +33,7 @@ import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/pborman/uuid" ++ "github.com/status-im/status-go/extkeys" + ) + + const ( +@@ -46,6 +47,10 @@ type Key struct { + // we only store privkey as pubkey/address can be derived from it + // privkey in this struct is always in plaintext + PrivateKey *ecdsa.PrivateKey ++ // extended key is the root node for new hardened children i.e. sub-accounts ++ ExtendedKey *extkeys.ExtendedKey ++ // next index to be used for sub-account child derivation ++ SubAccountIndex uint32 + } + + type keyStore interface { +@@ -65,10 +70,12 @@ type plainKeyJSON struct { + } + + type encryptedKeyJSONV3 struct { +- Address string `json:"address"` +- Crypto cryptoJSON `json:"crypto"` +- Id string `json:"id"` +- Version int `json:"version"` ++ Address string `json:"address"` ++ Crypto cryptoJSON `json:"crypto"` ++ Id string `json:"id"` ++ Version int `json:"version"` ++ ExtendedKey cryptoJSON `json:"extendedkey"` ++ SubAccountIndex uint32 `json:"subaccountindex"` + } + + type encryptedKeyJSONV1 struct { +@@ -137,6 +144,40 @@ func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { + return key + } + ++func newKeyFromExtendedKey(extKey *extkeys.ExtendedKey) (*Key, error) { ++ var ( ++ extChild1, extChild2 *extkeys.ExtendedKey ++ err error ++ ) ++ ++ if extKey.Depth == 0 { // we are dealing with master key ++ // CKD#1 - main account ++ extChild1, err = 
extKey.BIP44Child(extkeys.CoinTypeETH, 0) ++ if err != nil { ++ return &Key{}, err ++ } ++ ++ // CKD#2 - sub-accounts root ++ extChild2, err = extKey.BIP44Child(extkeys.CoinTypeETH, 1) ++ if err != nil { ++ return &Key{}, err ++ } ++ } else { // we are dealing with non-master key, so it is safe to persist and extend from it ++ extChild1 = extKey ++ extChild2 = extKey ++ } ++ ++ privateKeyECDSA := extChild1.ToECDSA() ++ id := uuid.NewRandom() ++ key := &Key{ ++ Id: id, ++ Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), ++ PrivateKey: privateKeyECDSA, ++ ExtendedKey: extChild2, ++ } ++ return key, nil ++} ++ + // NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit + // into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we + // retry until the first byte is 0. +diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go +index 6b04acd0..ac2ab008 100644 +--- a/accounts/keystore/keystore.go ++++ b/accounts/keystore/keystore.go +@@ -38,6 +38,7 @@ import ( + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" ++ "github.com/status-im/status-go/extkeys" + ) + + var ( +@@ -228,6 +229,11 @@ func (ks *KeyStore) Accounts() []accounts.Account { + return ks.cache.accounts() + } + ++// AccountDecryptedKey returns decrypted key for account (provided that password is correct). ++func (ks *KeyStore) AccountDecryptedKey(a accounts.Account, auth string) (accounts.Account, *Key, error) { ++ return ks.getDecryptedKey(a, auth) ++} ++ + // Delete deletes the key matched by account if the passphrase is correct. + // If the account contains no filename, the address must match a unique key. 
+ func (ks *KeyStore) Delete(a accounts.Account, passphrase string) error { +@@ -453,6 +459,34 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco + return ks.importKey(key, passphrase) + } + ++// ImportExtendedKey stores ECDSA key (obtained from extended key) along with CKD#2 (root for sub-accounts) ++// If key file is not found, it is created. Key is encrypted with the given passphrase. ++func (ks *KeyStore) ImportExtendedKey(extKey *extkeys.ExtendedKey, passphrase string) (accounts.Account, error) { ++ key, err := newKeyFromExtendedKey(extKey) ++ if err != nil { ++ zeroKey(key.PrivateKey) ++ return accounts.Account{}, err ++ } ++ ++ // if account is already imported, return cached version ++ if ks.cache.hasAddress(key.Address) { ++ a := accounts.Account{ ++ Address: key.Address, ++ } ++ ks.cache.maybeReload() ++ ks.cache.mu.Lock() ++ a, err := ks.cache.find(a) ++ ks.cache.mu.Unlock() ++ if err != nil { ++ zeroKey(key.PrivateKey) ++ return a, err ++ } ++ return a, nil ++ } ++ ++ return ks.importKey(key, passphrase) ++} ++ + func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, error) { + a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.storage.JoinPath(keyFileName(key.Address))}} + if err := ks.storage.StoreKey(a.URL.Path, key, passphrase); err != nil { +@@ -463,6 +497,15 @@ func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, er + return a, nil + } + ++func (ks *KeyStore) IncSubAccountIndex(a accounts.Account, passphrase string) error { ++ a, key, err := ks.getDecryptedKey(a, passphrase) ++ if err != nil { ++ return err ++ } ++ key.SubAccountIndex++ ++ return ks.storage.StoreKey(a.URL.Path, key, passphrase) ++} ++ + // Update changes the passphrase of an existing account. 
+ func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error { + a, key, err := ks.getDecryptedKey(a, passphrase) +@@ -486,6 +529,9 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account + + // zeroKey zeroes a private key in memory. + func zeroKey(k *ecdsa.PrivateKey) { ++ if k == nil { ++ return ++ } + b := k.D.Bits() + for i := range b { + b[i] = 0 +diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go +index 59738abe..2b6ef252 100644 +--- a/accounts/keystore/keystore_passphrase.go ++++ b/accounts/keystore/keystore_passphrase.go +@@ -41,6 +41,7 @@ import ( + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" + "github.com/pborman/uuid" ++ "github.com/status-im/status-go/extkeys" + "golang.org/x/crypto/pbkdf2" + "golang.org/x/crypto/scrypt" + ) +@@ -157,15 +158,68 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { + KDFParams: scryptParamsJSON, + MAC: hex.EncodeToString(mac), + } ++ encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP) ++ if err != nil { ++ return nil, err ++ } + encryptedKeyJSONV3 := encryptedKeyJSONV3{ + hex.EncodeToString(key.Address[:]), + cryptoStruct, + key.Id.String(), + version, ++ encryptedExtendedKey, ++ key.SubAccountIndex, + } + return json.Marshal(encryptedKeyJSONV3) + } + ++func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) { ++ if extKey == nil { ++ return cryptoJSON{}, nil ++ } ++ authArray := []byte(auth) ++ salt := make([]byte, 32) ++ if _, err := io.ReadFull(rand.Reader, salt); err != nil { ++ panic("reading from crypto/rand failed: " + err.Error()) ++ } ++ derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) ++ if err != nil { ++ return cryptoJSON{}, err ++ } ++ encryptKey := derivedKey[:16] ++ keyBytes := []byte(extKey.String()) ++ ++ 
iv := make([]byte, aes.BlockSize) // 16 ++ if _, err := io.ReadFull(rand.Reader, iv); err != nil { ++ panic("reading from crypto/rand failed: " + err.Error()) ++ } ++ cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) ++ if err != nil { ++ return cryptoJSON{}, err ++ } ++ mac := crypto.Keccak256(derivedKey[16:32], cipherText) ++ ++ scryptParamsJSON := make(map[string]interface{}, 5) ++ scryptParamsJSON["n"] = scryptN ++ scryptParamsJSON["r"] = scryptR ++ scryptParamsJSON["p"] = scryptP ++ scryptParamsJSON["dklen"] = scryptDKLen ++ scryptParamsJSON["salt"] = hex.EncodeToString(salt) ++ ++ cipherParamsJSON := cipherparamsJSON{ ++ IV: hex.EncodeToString(iv), ++ } ++ ++ return cryptoJSON{ ++ Cipher: "aes-128-ctr", ++ CipherText: hex.EncodeToString(cipherText), ++ CipherParams: cipherParamsJSON, ++ KDF: "scrypt", ++ KDFParams: scryptParamsJSON, ++ MAC: hex.EncodeToString(mac), ++ }, nil ++} ++ + // DecryptKey decrypts a key from a json blob, returning the private key itself. + func DecryptKey(keyjson []byte, auth string) (*Key, error) { + // Parse the json into a simple map to fetch the key version +@@ -177,20 +231,43 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { + var ( + keyBytes, keyId []byte + err error ++ extKeyBytes []byte ++ extKey *extkeys.ExtendedKey + ) ++ ++ subAccountIndex, ok := m["subaccountindex"].(float64) ++ if !ok { ++ subAccountIndex = 0 ++ } ++ + if version, ok := m["version"].(string); ok && version == "1" { + k := new(encryptedKeyJSONV1) + if err := json.Unmarshal(keyjson, k); err != nil { + return nil, err + } + keyBytes, keyId, err = decryptKeyV1(k, auth) ++ if err != nil { ++ return nil, err ++ } ++ ++ extKey, err = extkeys.NewKeyFromString(extkeys.EmptyExtendedKeyString) + } else { + k := new(encryptedKeyJSONV3) + if err := json.Unmarshal(keyjson, k); err != nil { + return nil, err + } + keyBytes, keyId, err = decryptKeyV3(k, auth) ++ if err != nil { ++ return nil, err ++ } ++ ++ extKeyBytes, err = decryptExtendedKey(k, 
auth) ++ if err != nil { ++ return nil, err ++ } ++ extKey, err = extkeys.NewKeyFromString(string(extKeyBytes)) + } ++ + // Handle any decryption errors and return the key + if err != nil { + return nil, err +@@ -198,9 +275,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { + key := crypto.ToECDSAUnsafe(keyBytes) + + return &Key{ +- Id: uuid.UUID(keyId), +- Address: crypto.PubkeyToAddress(key.PublicKey), +- PrivateKey: key, ++ Id: uuid.UUID(keyId), ++ Address: crypto.PubkeyToAddress(key.PublicKey), ++ PrivateKey: key, ++ ExtendedKey: extKey, ++ SubAccountIndex: uint32(subAccountIndex), + }, nil + } + +@@ -280,6 +359,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt + return plainText, keyId, err + } + ++func decryptExtendedKey(keyProtected *encryptedKeyJSONV3, auth string) (plainText []byte, err error) { ++ if len(keyProtected.ExtendedKey.CipherText) == 0 { ++ return []byte(extkeys.EmptyExtendedKeyString), nil ++ } ++ ++ if keyProtected.Version != version { ++ return nil, fmt.Errorf("Version not supported: %v", keyProtected.Version) ++ } ++ ++ if keyProtected.ExtendedKey.Cipher != "aes-128-ctr" { ++ return nil, fmt.Errorf("Cipher not supported: %v", keyProtected.ExtendedKey.Cipher) ++ } ++ ++ mac, err := hex.DecodeString(keyProtected.ExtendedKey.MAC) ++ if err != nil { ++ return nil, err ++ } ++ ++ iv, err := hex.DecodeString(keyProtected.ExtendedKey.CipherParams.IV) ++ if err != nil { ++ return nil, err ++ } ++ ++ cipherText, err := hex.DecodeString(keyProtected.ExtendedKey.CipherText) ++ if err != nil { ++ return nil, err ++ } ++ ++ derivedKey, err := getKDFKey(keyProtected.ExtendedKey, auth) ++ if err != nil { ++ return nil, err ++ } ++ ++ calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) ++ if !bytes.Equal(calculatedMAC, mac) { ++ return nil, ErrDecrypt ++ } ++ ++ plainText, err = aesCTRXOR(derivedKey[:16], cipherText, iv) ++ if err != nil { ++ return nil, err ++ } ++ return plainText, err ++} 
++ + func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) { + authArray := []byte(auth) + salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0014-whisperv6-notifications.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0014-whisperv6-notifications.patch new file mode 100644 index 0000000000..ef326ad3cd --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0014-whisperv6-notifications.patch @@ -0,0 +1,208 @@ +diff --git i/whisper/whisperv6/api.go w/whisper/whisperv6/api.go +index c60bc46a1..2de99f293 100644 +--- i/whisper/whisperv6/api.go ++++ w/whisper/whisperv6/api.go +@@ -317,6 +317,16 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (hexutil. + return result, err + } + ++// UninstallFilter is alias for Unsubscribe ++func (api *PublicWhisperAPI) UninstallFilter(id string) { ++ api.w.Unsubscribe(id) ++} ++ ++// Unsubscribe disables and removes an existing filter. ++func (api *PublicWhisperAPI) Unsubscribe(id string) { ++ api.w.Unsubscribe(id) ++} ++ + //go:generate gencodec -type Criteria -field-override criteriaOverride -out gen_criteria_json.go + + // Criteria holds various filter options for inbound messages. +diff --git i/whisper/whisperv6/whisper.go w/whisper/whisperv6/whisper.go +index 880cced09..702556079 100644 +--- i/whisper/whisperv6/whisper.go ++++ w/whisper/whisperv6/whisper.go +@@ -382,9 +382,9 @@ func (whisper *Whisper) NewKeyPair() (string, error) { + return "", fmt.Errorf("failed to generate valid key") + } + +- id, err := GenerateRandomID() ++ id, err := toDeterministicID(common.ToHex(crypto.FromECDSAPub(&key.PublicKey)), keyIDSize) + if err != nil { +- return "", fmt.Errorf("failed to generate ID: %s", err) ++ return "", err + } + + whisper.keyMu.Lock() +@@ -399,11 +399,16 @@ func (whisper *Whisper) NewKeyPair() (string, error) { + + // DeleteKeyPair deletes the specified key if it exists. 
+ func (whisper *Whisper) DeleteKeyPair(key string) bool { ++ deterministicID, err := toDeterministicID(key, keyIDSize) ++ if err != nil { ++ return false ++ } ++ + whisper.keyMu.Lock() + defer whisper.keyMu.Unlock() + +- if whisper.privateKeys[key] != nil { +- delete(whisper.privateKeys, key) ++ if whisper.privateKeys[deterministicID] != nil { ++ delete(whisper.privateKeys, deterministicID) + return true + } + return false +@@ -411,31 +416,73 @@ func (whisper *Whisper) DeleteKeyPair(key string) bool { + + // AddKeyPair imports a asymmetric private key and returns it identifier. + func (whisper *Whisper) AddKeyPair(key *ecdsa.PrivateKey) (string, error) { +- id, err := GenerateRandomID() ++ id, err := makeDeterministicID(common.ToHex(crypto.FromECDSAPub(&key.PublicKey)), keyIDSize) + if err != nil { +- return "", fmt.Errorf("failed to generate ID: %s", err) ++ return "", err ++ } ++ if whisper.HasKeyPair(id) { ++ return id, nil // no need to re-inject + } + + whisper.keyMu.Lock() + whisper.privateKeys[id] = key + whisper.keyMu.Unlock() ++ log.Info("Whisper identity added", "id", id, "pubkey", common.ToHex(crypto.FromECDSAPub(&key.PublicKey))) + + return id, nil + } + ++// SelectKeyPair adds cryptographic identity, and makes sure ++// that it is the only private key known to the node. 
++func (whisper *Whisper) SelectKeyPair(key *ecdsa.PrivateKey) error { ++ id, err := makeDeterministicID(common.ToHex(crypto.FromECDSAPub(&key.PublicKey)), keyIDSize) ++ if err != nil { ++ return err ++ } ++ ++ whisper.keyMu.Lock() ++ defer whisper.keyMu.Unlock() ++ ++ whisper.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store ++ whisper.privateKeys[id] = key ++ ++ log.Info("Whisper identity selected", "id", id, "key", common.ToHex(crypto.FromECDSAPub(&key.PublicKey))) ++ return nil ++} ++ ++// DeleteKeyPairs removes all cryptographic identities known to the node ++func (whisper *Whisper) DeleteKeyPairs() error { ++ whisper.keyMu.Lock() ++ defer whisper.keyMu.Unlock() ++ ++ whisper.privateKeys = make(map[string]*ecdsa.PrivateKey) ++ ++ return nil ++} ++ + // HasKeyPair checks if the whisper node is configured with the private key + // of the specified public pair. + func (whisper *Whisper) HasKeyPair(id string) bool { ++ deterministicID, err := toDeterministicID(id, keyIDSize) ++ if err != nil { ++ return false ++ } ++ + whisper.keyMu.RLock() + defer whisper.keyMu.RUnlock() +- return whisper.privateKeys[id] != nil ++ return whisper.privateKeys[deterministicID] != nil + } + + // GetPrivateKey retrieves the private key of the specified identity. + func (whisper *Whisper) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) { ++ deterministicID, err := toDeterministicID(id, keyIDSize) ++ if err != nil { ++ return nil, err ++ } ++ + whisper.keyMu.RLock() + defer whisper.keyMu.RUnlock() +- key := whisper.privateKeys[id] ++ key := whisper.privateKeys[deterministicID] + if key == nil { + return nil, fmt.Errorf("invalid id") + } +@@ -467,6 +514,23 @@ func (whisper *Whisper) GenerateSymKey() (string, error) { + return id, nil + } + ++// AddSymKey stores the key with a given id. 
++func (whisper *Whisper) AddSymKey(id string, key []byte) (string, error) { ++ deterministicID, err := toDeterministicID(id, keyIDSize) ++ if err != nil { ++ return "", err ++ } ++ ++ whisper.keyMu.Lock() ++ defer whisper.keyMu.Unlock() ++ ++ if whisper.symKeys[deterministicID] != nil { ++ return "", fmt.Errorf("key already exists: %v", id) ++ } ++ whisper.symKeys[deterministicID] = key ++ return deterministicID, nil ++} ++ + // AddSymKeyDirect stores the key, and returns its id. + func (whisper *Whisper) AddSymKeyDirect(key []byte) (string, error) { + if len(key) != aesKeyLength { +@@ -1013,6 +1077,33 @@ func GenerateRandomID() (id string, err error) { + return id, err + } + ++// makeDeterministicID generates a deterministic ID, based on a given input ++func makeDeterministicID(input string, keyLen int) (id string, err error) { ++ buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New) ++ if !validateDataIntegrity(buf, keyIDSize) { ++ return "", fmt.Errorf("error in GenerateDeterministicID: failed to generate key") ++ } ++ id = common.Bytes2Hex(buf) ++ return id, err ++} ++ ++// toDeterministicID reviews incoming id, and transforms it to format ++// expected internally be private key store. Originally, public keys ++// were used as keys, now random keys are being used. And in order to ++// make it easier to consume, we now allow both random IDs and public ++// keys to be passed. ++func toDeterministicID(id string, expectedLen int) (string, error) { ++ if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled ++ var err error ++ id, err = makeDeterministicID(id, expectedLen) ++ if err != nil { ++ return "", err ++ } ++ } ++ ++ return id, nil ++} ++ + func isFullNode(bloom []byte) bool { + if bloom == nil { + return true +@@ -1048,3 +1139,15 @@ func addBloom(a, b []byte) []byte { + } + return c + } ++ ++// SelectedKeyPairID returns the id of currently selected key pair. 
++// It helps distinguish between different users w/o exposing the user identity itself. ++func (whisper *Whisper) SelectedKeyPairID() string { ++ whisper.keyMu.RLock() ++ defer whisper.keyMu.RUnlock() ++ ++ for id := range whisper.privateKeys { ++ return id ++ } ++ return "" ++} diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0016-fix-leveldb-issue.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0016-fix-leveldb-issue.patch new file mode 100644 index 0000000000..76e34b674a --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0016-fix-leveldb-issue.patch @@ -0,0 +1,131 @@ +diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go +index fbde9c6c..c0013a11 100644 +--- a/eth/downloader/downloader.go ++++ b/eth/downloader/downloader.go +@@ -143,6 +143,8 @@ type Downloader struct { + quitCh chan struct{} // Quit channel to signal termination + quitLock sync.RWMutex // Lock to prevent double closes + ++ downloads sync.WaitGroup // Keeps track of the currently active downloads ++ + // Testing hooks + syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run + bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch +@@ -403,7 +405,9 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode + // specified peer and head hash. 
+ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { + d.mux.Post(StartEvent{}) ++ d.downloads.Add(1) + defer func() { ++ d.downloads.Done() + // reset on error + if err != nil { + d.mux.Post(FailedEvent{err}) +@@ -471,14 +475,22 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I + } else if d.mode == FullSync { + fetchers = append(fetchers, d.processFullSyncContent) + } +- return d.spawnSync(fetchers) ++ return d.spawnSync(errCancelHeaderFetch, fetchers) + } + + // spawnSync runs d.process and all given fetcher functions to completion in + // separate goroutines, returning the first error that appears. +-func (d *Downloader) spawnSync(fetchers []func() error) error { ++func (d *Downloader) spawnSync(errCancel error, fetchers []func() error) error { ++ d.cancelLock.Lock() ++ select { ++ case <-d.cancelCh: ++ d.cancelLock.Unlock() ++ return errCancel ++ default: ++ } + errc := make(chan error, len(fetchers)) + d.cancelWg.Add(len(fetchers)) ++ d.cancelLock.Unlock() + for _, fn := range fetchers { + fn := fn + go func() { defer d.cancelWg.Done(); errc <- fn() }() +@@ -539,6 +551,10 @@ func (d *Downloader) Terminate() { + + // Cancel any pending download requests + d.Cancel() ++ ++ // Wait, so external dependencies aren't destroyed ++ // until the download processing is done. ++ d.downloads.Wait() + } + + // fetchHeight retrieves the head header of the remote peer to aid in estimating +diff --git a/eth/handler.go b/eth/handler.go +index f89f68c9..5522b0d9 100644 +--- a/eth/handler.go ++++ b/eth/handler.go +@@ -230,6 +230,9 @@ func (pm *ProtocolManager) Stop() { + // Quit fetcher, txsyncLoop. + close(pm.quitSync) + ++ // Stop downloader and make sure that all the running downloads are complete. ++ pm.downloader.Terminate() ++ + // Disconnect existing sessions. + // This also closes the gate for any new registrations on the peer set. 
+ // sessions which are already established but not added to pm.peers yet +diff --git a/eth/sync.go b/eth/sync.go +index e49e4008..4367434a 100644 +--- a/eth/sync.go ++++ b/eth/sync.go +@@ -135,7 +135,6 @@ func (pm *ProtocolManager) syncer() { + // Start and ensure cleanup of sync mechanisms + pm.fetcher.Start() + defer pm.fetcher.Stop() +- defer pm.downloader.Terminate() + + // Wait for different events to fire synchronisation operations + forceSync := time.NewTicker(forceSyncCycle) +diff --git a/les/backend.go b/les/backend.go +index 00025ba6..38c36da6 100644 +--- a/les/backend.go ++++ b/les/backend.go +@@ -20,7 +20,6 @@ package les + import ( + "fmt" + "sync" +- "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" +@@ -253,7 +252,6 @@ func (s *LightEthereum) Stop() error { + + s.eventMux.Stop() + +- time.Sleep(time.Millisecond * 200) + s.chainDb.Close() + close(s.shutdownChan) + +diff --git a/les/handler.go b/les/handler.go +index ca40eaab..cc15d68c 100644 +--- a/les/handler.go ++++ b/les/handler.go +@@ -194,6 +194,9 @@ func (pm *ProtocolManager) Stop() { + pm.clientPool.stop() + } + ++ // Stop downloader and make sure that all the running downloads are complete. ++ pm.downloader.Terminate() ++ + // Disconnect existing sessions. + // This also closes the gate for any new registrations on the peer set. 
+ // sessions which are already established but not added to pm.peers yet +diff --git a/les/sync.go b/les/sync.go +index 1ac64558..eb155377 100644 +--- a/les/sync.go ++++ b/les/sync.go +@@ -31,7 +31,6 @@ func (pm *ProtocolManager) syncer() { + // Start and ensure cleanup of sync mechanisms + //pm.fetcher.Start() + //defer pm.fetcher.Stop() +- defer pm.downloader.Terminate() + + // Wait for different events to fire synchronisation operations + //forceSync := time.Tick(forceSyncCycle) diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0021-backends-simulated-chain-signer.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0021-backends-simulated-chain-signer.patch new file mode 100644 index 0000000000..2b6bfe51d4 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0021-backends-simulated-chain-signer.patch @@ -0,0 +1,14 @@ +diff --git i/accounts/abi/bind/backends/simulated.go w/accounts/abi/bind/backends/simulated.go +index bd342a8cb..2ce30e2fa 100644 +--- i/accounts/abi/bind/backends/simulated.go ++++ w/accounts/abi/bind/backends/simulated.go +@@ -295,8 +295,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM + func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { + b.mu.Lock() + defer b.mu.Unlock() +- +- sender, err := types.Sender(types.HomesteadSigner{}, tx) ++ sender, err := types.Sender(types.NewEIP155Signer(tx.ChainId()), tx) + if err != nil { + panic(fmt.Errorf("invalid transaction: %v", err)) + } diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0022-node-attach-public.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0022-node-attach-public.patch new file mode 100644 index 0000000000..6ec61ef7af --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0022-node-attach-public.patch @@ -0,0 +1,98 @@ +diff --git a/node/node.go b/node/node.go +index ada38372..5ea58e13 100644 +--- 
a/node/node.go ++++ b/node/node.go +@@ -51,8 +51,9 @@ type Node struct { + serviceFuncs []ServiceConstructor // Service constructors (in dependency order) + services map[reflect.Type]Service // Currently running services + +- rpcAPIs []rpc.API // List of APIs currently provided by the node +- inprocHandler *rpc.Server // In-process RPC request handler to process the API requests ++ rpcAPIs []rpc.API // List of APIs currently provided by the node ++ inprocHandler *rpc.Server // In-process RPC request handler to process the API requests ++ inprocPublicHandler *rpc.Server // In-process RPC request handler to process the public API requests + + ipcEndpoint string // IPC endpoint to listen at (empty = IPC disabled) + ipcListener net.Listener // IPC RPC listener socket to serve API requests +@@ -259,18 +260,25 @@ func (n *Node) startRPC(services map[reflect.Type]Service) error { + if err := n.startInProc(apis); err != nil { + return err + } ++ if err := n.startPublicInProc(apis, n.config.HTTPModules); err != nil { ++ n.stopInProc() ++ return err ++ } + if err := n.startIPC(apis); err != nil { ++ n.stopPublicInProc() + n.stopInProc() + return err + } + if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors, n.config.HTTPVirtualHosts, n.config.HTTPTimeouts); err != nil { + n.stopIPC() ++ n.stopPublicInProc() + n.stopInProc() + return err + } + if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins, n.config.WSExposeAll); err != nil { + n.stopHTTP() + n.stopIPC() ++ n.stopPublicInProc() + n.stopInProc() + return err + } +@@ -301,6 +309,36 @@ func (n *Node) stopInProc() { + } + } + ++// startPublicInProc initializes an in-process RPC endpoint for public APIs. 
++func (n *Node) startPublicInProc(apis []rpc.API, modules []string) error { ++ // Generate the whitelist based on the allowed modules ++ whitelist := make(map[string]bool) ++ for _, module := range modules { ++ whitelist[module] = true ++ } ++ ++ // Register all the public APIs exposed by the services ++ handler := rpc.NewServer() ++ for _, api := range apis { ++ if whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) { ++ if err := handler.RegisterName(api.Namespace, api.Service); err != nil { ++ return err ++ } ++ n.log.Debug("InProc public registered", "service", api.Service, "namespace", api.Namespace) ++ } ++ } ++ n.inprocPublicHandler = handler ++ return nil ++} ++ ++// stopPublicInProc terminates the in-process RPC endpoint for public APIs. ++func (n *Node) stopPublicInProc() { ++ if n.inprocPublicHandler != nil { ++ n.inprocPublicHandler.Stop() ++ n.inprocPublicHandler = nil ++ } ++} ++ + // startIPC initializes and starts the IPC RPC endpoint. + func (n *Node) startIPC(apis []rpc.API) error { + if n.ipcEndpoint == "" { +@@ -487,6 +525,18 @@ func (n *Node) Attach() (*rpc.Client, error) { + return rpc.DialInProc(n.inprocHandler), nil + } + ++// AttachPublic creates an RPC client attached to an in-process Public API handler. ++func (n *Node) AttachPublic() (*rpc.Client, error) { ++ n.lock.RLock() ++ defer n.lock.RUnlock() ++ ++ if n.server == nil { ++ return nil, ErrNodeStopped ++ } ++ ++ return rpc.DialInProc(n.inprocPublicHandler), nil ++} ++ + // RPCHandler returns the in-process RPC request handler. 
+ func (n *Node) RPCHandler() (*rpc.Server, error) { + n.lock.RLock() diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0023-extract-personal-sign-api.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0023-extract-personal-sign-api.patch new file mode 100644 index 0000000000..f3d3cf3f97 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0023-extract-personal-sign-api.patch @@ -0,0 +1,61 @@ +diff --git a/ethapi/private_account.go b/ethapi/private_account.go +new file mode 100644 +index 00000000..8d51fd31 +--- /dev/null ++++ b/ethapi/private_account.go +@@ -0,0 +1,26 @@ ++package ethapi ++ ++import ( ++ "context" ++ ++ "github.com/ethereum/go-ethereum/accounts" ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/common/hexutil" ++ "github.com/ethereum/go-ethereum/internal/ethapi" ++) ++ ++type LimitedPersonalAPI struct { ++ privateAPI *ethapi.PrivateAccountAPI ++} ++ ++func NewLimitedPersonalAPI(am *accounts.Manager) *LimitedPersonalAPI { ++ return &LimitedPersonalAPI{ethapi.NewSubsetOfPrivateAccountAPI(am)} ++} ++ ++func (s *LimitedPersonalAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) { ++ return s.privateAPI.Sign(ctx, data, addr, passwd) ++} ++ ++func (s *LimitedPersonalAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) { ++ return s.privateAPI.EcRecover(ctx, data, sig) ++} +diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go +index 31408633..3cee8753 100644 +--- a/internal/ethapi/api.go ++++ b/internal/ethapi/api.go +@@ -214,6 +214,14 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI { + } + } + ++func NewSubsetOfPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI { ++ return &PrivateAccountAPI{ ++ am: am, ++ nonceLock: nil, ++ b: nil, ++ } ++} ++ + // ListAccounts will return a list of addresses for accounts this node manages. 
+ func (s *PrivateAccountAPI) ListAccounts() []common.Address { + addresses := make([]common.Address, 0) // return [] instead of nil if empty +@@ -426,7 +434,7 @@ func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr c + // Look up the wallet containing the requested signer + account := accounts.Account{Address: addr} + +- wallet, err := s.b.AccountManager().Find(account) ++ wallet, err := s.am.Find(account) + if err != nil { + return nil, err + } + diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0025-whisper-confirmations.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0025-whisper-confirmations.patch new file mode 100644 index 0000000000..a7f880d26d --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0025-whisper-confirmations.patch @@ -0,0 +1,89 @@ +diff --git a/whisper/whisperv6/events.go b/whisper/whisperv6/events.go +new file mode 100644 +index 00000000..e03ec9de +--- /dev/null ++++ b/whisper/whisperv6/events.go +@@ -0,0 +1,23 @@ ++package whisperv6 ++ ++import ( ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/p2p/discover" ++) ++ ++// EventType used to define known envelope events. ++type EventType string ++ ++const ( ++ // EventEnvelopeSent fires when envelope was sent to a peer. ++ EventEnvelopeSent EventType = "envelope.sent" ++ // EventEnvelopeExpired fires when envelop expired ++ EventEnvelopeExpired EventType = "envelope.expired" ++) ++ ++// EnvelopeEvent used for envelopes events. 
++type EnvelopeEvent struct { ++ Event EventType ++ Hash common.Hash ++ Peer discover.NodeID ++} +diff --git a/whisper/whisperv6/peer.go b/whisper/whisperv6/peer.go +index 79cc2127..018d8f82 100644 +--- a/whisper/whisperv6/peer.go ++++ b/whisper/whisperv6/peer.go +@@ -204,6 +204,11 @@ func (peer *Peer) broadcast() error { + // mark envelopes only if they were successfully sent + for _, e := range bundle { + peer.mark(e) ++ peer.host.envelopeFeed.Send(EnvelopeEvent{ ++ Event: EventEnvelopeSent, ++ Hash: e.Hash(), ++ Peer: peer.peer.ID(), // specifically discover.NodeID because it can be pretty printed ++ }) + } + + log.Trace("broadcast", "num. messages", len(bundle)) +diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go +index 414aa788..3c3c66ad 100644 +--- a/whisper/whisperv6/whisper.go ++++ b/whisper/whisperv6/whisper.go +@@ -29,6 +29,7 @@ import ( + mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ++ "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" +@@ -89,5 +90,7 @@ type Whisper struct { + + mailServer MailServer // MailServer interface ++ ++ envelopeFeed event.Feed + } + + // New creates a Whisper client ready to communicate through the Ethereum P2P network. +@@ -133,6 +136,12 @@ func New(cfg *Config) *Whisper { + return whisper + } + ++// SubscribeEnvelopeEvents subscribes to envelopes feed. ++// In order to prevent blocking whisper producers events must be amply buffered. ++func (whisper *Whisper) SubscribeEnvelopeEvents(events chan<- EnvelopeEvent) event.Subscription { ++ return whisper.envelopeFeed.Subscribe(events) ++} ++ + // MinPow returns the PoW value required by this node. 
+ func (whisper *Whisper) MinPow() float64 { + val, exist := whisper.settings.Load(minPowIdx) +@@ -986,6 +995,10 @@ func (whisper *Whisper) expire() { + hashSet.Each(func(v interface{}) bool { + sz := whisper.envelopes[v.(common.Hash)].size() + delete(whisper.envelopes, v.(common.Hash)) ++ whisper.envelopeFeed.Send(EnvelopeEvent{ ++ Hash: v.(common.Hash), ++ Event: EventEnvelopeExpired, ++ }) + whisper.stats.messagesCleared++ + whisper.stats.memoryCleared += sz + whisper.stats.memoryUsed -= sz diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0027-whisper-time-source.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0027-whisper-time-source.patch new file mode 100644 index 0000000000..d3e1aa345f --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0027-whisper-time-source.patch @@ -0,0 +1,135 @@ +diff --git a/whisper/whisperv6/api.go b/whisper/whisperv6/api.go +index 16b79c7fd..1df3b73dd 100644 +--- a/whisper/whisperv6/api.go ++++ b/whisper/whisperv6/api.go +@@ -284,7 +284,7 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (hexutil. + } + + var result []byte +- env, err := whisperMsg.Wrap(params) ++ env, err := whisperMsg.Wrap(params, api.w.GetCurrentTime()) + if err != nil { + return nil, err + } +diff --git a/whisper/whisperv6/config.go b/whisper/whisperv6/config.go +index 38eb9551c..213b83698 100644 +--- a/whisper/whisperv6/config.go ++++ b/whisper/whisperv6/config.go +@@ -16,11 +16,14 @@ + + package whisperv6 + ++import "time" ++ + // Config represents the configuration state of a whisper node. + type Config struct { + MaxMessageSize uint32 `toml:",omitempty"` + MinimumAcceptedPOW float64 `toml:",omitempty"` + RestrictConnectionBetweenLightClients bool `toml:",omitempty"` ++ TimeSource func() time.Time + } + + // DefaultConfig represents (shocker!) the default configuration. 
+@@ -28,4 +31,5 @@ var DefaultConfig = Config{ + MaxMessageSize: DefaultMaxMessageSize, + MinimumAcceptedPOW: DefaultMinimumPoW, + RestrictConnectionBetweenLightClients: true, ++ TimeSource: time.Now, + } +diff --git a/whisper/whisperv6/envelope.go b/whisper/whisperv6/envelope.go +index c42d1fa8a..3b65fdba0 100644 +--- a/whisper/whisperv6/envelope.go ++++ b/whisper/whisperv6/envelope.go +@@ -62,9 +62,9 @@ func (e *Envelope) rlpWithoutNonce() []byte { + + // NewEnvelope wraps a Whisper message with expiration and destination data + // included into an envelope for network forwarding. +-func NewEnvelope(ttl uint32, topic TopicType, msg *sentMessage) *Envelope { ++func NewEnvelope(ttl uint32, topic TopicType, msg *sentMessage, now time.Time) *Envelope { + env := Envelope{ +- Expiry: uint32(time.Now().Add(time.Second * time.Duration(ttl)).Unix()), ++ Expiry: uint32(now.Add(time.Second * time.Duration(ttl)).Unix()), + TTL: ttl, + Topic: topic, + Data: msg.Raw, +diff --git a/whisper/whisperv6/message.go b/whisper/whisperv6/message.go +index 2d4e86244..a12b445e2 100644 +--- a/whisper/whisperv6/message.go ++++ b/whisper/whisperv6/message.go +@@ -27,6 +27,7 @@ import ( + "errors" + mrand "math/rand" + "strconv" ++ "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +@@ -234,7 +235,7 @@ func generateSecureRandomData(length int) ([]byte, error) { + } + + // Wrap bundles the message into an Envelope to transmit over the network. 
+-func (msg *sentMessage) Wrap(options *MessageParams) (envelope *Envelope, err error) { ++func (msg *sentMessage) Wrap(options *MessageParams, now time.Time) (envelope *Envelope, err error) { + if options.TTL == 0 { + options.TTL = DefaultTTL + } +@@ -254,7 +255,7 @@ func (msg *sentMessage) Wrap(options *MessageParams) (envelope *Envelope, err er + return nil, err + } + +- envelope = NewEnvelope(options.TTL, options.Topic, msg) ++ envelope = NewEnvelope(options.TTL, options.Topic, msg, now) + if err = envelope.Seal(options); err != nil { + return nil, err + } +diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go +index 014e93c12..a57ff6428 100644 +--- a/whisper/whisperv6/whisper.go ++++ b/whisper/whisperv6/whisper.go +@@ -91,6 +91,8 @@ type Whisper struct { + mailServer MailServer // MailServer interface + + envelopeFeed event.Feed ++ ++ timeSource func() time.Time // source of time for whisper + } + + // New creates a Whisper client ready to communicate through the Ethereum P2P network. +@@ -109,6 +111,7 @@ func New(cfg *Config) *Whisper { + p2pMsgQueue: make(chan *Envelope, messageQueueLimit), + quit: make(chan struct{}), + syncAllowance: DefaultSyncAllowance, ++ timeSource: cfg.TimeSource, + } + + whisper.filters = NewFilters(whisper) +@@ -215,6 +218,11 @@ func (whisper *Whisper) APIs() []rpc.API { + } + } + ++// GetCurrentTime returns current time. ++func (whisper *Whisper) GetCurrentTime() time.Time { ++ return whisper.timeSource() ++} ++ + // RegisterServer registers MailServer interface. + // MailServer will process all the incoming messages with p2pRequestCode. + func (whisper *Whisper) RegisterServer(server MailServer) { +@@ -846,7 +854,7 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { + // appropriate time-stamp. In case of error, connection should be dropped. + // param isP2P indicates whether the message is peer-to-peer (should not be forwarded). 
+ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { +- now := uint32(time.Now().Unix()) ++ now := uint32(whisper.timeSource().Unix()) + sent := envelope.Expiry - envelope.TTL + + if sent > now { +@@ -989,7 +997,7 @@ func (whisper *Whisper) expire() { + whisper.statsMu.Lock() + defer whisper.statsMu.Unlock() + whisper.stats.reset() +- now := uint32(time.Now().Unix()) ++ now := uint32(whisper.timeSource().Unix()) + for expiry, hashSet := range whisper.expirations { + if expiry < now { + // Dump all expired messages and remove timestamp diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0028-p2p-watchdog.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0028-p2p-watchdog.patch new file mode 100644 index 0000000000..080dcf9e33 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0028-p2p-watchdog.patch @@ -0,0 +1,119 @@ +diff --git c/p2p/peer.go w/p2p/peer.go +index 73e33418e..322268b28 100644 +--- c/p2p/peer.go ++++ w/p2p/peer.go +@@ -22,6 +22,7 @@ import ( + "net" + "sort" + "sync" ++ "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" +@@ -38,7 +39,10 @@ const ( + + snappyProtocolVersion = 5 + +- pingInterval = 15 * time.Second ++ pingInterval = 1 * time.Second ++ // watchdogInterval intentionally lower than ping interval. ++ // this way we reduce potential flaky window size. ++ watchdogInterval = 200 * time.Millisecond + ) + + const ( +@@ -100,6 +104,7 @@ type Peer struct { + log log.Logger + created mclock.AbsTime + ++ flaky int32 + wg sync.WaitGroup + protoErr chan error + closed chan struct{} +@@ -118,6 +123,11 @@ func NewPeer(id discover.NodeID, name string, caps []Cap) *Peer { + return peer + } + ++// IsFlaky returns true if there was no incoming traffic recently. ++func (p *Peer) IsFlaky() bool { ++ return atomic.LoadInt32(&p.flaky) == 1 ++} ++ + // ID returns the node's public key. 
+ func (p *Peer) ID() discover.NodeID { + return p.rw.id +@@ -188,8 +198,10 @@ func (p *Peer) run() (remoteRequested bool, err error) { + readErr = make(chan error, 1) + reason DiscReason // sent to the peer + ) +- p.wg.Add(2) +- go p.readLoop(readErr) ++ p.wg.Add(3) ++ reads := make(chan struct{}, 10) // channel for reads ++ go p.readLoop(readErr, reads) ++ go p.watchdogLoop(reads) + go p.pingLoop() + + // Start all protocol handlers. +@@ -248,7 +260,24 @@ func (p *Peer) pingLoop() { + } + } + +-func (p *Peer) readLoop(errc chan<- error) { ++func (p *Peer) watchdogLoop(reads <-chan struct{}) { ++ defer p.wg.Done() ++ hb := time.NewTimer(watchdogInterval) ++ defer hb.Stop() ++ for { ++ select { ++ case <-reads: ++ atomic.StoreInt32(&p.flaky, 0) ++ case <-hb.C: ++ atomic.StoreInt32(&p.flaky, 1) ++ case <-p.closed: ++ return ++ } ++ hb.Reset(watchdogInterval) ++ } ++} ++ ++func (p *Peer) readLoop(errc chan<- error, reads chan<- struct{}) { + defer p.wg.Done() + for { + msg, err := p.rw.ReadMsg() +@@ -261,6 +290,7 @@ func (p *Peer) readLoop(errc chan<- error) { + errc <- err + return + } ++ reads <- struct{}{} + } + } + +diff --git c/p2p/server.go w/p2p/server.go +index c41d1dc15..04c6f7147 100644 +--- c/p2p/server.go ++++ w/p2p/server.go +@@ -45,7 +45,7 @@ const ( + + // Maximum time allowed for reading a complete message. + // This is effectively the amount of time a connection can be idle. +- frameReadTimeout = 30 * time.Second ++ frameReadTimeout = 10 * time.Second + + // Maximum amount of time allowed for writing a complete message. + frameWriteTimeout = 20 * time.Second +diff --git c/whisper/whisperv6/peer.go w/whisper/whisperv6/peer.go +index 427127290..c30e92d1c 100644 +--- c/whisper/whisperv6/peer.go ++++ w/whisper/whisperv6/peer.go +@@ -187,6 +187,10 @@ func (peer *Peer) expire() { + // broadcast iterates over the collection of envelopes and transmits yet unknown + // ones over the network. 
+ func (peer *Peer) broadcast() error { ++ if peer.peer.IsFlaky() { ++ log.Trace("Waiting for a peer to restore communication", "ID", peer.peer.ID()) ++ return nil ++ } + envelopes := peer.host.Envelopes() + bundle := make([]*Envelope, 0, len(envelopes)) + for _, envelope := range envelopes { diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0029-node-api-gauge-metric.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0029-node-api-gauge-metric.patch new file mode 100644 index 0000000000..0c21a5a75b --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0029-node-api-gauge-metric.patch @@ -0,0 +1,28 @@ +diff --git a/node/api.go b/node/api.go +index a3b8bc0b..a151147c 100644 +--- a/node/api.go ++++ b/node/api.go +@@ -313,6 +313,11 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) { + "Overall": float64(metric.Count()), + } + ++ case metrics.Gauge: ++ root[name] = map[string]interface{}{ ++ "Value": float64(metric.Value()), ++ } ++ + case metrics.Meter: + root[name] = map[string]interface{}{ + "AvgRate01Min": metric.Rate1(), +@@ -348,6 +353,11 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) { + "Overall": float64(metric.Count()), + } + ++ case metrics.Gauge: ++ root[name] = map[string]interface{}{ ++ "Value": float64(metric.Value()), ++ } ++ + case metrics.Meter: + root[name] = map[string]interface{}{ + "Avg01Min": format(metric.Rate1()*60, metric.Rate1()), diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0032-send-mailserver-request-completed-code.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0032-send-mailserver-request-completed-code.patch new file mode 100644 index 0000000000..78774ac661 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0032-send-mailserver-request-completed-code.patch @@ -0,0 +1,110 @@ +diff --git a/whisper/whisperv6/doc.go b/whisper/whisperv6/doc.go +index 4bbf554..2fcc9e6 
100644 +--- a/whisper/whisperv6/doc.go ++++ b/whisper/whisperv6/doc.go +@@ -44,13 +44,14 @@ const ( + ProtocolName = "shh" // Nickname of the protocol in geth + + // whisper protocol message codes, according to EIP-627 +- statusCode = 0 // used by whisper protocol +- messagesCode = 1 // normal whisper message +- powRequirementCode = 2 // PoW requirement +- bloomFilterExCode = 3 // bloom filter exchange +- p2pRequestCode = 126 // peer-to-peer message, used by Dapp protocol +- p2pMessageCode = 127 // peer-to-peer message (to be consumed by the peer, but not forwarded any further) +- NumberOfMessageCodes = 128 ++ statusCode = 0 // used by whisper protocol ++ messagesCode = 1 // normal whisper message ++ powRequirementCode = 2 // PoW requirement ++ bloomFilterExCode = 3 // bloom filter exchange ++ p2pRequestCompleteCode = 125 // peer-to-peer message, used by Dapp protocol ++ p2pRequestCode = 126 // peer-to-peer message, used by Dapp protocol ++ p2pMessageCode = 127 // peer-to-peer message (to be consumed by the peer, but not forwarded any further) ++ NumberOfMessageCodes = 128 + + SizeMask = byte(3) // mask used to extract the size of payload size field from the flags + signatureFlag = byte(4) +diff --git a/whisper/whisperv6/events.go b/whisper/whisperv6/events.go +index e03ec9d..1665539 100644 +--- a/whisper/whisperv6/events.go ++++ b/whisper/whisperv6/events.go +@@ -1,23 +1,27 @@ + package whisperv6 + + import ( +- "github.com/ethereum/go-ethereum/common" +- "github.com/ethereum/go-ethereum/p2p/discover" ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/p2p/discover" + ) + + // EventType used to define known envelope events. + type EventType string + + const ( +- // EventEnvelopeSent fires when envelope was sent to a peer. 
+- EventEnvelopeSent EventType = "envelope.sent" +- // EventEnvelopeExpired fires when envelop expired +- EventEnvelopeExpired EventType = "envelope.expired" ++ // EventEnvelopeSent fires when envelope was sent to a peer. ++ EventEnvelopeSent EventType = "envelope.sent" ++ // EventEnvelopeExpired fires when envelop expired ++ EventEnvelopeExpired EventType = "envelope.expired" ++ // EventMailServerRequestCompleted fires after mailserver sends all the requested messages ++ EventMailServerRequestCompleted EventType = "mailserver.request.completed" ++ // EventMailServerRequestExpired fires after mailserver the request TTL ends ++ EventMailServerRequestExpired EventType = "mailserver.request.expired" + ) + + // EnvelopeEvent used for envelopes events. + type EnvelopeEvent struct { +- Event EventType +- Hash common.Hash +- Peer discover.NodeID ++ Event EventType ++ Hash common.Hash ++ Peer discover.NodeID + } +diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go +index 697f0ec..4a7b006 100644 +--- a/whisper/whisperv6/whisper.go ++++ b/whisper/whisperv6/whisper.go +@@ -378,6 +378,15 @@ func (whisper *Whisper) RequestHistoricMessages(peerID []byte, envelope *Envelop + return p2p.Send(p.ws, p2pRequestCode, envelope) + } + ++func (whisper *Whisper) SendHistoricMessageResponse(peer *Peer, requestID common.Hash) error { ++ size, r, err := rlp.EncodeToReader(requestID) ++ if err != nil { ++ return err ++ } ++ ++ return peer.ws.WriteMsg(p2p.Msg{Code: p2pRequestCompleteCode, Size: uint32(size), Payload: r}) ++} ++ + // SendP2PMessage sends a peer-to-peer message to a specific peer. 
+ func (whisper *Whisper) SendP2PMessage(peerID []byte, envelope *Envelope) error { + p, err := whisper.getPeer(peerID) +@@ -821,8 +830,22 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { + log.Warn("failed to decode p2p request message, peer will be disconnected", "peer", p.peer.ID(), "err", err) + return errors.New("invalid p2p request") + } ++ + whisper.mailServer.DeliverMail(p, &request) + } ++ case p2pRequestCompleteCode: ++ if p.trusted { ++ var requestID common.Hash ++ if err := packet.Decode(&requestID); err != nil { ++ log.Warn("failed to decode response message, peer will be disconnected", "peer", p.peer.ID(), "err", err) ++ return errors.New("invalid request response message") ++ } ++ ++ whisper.envelopeFeed.Send(EnvelopeEvent{ ++ Hash: requestID, ++ Event: EventMailServerRequestCompleted, ++ }) ++ } + default: + // New message types might be implemented in the future versions of Whisper. + // For forward compatibility, just ignore. diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0033-mailserver-response-contains-cursor.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0033-mailserver-response-contains-cursor.patch new file mode 100644 index 0000000000..845036afcd --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0033-mailserver-response-contains-cursor.patch @@ -0,0 +1,134 @@ +diff --git a/whisper/whisperv6/events.go b/whisper/whisperv6/events.go +index 1665539d6..fe7570ed5 100644 +--- a/whisper/whisperv6/events.go ++++ b/whisper/whisperv6/events.go +@@ -13,10 +13,14 @@ const ( + EventEnvelopeSent EventType = "envelope.sent" + // EventEnvelopeExpired fires when envelop expired + EventEnvelopeExpired EventType = "envelope.expired" ++ // EventEnvelopeAvailable fires when envelop is available for filters ++ EventEnvelopeAvailable EventType = "envelope.available" + // EventMailServerRequestCompleted fires after mailserver sends all the requested messages + 
EventMailServerRequestCompleted EventType = "mailserver.request.completed" + // EventMailServerRequestExpired fires after mailserver the request TTL ends + EventMailServerRequestExpired EventType = "mailserver.request.expired" ++ // EventMailServerEnvelopeArchived fires after an envelope has been archived ++ EventMailServerEnvelopeArchived EventType = "mailserver.envelope.archived" + ) + + // EnvelopeEvent used for envelopes events. +@@ -24,4 +28,5 @@ type EnvelopeEvent struct { + Event EventType + Hash common.Hash + Peer discover.NodeID ++ Data interface{} + } +diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go +index ccc733165..dbe4c3fc0 100644 +--- a/whisper/whisperv6/whisper.go ++++ b/whisper/whisperv6/whisper.go +@@ -49,6 +49,12 @@ type Statistics struct { + totalMessagesCleared int + } + ++// MailServerResponse is the response payload sent by the mailserver ++type MailServerResponse struct { ++ LastEnvelopeHash common.Hash ++ Cursor []byte ++} ++ + const ( + maxMsgSizeIdx = iota // Maximal message length allowed by the whisper node + overflowIdx // Indicator of message queue overflow +@@ -397,8 +403,8 @@ func (whisper *Whisper) RequestHistoricMessages(peerID []byte, envelope *Envelop + return p2p.Send(p.ws, p2pRequestCode, envelope) + } + +-func (whisper *Whisper) SendHistoricMessageResponse(peer *Peer, requestID common.Hash) error { +- size, r, err := rlp.EncodeToReader(requestID) ++func (whisper *Whisper) SendHistoricMessageResponse(peer *Peer, payload []byte) error { ++ size, r, err := rlp.EncodeToReader(payload) + if err != nil { + return err + } +@@ -852,15 +858,49 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { + } + case p2pRequestCompleteCode: + if p.trusted { +- var requestID common.Hash +- if err := packet.Decode(&requestID); err != nil { ++ var payload []byte ++ if err := packet.Decode(&payload); err != nil { + log.Warn("failed to decode response message, peer will be disconnected", "peer", 
p.peer.ID(), "err", err) + return errors.New("invalid request response message") + } + ++ // check if payload is ++ // - requestID or ++ // - requestID + lastEnvelopeHash or ++ // - requestID + lastEnvelopeHash + cursor ++ // requestID is the hash of the request envelope. ++ // lastEnvelopeHash is the last envelope sent by the mail server ++ // cursor is the db key, 36 bytes: 4 for the timestamp + 32 for the envelope hash. ++ // length := len(payload) ++ ++ if len(payload) < common.HashLength || len(payload) > common.HashLength*3+4 { ++ log.Warn("invalid response message, peer will be disconnected", "peer", p.peer.ID(), "err", err, "payload size", len(payload)) ++ return errors.New("invalid response size") ++ } ++ ++ var ( ++ requestID common.Hash ++ lastEnvelopeHash common.Hash ++ cursor []byte ++ ) ++ ++ requestID = common.BytesToHash(payload[:common.HashLength]) ++ ++ if len(payload) >= common.HashLength*2 { ++ lastEnvelopeHash = common.BytesToHash(payload[common.HashLength : common.HashLength*2]) ++ } ++ ++ if len(payload) >= common.HashLength*2+36 { ++ cursor = payload[common.HashLength*2 : common.HashLength*2+36] ++ } ++ + whisper.envelopeFeed.Send(EnvelopeEvent{ + Hash: requestID, + Event: EventMailServerRequestCompleted, ++ Data: &MailServerResponse{ ++ LastEnvelopeHash: lastEnvelopeHash, ++ Cursor: cursor, ++ }, + }) + } + default: +@@ -944,6 +984,10 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { + whisper.postEvent(envelope, isP2P) // notify the local node about the new message + if whisper.mailServer != nil { + whisper.mailServer.Archive(envelope) ++ whisper.envelopeFeed.Send(EnvelopeEvent{ ++ Hash: envelope.Hash(), ++ Event: EventMailServerEnvelopeArchived, ++ }) + } + } + return true, nil +@@ -986,9 +1030,17 @@ func (whisper *Whisper) processQueue() { + + case e = <-whisper.messageQueue: + whisper.filters.NotifyWatchers(e, false) ++ whisper.envelopeFeed.Send(EnvelopeEvent{ ++ Hash: e.Hash(), ++ Event: 
EventEnvelopeAvailable, ++ }) + + case e = <-whisper.p2pMsgQueue: + whisper.filters.NotifyWatchers(e, true) ++ whisper.envelopeFeed.Send(EnvelopeEvent{ ++ Hash: e.Hash(), ++ Event: EventEnvelopeAvailable, ++ }) + } + } + } diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0035-add_goroutines_metrics.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0035-add_goroutines_metrics.patch new file mode 100644 index 0000000000..7516a0e8d8 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0035-add_goroutines_metrics.patch @@ -0,0 +1,23 @@ +diff --git a/metrics/metrics.go b/metrics/metrics.go +index 2a2b804e..d4d703df 100644 +--- a/metrics/metrics.go ++++ b/metrics/metrics.go +@@ -56,6 +56,7 @@ func CollectProcessMetrics(refresh time.Duration) { + memFrees := GetOrRegisterMeter("system/memory/frees", DefaultRegistry) + memInuse := GetOrRegisterMeter("system/memory/inuse", DefaultRegistry) + memPauses := GetOrRegisterMeter("system/memory/pauses", DefaultRegistry) ++ goroutines := GetOrRegisterGauge("system/goroutines", DefaultRegistry) + + var diskReads, diskReadBytes, diskWrites, diskWriteBytes Meter + var diskReadBytesCounter, diskWriteBytesCounter Counter +@@ -89,6 +90,10 @@ func CollectProcessMetrics(refresh time.Duration) { + diskReadBytesCounter.Inc(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) + diskWriteBytesCounter.Inc(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) + } ++ ++ goroutines.Update(int64(runtime.NumGoroutine())) ++ + time.Sleep(refresh) + } ++ + } diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0037-whisper-metrics.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0037-whisper-metrics.patch new file mode 100644 index 0000000000..ffbec149f7 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0037-whisper-metrics.patch @@ -0,0 +1,88 @@ +diff --git a/whisper/whisperv6/metrics.go b/whisper/whisperv6/metrics.go 
+new file mode 100644 +index 00000000..b0e899da +--- /dev/null ++++ b/whisper/whisperv6/metrics.go +@@ -0,0 +1,16 @@ ++package whisperv6 ++ ++import "github.com/ethereum/go-ethereum/metrics" ++ ++var ( ++ envelopeAddedCounter = metrics.NewRegisteredCounter("whisper/envelopeAdded", nil) ++ envelopeNewAddedCounter = metrics.NewRegisteredCounter("whisper/envelopeNewAdded", nil) ++ envelopeClearedCounter = metrics.NewRegisteredCounter("whisper/envelopeCleared", nil) ++ envelopeErrFromFutureCounter = metrics.NewRegisteredCounter("whisper/envelopeErrFromFuture", nil) ++ envelopeErrVeryOldCounter = metrics.NewRegisteredCounter("whisper/envelopeErrVeryOld", nil) ++ envelopeErrExpiredCounter = metrics.NewRegisteredCounter("whisper/envelopeErrExpired", nil) ++ envelopeErrOversizedCounter = metrics.NewRegisteredCounter("whisper/envelopeErrOversized", nil) ++ envelopeErrLowPowCounter = metrics.NewRegisteredCounter("whisper/envelopeErrLowPow", nil) ++ envelopeErrNoBloomMatchCounter = metrics.NewRegisteredCounter("whisper/envelopeErrNoBloomMatch", nil) ++ envelopeSizeMeter = metrics.NewRegisteredMeter("whisper/envelopeSize", nil) ++) +diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go +index 482457cb..002aaadf 100644 +--- a/whisper/whisperv6/whisper.go ++++ b/whisper/whisperv6/whisper.go +@@ -894,8 +894,11 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { + now := uint32(whisper.timeSource().Unix()) + sent := envelope.Expiry - envelope.TTL + ++ envelopeAddedCounter.Inc(1) ++ + if sent > now { + if sent-DefaultSyncAllowance > now { ++ envelopeErrFromFutureCounter.Inc(1) + return false, fmt.Errorf("envelope created in the future [%x]", envelope.Hash()) + } + // recalculate PoW, adjusted for the time difference, plus one second for latency +@@ -904,13 +907,16 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { + + if envelope.Expiry < now { + if envelope.Expiry+DefaultSyncAllowance*2 < now { ++ 
envelopeErrVeryOldCounter.Inc(1) + return false, fmt.Errorf("very old message") + } + log.Debug("expired envelope dropped", "hash", envelope.Hash().Hex()) ++ envelopeErrExpiredCounter.Inc(1) + return false, nil // drop envelope without error + } + + if uint32(envelope.size()) > whisper.MaxMessageSize() { ++ envelopeErrOversizedCounter.Inc(1) + return false, fmt.Errorf("huge messages are not allowed [%x]", envelope.Hash()) + } + +@@ -919,6 +925,7 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { + // in this case the previous value is retrieved by MinPowTolerance() + // for a short period of peer synchronization. + if envelope.PoW() < whisper.MinPowTolerance() { ++ envelopeErrLowPowCounter.Inc(1) + return false, fmt.Errorf("envelope with low PoW received: PoW=%f, hash=[%v]", envelope.PoW(), envelope.Hash().Hex()) + } + } +@@ -928,6 +935,7 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { + // in this case the previous value is retrieved by BloomFilterTolerance() + // for a short period of peer synchronization. 
+ if !BloomFilterMatch(whisper.BloomFilterTolerance(), envelope.Bloom()) { ++ envelopeErrNoBloomMatchCounter.Inc(1) + return false, fmt.Errorf("envelope does not match bloom filter, hash=[%v], bloom: \n%x \n%x \n%x", + envelope.Hash().Hex(), whisper.BloomFilter(), envelope.Bloom(), envelope.Topic) + } +@@ -952,6 +960,8 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { + log.Trace("whisper envelope already cached", "hash", envelope.Hash().Hex()) + } else { + log.Trace("cached whisper envelope", "hash", envelope.Hash().Hex()) ++ envelopeNewAddedCounter.Inc(1) ++ envelopeSizeMeter.Mark(int64(envelope.size())) + whisper.statsMu.Lock() + whisper.stats.memoryUsed += envelope.size() + whisper.statsMu.Unlock() +@@ -1053,6 +1063,7 @@ func (whisper *Whisper) expire() { + hashSet.Each(func(v interface{}) bool { + sz := whisper.envelopes[v.(common.Hash)].size() + delete(whisper.envelopes, v.(common.Hash)) ++ envelopeClearedCounter.Inc(1) + whisper.envelopeFeed.Send(EnvelopeEvent{ + Hash: v.(common.Hash), + Event: EventEnvelopeExpired, diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/README.md b/vendor/github.com/ethereum/go-ethereum/_assets/patches/README.md new file mode 100644 index 0000000000..c642e26f49 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/README.md @@ -0,0 +1,38 @@ +Status Patches for geth (go-ethereum) +===================================== + +status-go uses [go-ethereum](https://github.com/status-im/go-ethereum) as its dependency. As any other Go dependency `go-ethereum` code is vendored and stored in `vendor/` folder. + +However, there are a few changes has been made to the upstream, that are specific to Status and should not be merged to the upstream. We keep those changes as a set of patches, that can be applied upon each next release of `go-ethereum`. Patched version of `go-ethereum` is available in vendor folder. 
+
+We try to minimize the number and size of changes in those patches as much as possible and, where possible, contribute changes to the upstream.
+
+# Creating patches
+
+Instructions for creating a patch from the command line:
+
+1. Make changes in `vendor/github.com/ethereum/go-ethereum/`,
+1. Go to the root `status-go` directory,
+1. Create a patch `git diff --relative=vendor/github.com/ethereum/go-ethereum > _assets/patches/geth/0000-name-of-the-patch.patch`
+1. Commit changes.
+
+# Updating patches
+
+1. Tweak the patch file.
+1. Run `make dep-ensure` to re-apply patches.
+
+# Removing patches
+
+1. Remove the patch file
+1. Remove the link from [this README](./README.md)
+1. Run `make dep-ensure` to re-apply patches.
+
+# Updating
+
+When a new stable release of `go-ethereum` comes out, we need to upgrade our vendored copy. We use `dep` for vendoring, so for upgrading:
+
+- Change target branch for `go-ethereum` in `Gopkg.toml`.
+- `dep ensure -update github.com/ethereum/go-ethereum`
+- `make dep-ensure`
+
+This will ensure that the dependency is upgraded and fully patched. Upon success, you can do `make vendor-check` after committing all the changes, in order to ensure that all changes are valid.
diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/patcher b/vendor/github.com/ethereum/go-ethereum/_assets/patches/patcher
new file mode 100755
index 0000000000..4918f2e75f
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/patcher
@@ -0,0 +1,189 @@
+#!/usr/bin/env bash
+
+# Default behaviour:
+# Reverts all patches in patch dir, notes down the ones
+# which were previously applied. Applies all from the beginning
+# and reports previously unapplied patches. If there's
+# an error, reverts the last one and stops.
+# +# Usage: ./patcher -b -r -v +# -b: is the target location relative to which patches will be applied +# -p: is where to take the patches from (default is geth) +# -r: reverts all and exit if this flag is present +# -c: reverts all to see what's applied, applies all previously applied back again, +# reports unapplied patches in this branch by comparing with "develop" including +# uncommitted ones and exits (with 1 if there are any) +# -v: verbose error reporting about failed patch +# +# If -b is not present, default path is as below ($basepath). + +dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +# Patches path is geth unless specified. +patches=("$dir"/*.patch) + +# Use this branch as a reference for comparing patches +# in current branch (-c option). +baseBranch="develop" + +# Base path is the current root project unless specified. +basepath="." + +gitApply() { + f=$1 + basepath=$2 + verbose=$3 + + if [ $verbose -eq 1 ]; then + if [ $basepath == "." ]; then + git apply "$f" + else + git apply "$f" --directory="$basepath" + fi + else + if [ $basepath == "." ]; then + git apply "$f" > /dev/null 2>&1 + else + git apply "$f" --directory="$basepath" > /dev/null 2>&1 + fi + fi +} + +gitApplyReverse() { + f=$1 + basepath=$2 + + if [ $basepath == "." ]; then + git apply "$f" -R > /dev/null 2>&1 + else + git apply "$f" --directory="$basepath" -R > /dev/null 2>&1 + fi +} + +verbose=0 +revert=0 +compare=0 +while getopts b:p:rcv opt; do + case $opt in + b) + basepath=$OPTARG + ;; + p) + patches=("$dir"/$OPTARG/*.patch) + ;; + r) + revert=1 + ;; + c) + compare=1 + ;; + v) + verbose=1 + ;; + \?) + echo "Invalid flag: -$OPTARG" >&2 + exit + ;; + esac +done + +if [ $revert -eq 1 ]; then + # Reverts in reverse order and exits. + for ((i=${#patches[@]}-1; i>=0; i--)); do + gitApplyReverse "${patches[$i]}" "$basepath" 0 + done + echo "Reverted all." 
+ exit +fi +if [ $compare -eq 1 ]; then + applied=() + unapplied=() + # Finds applied patches using reverse order and + # notes them down. + for ((i=${#patches[@]}-1; i>=0; i--)); do + f=${patches[$i]} + gitApplyReverse "$f" "$basepath" + if [ $? -ne 0 ]; then + unapplied+=("$f") + else + applied+=("$f") + fi + done + # Applies reverted patches back again. + for ((i=${#applied[@]}-1; i>=0; i--)); do + f=${applied[$i]} + gitApply "$f" "$basepath" 0 + done + # Sorts out new patches' paths by comparing with base branch. + fromBaseBranch=($(git diff $baseBranch --stat | grep "\\.patch" | + while IFS=" " read -r -a line; do + path="$(pwd)/${line[0]}" + echo "$path" + done + )) + # Also does the same with uncommitted. + uncommitted=($(git status -u --porcelain | grep "\\.patch" | + while IFS=" " read -r -a line; do + length=${#line[@]} + path="$(pwd)/${line[$((length - 1))]}" + echo "$path" + done + )) + newPatches=( "${fromBaseBranch[@]}" "${uncommitted[@]}" ) + # Checks new patches and exits with 1 if there are unapplied. + hasUnapplied=0 + for newPatch in "${newPatches[@]}"; do + for unapp in "${unapplied[@]}"; do + if [ "$unapp" == "$newPatch" ]; then + echo "Recently added/changed but not applied: $unapp" + hasUnapplied=1 + break + fi + done + done + exit $hasUnapplied +fi + +applied=() + +echo -en "\\n" +echo "Previously applied:" +echo "===================" +# Reverts every patch in reverse order to see +# which was previously applied. +for ((i=${#patches[@]}-1; i>=0; i--)); do + f=${patches[$i]} + gitApplyReverse "$f" "$basepath" + if [ $? -eq 0 ]; then + applied+=("$f") + echo "$f" + fi +done +echo "===================" +echo -en "\\n" + +# Applies every patch from the beginning. +for ((i=0; i<${#patches[@]}; i++)); do + f=${patches[$i]} + # If not applied, report it new. 
+ has=0 + for patch in "${applied[@]}"; do + if [ "$patch" == "$f" ]; then + has=1 + break + fi + done + if [ $has -eq 0 ]; then + echo "Applying new: $f" + echo -en "\\n" + fi + gitApply "$f" "$basepath" $verbose + if [ $? -ne 0 ]; then + echo "Failed and reverting: $f" + gitApplyReverse "$f" "$basepath" + echo -en "\\n" + exit 1 + fi +done + +echo -en "\\n" +echo "Done." diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go index 0ada79b150..d610880c9b 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go @@ -69,7 +69,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBac database := ethdb.NewMemDatabase() genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) - blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}) + blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil) backend := &SimulatedBackend{ database: database, diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go index 9de36daffb..dce89d2b4e 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go @@ -103,7 +103,12 @@ func NewType(t string) (typ Type, err error) { return typ, err } // parse the type and size of the abi-type. 
- parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0] + matches := typeRegex.FindAllStringSubmatch(t, -1) + if len(matches) == 0 { + return Type{}, fmt.Errorf("invalid type '%v'", t) + } + parsedType := matches[0] + // varSize is the size of the variable var varSize int if len(parsedType[3]) > 0 { diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go index 793d515adf..d5875140cc 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go @@ -25,8 +25,17 @@ import ( "github.com/ethereum/go-ethereum/common" ) +var ( + maxUint256 = big.NewInt(0).Add( + big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil), + big.NewInt(-1)) + maxInt256 = big.NewInt(0).Add( + big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil), + big.NewInt(-1)) +) + // reads the integer based on its kind -func readInteger(kind reflect.Kind, b []byte) interface{} { +func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} { switch kind { case reflect.Uint8: return b[len(b)-1] @@ -45,7 +54,20 @@ func readInteger(kind reflect.Kind, b []byte) interface{} { case reflect.Int64: return int64(binary.BigEndian.Uint64(b[len(b)-8:])) default: - return new(big.Int).SetBytes(b) + // the only case lefts for integer is int256/uint256. + // big.SetBytes can't tell if a number is negative, positive on itself. + // On EVM, if the returned number > max int256, it is negative. 
+ ret := new(big.Int).SetBytes(b) + if typ == UintTy { + return ret + } + + if ret.Cmp(maxInt256) > 0 { + ret.Add(maxUint256, big.NewInt(0).Neg(ret)) + ret.Add(ret, big.NewInt(1)) + ret.Neg(ret) + } + return ret } } @@ -179,7 +201,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { case StringTy: // variable arrays are written at the end of the return bytes return string(output[begin : begin+end]), nil case IntTy, UintTy: - return readInteger(t.Kind, returnOutput), nil + return readInteger(t.T, t.Kind, returnOutput), nil case BoolTy: return readBool(returnOutput) case AddressTy: diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go index 65c83f3b0c..f8fbc044c3 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go @@ -220,26 +220,34 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou return key, a, err } -func writeKeyFile(file string, content []byte) error { +func writeTemporaryKeyFile(file string, content []byte) (string, error) { // Create the keystore directory with appropriate permissions // in case it is not present yet. const dirPerm = 0700 if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil { - return err + return "", err } // Atomic write: create a temporary hidden file first // then move it into place. TempFile assigns mode 0600. 
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp") if err != nil { - return err + return "", err } if _, err := f.Write(content); err != nil { f.Close() os.Remove(f.Name()) - return err + return "", err } f.Close() - return os.Rename(f.Name(), file) + return f.Name(), nil +} + +func writeKeyFile(file string, content []byte) error { + name, err := writeTemporaryKeyFile(file, content) + if err != nil { + return err + } + return os.Rename(name, file) } // keyFileName implements the naming convention for keyfiles: diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go index ac2ab00837..9f274485a6 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go @@ -79,7 +79,7 @@ type unlocked struct { // NewKeyStore creates a keystore for the given directory. func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore { keydir, _ = filepath.Abs(keydir) - ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP}} + ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}} ks.init(keydir) return ks } diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go index 2b6ef2528c..f6ac25a2a0 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go @@ -35,6 +35,7 @@ import ( "fmt" "io" "io/ioutil" + "os" "path/filepath" "github.com/ethereum/go-ethereum/common" @@ -73,6 +74,10 @@ type keyStorePassphrase struct { keysDirPath string scryptN int scryptP int + // skipKeyFileVerification disables the security-feature which does + // reads and decrypts any newly created keyfiles. 
This should be 'false' in all + // cases except tests -- setting this to 'true' is not recommended. + skipKeyFileVerification bool } func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) { @@ -94,7 +99,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) // StoreKey generates a key, encrypts with 'auth' and stores in the given directory func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) { - _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth) + _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth) return a.Address, err } @@ -103,7 +108,25 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er if err != nil { return err } - return writeKeyFile(filename, keyjson) + // Write into temporary file + tmpName, err := writeTemporaryKeyFile(filename, keyjson) + if err != nil { + return err + } + if !ks.skipKeyFileVerification { + // Verify that we can decrypt the file with the given password. + _, err = ks.GetKey(key.Address, tmpName, auth) + if err != nil { + msg := "An error was encountered when saving and verifying the keystore file. \n" + + "This indicates that the keystore is corrupted. \n" + + "The corrupted file is stored at \n%v\n" + + "Please file a ticket at:\n\n" + + "https://github.com/ethereum/go-ethereum/issues." 
+ + "The error was : %s" + return fmt.Errorf(msg, tmpName, err) + } + } + return os.Rename(tmpName, filename) } func (ks keyStorePassphrase) JoinPath(filename string) string { diff --git a/vendor/github.com/ethereum/go-ethereum/appveyor.yml b/vendor/github.com/ethereum/go-ethereum/appveyor.yml index 05ff92cf36..b056cb3fd7 100644 --- a/vendor/github.com/ethereum/go-ethereum/appveyor.yml +++ b/vendor/github.com/ethereum/go-ethereum/appveyor.yml @@ -23,8 +23,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip - - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-%GETH_ARCH%.zip + - 7z x go1.11.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go index 85704754de..f363a86f2c 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go @@ -225,7 +225,7 @@ func initializeSecrets(c *cli.Context) error { if _, err := os.Stat(location); err == nil { return fmt.Errorf("file %v already exists, will not overwrite", location) } - err = ioutil.WriteFile(location, masterSeed, 0700) + err = ioutil.WriteFile(location, masterSeed, 0400) if err != nil { return err } @@ -540,14 +540,14 @@ func readMasterKey(ctx *cli.Context) ([]byte, error) { // checkFile is a convenience function to check if a file // * exists -// * is mode 0600 +// * is mode 0400 func checkFile(filename string) error { info, err := os.Stat(filename) if err != nil { return fmt.Errorf("failed stat on %s: %v", filename, err) } // Check the unix permission bits - if info.Mode().Perm()&077 != 0 { + if info.Mode().Perm()&0377 != 0 { return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, 
info.Mode().String()) } return nil diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/ethkey/README.md b/vendor/github.com/ethereum/go-ethereum/cmd/ethkey/README.md index cf72ba43d7..48d3c9e9b7 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/ethkey/README.md +++ b/vendor/github.com/ethereum/go-ethereum/cmd/ethkey/README.md @@ -21,21 +21,33 @@ Private key information can be printed by using the `--private` flag; make sure to use this feature with great caution! -### `ethkey sign ` +### `ethkey signmessage ` Sign the message with a keyfile. It is possible to refer to a file containing the message. +To sign a message contained in a file, use the `--msgfile` flag. -### `ethkey verify
` +### `ethkey verifymessage
` Verify the signature of the message. It is possible to refer to a file containing the message. +To sign a message contained in a file, use the --msgfile flag. + + +### `ethkey changepassphrase ` + +Change the passphrase of a keyfile. +use the `--newpasswordfile` to point to the new password file. ## Passphrases For every command that uses a keyfile, you will be prompted to provide the passphrase for decrypting the keyfile. To avoid this message, it is possible -to pass the passphrase by using the `--passphrase` flag pointing to a file that +to pass the passphrase by using the `--passwordfile` flag pointing to a file that contains the passphrase. + +## JSON + +In case you need to output the result in a JSON format, you shall by using the `--json` flag. diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/disasm.go b/vendor/github.com/ethereum/go-ethereum/cmd/evm/disasm.go index 4a442cf784..69f611e39b 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/disasm.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/evm/disasm.go @@ -44,7 +44,7 @@ func disasmCmd(ctx *cli.Context) error { return err } - code := strings.TrimSpace(string(in[:])) + code := strings.TrimSpace(string(in)) fmt.Printf("%v\n", code) return asm.PrintDisassembled(code) } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go b/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go index 7138a9ddd4..962fc021d7 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go @@ -80,13 +80,13 @@ func runCmd(ctx *cli.Context) error { } var ( - tracer vm.Tracer - debugLogger *vm.StructLogger - statedb *state.StateDB - chainConfig *params.ChainConfig - sender = common.BytesToAddress([]byte("sender")) - receiver = common.BytesToAddress([]byte("receiver")) - blockNumber uint64 + tracer vm.Tracer + debugLogger *vm.StructLogger + statedb *state.StateDB + chainConfig *params.ChainConfig + sender = 
common.BytesToAddress([]byte("sender")) + receiver = common.BytesToAddress([]byte("receiver")) + genesisConfig *core.Genesis ) if ctx.GlobalBool(MachineFlag.Name) { tracer = NewJSONLogger(logconfig, os.Stdout) @@ -98,13 +98,14 @@ func runCmd(ctx *cli.Context) error { } if ctx.GlobalString(GenesisFlag.Name) != "" { gen := readGenesis(ctx.GlobalString(GenesisFlag.Name)) + genesisConfig = gen db := ethdb.NewMemDatabase() genesis := gen.ToBlock(db) statedb, _ = state.New(genesis.Root(), state.NewDatabase(db)) chainConfig = gen.Config - blockNumber = gen.Number } else { statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase())) + genesisConfig = new(core.Genesis) } if ctx.GlobalString(SenderFlag.Name) != "" { sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name)) @@ -156,13 +157,19 @@ func runCmd(ctx *cli.Context) error { } initialGas := ctx.GlobalUint64(GasFlag.Name) + if genesisConfig.GasLimit != 0 { + initialGas = genesisConfig.GasLimit + } runtimeConfig := runtime.Config{ Origin: sender, State: statedb, GasLimit: initialGas, GasPrice: utils.GlobalBig(ctx, PriceFlag.Name), Value: utils.GlobalBig(ctx, ValueFlag.Name), - BlockNumber: new(big.Int).SetUint64(blockNumber), + Difficulty: genesisConfig.Difficulty, + Time: new(big.Int).SetUint64(genesisConfig.Timestamp), + Coinbase: genesisConfig.Coinbase, + BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), EVMConfig: vm.Config{ Tracer: tracer, Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go index 6799060275..e17ac36f52 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go @@ -157,7 +157,8 @@ func main() { if blob, err = ioutil.ReadFile(*accPassFlag); err != nil { log.Crit("Failed to read account password contents", "file", *accPassFlag, 
"err", err) } - pass := string(blob) + // Delete trailing newline in password + pass := strings.TrimSuffix(string(blob), "\n") ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP) if blob, err = ioutil.ReadFile(*accJSONFlag); err != nil { @@ -198,6 +199,8 @@ type faucet struct { keystore *keystore.KeyStore // Keystore containing the single signer account accounts.Account // Account funding user faucet requests + head *types.Header // Current head header of the faucet + balance *big.Int // Current balance of the faucet nonce uint64 // Current pending nonce of the faucet price *big.Int // Current gas price to issue funds with @@ -323,33 +326,30 @@ func (f *faucet) apiHandler(conn *websocket.Conn) { nonce uint64 err error ) - for { - // Attempt to retrieve the stats, may error on no faucet connectivity - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - head, err = f.client.HeaderByNumber(ctx, nil) - if err == nil { - balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number) - if err == nil { - nonce, err = f.client.NonceAt(ctx, f.account.Address, nil) - } + for head == nil || balance == nil { + // Retrieve the current stats cached by the faucet + f.lock.RLock() + if f.head != nil { + head = types.CopyHeader(f.head) } - cancel() + if f.balance != nil { + balance = new(big.Int).Set(f.balance) + } + nonce = f.nonce + f.lock.RUnlock() - // If stats retrieval failed, wait a bit and retry - if err != nil { - if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil { + if head == nil || balance == nil { + // Report the faucet offline until initial stats are ready + if err = sendError(conn, errors.New("Faucet offline")); err != nil { log.Warn("Failed to send faucet error to client", "err", err) return } time.Sleep(3 * time.Second) - continue } - // Initial stats reported successfully, proceed with user interaction - break } // Send over 
the initial stats and the latest header if err = send(conn, map[string]interface{}{ - "funds": balance.Div(balance, ether), + "funds": new(big.Int).Div(balance, ether), "funded": nonce, "peers": f.stack.Server().PeerCount(), "requests": f.reqs, @@ -519,6 +519,47 @@ func (f *faucet) apiHandler(conn *websocket.Conn) { } } +// refresh attempts to retrieve the latest header from the chain and extract the +// associated faucet balance and nonce for connectivity caching. +func (f *faucet) refresh(head *types.Header) error { + // Ensure a state update does not run for too long + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // If no header was specified, use the current chain head + var err error + if head == nil { + if head, err = f.client.HeaderByNumber(ctx, nil); err != nil { + return err + } + } + // Retrieve the balance, nonce and gas price from the current head + var ( + balance *big.Int + nonce uint64 + price *big.Int + ) + if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil { + return err + } + if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil { + return err + } + if price, err = f.client.SuggestGasPrice(ctx); err != nil { + return err + } + // Everything succeeded, update the cached stats and eject old requests + f.lock.Lock() + f.head, f.balance = head, balance + f.price, f.nonce = price, nonce + for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce { + f.reqs = f.reqs[1:] + } + f.lock.Unlock() + + return nil +} + // loop keeps waiting for interesting events and pushes them out to connected // websockets. 
func (f *faucet) loop() { @@ -536,45 +577,27 @@ func (f *faucet) loop() { go func() { for head := range update { // New chain head arrived, query the current stats and stream to clients - var ( - balance *big.Int - nonce uint64 - price *big.Int - err error - ) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number) - if err == nil { - nonce, err = f.client.NonceAt(ctx, f.account.Address, nil) - if err == nil { - price, err = f.client.SuggestGasPrice(ctx) - } + timestamp := time.Unix(head.Time.Int64(), 0) + if time.Since(timestamp) > time.Hour { + log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp)) + continue } - cancel() - - // If querying the data failed, try for the next block - if err != nil { + if err := f.refresh(head); err != nil { log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err) continue - } else { - log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price) } // Faucet state retrieved, update locally and send to clients - balance = new(big.Int).Div(balance, ether) + f.lock.RLock() + log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price) - f.lock.Lock() - f.price, f.nonce = price, nonce - for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce { - f.reqs = f.reqs[1:] - } - f.lock.Unlock() + balance := new(big.Int).Div(f.balance, ether) + peers := f.stack.Server().PeerCount() - f.lock.RLock() for _, conn := range f.conns { if err := send(conn, map[string]interface{}{ "funds": balance, "funded": f.nonce, - "peers": f.stack.Server().PeerCount(), + "peers": peers, "requests": f.reqs, }, time.Second); err != nil { log.Warn("Failed to send stats to client", "err", err) 
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/chaincmd.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/chaincmd.go index 87548865be..562c7e0dee 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/chaincmd.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/chaincmd.go @@ -340,9 +340,9 @@ func importPreimages(ctx *cli.Context) error { start := time.Now() if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil { - utils.Fatalf("Export error: %v\n", err) + utils.Fatalf("Import error: %v\n", err) } - fmt.Printf("Export done in %v\n", time.Since(start)) + fmt.Printf("Import done in %v\n", time.Since(start)) return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go index e6bd4d5bef..b0749d2329 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go @@ -168,6 +168,9 @@ func makeFullNode(ctx *cli.Context) *node.Node { if ctx.GlobalIsSet(utils.WhisperMinPOWFlag.Name) { cfg.Shh.MinimumAcceptedPOW = ctx.Float64(utils.WhisperMinPOWFlag.Name) } + if ctx.GlobalIsSet(utils.WhisperRestrictConnectionBetweenLightClientsFlag.Name) { + cfg.Shh.RestrictConnectionBetweenLightClients = true + } utils.RegisterShhService(stack, &cfg.Shh) } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go index 4d7e98698c..fae4b57181 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go @@ -130,6 +130,8 @@ var ( utils.NoCompactionFlag, utils.GpoBlocksFlag, utils.GpoPercentileFlag, + utils.EWASMInterpreterFlag, + utils.EVMInterpreterFlag, configFileFlag, } @@ -151,6 +153,7 @@ var ( utils.WhisperEnabledFlag, utils.WhisperMaxMessageSizeFlag, utils.WhisperMinPOWFlag, + utils.WhisperRestrictConnectionBetweenLightClientsFlag, } metricsFlags = 
[]cli.Flag{ diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go index a674eca4f1..8b0491ce3c 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go @@ -207,6 +207,8 @@ var AppHelpFlagGroups = []flagGroup{ Name: "VIRTUAL MACHINE", Flags: []cli.Flag{ utils.VMEnableDebugFlag, + utils.EVMInterpreterFlag, + utils.EWASMInterpreterFlag, }, }, { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go index 5f781c4152..1a01631ff4 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go @@ -92,7 +92,7 @@ func (w *wizard) deployDashboard() { pages = append(pages, page) } } - // Promt the user to chose one, enter manually or simply not list this service + // Prompt the user to chose one, enter manually or simply not list this service defLabel, defChoice := "don't list", len(pages)+2 if len(pages) > 0 { defLabel, defChoice = pages[0], 1 diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_network.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_network.go index c0ddcc2a3c..83b10cf375 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_network.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_network.go @@ -87,7 +87,7 @@ func (w *wizard) makeServer() string { return input } -// selectServer lists the user all the currnetly known servers to choose from, +// selectServer lists the user all the currently known servers to choose from, // also granting the option to add a new one. 
func (w *wizard) selectServer() string { // List the available server to the user and wait for a choice @@ -115,7 +115,7 @@ func (w *wizard) selectServer() string { // manageComponents displays a list of network components the user can tear down // and an option func (w *wizard) manageComponents() { - // List all the componens we can tear down, along with an entry to deploy a new one + // List all the components we can tear down, along with an entry to deploy a new one fmt.Println() var serviceHosts, serviceNames []string diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go index 12cfbfc1a4..67e852dde5 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go @@ -51,7 +51,7 @@ func accessNewPass(ctx *cli.Context) { password = getPassPhrase("", 0, makePasswordList(ctx)) dryRun = ctx.Bool(SwarmDryRunFlag.Name) ) - accessKey, ae, err = api.DoPasswordNew(ctx, password, salt) + accessKey, ae, err = api.DoPassword(ctx, password, salt) if err != nil { utils.Fatalf("error getting session key: %v", err) } @@ -62,7 +62,6 @@ func accessNewPass(ctx *cli.Context) { utils.Fatalf("had an error printing the manifests: %v", err) } } else { - utils.Fatalf("uploading manifests") err = uploadManifests(ctx, m, nil) if err != nil { utils.Fatalf("had an error uploading the manifests: %v", err) @@ -85,7 +84,7 @@ func accessNewPK(ctx *cli.Context) { granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name) dryRun = ctx.Bool(SwarmDryRunFlag.Name) ) - sessionKey, ae, err = api.DoPKNew(ctx, privateKey, granteePublicKey, salt) + sessionKey, ae, err = api.DoPK(ctx, privateKey, granteePublicKey, salt) if err != nil { utils.Fatalf("error getting session key: %v", err) } @@ -110,23 +109,38 @@ func accessNewACT(ctx *cli.Context) { } var ( - ae *api.AccessEntry - actManifest *api.Manifest - accessKey []byte - err error - ref = args[0] - 
grantees = []string{} - actFilename = ctx.String(SwarmAccessGrantKeysFlag.Name) - privateKey = getPrivKey(ctx) - dryRun = ctx.Bool(SwarmDryRunFlag.Name) + ae *api.AccessEntry + actManifest *api.Manifest + accessKey []byte + err error + ref = args[0] + pkGrantees = []string{} + passGrantees = []string{} + pkGranteesFilename = ctx.String(SwarmAccessGrantKeysFlag.Name) + passGranteesFilename = ctx.String(utils.PasswordFileFlag.Name) + privateKey = getPrivKey(ctx) + dryRun = ctx.Bool(SwarmDryRunFlag.Name) ) + if pkGranteesFilename == "" && passGranteesFilename == "" { + utils.Fatalf("you have to provide either a grantee public-keys file or an encryption passwords file (or both)") + } - bytes, err := ioutil.ReadFile(actFilename) - if err != nil { - utils.Fatalf("had an error reading the grantee public key list") + if pkGranteesFilename != "" { + bytes, err := ioutil.ReadFile(pkGranteesFilename) + if err != nil { + utils.Fatalf("had an error reading the grantee public key list") + } + pkGrantees = strings.Split(string(bytes), "\n") + } + + if passGranteesFilename != "" { + bytes, err := ioutil.ReadFile(passGranteesFilename) + if err != nil { + utils.Fatalf("could not read password filename: %v", err) + } + passGrantees = strings.Split(string(bytes), "\n") } - grantees = strings.Split(string(bytes), "\n") - accessKey, ae, actManifest, err = api.DoACTNew(ctx, privateKey, salt, grantees) + accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees) if err != nil { utils.Fatalf("error generating ACT manifest: %v", err) } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go index bca4955b15..d679806e38 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go @@ -39,7 +39,7 @@ func hash(ctx *cli.Context) { defer f.Close() stat, _ := f.Stat() - fileStore := 
storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams()) + fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams()) addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false) if err != nil { utils.Fatalf("%v\n", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go index 637ae06e96..c93344c420 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go @@ -18,6 +18,7 @@ package main import ( "crypto/ecdsa" + "encoding/hex" "fmt" "io/ioutil" "os" @@ -208,6 +209,10 @@ var ( Name: "data", Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x", } + SwarmCompressedFlag = cli.BoolFlag{ + Name: "compressed", + Usage: "Prints encryption keys in compressed form", + } ) //declare a few constant error messages, useful for later error check comparisons in test @@ -252,6 +257,14 @@ func init() { Usage: "Print version numbers", Description: "The output of this command is supposed to be machine-readable", }, + { + Action: keys, + CustomHelpTemplate: helpTemplate, + Name: "print-keys", + Flags: []cli.Flag{SwarmCompressedFlag}, + Usage: "Print public key information", + Description: "The output of this command is supposed to be machine-readable", + }, { Action: upload, CustomHelpTemplate: helpTemplate, @@ -306,6 +319,7 @@ func init() { Flags: []cli.Flag{ SwarmAccessGrantKeysFlag, SwarmDryRunFlag, + utils.PasswordFileFlag, }, Name: "act", Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", @@ -580,6 +594,17 @@ func main() { } } +func keys(ctx *cli.Context) error { + privateKey := getPrivKey(ctx) + pub := hex.EncodeToString(crypto.FromECDSAPub(&privateKey.PublicKey)) + pubCompressed := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)) + if 
!ctx.Bool(SwarmCompressedFlag.Name) { + fmt.Println(fmt.Sprintf("publicKey=%s", pub)) + } + fmt.Println(fmt.Sprintf("publicKeyCompressed=%s", pubCompressed)) + return nil +} + func version(ctx *cli.Context) error { fmt.Println(strings.Title(clientIdentifier)) fmt.Println("Version:", sv.VersionWithMeta) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go index 87bc39816d..70aee19229 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go @@ -48,7 +48,7 @@ func main() { cli.StringFlag{ Name: "cluster-endpoint", Value: "testing", - Usage: "cluster to point to (open, or testing)", + Usage: "cluster to point to (local, open or testing)", Destination: &cluster, }, cli.IntFlag{ @@ -76,8 +76,8 @@ func main() { }, cli.IntFlag{ Name: "filesize", - Value: 1, - Usage: "file size for generated random file in MB", + Value: 1024, + Usage: "file size for generated random file in KB", Destination: &filesize, }, } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go index d5300b63d9..5e0ff4b0f3 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go @@ -39,6 +39,11 @@ import ( func generateEndpoints(scheme string, cluster string, from int, to int) { if cluster == "prod" { cluster = "" + } else if cluster == "local" { + for port := from; port <= to; port++ { + endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port)) + } + return } else { cluster = cluster + "." 
} @@ -53,13 +58,13 @@ func generateEndpoints(scheme string, cluster string, from int, to int) { } func cliUploadAndSync(c *cli.Context) error { - defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now()) + defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now()) generateEndpoints(scheme, cluster, from, to) log.Info("uploading to " + endpoints[0] + " and syncing") - f, cleanup := generateRandomFile(filesize * 1000000) + f, cleanup := generateRandomFile(filesize * 1000) defer cleanup() hash, err := upload(f, endpoints[0]) @@ -76,12 +81,7 @@ func cliUploadAndSync(c *cli.Context) error { log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) - if filesize < 10 { - time.Sleep(35 * time.Second) - } else { - time.Sleep(15 * time.Second) - time.Sleep(2 * time.Duration(filesize) * time.Second) - } + time.Sleep(3 * time.Second) wg := sync.WaitGroup{} for _, endpoint := range endpoints { @@ -109,7 +109,7 @@ func cliUploadAndSync(c *cli.Context) error { // fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file func fetch(hash string, endpoint string, original []byte, ruid string) error { log.Trace("sleeping", "ruid", ruid) - time.Sleep(5 * time.Second) + time.Sleep(3 * time.Second) log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) res, err := http.Get(endpoint + "/bzz:/" + hash + "/") diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go index 495bfe13e0..a2becd08b8 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go @@ -343,12 +343,12 @@ var ( } MinerGasPriceFlag = BigFlag{ Name: "miner.gasprice", - Usage: "Minimal gas price for mining a transactions", + Usage: "Minimum gas price for mining a transaction", Value: 
eth.DefaultConfig.MinerGasPrice, } MinerLegacyGasPriceFlag = BigFlag{ Name: "gasprice", - Usage: "Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)", + Usage: "Minimum gas price for mining a transaction (deprecated, use --miner.gasprice)", Value: eth.DefaultConfig.MinerGasPrice, } MinerEtherbaseFlag = cli.StringFlag{ @@ -567,6 +567,10 @@ var ( Usage: "Minimum POW accepted", Value: whisper.DefaultMinimumPoW, } + WhisperRestrictConnectionBetweenLightClientsFlag = cli.BoolFlag{ + Name: "shh.restrict-light", + Usage: "Restrict connection between two whisper light clients", + } // Metrics flags MetricsEnabledFlag = cli.BoolFlag{ @@ -606,6 +610,17 @@ var ( Usage: "InfluxDB `host` tag attached to all measurements", Value: "localhost", } + + EWASMInterpreterFlag = cli.StringFlag{ + Name: "vm.ewasm", + Usage: "External ewasm configuration (default = built-in interpreter)", + Value: "", + } + EVMInterpreterFlag = cli.StringFlag{ + Name: "vm.evm", + Usage: "External EVM configuration (default = built-in interpreter)", + Value: "", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1099,6 +1114,9 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) { if ctx.GlobalIsSet(WhisperMinPOWFlag.Name) { cfg.MinimumAcceptedPOW = ctx.GlobalFloat64(WhisperMinPOWFlag.Name) } + if ctx.GlobalIsSet(WhisperRestrictConnectionBetweenLightClientsFlag.Name) { + cfg.RestrictConnectionBetweenLightClients = true + } } // SetEthConfig applies eth-related command line flags to the config. 
@@ -1177,6 +1195,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name) } + if ctx.GlobalIsSet(EWASMInterpreterFlag.Name) { + cfg.EWASMInterpreter = ctx.GlobalString(EWASMInterpreterFlag.Name) + } + + if ctx.GlobalIsSet(EVMInterpreterFlag.Name) { + cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name) + } + // Override any default configs for hard coded networks. switch { case ctx.GlobalBool(TestnetFlag.Name): @@ -1372,7 +1398,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)} - chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg) + chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil) if err != nil { Fatalf("Can't create BlockChain: %v", err) } diff --git a/vendor/github.com/ethereum/go-ethereum/common/bytes.go b/vendor/github.com/ethereum/go-ethereum/common/bytes.go index cbab2c3fa9..0c257a1ee0 100644 --- a/vendor/github.com/ethereum/go-ethereum/common/bytes.go +++ b/vendor/github.com/ethereum/go-ethereum/common/bytes.go @@ -100,7 +100,7 @@ func Hex2BytesFixed(str string, flen int) []byte { return h[len(h)-flen:] } hh := make([]byte, flen) - copy(hh[flen-len(h):flen], h[:]) + copy(hh[flen-len(h):flen], h) return hh } diff --git a/vendor/github.com/ethereum/go-ethereum/common/format.go b/vendor/github.com/ethereum/go-ethereum/common/format.go index fccc299620..6fc21af719 100644 --- a/vendor/github.com/ethereum/go-ethereum/common/format.go +++ b/vendor/github.com/ethereum/go-ethereum/common/format.go @@ -38,3 +38,45 @@ func (d PrettyDuration) String() string { } return label } + +// PrettyAge is a pretty printed version of a time.Duration value that rounds +// the values up to a single most significant unit, 
days/weeks/years included. +type PrettyAge time.Time + +// ageUnits is a list of units the age pretty printing uses. +var ageUnits = []struct { + Size time.Duration + Symbol string +}{ + {12 * 30 * 24 * time.Hour, "y"}, + {30 * 24 * time.Hour, "mo"}, + {7 * 24 * time.Hour, "w"}, + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, + {time.Second, "s"}, +} + +// String implements the Stringer interface, allowing pretty printing of duration +// values rounded to the most significant time unit. +func (t PrettyAge) String() string { + // Calculate the time difference and handle the 0 cornercase + diff := time.Since(time.Time(t)) + if diff < time.Second { + return "0" + } + // Accumulate a precision of 3 components before returning + result, prec := "", 0 + + for _, unit := range ageUnits { + if diff > unit.Size { + result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol) + diff %= unit.Size + + if prec += 1; prec >= 3 { + break + } + } + } + return result +} diff --git a/vendor/github.com/ethereum/go-ethereum/common/types.go b/vendor/github.com/ethereum/go-ethereum/common/types.go index 71fe5c95cd..a4b9995267 100644 --- a/vendor/github.com/ethereum/go-ethereum/common/types.go +++ b/vendor/github.com/ethereum/go-ethereum/common/types.go @@ -34,7 +34,7 @@ import ( const ( // HashLength is the expected length of the hash HashLength = 32 - // AddressLength is the expected length of the adddress + // AddressLength is the expected length of the address AddressLength = 20 ) diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go index 5472909846..eae09f91df 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go @@ -93,27 +93,33 @@ var ( // errMissingSignature is returned if a block's extra-data section doesn't seem // to contain a 65 byte secp256k1 signature. 
- errMissingSignature = errors.New("extra-data 65 byte suffix signature missing") + errMissingSignature = errors.New("extra-data 65 byte signature suffix missing") // errExtraSigners is returned if non-checkpoint block contain signer data in // their extra-data fields. errExtraSigners = errors.New("non-checkpoint block contains extra signer list") // errInvalidCheckpointSigners is returned if a checkpoint block contains an - // invalid list of signers (i.e. non divisible by 20 bytes, or not the correct - // ones). + // invalid list of signers (i.e. non divisible by 20 bytes). errInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block") + // errMismatchingCheckpointSigners is returned if a checkpoint block contains a + // list of signers different than the one the local node calculated. + errMismatchingCheckpointSigners = errors.New("mismatching signer list on checkpoint block") + // errInvalidMixDigest is returned if a block's mix digest is non-zero. errInvalidMixDigest = errors.New("non-zero mix digest") // errInvalidUncleHash is returned if a block contains an non-empty uncle list. errInvalidUncleHash = errors.New("non empty uncle hash") - // errInvalidDifficulty is returned if the difficulty of a block is not either - // of 1 or 2, or if the value does not match the turn of the signer. + // errInvalidDifficulty is returned if the difficulty of a block neither 1 or 2. errInvalidDifficulty = errors.New("invalid difficulty") + // errWrongDifficulty is returned if the difficulty of a block doesn't match the + // turn of the signer. + errWrongDifficulty = errors.New("wrong difficulty") + // ErrInvalidTimestamp is returned if the timestamp of a block is lower than // the previous block's timestamp + the minimum block period. ErrInvalidTimestamp = errors.New("invalid timestamp") @@ -122,13 +128,12 @@ var ( // be modified via out-of-range or non-contiguous headers. 
errInvalidVotingChain = errors.New("invalid voting chain") - // errUnauthorized is returned if a header is signed by a non-authorized entity. - errUnauthorized = errors.New("unauthorized") + // errUnauthorizedSigner is returned if a header is signed by a non-authorized entity. + errUnauthorizedSigner = errors.New("unauthorized signer") - // errWaitTransactions is returned if an empty block is attempted to be sealed - // on an instant chain (0 second period). It's important to refuse these as the - // block reward is zero, so an empty block just bloats the chain... fast. - errWaitTransactions = errors.New("waiting for transactions") + // errRecentlySigned is returned if a header is signed by an authorized entity + // that already signed a header recently, thus is temporarily not allowed to. + errRecentlySigned = errors.New("recently signed") ) // SignerFn is a signer callback function to request a hash to be signed by a @@ -205,6 +210,9 @@ type Clique struct { signer common.Address // Ethereum address of the signing key signFn SignerFn // Signer function to authorize hashes with lock sync.RWMutex // Protects the signer fields + + // The fields below are for testing only + fakeDiff bool // Skip difficulty verifications } // New creates a Clique proof-of-authority consensus engine with the initial @@ -359,7 +367,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type } extraSuffix := len(header.Extra) - extraSeal if !bytes.Equal(header.Extra[extraVanity:extraSuffix], signers) { - return errInvalidCheckpointSigners + return errMismatchingCheckpointSigners } } // All basic checks passed, verify the seal and return @@ -388,7 +396,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo } } // If we're at an checkpoint block, make a snapshot if it's known - if number%c.config.Epoch == 0 { + if number == 0 || (number%c.config.Epoch == 0 && chain.GetHeaderByNumber(number-1) == nil) { checkpoint := 
chain.GetHeaderByNumber(number) if checkpoint != nil { hash := checkpoint.Hash() @@ -481,23 +489,25 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p return err } if _, ok := snap.Signers[signer]; !ok { - return errUnauthorized + return errUnauthorizedSigner } for seen, recent := range snap.Recents { if recent == signer { // Signer is among recents, only fail if the current block doesn't shift it out if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit { - return errUnauthorized + return errRecentlySigned } } } // Ensure that the difficulty corresponds to the turn-ness of the signer - inturn := snap.inturn(header.Number.Uint64(), signer) - if inturn && header.Difficulty.Cmp(diffInTurn) != 0 { - return errInvalidDifficulty - } - if !inturn && header.Difficulty.Cmp(diffNoTurn) != 0 { - return errInvalidDifficulty + if !c.fakeDiff { + inturn := snap.inturn(header.Number.Uint64(), signer) + if inturn && header.Difficulty.Cmp(diffInTurn) != 0 { + return errWrongDifficulty + } + if !inturn && header.Difficulty.Cmp(diffNoTurn) != 0 { + return errWrongDifficulty + } } return nil } @@ -600,7 +610,8 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c } // For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing) if c.config.Period == 0 && len(block.Transactions()) == 0 { - return errWaitTransactions + log.Info("Sealing paused, waiting for transactions") + return nil } // Don't hold the signer fields for the entire sealing procedure c.lock.RLock() @@ -613,7 +624,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c return err } if _, authorized := snap.Signers[signer]; !authorized { - return errUnauthorized + return errUnauthorizedSigner } // If we're amongst the recent signers, wait for the next block for seen, recent := range snap.Recents { diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/clique/snapshot.go 
b/vendor/github.com/ethereum/go-ethereum/consensus/clique/snapshot.go index 2333d69247..54d4555f3b 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/clique/snapshot.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/clique/snapshot.go @@ -57,12 +57,12 @@ type Snapshot struct { Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating } -// signers implements the sort interface to allow sorting a list of addresses -type signers []common.Address +// signersAscending implements the sort interface to allow sorting a list of addresses +type signersAscending []common.Address -func (s signers) Len() int { return len(s) } -func (s signers) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 } -func (s signers) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s signersAscending) Len() int { return len(s) } +func (s signersAscending) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 } +func (s signersAscending) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // newSnapshot creates a new snapshot with the specified startup parameters. 
This // method does not initialize the set of recent signers, so only ever use if for @@ -214,11 +214,11 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { return nil, err } if _, ok := snap.Signers[signer]; !ok { - return nil, errUnauthorized + return nil, errUnauthorizedSigner } for _, recent := range snap.Recents { if recent == signer { - return nil, errUnauthorized + return nil, errRecentlySigned } } snap.Recents[number] = signer @@ -298,7 +298,7 @@ func (s *Snapshot) signers() []common.Address { for sig := range s.Signers { sigs = append(sigs, sig) } - sort.Sort(signers(sigs)) + sort.Sort(signersAscending(sigs)) return sigs } diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/consensus.go b/vendor/github.com/ethereum/go-ethereum/consensus/consensus.go index 12ede7ff46..487b07be77 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/consensus.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/consensus.go @@ -90,7 +90,7 @@ type Engine interface { // the result into the given channel. // // Note, the method returns immediately and will send the result async. More - // than one result may also be returned depending on the consensus algorothm. + // than one result may also be returned depending on the consensus algorithm. Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error // SealHash returns the hash of a block prior to it being sealed. diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go index af0e733ae7..548c57cd9b 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go @@ -38,10 +38,24 @@ import ( // Ethash proof-of-work protocol constants. 
var ( - FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block - ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium - maxUncles = 2 // Maximum number of uncles allowed in a single block - allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks + FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block + ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium + ConstantinopleBlockReward = big.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople + maxUncles = 2 // Maximum number of uncles allowed in a single block + allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks + + // calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople. + // It returns the difficulty that a new block should have when created at time given the + // parent block's time and difficulty. The calculation uses the Byzantium rules, but with + // bomb offset 5M. + // Specification EIP-1234: https://eips.ethereum.org/EIPS/eip-1234 + calcDifficultyConstantinople = makeDifficultyCalculator(big.NewInt(5000000)) + + // calcDifficultyByzantium is the difficulty adjustment algorithm. It returns + // the difficulty that a new block should have when created at time given the + // parent block's time and difficulty. The calculation uses the Byzantium rules. + // Specification EIP-649: https://eips.ethereum.org/EIPS/eip-649 + calcDifficultyByzantium = makeDifficultyCalculator(big.NewInt(3000000)) ) // Various error messages to mark blocks invalid. 
These should be private to @@ -299,6 +313,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { next := new(big.Int).Add(parent.Number, big1) switch { + case config.IsConstantinople(next): + return calcDifficultyConstantinople(time, parent) case config.IsByzantium(next): return calcDifficultyByzantium(time, parent) case config.IsHomestead(next): @@ -316,66 +332,69 @@ var ( big9 = big.NewInt(9) big10 = big.NewInt(10) bigMinus99 = big.NewInt(-99) - big2999999 = big.NewInt(2999999) ) -// calcDifficultyByzantium is the difficulty adjustment algorithm. It returns -// the difficulty that a new block should have when created at time given the -// parent block's time and difficulty. The calculation uses the Byzantium rules. -func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int { - // https://github.com/ethereum/EIPs/issues/100. - // algorithm: - // diff = (parent_diff + - // (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) - // ) + 2^(periodCount - 2) - - bigTime := new(big.Int).SetUint64(time) - bigParentTime := new(big.Int).Set(parent.Time) - - // holds intermediate values to make the algo easier to read & audit - x := new(big.Int) - y := new(big.Int) - - // (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9 - x.Sub(bigTime, bigParentTime) - x.Div(x, big9) - if parent.UncleHash == types.EmptyUncleHash { - x.Sub(big1, x) - } else { - x.Sub(big2, x) - } - // max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99) - if x.Cmp(bigMinus99) < 0 { - x.Set(bigMinus99) - } - // parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) - y.Div(parent.Difficulty, params.DifficultyBoundDivisor) - x.Mul(y, x) - x.Add(parent.Difficulty, x) - - // minimum difficulty can 
ever be (before exponential factor) - if x.Cmp(params.MinimumDifficulty) < 0 { - x.Set(params.MinimumDifficulty) - } - // calculate a fake block number for the ice-age delay: - // https://github.com/ethereum/EIPs/pull/669 - // fake_block_number = max(0, block.number - 3_000_000) - fakeBlockNumber := new(big.Int) - if parent.Number.Cmp(big2999999) >= 0 { - fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, big2999999) // Note, parent is 1 less than the actual block number - } - // for the exponential factor - periodCount := fakeBlockNumber - periodCount.Div(periodCount, expDiffPeriod) +// makeDifficultyCalculator creates a difficultyCalculator with the given bomb-delay. +// the difficulty is calculated with Byzantium rules, which differs from Homestead in +// how uncles affect the calculation +func makeDifficultyCalculator(bombDelay *big.Int) func(time uint64, parent *types.Header) *big.Int { + // Note, the calculations below looks at the parent number, which is 1 below + // the block number. Thus we remove one from the delay given + bombDelayFromParent := new(big.Int).Sub(bombDelay, big1) + return func(time uint64, parent *types.Header) *big.Int { + // https://github.com/ethereum/EIPs/issues/100. 
+ // algorithm: + // diff = (parent_diff + + // (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + // ) + 2^(periodCount - 2) + + bigTime := new(big.Int).SetUint64(time) + bigParentTime := new(big.Int).Set(parent.Time) + + // holds intermediate values to make the algo easier to read & audit + x := new(big.Int) + y := new(big.Int) + + // (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9 + x.Sub(bigTime, bigParentTime) + x.Div(x, big9) + if parent.UncleHash == types.EmptyUncleHash { + x.Sub(big1, x) + } else { + x.Sub(big2, x) + } + // max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99) + if x.Cmp(bigMinus99) < 0 { + x.Set(bigMinus99) + } + // parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + y.Div(parent.Difficulty, params.DifficultyBoundDivisor) + x.Mul(y, x) + x.Add(parent.Difficulty, x) + + // minimum difficulty can ever be (before exponential factor) + if x.Cmp(params.MinimumDifficulty) < 0 { + x.Set(params.MinimumDifficulty) + } + // calculate a fake block number for the ice-age delay + // Specification: https://eips.ethereum.org/EIPS/eip-1234 + fakeBlockNumber := new(big.Int) + if parent.Number.Cmp(bombDelayFromParent) >= 0 { + fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, bombDelayFromParent) + } + // for the exponential factor + periodCount := fakeBlockNumber + periodCount.Div(periodCount, expDiffPeriod) - // the exponential factor, commonly referred to as "the bomb" - // diff = diff + 2^(periodCount - 2) - if periodCount.Cmp(big1) > 0 { - y.Sub(periodCount, big2) - y.Exp(big2, y, nil) - x.Add(x, y) + // the exponential factor, commonly referred to as "the bomb" + // diff = diff + 2^(periodCount - 2) + if periodCount.Cmp(big1) > 0 { + y.Sub(periodCount, big2) + y.Exp(big2, y, nil) + x.Add(x, y) + } + return x } - return x } // calcDifficultyHomestead is the 
difficulty adjustment algorithm. It returns @@ -592,6 +611,9 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header if config.IsByzantium(header.Number) { blockReward = ByzantiumBlockReward } + if config.IsConstantinople(header.Number) { + blockReward = ConstantinopleBlockReward + } // Accumulate the rewards for the miner and any included uncles reward := new(big.Int).Set(blockReward) r := new(big.Int) diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go index b4819ca38b..d124cb1e20 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go @@ -485,7 +485,7 @@ func New(config Config, notify []string, noverify bool) *Ethash { caches: newlru("cache", config.CachesInMem, newCache), datasets: newlru("dataset", config.DatasetsInMem, newDataset), update: make(chan struct{}), - hashrate: metrics.NewMeter(), + hashrate: metrics.NewMeterForced(), workCh: make(chan *sealTask), fetchWorkCh: make(chan *sealWork), submitWorkCh: make(chan *mineResult), @@ -505,7 +505,7 @@ func NewTester(notify []string, noverify bool) *Ethash { caches: newlru("cache", 1, newCache), datasets: newlru("dataset", 1, newDataset), update: make(chan struct{}), - hashrate: metrics.NewMeter(), + hashrate: metrics.NewMeterForced(), workCh: make(chan *sealTask), fetchWorkCh: make(chan *sealWork), submitWorkCh: make(chan *mineResult), diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go index 0461da7fd9..fe961e0c4b 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go +++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/common/prque" 
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" @@ -43,7 +44,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/hashicorp/golang-lru" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( @@ -128,13 +128,14 @@ type BlockChain struct { validator Validator // block and state validator interface vmConfig vm.Config - badBlocks *lru.Cache // Bad block cache + badBlocks *lru.Cache // Bad block cache + shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. } // NewBlockChain returns a fully initialised block chain using information // available in the database. It initialises the default Ethereum Validator and // Processor. -func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) { +func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { if cacheConfig == nil { cacheConfig = &CacheConfig{ TrieNodeLimit: 256 * 1024 * 1024, @@ -148,19 +149,20 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par badBlocks, _ := lru.New(badBlockLimit) bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - triegc: prque.New(), - stateCache: state.NewDatabase(db), - quit: make(chan struct{}), - bodyCache: bodyCache, - bodyRLPCache: bodyRLPCache, - blockCache: blockCache, - futureBlocks: futureBlocks, - engine: engine, - vmConfig: vmConfig, - badBlocks: badBlocks, + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + triegc: prque.New(nil), + stateCache: state.NewDatabase(db), + quit: make(chan struct{}), + shouldPreserve: shouldPreserve, + bodyCache: bodyCache, + 
bodyRLPCache: bodyRLPCache, + blockCache: blockCache, + futureBlocks: futureBlocks, + engine: engine, + vmConfig: vmConfig, + badBlocks: badBlocks, } bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) @@ -251,9 +253,9 @@ func (bc *BlockChain) loadLastState() error { blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) - log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd) - log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd) - log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd) + log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0))) + log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0))) + log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0))) return nil } @@ -850,13 +852,16 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ } bc.mu.Unlock() - log.Info("Imported new block receipts", - "count", stats.processed, - "elapsed", common.PrettyDuration(time.Since(start)), - "number", head.Number(), - "hash", head.Hash(), + context := []interface{}{ + "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), + "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)), "size", 
common.StorageSize(bytes), - "ignored", stats.ignored) + } + if stats.ignored > 0 { + context = append(context, []interface{}{"ignored", stats.ignored}...) + } + log.Info("Imported new block receipts", context...) + return 0, nil } @@ -915,7 +920,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } else { // Full but not archive node, do proper garbage collection triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive - bc.triegc.Push(root, -float32(block.NumberU64())) + bc.triegc.Push(root, -int64(block.NumberU64())) if current := block.NumberU64(); current > triesInMemory { // If we exceeded our memory allowance, flush matured singleton nodes to disk @@ -964,8 +969,17 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. reorg := externTd.Cmp(localTd) > 0 currentBlock = bc.CurrentBlock() if !reorg && externTd.Cmp(localTd) == 0 { - // Split same-difficulty blocks by number, then at random - reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5) + // Split same-difficulty blocks by number, then preferentially select + // the block generated by the local miner as the canonical block. 
+ if block.NumberU64() < currentBlock.NumberU64() { + reorg = true + } else if block.NumberU64() == currentBlock.NumberU64() { + var currentPreserve, blockPreserve bool + if bc.shouldPreserve != nil { + currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block) + } + reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5) + } } if reorg { // Reorganise the chain if the parent is not the head block @@ -1229,8 +1243,13 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor context := []interface{}{ "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), - "number", end.Number(), "hash", end.Hash(), "cache", cache, + "number", end.Number(), "hash", end.Hash(), + } + if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute { + context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) } + context = append(context, []interface{}{"cache", cache}...) + if st.queued > 0 { context = append(context, []interface{}{"queued", st.queued}...) 
} diff --git a/vendor/github.com/ethereum/go-ethereum/core/chain_indexer.go b/vendor/github.com/ethereum/go-ethereum/core/chain_indexer.go index 89ee75eb29..4bdd4ba1c8 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/chain_indexer.go +++ b/vendor/github.com/ethereum/go-ethereum/core/chain_indexer.go @@ -445,7 +445,7 @@ func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) { func (c *ChainIndexer) loadValidSections() { data, _ := c.indexDb.Get([]byte("count")) if len(data) == 8 { - c.storedSections = binary.BigEndian.Uint64(data[:]) + c.storedSections = binary.BigEndian.Uint64(data) } } diff --git a/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go b/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go index de0fc6be9e..0bc453fdf3 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go +++ b/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go @@ -67,6 +67,11 @@ func (b *BlockGen) SetExtra(data []byte) { b.header.Extra = data } +// SetNonce sets the nonce field of the generated block. +func (b *BlockGen) SetNonce(nonce types.BlockNonce) { + b.header.Nonce = nonce +} + // AddTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // @@ -172,7 +177,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { // TODO(karalabe): This is needed for clique, which depends on multiple blocks. // It's nonetheless ugly to spin up a blockchain here. Get rid of this somehow. 
- blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{}) + blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{}, nil) defer blockchain.Stop() b := &BlockGen{i: i, parent: parent, chain: blocks, chainReader: blockchain, statedb: statedb, config: config, engine: engine} @@ -190,13 +195,14 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { misc.ApplyDAOHardFork(statedb) } - // Execute any user modifications to the block and finalize it + // Execute any user modifications to the block if gen != nil { gen(i, b) } - if b.engine != nil { + // Finalize and seal the block block, _ := b.engine.Finalize(b.chainReader, b.header, statedb, b.txs, b.uncles, b.receipts) + // Write state changes to db root, err := statedb.Commit(config.IsEIP158(b.header.Number)) if err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/core/genesis.go b/vendor/github.com/ethereum/go-ethereum/core/genesis.go index 9190e2ba22..6e71afd61c 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/genesis.go +++ b/vendor/github.com/ethereum/go-ethereum/core/genesis.go @@ -355,7 +355,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis { common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing - faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}, + faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}, }, } } diff --git a/vendor/github.com/ethereum/go-ethereum/core/headerchain.go b/vendor/github.com/ethereum/go-ethereum/core/headerchain.go index 2bbec28bf3..d2093113c0 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/headerchain.go +++ 
b/vendor/github.com/ethereum/go-ethereum/core/headerchain.go @@ -281,8 +281,18 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa } // Report some public statistics so the user has a clue what's going on last := chain[len(chain)-1] - log.Info("Imported new block headers", "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), - "number", last.Number, "hash", last.Hash(), "ignored", stats.ignored) + + context := []interface{}{ + "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), + "number", last.Number, "hash", last.Hash(), + } + if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute { + context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) + } + if stats.ignored > 0 { + context = append(context, []interface{}{"ignored", stats.ignored}...) + } + log.Info("Imported new block headers", context...) return 0, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/core/state/state_object.go b/vendor/github.com/ethereum/go-ethereum/core/state/state_object.go index 091d24184a..f41ab04092 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/state/state_object.go +++ b/vendor/github.com/ethereum/go-ethereum/core/state/state_object.go @@ -77,7 +77,7 @@ type stateObject struct { trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded - cachedStorage Storage // Storage entry cache to avoid duplicate reads + originStorage Storage // Storage cache of original entries to dedup rewrites dirtyStorage Storage // Storage entries that need to be flushed to disk // Cache flags. 
@@ -115,7 +115,7 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject { address: address, addrHash: crypto.Keccak256Hash(address[:]), data: data, - cachedStorage: make(Storage), + originStorage: make(Storage), dirtyStorage: make(Storage), } } @@ -159,13 +159,25 @@ func (c *stateObject) getTrie(db Database) Trie { return c.trie } -// GetState returns a value in account storage. +// GetState retrieves a value from the account storage trie. func (self *stateObject) GetState(db Database, key common.Hash) common.Hash { - value, exists := self.cachedStorage[key] - if exists { + // If we have a dirty value for this state entry, return it + value, dirty := self.dirtyStorage[key] + if dirty { return value } - // Load from DB in case it is missing. + // Otherwise return the entry's original value + return self.GetCommittedState(db, key) +} + +// GetCommittedState retrieves a value from the committed account storage trie. +func (self *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash { + // If we have the original value cached, return that + value, cached := self.originStorage[key] + if cached { + return value + } + // Otherwise load the value from the database enc, err := self.getTrie(db).TryGet(key[:]) if err != nil { self.setError(err) @@ -178,22 +190,27 @@ func (self *stateObject) GetState(db Database, key common.Hash) common.Hash { } value.SetBytes(content) } - self.cachedStorage[key] = value + self.originStorage[key] = value return value } // SetState updates a value in account storage. 
func (self *stateObject) SetState(db Database, key, value common.Hash) { + // If the new value is the same as old, don't set + prev := self.GetState(db, key) + if prev == value { + return + } + // New value is different, update and journal the change self.db.journal.append(storageChange{ account: &self.address, key: key, - prevalue: self.GetState(db, key), + prevalue: prev, }) self.setState(key, value) } func (self *stateObject) setState(key, value common.Hash) { - self.cachedStorage[key] = value self.dirtyStorage[key] = value } @@ -202,6 +219,13 @@ func (self *stateObject) updateTrie(db Database) Trie { tr := self.getTrie(db) for key, value := range self.dirtyStorage { delete(self.dirtyStorage, key) + + // Skip noop changes, persist actual changes + if value == self.originStorage[key] { + continue + } + self.originStorage[key] = value + if (value == common.Hash{}) { self.setError(tr.TryDelete(key[:])) continue @@ -279,7 +303,7 @@ func (self *stateObject) deepCopy(db *StateDB) *stateObject { } stateObject.code = self.code stateObject.dirtyStorage = self.dirtyStorage.Copy() - stateObject.cachedStorage = self.dirtyStorage.Copy() + stateObject.originStorage = self.originStorage.Copy() stateObject.suicided = self.suicided stateObject.dirtyCode = self.dirtyCode stateObject.deleted = self.deleted diff --git a/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go b/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go index 101b03a127..216667ce98 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go +++ b/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go @@ -169,11 +169,22 @@ func (self *StateDB) Preimages() map[common.Hash][]byte { return self.preimages } +// AddRefund adds gas to the refund counter func (self *StateDB) AddRefund(gas uint64) { self.journal.append(refundChange{prev: self.refund}) self.refund += gas } +// SubRefund removes gas from the refund counter. 
+// This method will panic if the refund counter goes below zero +func (self *StateDB) SubRefund(gas uint64) { + self.journal.append(refundChange{prev: self.refund}) + if gas > self.refund { + panic("Refund counter below zero") + } + self.refund -= gas +} + // Exist reports whether the given account address exists in the state. // Notably this also returns true for suicided accounts. func (self *StateDB) Exist(addr common.Address) bool { @@ -236,10 +247,20 @@ func (self *StateDB) GetCodeHash(addr common.Address) common.Hash { return common.BytesToHash(stateObject.CodeHash()) } -func (self *StateDB) GetState(addr common.Address, bhash common.Hash) common.Hash { +// GetState retrieves a value from the given account's storage trie. +func (self *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + stateObject := self.getStateObject(addr) + if stateObject != nil { + return stateObject.GetState(self.db, hash) + } + return common.Hash{} +} + +// GetCommittedState retrieves a value from the given account's committed storage trie. 
+func (self *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { stateObject := self.getStateObject(addr) if stateObject != nil { - return stateObject.GetState(self.db, bhash) + return stateObject.GetCommittedState(self.db, hash) } return common.Hash{} } @@ -435,19 +456,14 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common if so == nil { return } - - // When iterating over the storage check the cache first - for h, value := range so.cachedStorage { - cb(h, value) - } - it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil)) for it.Next() { - // ignore cached values key := common.BytesToHash(db.trie.GetKey(it.Key)) - if _, ok := so.cachedStorage[key]; !ok { - cb(key, common.BytesToHash(it.Value)) + if value, dirty := so.dirtyStorage[key]; dirty { + cb(key, value) + continue } + cb(key, common.BytesToHash(it.Value)) } } diff --git a/vendor/github.com/ethereum/go-ethereum/core/state_processor.go b/vendor/github.com/ethereum/go-ethereum/core/state_processor.go index 1a91a57ab4..503a35d16a 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/state_processor.go +++ b/vendor/github.com/ethereum/go-ethereum/core/state_processor.go @@ -110,7 +110,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo *usedGas += gas // Create a new receipt for the transaction, storing the intermediate root and gas used by the tx - // based on the eip phase, we're passing wether the root touch-delete accounts. + // based on the eip phase, we're passing whether the root touch-delete accounts. 
receipt := types.NewReceipt(root, failed, *usedGas) receipt.TxHash = tx.Hash() receipt.GasUsed = gas diff --git a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go index 46ae2759bc..f6da5da2a7 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go +++ b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go @@ -26,13 +26,13 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) const ( @@ -525,7 +525,7 @@ func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common return pending, queued } -// Pending retrieves all currently processable transactions, groupped by origin +// Pending retrieves all currently processable transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { @@ -547,7 +547,7 @@ func (pool *TxPool) Locals() []common.Address { return pool.locals.flatten() } -// local retrieves all currently known local transactions, groupped by origin +// local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. 
func (pool *TxPool) local() map[common.Address]types.Transactions { @@ -987,11 +987,11 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { if pending > pool.config.GlobalSlots { pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first - spammers := prque.New() + spammers := prque.New(nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, float32(list.Len())) + spammers.Push(addr, int64(list.Len())) } } // Gradually drop transactions from offenders diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go b/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go index a76b6f33c5..d045c9e667 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go +++ b/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go @@ -113,7 +113,7 @@ func LogsBloom(logs []*Log) *big.Int { } func bloom9(b []byte) *big.Int { - b = crypto.Keccak256(b[:]) + b = crypto.Keccak256(b) r := new(big.Int) @@ -130,7 +130,7 @@ var Bloom9 = bloom9 func BloomLookup(bin Bloom, topic bytesBacked) bool { bloom := bin.Big() - cmp := bloom9(topic.Bytes()[:]) + cmp := bloom9(topic.Bytes()) return bloom.And(bloom, cmp).Cmp(cmp) == 0 } diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go b/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go index a24f6f3865..fc040c6216 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go @@ -41,7 +41,7 @@ type ( ) // run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter. 
-func run(evm *EVM, contract *Contract, input []byte) ([]byte, error) { +func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, error) { if contract.CodeAddr != nil { precompiles := PrecompiledContractsHomestead if evm.ChainConfig().IsByzantium(evm.BlockNumber) { @@ -61,7 +61,7 @@ func run(evm *EVM, contract *Contract, input []byte) ([]byte, error) { }(evm.interpreter) evm.interpreter = interpreter } - return interpreter.Run(contract, input) + return interpreter.Run(contract, input, readOnly) } } return nil, ErrNoCompatibleInterpreter @@ -136,10 +136,28 @@ func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmCon vmConfig: vmConfig, chainConfig: chainConfig, chainRules: chainConfig.Rules(ctx.BlockNumber), - interpreters: make([]Interpreter, 1), + interpreters: make([]Interpreter, 0, 1), } - evm.interpreters[0] = NewEVMInterpreter(evm, vmConfig) + if chainConfig.IsEWASM(ctx.BlockNumber) { + // to be implemented by EVM-C and Wagon PRs. + // if vmConfig.EWASMInterpreter != "" { + // extIntOpts := strings.Split(vmConfig.EWASMInterpreter, ":") + // path := extIntOpts[0] + // options := []string{} + // if len(extIntOpts) > 1 { + // options = extIntOpts[1..] + // } + // evm.interpreters = append(evm.interpreters, NewEVMVCInterpreter(evm, vmConfig, options)) + // } else { + // evm.interpreters = append(evm.interpreters, NewEWASMInterpreter(evm, vmConfig)) + // } + panic("No supported ewasm interpreter yet.") + } + + // vmConfig.EVMInterpreter will be used by EVM-C, it won't be checked here + // as we always want to have the built-in EVM as the failover option. 
+ evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, vmConfig)) evm.interpreter = evm.interpreters[0] return evm @@ -210,7 +228,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) }() } - ret, err = run(evm, contract, input) + ret, err = run(evm, contract, input, false) // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally @@ -255,7 +273,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, contract := NewContract(caller, to, value, gas) contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) - ret, err = run(evm, contract, input) + ret, err = run(evm, contract, input, false) if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != errExecutionReverted { @@ -288,7 +306,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by contract := NewContract(caller, to, nil, gas).AsDelegate() contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) - ret, err = run(evm, contract, input) + ret, err = run(evm, contract, input, false) if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != errExecutionReverted { @@ -310,13 +328,6 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte if evm.depth > int(params.CallCreateDepth) { return nil, gas, ErrDepth } - // Make sure the readonly is only set if we aren't in readonly yet - // this makes also sure that the readonly flag isn't removed for - // child calls. 
- if !evm.interpreter.IsReadOnly() { - evm.interpreter.SetReadOnly(true) - defer func() { evm.interpreter.SetReadOnly(false) }() - } var ( to = AccountRef(addr) @@ -331,7 +342,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally // when we're in Homestead this also counts for code storage gas errors. - ret, err = run(evm, contract, input) + ret, err = run(evm, contract, input, true) if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != errExecutionReverted { @@ -382,7 +393,7 @@ func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.I } start := time.Now() - ret, err := run(evm, contract, nil) + ret, err := run(evm, contract, nil, false) // check whether the max code size has been exceeded maxCodeSizeExceeded := evm.ChainConfig().IsEIP158(evm.BlockNumber) && len(ret) > params.MaxCodeSize diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/gas_table.go b/vendor/github.com/ethereum/go-ethereum/core/vm/gas_table.go index f9eea319e1..10b4f719a7 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/gas_table.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/gas_table.go @@ -117,24 +117,69 @@ func gasReturnDataCopy(gt params.GasTable, evm *EVM, contract *Contract, stack * func gasSStore(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { var ( - y, x = stack.Back(1), stack.Back(0) - val = evm.StateDB.GetState(contract.Address(), common.BigToHash(x)) + y, x = stack.Back(1), stack.Back(0) + current = evm.StateDB.GetState(contract.Address(), common.BigToHash(x)) ) - // This checks for 3 scenario's and calculates gas accordingly - // 1. From a zero-value address to a non-zero value (NEW VALUE) - // 2. From a non-zero value address to a zero-value address (DELETE) - // 3. 
From a non-zero to a non-zero (CHANGE) - if val == (common.Hash{}) && y.Sign() != 0 { - // 0 => non 0 - return params.SstoreSetGas, nil - } else if val != (common.Hash{}) && y.Sign() == 0 { - // non 0 => 0 - evm.StateDB.AddRefund(params.SstoreRefundGas) - return params.SstoreClearGas, nil - } else { - // non 0 => non 0 (or 0 => 0) - return params.SstoreResetGas, nil + // The legacy gas metering only takes into consideration the current state + if !evm.chainRules.IsConstantinople { + // This checks for 3 scenario's and calculates gas accordingly: + // + // 1. From a zero-value address to a non-zero value (NEW VALUE) + // 2. From a non-zero value address to a zero-value address (DELETE) + // 3. From a non-zero to a non-zero (CHANGE) + switch { + case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0 + return params.SstoreSetGas, nil + case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0 + evm.StateDB.AddRefund(params.SstoreRefundGas) + return params.SstoreClearGas, nil + default: // non 0 => non 0 (or 0 => 0) + return params.SstoreResetGas, nil + } + } + // The new gas metering is based on net gas costs (EIP-1283): + // + // 1. If current value equals new value (this is a no-op), 200 gas is deducted. + // 2. If current value does not equal new value + // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) + // 2.1.1. If original value is 0, 20000 gas is deducted. + // 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. + // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. + // 2.2.1. If original value is not 0 + // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. + // 2.2.1.2. 
If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. + // 2.2.2. If original value equals new value (this storage slot is reset) + // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. + // 2.2.2.2. Otherwise, add 4800 gas to refund counter. + value := common.BigToHash(y) + if current == value { // noop (1) + return params.NetSstoreNoopGas, nil + } + original := evm.StateDB.GetCommittedState(contract.Address(), common.BigToHash(x)) + if original == current { + if original == (common.Hash{}) { // create slot (2.1.1) + return params.NetSstoreInitGas, nil + } + if value == (common.Hash{}) { // delete slot (2.1.2b) + evm.StateDB.AddRefund(params.NetSstoreClearRefund) + } + return params.NetSstoreCleanGas, nil // write existing slot (2.1.2) + } + if original != (common.Hash{}) { + if current == (common.Hash{}) { // recreate slot (2.2.1.1) + evm.StateDB.SubRefund(params.NetSstoreClearRefund) + } else if value == (common.Hash{}) { // delete slot (2.2.1.2) + evm.StateDB.AddRefund(params.NetSstoreClearRefund) + } + } + if original == value { + if original == (common.Hash{}) { // reset to original inexistent slot (2.2.2.1) + evm.StateDB.AddRefund(params.NetSstoreResetClearRefund) + } else { // reset to original existing slot (2.2.2.2) + evm.StateDB.AddRefund(params.NetSstoreResetRefund) + } } + return params.NetSstoreDirtyGas, nil } func makeGasLog(n uint64) gasFunc { diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go b/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go index b7742e6551..ca9e775ac5 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go @@ -355,7 +355,7 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory * defer interpreter.intPool.put(shift) // First operand back into the pool if shift.Cmp(common.Big256) >= 0 { - if value.Sign() > 0 { + if 
value.Sign() >= 0 { value.SetUint64(0) } else { value.SetInt64(-1) diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/interface.go b/vendor/github.com/ethereum/go-ethereum/core/vm/interface.go index 1ef91cf1d6..fc15082f18 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/interface.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/interface.go @@ -40,8 +40,10 @@ type StateDB interface { GetCodeSize(common.Address) int AddRefund(uint64) + SubRefund(uint64) GetRefund() uint64 + GetCommittedState(common.Address, common.Hash) common.Hash GetState(common.Address, common.Hash) common.Hash SetState(common.Address, common.Hash, common.Hash) @@ -64,7 +66,7 @@ type StateDB interface { ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) } -// CallContext provides a basic interface for the EVM calling conventions. The EVM EVM +// CallContext provides a basic interface for the EVM calling conventions. The EVM // depends on this context being implemented for doing subcalls and initialising new EVM contracts. type CallContext interface { // Call another contract diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/interpreter.go b/vendor/github.com/ethereum/go-ethereum/core/vm/interpreter.go index 1e9202424a..8e934f60e8 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/interpreter.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/interpreter.go @@ -39,6 +39,11 @@ type Config struct { // may be left uninitialised and will be set to the default // table. JumpTable [256]operation + + // Type of the EWASM interpreter + EWASMInterpreter string + // Type of the EVM interpreter + EVMInterpreter string } // Interpreter is used to run Ethereum based contracts and will utilise the @@ -48,7 +53,7 @@ type Config struct { type Interpreter interface { // Run loops and evaluates the contract's code with the given input data and returns // the return byte-slice and an error if one occurred. 
- Run(contract *Contract, input []byte) ([]byte, error) + Run(contract *Contract, input []byte, static bool) ([]byte, error) // CanRun tells if the contract, passed as an argument, can be // run by the current interpreter. This is meant so that the // caller can do something like: @@ -61,10 +66,6 @@ type Interpreter interface { // } // ``` CanRun([]byte) bool - // IsReadOnly reports if the interpreter is in read only mode. - IsReadOnly() bool - // SetReadOnly sets (or unsets) read only mode in the interpreter. - SetReadOnly(bool) } // EVMInterpreter represents an EVM interpreter @@ -125,7 +126,7 @@ func (in *EVMInterpreter) enforceRestrictions(op OpCode, operation operation, st // It's important to note that any errors returned by the interpreter should be // considered a revert-and-consume-all-gas operation except for // errExecutionReverted which means revert-and-keep-gas-left. -func (in *EVMInterpreter) Run(contract *Contract, input []byte) (ret []byte, err error) { +func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) { if in.intPool == nil { in.intPool = poolOfIntPools.get() defer func() { @@ -138,6 +139,13 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte) (ret []byte, err in.evm.depth++ defer func() { in.evm.depth-- }() + // Make sure the readOnly is only set if we aren't in readOnly yet. + // This makes also sure that the readOnly flag isn't removed for child calls. + if readOnly && !in.readOnly { + in.readOnly = true + defer func() { in.readOnly = false }() + } + // Reset the previous call's return data. It's unimportant to preserve the old buffer // as every returning call will return new data anyway. in.returnData = nil @@ -263,13 +271,3 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte) (ret []byte, err func (in *EVMInterpreter) CanRun(code []byte) bool { return true } - -// IsReadOnly reports if the interpreter is in read only mode. 
-func (in *EVMInterpreter) IsReadOnly() bool { - return in.readOnly -} - -// SetReadOnly sets (or unsets) read only mode in the interpreter. -func (in *EVMInterpreter) SetReadOnly(ro bool) { - in.readOnly = ro -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/noop.go b/vendor/github.com/ethereum/go-ethereum/core/vm/noop.go deleted file mode 100644 index b71ead0d77..0000000000 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/noop.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package vm - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -func NoopCanTransfer(db StateDB, from common.Address, balance *big.Int) bool { - return true -} -func NoopTransfer(db StateDB, from, to common.Address, amount *big.Int) {} - -type NoopEVMCallContext struct{} - -func (NoopEVMCallContext) Call(caller ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) { - return nil, nil -} -func (NoopEVMCallContext) CallCode(caller ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) { - return nil, nil -} -func (NoopEVMCallContext) Create(caller ContractRef, data []byte, gas, value *big.Int) ([]byte, common.Address, error) { - return nil, common.Address{}, nil -} -func (NoopEVMCallContext) DelegateCall(me ContractRef, addr common.Address, data []byte, gas *big.Int) ([]byte, error) { - return nil, nil -} - -type NoopStateDB struct{} - -func (NoopStateDB) CreateAccount(common.Address) {} -func (NoopStateDB) SubBalance(common.Address, *big.Int) {} -func (NoopStateDB) AddBalance(common.Address, *big.Int) {} -func (NoopStateDB) GetBalance(common.Address) *big.Int { return nil } -func (NoopStateDB) GetNonce(common.Address) uint64 { return 0 } -func (NoopStateDB) SetNonce(common.Address, uint64) {} -func (NoopStateDB) GetCodeHash(common.Address) common.Hash { return common.Hash{} } -func (NoopStateDB) GetCode(common.Address) []byte { return nil } -func (NoopStateDB) SetCode(common.Address, []byte) {} -func (NoopStateDB) GetCodeSize(common.Address) int { return 0 } -func (NoopStateDB) AddRefund(uint64) {} -func (NoopStateDB) GetRefund() uint64 { return 0 } -func (NoopStateDB) GetState(common.Address, common.Hash) common.Hash { return common.Hash{} } -func (NoopStateDB) SetState(common.Address, common.Hash, common.Hash) {} -func (NoopStateDB) Suicide(common.Address) bool { return false } -func (NoopStateDB) HasSuicided(common.Address) bool { 
return false } -func (NoopStateDB) Exist(common.Address) bool { return false } -func (NoopStateDB) Empty(common.Address) bool { return false } -func (NoopStateDB) RevertToSnapshot(int) {} -func (NoopStateDB) Snapshot() int { return 0 } -func (NoopStateDB) AddLog(*types.Log) {} -func (NoopStateDB) AddPreimage(common.Hash, []byte) {} -func (NoopStateDB) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) {} diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/src/secp256k1.c b/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/src/secp256k1.c index fb8b882faa..7d637bfad1 100755 --- a/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/src/secp256k1.c +++ b/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/src/secp256k1.c @@ -26,7 +26,6 @@ } while(0) static void default_illegal_callback_fn(const char* str, void* data) { - (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } @@ -37,7 +36,6 @@ static const secp256k1_callback default_illegal_callback = { }; static void default_error_callback_fn(const char* str, void* data) { - (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go b/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go index 0a8b9a9942..5b7f168ec2 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go @@ -127,7 +127,7 @@ func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.Block // traceChain configures a new tracer according to the provided configuration, and // executes all the transactions contained within. The return value will be one item -// per transaction, dependent on the requestd tracer. +// per transaction, dependent on the requested tracer. 
func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) { // Tracing a chain is a **long** operation, only do with subscriptions notifier, supported := rpc.NotifierFromContext(ctx) diff --git a/vendor/github.com/ethereum/go-ethereum/eth/backend.go b/vendor/github.com/ethereum/go-ethereum/eth/backend.go index 9926225f24..b555b064ad 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/backend.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/backend.go @@ -149,10 +149,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion) } var ( - vmConfig = vm.Config{EnablePreimageRecording: config.EnablePreimageRecording} + vmConfig = vm.Config{ + EnablePreimageRecording: config.EnablePreimageRecording, + EWASMInterpreter: config.EWASMInterpreter, + EVMInterpreter: config.EVMInterpreter, + } cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieNodeLimit: config.TrieCache, TrieTimeLimit: config.TrieTimeout} ) - eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig) + eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig, eth.shouldPreserve) if err != nil { return nil, err } @@ -173,7 +177,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { return nil, err } - eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil) + eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil, eth.isLocalBlock) eth.miner.SetExtra(makeExtraData(config.MinerExtraData)) eth.APIBackend = &EthAPIBackend{eth, nil} @@ -330,6 +334,60 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) { return common.Address{}, fmt.Errorf("etherbase must be explicitly 
specified") } +// isLocalBlock checks whether the specified block is mined +// by local miner accounts. +// +// We regard two types of accounts as local miner account: etherbase +// and accounts specified via `txpool.locals` flag. +func (s *Ethereum) isLocalBlock(block *types.Block) bool { + author, err := s.engine.Author(block.Header()) + if err != nil { + log.Warn("Failed to retrieve block author", "number", block.NumberU64(), "hash", block.Hash(), "err", err) + return false + } + // Check whether the given address is etherbase. + s.lock.RLock() + etherbase := s.etherbase + s.lock.RUnlock() + if author == etherbase { + return true + } + // Check whether the given address is specified by `txpool.local` + // CLI flag. + for _, account := range s.config.TxPool.Locals { + if account == author { + return true + } + } + return false +} + +// shouldPreserve checks whether we should preserve the given block +// during the chain reorg depending on whether the author of block +// is a local account. +func (s *Ethereum) shouldPreserve(block *types.Block) bool { + // The reason we need to disable the self-reorg preserving for clique + // is it can be probable to introduce a deadlock. + // + // e.g. If there are 7 available signers + // + // r1 A + // r2 B + // r3 C + // r4 D + // r5 A [X] F G + // r6 [X] + // + // In the round5, the inturn signer E is offline, so the worst case + // is A, F and G sign the block of round5 and reject the block of opponents + // and in the round6, the last available signer B is offline, the whole + // network is stuck. + if _, ok := s.engine.(*clique.Clique); ok { + return false + } + return s.isLocalBlock(block) +} + // SetEtherbase sets the mining reward address. 
func (s *Ethereum) SetEtherbase(etherbase common.Address) { s.lock.Lock() @@ -362,7 +420,7 @@ func (s *Ethereum) StartMining(threads int) error { s.lock.RUnlock() s.txPool.SetGasPrice(price) - // Configure the local mining addess + // Configure the local mining address eb, err := s.Etherbase() if err != nil { log.Error("Cannot start mining without etherbase", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/eth/config.go b/vendor/github.com/ethereum/go-ethereum/eth/config.go index f1a402e370..efbaafb6a2 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/config.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/config.go @@ -121,6 +121,11 @@ type Config struct { // Miscellaneous options DocRoot string `toml:"-"` + + // Type of the EWASM interpreter ("" for detault) + EWASMInterpreter string + // Type of the EVM interpreter ("" for default) + EVMInterpreter string } type configMarshaling struct { diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/api.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/api.go index 91c6322d41..57ff3d71af 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/api.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/api.go @@ -40,8 +40,8 @@ type PublicDownloaderAPI struct { // installSyncSubscription channel. 
func NewPublicDownloaderAPI(d *Downloader, m *event.TypeMux) *PublicDownloaderAPI { api := &PublicDownloaderAPI{ - d: d, - mux: m, + d: d, + mux: m, installSyncSubscription: make(chan chan interface{}), uninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest), } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go index 8529535ba7..a0e8a6d48c 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go @@ -26,10 +26,10 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( @@ -105,11 +105,11 @@ func newQueue() *queue { headerPendPool: make(map[string]*fetchRequest), headerContCh: make(chan bool), blockTaskPool: make(map[common.Hash]*types.Header), - blockTaskQueue: prque.New(), + blockTaskQueue: prque.New(nil), blockPendPool: make(map[string]*fetchRequest), blockDonePool: make(map[common.Hash]struct{}), receiptTaskPool: make(map[common.Hash]*types.Header), - receiptTaskQueue: prque.New(), + receiptTaskQueue: prque.New(nil), receiptPendPool: make(map[string]*fetchRequest), receiptDonePool: make(map[common.Hash]struct{}), resultCache: make([]*fetchResult, blockCacheItems), @@ -277,7 +277,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { } // Schedule all the header retrieval tasks for the skeleton assembly q.headerTaskPool = make(map[uint64]*types.Header) - q.headerTaskQueue = prque.New() + q.headerTaskQueue = prque.New(nil) q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) q.headerProced = 0 @@ 
-288,7 +288,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { index := from + uint64(i*MaxHeaderFetch) q.headerTaskPool[index] = header - q.headerTaskQueue.Push(index, -float32(index)) + q.headerTaskQueue.Push(index, -int64(index)) } } @@ -334,11 +334,11 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { } // Queue the header for content retrieval q.blockTaskPool[hash] = header - q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) if q.mode == FastSync { q.receiptTaskPool[hash] = header - q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) } inserts = append(inserts, header) q.headerHead = hash @@ -436,7 +436,7 @@ func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest { } // Merge all the skipped batches back for _, from := range skip { - q.headerTaskQueue.Push(from, -float32(from)) + q.headerTaskQueue.Push(from, -int64(from)) } // Assemble and return the block download request if send == 0 { @@ -542,7 +542,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common } // Merge all the skipped headers back for _, header := range skip { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } if progress { // Wake WaitResults, resultCache was modified @@ -585,10 +585,10 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m defer q.lock.Unlock() if request.From > 0 { - taskQueue.Push(request.From, -float32(request.From)) + taskQueue.Push(request.From, -int64(request.From)) } for _, header := range request.Headers { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } delete(pendPool, request.Peer.id) } @@ -602,13 +602,13 @@ func (q *queue) Revoke(peerID 
string) { if request, ok := q.blockPendPool[peerID]; ok { for _, header := range request.Headers { - q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) } delete(q.blockPendPool, peerID) } if request, ok := q.receiptPendPool[peerID]; ok { for _, header := range request.Headers { - q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) + q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) } delete(q.receiptPendPool, peerID) } @@ -657,10 +657,10 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, // Return any non satisfied requests to the pool if request.From > 0 { - taskQueue.Push(request.From, -float32(request.From)) + taskQueue.Push(request.From, -int64(request.From)) } for _, header := range request.Headers { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } // Add the peer to the expiry report along the number of failed requests expiries[id] = len(request.Headers) @@ -731,7 +731,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh } miss[request.From] = struct{}{} - q.headerTaskQueue.Push(request.From, -float32(request.From)) + q.headerTaskQueue.Push(request.From, -int64(request.From)) return 0, errors.New("delivery not accepted") } // Clean up a successful fetch and try to deliver any sub-results @@ -854,7 +854,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ // Return all failed or missing fetches to the queue for _, header := range request.Headers { if header != nil { - taskQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -int64(header.Number.Uint64())) } } // Wake up WaitResults diff --git a/vendor/github.com/ethereum/go-ethereum/eth/fetcher/fetcher.go b/vendor/github.com/ethereum/go-ethereum/eth/fetcher/fetcher.go index 277f14b81a..f0b5e8064b 100644 --- 
a/vendor/github.com/ethereum/go-ethereum/eth/fetcher/fetcher.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/fetcher/fetcher.go @@ -23,10 +23,10 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) const ( @@ -160,7 +160,7 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBloc fetching: make(map[common.Hash]*announce), fetched: make(map[common.Hash][]*announce), completing: make(map[common.Hash]*announce), - queue: prque.New(), + queue: prque.New(nil), queues: make(map[string]int), queued: make(map[common.Hash]*inject), getBlock: getBlock, @@ -299,7 +299,7 @@ func (f *Fetcher) loop() { // If too high up the chain or phase, continue later number := op.block.NumberU64() if number > height+1 { - f.queue.Push(op, -float32(number)) + f.queue.Push(op, -int64(number)) if f.queueChangeHook != nil { f.queueChangeHook(hash, true) } @@ -624,7 +624,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) { } f.queues[peer] = count f.queued[hash] = op - f.queue.Push(op, -float32(block.NumberU64())) + f.queue.Push(op, -int64(block.NumberU64())) if f.queueChangeHook != nil { f.queueChangeHook(op.block.Hash(), true) } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go index feb57e0601..b519236f2d 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go @@ -124,7 +124,7 @@ func (mw *memoryWrapper) pushObject(vm *duktape.Context) { ctx.Pop2() ptr := ctx.PushFixedBuffer(len(blob)) - copy(makeSlice(ptr, uint(len(blob))), blob[:]) + copy(makeSlice(ptr, uint(len(blob))), blob) return 1 }) vm.PutPropString(obj, "slice") @@ -204,7 +204,7 @@ func (dw 
*dbWrapper) pushObject(vm *duktape.Context) { code := dw.db.GetCode(common.BytesToAddress(popSlice(ctx))) ptr := ctx.PushFixedBuffer(len(code)) - copy(makeSlice(ptr, uint(len(code))), code[:]) + copy(makeSlice(ptr, uint(len(code))), code) return 1 }) vm.PutPropString(obj, "getCode") @@ -268,7 +268,7 @@ func (cw *contractWrapper) pushObject(vm *duktape.Context) { blob := cw.contract.Input ptr := ctx.PushFixedBuffer(len(blob)) - copy(makeSlice(ptr, uint(len(blob))), blob[:]) + copy(makeSlice(ptr, uint(len(blob))), blob) return 1 }) vm.PutPropString(obj, "getInput") @@ -584,7 +584,7 @@ func (jst *Tracer) GetResult() (json.RawMessage, error) { case []byte: ptr := jst.vm.PushFixedBuffer(len(val)) - copy(makeSlice(ptr, uint(len(val))), val[:]) + copy(makeSlice(ptr, uint(len(val))), val) case common.Address: ptr := jst.vm.PushFixedBuffer(20) diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/database.go b/vendor/github.com/ethereum/go-ethereum/ethdb/database.go index 1b262b73cb..99abd09b99 100644 --- a/vendor/github.com/ethereum/go-ethereum/ethdb/database.go +++ b/vendor/github.com/ethereum/go-ethereum/ethdb/database.go @@ -155,15 +155,12 @@ func (db *LDBDatabase) LDB() *leveldb.DB { // Meter configures the database metrics collectors and func (db *LDBDatabase) Meter(prefix string) { - if metrics.Enabled { - // Initialize all the metrics collector at the requested prefix - db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil) - db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil) - db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil) - db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil) - db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil) - } - // Initialize write delay metrics no matter we are in metric mode or not. 
+ // Initialize all the metrics collector at the requested prefix + db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil) + db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil) + db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil) + db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil) + db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil) db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil) db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil) diff --git a/vendor/github.com/ethereum/go-ethereum/internal/build/util.go b/vendor/github.com/ethereum/go-ethereum/internal/build/util.go index f99ee8396d..195bdb404b 100644 --- a/vendor/github.com/ethereum/go-ethereum/internal/build/util.go +++ b/vendor/github.com/ethereum/go-ethereum/internal/build/util.go @@ -143,9 +143,9 @@ func CopyFile(dst, src string, mode os.FileMode) { // so that go commands executed by build use the same version of Go as the 'host' that runs // build code. e.g. // -// /usr/lib/go-1.8/bin/go run build/ci.go ... +// /usr/lib/go-1.11/bin/go run build/ci.go ... // -// runs using go 1.8 and invokes go 1.8 tools from the same GOROOT. This is also important +// runs using go 1.11 and invokes go 1.11 tools from the same GOROOT. This is also important // because runtime.Version checks on the host should match the tools that are run. func GoTool(tool string, args ...string) *exec.Cmd { args = append([]string{tool}, args...) 
diff --git a/vendor/github.com/ethereum/go-ethereum/les/commons.go b/vendor/github.com/ethereum/go-ethereum/les/commons.go index a97687993d..0b6cf3711c 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/commons.go +++ b/vendor/github.com/ethereum/go-ethereum/les/commons.go @@ -42,12 +42,12 @@ type lesCommons struct { // NodeInfo represents a short summary of the Ethereum sub-protocol metadata // known about the host peer. type NodeInfo struct { - Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) - Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain - Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block - Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules - Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block - CHT light.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup + Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) + Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain + Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block + Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules + Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block + CHT params.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup } // makeProtocols creates protocol descriptors for the given LES versions. @@ -76,7 +76,7 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol { // nodeInfo retrieves some protocol metadata about the running host node. 
func (c *lesCommons) nodeInfo() interface{} { - var cht light.TrustedCheckpoint + var cht params.TrustedCheckpoint sections, _, _ := c.chtIndexer.Sections() sections2, _, _ := c.bloomTrieIndexer.Sections() @@ -98,11 +98,11 @@ func (c *lesCommons) nodeInfo() interface{} { idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1 chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead) } - cht = light.TrustedCheckpoint{ - SectionIdx: sectionIndex, - SectionHead: sectionHead, - CHTRoot: chtRoot, - BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead), + cht = params.TrustedCheckpoint{ + SectionIndex: sectionIndex, + SectionHead: sectionHead, + CHTRoot: chtRoot, + BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead), } } diff --git a/vendor/github.com/ethereum/go-ethereum/les/distributor.go b/vendor/github.com/ethereum/go-ethereum/les/distributor.go index d3f6b21d18..f90765b624 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/distributor.go +++ b/vendor/github.com/ethereum/go-ethereum/les/distributor.go @@ -114,7 +114,9 @@ func (d *requestDistributor) loop() { d.lock.Lock() elem := d.reqQueue.Front() for elem != nil { - close(elem.Value.(*distReq).sentChn) + req := elem.Value.(*distReq) + close(req.sentChn) + req.sentChn = nil elem = elem.Next() } d.lock.Unlock() diff --git a/vendor/github.com/ethereum/go-ethereum/les/odr_requests.go b/vendor/github.com/ethereum/go-ethereum/les/odr_requests.go index 9e9b2673f9..77b1b6d0c8 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/odr_requests.go +++ b/vendor/github.com/ethereum/go-ethereum/les/odr_requests.go @@ -478,7 +478,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error { } type BloomReq struct { - BloomTrieNum, BitIdx, SectionIdx, FromLevel uint64 + BloomTrieNum, BitIdx, SectionIndex, FromLevel uint64 } // ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface @@ -487,7 +487,7 @@ type BloomRequest 
light.BloomRequest // GetCost returns the cost of the given ODR request according to the serving // peer's cost table (implementation of LesOdrRequest) func (r *BloomRequest) GetCost(peer *peer) uint64 { - return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIdxList)) + return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList)) } // CanSend tells if a certain peer is suitable for serving the given request @@ -503,13 +503,13 @@ func (r *BloomRequest) CanSend(peer *peer) bool { // Request sends an ODR request to the LES network (implementation of LesOdrRequest) func (r *BloomRequest) Request(reqID uint64, peer *peer) error { - peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList) - reqs := make([]HelperTrieReq, len(r.SectionIdxList)) + peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList) + reqs := make([]HelperTrieReq, len(r.SectionIndexList)) var encNumber [10]byte binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx)) - for i, sectionIdx := range r.SectionIdxList { + for i, sectionIdx := range r.SectionIndexList { binary.BigEndian.PutUint64(encNumber[2:], sectionIdx) reqs[i] = HelperTrieReq{ Type: htBloomBits, @@ -524,7 +524,7 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error { // returns true and stores results in memory if the message was a valid reply // to the request (implementation of LesOdrRequest) func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error { - log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList) + log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList) // Ensure we have a correct message with a single proof element if msg.MsgType != MsgHelperTrieProofs { @@ -535,13 +535,13 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) 
error { nodeSet := proofs.NodeSet() reads := &readTraceDB{db: nodeSet} - r.BloomBits = make([][]byte, len(r.SectionIdxList)) + r.BloomBits = make([][]byte, len(r.SectionIndexList)) // Verify the proofs var encNumber [10]byte binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx)) - for i, idx := range r.SectionIdxList { + for i, idx := range r.SectionIndexList { binary.BigEndian.PutUint64(encNumber[2:], idx) value, _, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads) if err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/les/retrieve.go b/vendor/github.com/ethereum/go-ethereum/les/retrieve.go index 8ae36d82cd..d77cfea74e 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/retrieve.go +++ b/vendor/github.com/ethereum/go-ethereum/les/retrieve.go @@ -217,6 +217,13 @@ func (r *sentReq) stateRequesting() reqStateFn { go r.tryRequest() r.lastReqQueued = true return r.stateRequesting + case rpDeliveredInvalid: + // if it was the last sent request (set to nil by update) then start a new one + if !r.lastReqQueued && r.lastReqSentTo == nil { + go r.tryRequest() + r.lastReqQueued = true + } + return r.stateRequesting case rpDeliveredValid: r.stop(nil) return r.stateStopped @@ -242,7 +249,11 @@ func (r *sentReq) stateNoMorePeers() reqStateFn { r.stop(nil) return r.stateStopped } - return r.stateNoMorePeers + if r.waiting() { + return r.stateNoMorePeers + } + r.stop(light.ErrNoPeers) + return nil case <-r.stopCh: return r.stateStopped } diff --git a/vendor/github.com/ethereum/go-ethereum/light/lightchain.go b/vendor/github.com/ethereum/go-ethereum/light/lightchain.go index d40a4ee6c6..8e2734c2d9 100644 --- a/vendor/github.com/ethereum/go-ethereum/light/lightchain.go +++ b/vendor/github.com/ethereum/go-ethereum/light/lightchain.go @@ -118,19 +118,19 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus. 
} // addTrustedCheckpoint adds a trusted checkpoint to the blockchain -func (self *LightChain) addTrustedCheckpoint(cp TrustedCheckpoint) { +func (self *LightChain) addTrustedCheckpoint(cp *params.TrustedCheckpoint) { if self.odr.ChtIndexer() != nil { - StoreChtRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.CHTRoot) - self.odr.ChtIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead) + StoreChtRoot(self.chainDb, cp.SectionIndex, cp.SectionHead, cp.CHTRoot) + self.odr.ChtIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead) } if self.odr.BloomTrieIndexer() != nil { - StoreBloomTrieRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.BloomRoot) - self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead) + StoreBloomTrieRoot(self.chainDb, cp.SectionIndex, cp.SectionHead, cp.BloomRoot) + self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead) } if self.odr.BloomIndexer() != nil { - self.odr.BloomIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead) + self.odr.BloomIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead) } - log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead) + log.Info("Added trusted checkpoint", "chain", cp.Name, "block", (cp.SectionIndex+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead) } func (self *LightChain) getProcInterrupt() bool { @@ -157,7 +157,7 @@ func (self *LightChain) loadLastState() error { // Issue a status log and return header := self.hc.CurrentHeader() headerTd := self.GetTd(header.Hash(), header.Number.Uint64()) - log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd) + log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0))) return nil } @@ -488,7 +488,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool { // Ensure the chain 
didn't move past the latest block while retrieving it if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() { - log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash()) + log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0))) self.hc.SetCurrentHeader(header) } return true diff --git a/vendor/github.com/ethereum/go-ethereum/light/odr.go b/vendor/github.com/ethereum/go-ethereum/light/odr.go index 3cd8b2c040..900be05440 100644 --- a/vendor/github.com/ethereum/go-ethereum/light/odr.go +++ b/vendor/github.com/ethereum/go-ethereum/light/odr.go @@ -157,18 +157,18 @@ func (req *ChtRequest) StoreResult(db ethdb.Database) { // BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure type BloomRequest struct { OdrRequest - Config *IndexerConfig - BloomTrieNum uint64 - BitIdx uint - SectionIdxList []uint64 - BloomTrieRoot common.Hash - BloomBits [][]byte - Proofs *NodeSet + Config *IndexerConfig + BloomTrieNum uint64 + BitIdx uint + SectionIndexList []uint64 + BloomTrieRoot common.Hash + BloomBits [][]byte + Proofs *NodeSet } // StoreResult stores the retrieved data in local database func (req *BloomRequest) StoreResult(db ethdb.Database) { - for i, sectionIdx := range req.SectionIdxList { + for i, sectionIdx := range req.SectionIndexList { sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*req.Config.BloomTrieSize-1) // if we don't have the canonical hash stored for this section head number, we'll still store it under // a key with a zero sectionHead. 
GetBloomBits will look there too if we still don't have the canonical diff --git a/vendor/github.com/ethereum/go-ethereum/light/odr_util.go b/vendor/github.com/ethereum/go-ethereum/light/odr_util.go index 9bc0f604b0..073f0d6429 100644 --- a/vendor/github.com/ethereum/go-ethereum/light/odr_util.go +++ b/vendor/github.com/ethereum/go-ethereum/light/odr_util.go @@ -222,7 +222,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi } r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1, - BitIdx: bitIdx, SectionIdxList: reqList, Config: odr.IndexerConfig()} + BitIdx: bitIdx, SectionIndexList: reqList, Config: odr.IndexerConfig()} if err := odr.Retrieve(ctx, r); err != nil { return nil, err } else { diff --git a/vendor/github.com/ethereum/go-ethereum/light/postprocess.go b/vendor/github.com/ethereum/go-ethereum/light/postprocess.go index 7b23e48b5b..2f8cb73ab1 100644 --- a/vendor/github.com/ethereum/go-ethereum/light/postprocess.go +++ b/vendor/github.com/ethereum/go-ethereum/light/postprocess.go @@ -104,38 +104,11 @@ var ( } ) -// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with -// the appropriate section index and head hash. It is used to start light syncing from this checkpoint -// and avoid downloading the entire header chain while still being able to securely access old headers/logs. 
-type TrustedCheckpoint struct { - name string - SectionIdx uint64 - SectionHead, CHTRoot, BloomRoot common.Hash -} - // trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to -var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{ - params.MainnetGenesisHash: { - name: "mainnet", - SectionIdx: 187, - SectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"), - CHTRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"), - BloomRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"), - }, - params.TestnetGenesisHash: { - name: "ropsten", - SectionIdx: 117, - SectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"), - CHTRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"), - BloomRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"), - }, - params.RinkebyGenesisHash: { - name: "rinkeby", - SectionIdx: 85, - SectionHead: common.HexToHash("92cfa67afc4ad8ab0dcbc6fa49efd14b5b19402442e7317e6bc879d85f89d64d"), - CHTRoot: common.HexToHash("2802ec92cd7a54a75bca96afdc666ae7b99e5d96cf8192dcfb09588812f51564"), - BloomRoot: common.HexToHash("ebefeb31a9a42866d8cf2d2477704b4c3d7c20d0e4e9b5aaa77f396e016a1263"), - }, +var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{ + params.MainnetGenesisHash: params.MainnetTrustedCheckpoint, + params.TestnetGenesisHash: params.TestnetTrustedCheckpoint, + params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint, } var ( @@ -329,7 +302,7 @@ func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section for i := 0; i < 20; i++ { go func() { for bitIndex := range indexCh { - r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}, Config: b.odr.IndexerConfig()} + r := 
&BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()} for { if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers { // if there are no peers to serve, retry later diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go b/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go index 3aecd4fa35..57c949e7d4 100644 --- a/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go +++ b/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go @@ -17,9 +17,6 @@ type EWMA interface { // NewEWMA constructs a new EWMA with the given alpha. func NewEWMA(alpha float64) EWMA { - if !Enabled { - return NilEWMA{} - } return &StandardEWMA{alpha: alpha} } diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/meter.go b/vendor/github.com/ethereum/go-ethereum/metrics/meter.go index 82b2141a62..58d170fae0 100644 --- a/vendor/github.com/ethereum/go-ethereum/metrics/meter.go +++ b/vendor/github.com/ethereum/go-ethereum/metrics/meter.go @@ -29,6 +29,17 @@ func GetOrRegisterMeter(name string, r Registry) Meter { return r.GetOrRegister(name, NewMeter).(Meter) } +// GetOrRegisterMeterForced returns an existing Meter or constructs and registers a +// new StandardMeter no matter the global switch is enabled or not. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterMeterForced(name string, r Registry) Meter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewMeterForced).(Meter) +} + // NewMeter constructs a new StandardMeter and launches a goroutine. // Be sure to call Stop() once the meter is of no use to allow for garbage collection. func NewMeter() Meter { @@ -46,8 +57,23 @@ func NewMeter() Meter { return m } -// NewMeter constructs and registers a new StandardMeter and launches a -// goroutine. 
+// NewMeterForced constructs a new StandardMeter and launches a goroutine no matter +// the global switch is enabled or not. +// Be sure to call Stop() once the meter is of no use to allow for garbage collection. +func NewMeterForced() Meter { + m := newStandardMeter() + arbiter.Lock() + defer arbiter.Unlock() + arbiter.meters[m] = struct{}{} + if !arbiter.started { + arbiter.started = true + go arbiter.tick() + } + return m +} + +// NewRegisteredMeter constructs and registers a new StandardMeter +// and launches a goroutine. // Be sure to unregister the meter from the registry once it is of no use to // allow for garbage collection. func NewRegisteredMeter(name string, r Registry) Meter { @@ -59,6 +85,19 @@ func NewRegisteredMeter(name string, r Registry) Meter { return c } +// NewRegisteredMeterForced constructs and registers a new StandardMeter +// and launches a goroutine no matter the global switch is enabled or not. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredMeterForced(name string, r Registry) Meter { + c := NewMeterForced() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + // MeterSnapshot is a read-only copy of another Meter. 
type MeterSnapshot struct { count int64 diff --git a/vendor/github.com/ethereum/go-ethereum/miner/miner.go b/vendor/github.com/ethereum/go-ethereum/miner/miner.go index 7f194db261..5218c12107 100644 --- a/vendor/github.com/ethereum/go-ethereum/miner/miner.go +++ b/vendor/github.com/ethereum/go-ethereum/miner/miner.go @@ -52,13 +52,13 @@ type Miner struct { shouldStart int32 // should start indicates whether we should start after sync } -func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64) *Miner { +func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(block *types.Block) bool) *Miner { miner := &Miner{ eth: eth, mux: mux, engine: engine, exitCh: make(chan struct{}), - worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil), + worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil, isLocalBlock), canStart: 1, } go miner.update() diff --git a/vendor/github.com/ethereum/go-ethereum/miner/worker.go b/vendor/github.com/ethereum/go-ethereum/miner/worker.go index 3500ca4c23..8579c5c84b 100644 --- a/vendor/github.com/ethereum/go-ethereum/miner/worker.go +++ b/vendor/github.com/ethereum/go-ethereum/miner/worker.go @@ -149,9 +149,10 @@ type worker struct { resubmitIntervalCh chan time.Duration resubmitAdjustCh chan *intervalAdjust - current *environment // An environment for current running cycle. - possibleUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. - unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. + current *environment // An environment for current running cycle. + localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks. 
+ remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. + unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. mu sync.RWMutex // The lock used to protect the coinbase and extra fields coinbase common.Address @@ -168,6 +169,9 @@ type worker struct { running int32 // The indicator whether the consensus engine is running or not. newTxs int32 // New arrival transaction count since last sealing work submitting. + // External functions + isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner. + // Test hooks newTaskHook func(*task) // Method to call upon receiving a new sealing task. skipSealHook func(*task) bool // Method to decide whether skipping the sealing. @@ -175,7 +179,7 @@ type worker struct { resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. } -func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64) *worker { +func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(*types.Block) bool) *worker { worker := &worker{ config: config, engine: engine, @@ -184,7 +188,9 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, chain: eth.BlockChain(), gasFloor: gasFloor, gasCeil: gasCeil, - possibleUncles: make(map[common.Hash]*types.Block), + isLocalBlock: isLocalBlock, + localUncles: make(map[common.Hash]*types.Block), + remoteUncles: make(map[common.Hash]*types.Block), unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), pendingTasks: make(map[common.Hash]*task), txsCh: make(chan core.NewTxsEvent, txChanSize), @@ -405,11 +411,19 @@ func (w *worker) mainLoop() { w.commitNewWork(req.interrupt, req.noempty, 
req.timestamp) case ev := <-w.chainSideCh: - if _, exist := w.possibleUncles[ev.Block.Hash()]; exist { + // Short circuit for duplicate side blocks + if _, exist := w.localUncles[ev.Block.Hash()]; exist { + continue + } + if _, exist := w.remoteUncles[ev.Block.Hash()]; exist { continue } - // Add side block to possible uncle block set. - w.possibleUncles[ev.Block.Hash()] = ev.Block + // Add side block to possible uncle block set depending on the author. + if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) { + w.localUncles[ev.Block.Hash()] = ev.Block + } else { + w.remoteUncles[ev.Block.Hash()] = ev.Block + } // If our mining block contains less than 2 uncle blocks, // add the new uncle block if valid and regenerate a mining block. if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 { @@ -421,7 +435,10 @@ func (w *worker) mainLoop() { if !ok { return false } - uncle, exist := w.possibleUncles[hash] + uncle, exist := w.localUncles[hash] + if !exist { + uncle, exist = w.remoteUncles[hash] + } if !exist { return false } @@ -651,7 +668,10 @@ func (w *worker) updateSnapshot() { if !ok { return false } - uncle, exist := w.possibleUncles[hash] + uncle, exist := w.localUncles[hash] + if !exist { + uncle, exist = w.remoteUncles[hash] + } if !exist { return false } @@ -859,23 +879,29 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) misc.ApplyDAOHardFork(env.state) } // Accumulate the uncles for the current block - for hash, uncle := range w.possibleUncles { - if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() { - delete(w.possibleUncles, hash) - } - } uncles := make([]*types.Header, 0, 2) - for hash, uncle := range w.possibleUncles { - if len(uncles) == 2 { - break + commitUncles := func(blocks map[common.Hash]*types.Block) { + // Clean up stale uncle blocks first + for hash, uncle := range blocks { + if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() { + delete(blocks, hash) + } } - if err := 
w.commitUncle(env, uncle.Header()); err != nil { - log.Trace("Possible uncle rejected", "hash", hash, "reason", err) - } else { - log.Debug("Committing new uncle to block", "hash", hash) - uncles = append(uncles, uncle.Header()) + for hash, uncle := range blocks { + if len(uncles) == 2 { + break + } + if err := w.commitUncle(env, uncle.Header()); err != nil { + log.Trace("Possible uncle rejected", "hash", hash, "reason", err) + } else { + log.Debug("Committing new uncle to block", "hash", hash) + uncles = append(uncles, uncle.Header()) + } } } + // Prefer to locally generated uncle + commitUncles(w.localUncles) + commitUncles(w.remoteUncles) if !noempty { // Create an empty block based on temporary copied state for sealing in advance without waiting block diff --git a/vendor/github.com/ethereum/go-ethereum/mobile/ethclient.go b/vendor/github.com/ethereum/go-ethereum/mobile/ethclient.go index 66399c6b56..662125c4ad 100644 --- a/vendor/github.com/ethereum/go-ethereum/mobile/ethclient.go +++ b/vendor/github.com/ethereum/go-ethereum/mobile/ethclient.go @@ -138,7 +138,9 @@ func (ec *EthereumClient) SubscribeNewHead(ctx *Context, handler NewHeadHandler, handler.OnNewHead(&Header{header}) case err := <-rawSub.Err(): - handler.OnError(err.Error()) + if err != nil { + handler.OnError(err.Error()) + } return } } @@ -227,7 +229,9 @@ func (ec *EthereumClient) SubscribeFilterLogs(ctx *Context, query *FilterQuery, handler.OnFilterLogs(&Log{&log}) case err := <-rawSub.Err(): - handler.OnError(err.Error()) + if err != nil { + handler.OnError(err.Error()) + } return } } diff --git a/vendor/github.com/ethereum/go-ethereum/mobile/shhclient.go b/vendor/github.com/ethereum/go-ethereum/mobile/shhclient.go new file mode 100644 index 0000000000..a069c9bd40 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/mobile/shhclient.go @@ -0,0 +1,195 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +// Contains a wrapper for the Whisper client. + +package geth + +import ( + "github.com/ethereum/go-ethereum/whisper/shhclient" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv6" +) + +// WhisperClient provides access to the Ethereum APIs. +type WhisperClient struct { + client *shhclient.Client +} + +// NewWhisperClient connects a client to the given URL. +func NewWhisperClient(rawurl string) (client *WhisperClient, _ error) { + rawClient, err := shhclient.Dial(rawurl) + return &WhisperClient{rawClient}, err +} + +// GetVersion returns the Whisper sub-protocol version. +func (wc *WhisperClient) GetVersion(ctx *Context) (version string, _ error) { + return wc.client.Version(ctx.context) +} + +// GetInfo returns diagnostic information about the whisper node. +func (wc *WhisperClient) GetInfo(ctx *Context) (info *Info, _ error) { + rawInfo, err := wc.client.Info(ctx.context) + return &Info{&rawInfo}, err +} + +// SetMaxMessageSize sets the maximal message size allowed by this node. Incoming +// and outgoing messages with a larger size will be rejected. Whisper message size +// can never exceed the limit imposed by the underlying P2P protocol (10 Mb). 
+func (wc *WhisperClient) SetMaxMessageSize(ctx *Context, size int32) error { + return wc.client.SetMaxMessageSize(ctx.context, uint32(size)) +} + +// SetMinimumPoW (experimental) sets the minimal PoW required by this node. +// This experimental function was introduced for the future dynamic adjustment of +// PoW requirement. If the node is overwhelmed with messages, it should raise the +// PoW requirement and notify the peers. The new value should be set relative to +// the old value (e.g. double). The old value could be obtained via shh_info call. +func (wc *WhisperClient) SetMinimumPoW(ctx *Context, pow float64) error { + return wc.client.SetMinimumPoW(ctx.context, pow) +} + +// MarkTrustedPeer marks a specific peer as trusted, which will allow it to send historic (expired) messages. +// Note: this function does not add new nodes; the node needs to exist as a peer. +func (wc *WhisperClient) MarkTrustedPeer(ctx *Context, enode string) error { + return wc.client.MarkTrustedPeer(ctx.context, enode) +} + +// NewKeyPair generates a new public and private key pair for message decryption and encryption. +// It returns an identifier that can be used to refer to the key. +func (wc *WhisperClient) NewKeyPair(ctx *Context) (string, error) { + return wc.client.NewKeyPair(ctx.context) +} + +// AddPrivateKey stores the key pair, and returns its ID. +func (wc *WhisperClient) AddPrivateKey(ctx *Context, key []byte) (string, error) { + return wc.client.AddPrivateKey(ctx.context, key) +} + +// DeleteKeyPair deletes the specified key. +func (wc *WhisperClient) DeleteKeyPair(ctx *Context, id string) (string, error) { + return wc.client.DeleteKeyPair(ctx.context, id) +} + +// HasKeyPair returns an indication if the node has a private key or +// key pair matching the given ID. +func (wc *WhisperClient) HasKeyPair(ctx *Context, id string) (bool, error) { + return wc.client.HasKeyPair(ctx.context, id) +} + +// GetPublicKey returns the public key for a key ID. 
+func (wc *WhisperClient) GetPublicKey(ctx *Context, id string) ([]byte, error) { + return wc.client.PublicKey(ctx.context, id) +} + +// GetPrivateKey returns the private key for a key ID. +func (wc *WhisperClient) GetPrivateKey(ctx *Context, id string) ([]byte, error) { + return wc.client.PrivateKey(ctx.context, id) +} + +// NewSymmetricKey generates a random symmetric key and returns its identifier. +// Can be used for encrypting and decrypting messages where the key is known to both parties. +func (wc *WhisperClient) NewSymmetricKey(ctx *Context) (string, error) { + return wc.client.NewSymmetricKey(ctx.context) +} + +// AddSymmetricKey stores the key, and returns its identifier. +func (wc *WhisperClient) AddSymmetricKey(ctx *Context, key []byte) (string, error) { + return wc.client.AddSymmetricKey(ctx.context, key) +} + +// GenerateSymmetricKeyFromPassword generates the key from password, stores it, and returns its identifier. +func (wc *WhisperClient) GenerateSymmetricKeyFromPassword(ctx *Context, passwd string) (string, error) { + return wc.client.GenerateSymmetricKeyFromPassword(ctx.context, passwd) +} + +// HasSymmetricKey returns an indication if the key associated with the given id is stored in the node. +func (wc *WhisperClient) HasSymmetricKey(ctx *Context, id string) (bool, error) { + return wc.client.HasSymmetricKey(ctx.context, id) +} + +// GetSymmetricKey returns the symmetric key associated with the given identifier. +func (wc *WhisperClient) GetSymmetricKey(ctx *Context, id string) ([]byte, error) { + return wc.client.GetSymmetricKey(ctx.context, id) +} + +// DeleteSymmetricKey deletes the symmetric key associated with the given identifier. +func (wc *WhisperClient) DeleteSymmetricKey(ctx *Context, id string) error { + return wc.client.DeleteSymmetricKey(ctx.context, id) +} + +// Post posts a message onto the network. 
+func (wc *WhisperClient) Post(ctx *Context, message *NewMessage) (string, error) { + return wc.client.Post(ctx.context, *message.newMessage) +} + +// NewMessageHandler is a client-side subscription callback to invoke on events and +// subscription failure. +type NewMessageHandler interface { + OnNewMessage(message *Message) + OnError(failure string) +} + +// SubscribeMessages subscribes to messages that match the given criteria. This method +// is only supported on bi-directional connections such as websockets and IPC. +// NewMessageFilter uses polling and is supported over HTTP. +func (wc *WhisperClient) SubscribeMessages(ctx *Context, criteria *Criteria, handler NewMessageHandler, buffer int) (*Subscription, error) { + // Subscribe to the event internally + ch := make(chan *whisper.Message, buffer) + rawSub, err := wc.client.SubscribeMessages(ctx.context, *criteria.criteria, ch) + if err != nil { + return nil, err + } + // Start up a dispatcher to feed into the callback + go func() { + for { + select { + case message := <-ch: + handler.OnNewMessage(&Message{message}) + + case err := <-rawSub.Err(): + if err != nil { + handler.OnError(err.Error()) + } + return + } + } + }() + return &Subscription{rawSub}, nil +} + +// NewMessageFilter creates a filter within the node. This filter can be used to poll +// for new messages (see FilterMessages) that satisfy the given criteria. A filter can +// timeout when it was polled for in whisper.filterTimeout. +func (wc *WhisperClient) NewMessageFilter(ctx *Context, criteria *Criteria) (string, error) { + return wc.client.NewMessageFilter(ctx.context, *criteria.criteria) +} + +// DeleteMessageFilter removes the filter associated with the given id. 
+func (wc *WhisperClient) DeleteMessageFilter(ctx *Context, id string) error { + return wc.client.DeleteMessageFilter(ctx.context, id) +} + +// GetFilterMessages retrieves all messages that are received between the last call to +// this function and match the criteria that where given when the filter was created. +func (wc *WhisperClient) GetFilterMessages(ctx *Context, id string) (*Messages, error) { + rawFilterMessages, err := wc.client.FilterMessages(ctx.context, id) + if err != nil { + return nil, err + } + res := make([]*whisper.Message, len(rawFilterMessages)) + copy(res, rawFilterMessages) + return &Messages{res}, nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/mobile/types.go b/vendor/github.com/ethereum/go-ethereum/mobile/types.go index b2780f3076..443d07ea93 100644 --- a/vendor/github.com/ethereum/go-ethereum/mobile/types.go +++ b/vendor/github.com/ethereum/go-ethereum/mobile/types.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv6" ) // A Nonce is a 64-bit hash which proves (combined with the mix-hash) that @@ -334,3 +335,95 @@ func (r *Receipt) GetLogs() *Logs { return &Logs{r.receipt.Logs} } func (r *Receipt) GetTxHash() *Hash { return &Hash{r.receipt.TxHash} } func (r *Receipt) GetContractAddress() *Address { return &Address{r.receipt.ContractAddress} } func (r *Receipt) GetGasUsed() int64 { return int64(r.receipt.GasUsed) } + +// Info represents a diagnostic information about the whisper node. +type Info struct { + info *whisper.Info +} + +// NewMessage represents a new whisper message that is posted through the RPC. 
+type NewMessage struct { + newMessage *whisper.NewMessage +} + +func NewNewMessage() *NewMessage { + nm := &NewMessage{ + newMessage: new(whisper.NewMessage), + } + return nm +} + +func (nm *NewMessage) GetSymKeyID() string { return nm.newMessage.SymKeyID } +func (nm *NewMessage) SetSymKeyID(symKeyID string) { nm.newMessage.SymKeyID = symKeyID } +func (nm *NewMessage) GetPublicKey() []byte { return nm.newMessage.PublicKey } +func (nm *NewMessage) SetPublicKey(publicKey []byte) { + nm.newMessage.PublicKey = common.CopyBytes(publicKey) +} +func (nm *NewMessage) GetSig() string { return nm.newMessage.Sig } +func (nm *NewMessage) SetSig(sig string) { nm.newMessage.Sig = sig } +func (nm *NewMessage) GetTTL() int64 { return int64(nm.newMessage.TTL) } +func (nm *NewMessage) SetTTL(ttl int64) { nm.newMessage.TTL = uint32(ttl) } +func (nm *NewMessage) GetPayload() []byte { return nm.newMessage.Payload } +func (nm *NewMessage) SetPayload(payload []byte) { nm.newMessage.Payload = common.CopyBytes(payload) } +func (nm *NewMessage) GetPowTime() int64 { return int64(nm.newMessage.PowTime) } +func (nm *NewMessage) SetPowTime(powTime int64) { nm.newMessage.PowTime = uint32(powTime) } +func (nm *NewMessage) GetPowTarget() float64 { return nm.newMessage.PowTarget } +func (nm *NewMessage) SetPowTarget(powTarget float64) { nm.newMessage.PowTarget = powTarget } +func (nm *NewMessage) GetTargetPeer() string { return nm.newMessage.TargetPeer } +func (nm *NewMessage) SetTargetPeer(targetPeer string) { nm.newMessage.TargetPeer = targetPeer } +func (nm *NewMessage) GetTopic() []byte { return nm.newMessage.Topic[:] } +func (nm *NewMessage) SetTopic(topic []byte) { nm.newMessage.Topic = whisper.BytesToTopic(topic) } + +// Message represents a whisper message. 
+type Message struct { + message *whisper.Message +} + +func (m *Message) GetSig() []byte { return m.message.Sig } +func (m *Message) GetTTL() int64 { return int64(m.message.TTL) } +func (m *Message) GetTimestamp() int64 { return int64(m.message.Timestamp) } +func (m *Message) GetPayload() []byte { return m.message.Payload } +func (m *Message) GetPoW() float64 { return m.message.PoW } +func (m *Message) GetHash() []byte { return m.message.Hash } +func (m *Message) GetDst() []byte { return m.message.Dst } + +// Messages represents an array of messages. +type Messages struct { + messages []*whisper.Message +} + +// Size returns the number of messages in the slice. +func (m *Messages) Size() int { + return len(m.messages) +} + +// Get returns the message at the given index from the slice. +func (m *Messages) Get(index int) (message *Message, _ error) { + if index < 0 || index >= len(m.messages) { + return nil, errors.New("index out of bounds") + } + return &Message{m.messages[index]}, nil +} + +// Criteria holds various filter options for inbound messages. 
+type Criteria struct { + criteria *whisper.Criteria +} + +func NewCriteria(topic []byte) *Criteria { + c := &Criteria{ + criteria: new(whisper.Criteria), + } + encodedTopic := whisper.BytesToTopic(topic) + c.criteria.Topics = []whisper.TopicType{encodedTopic} + return c +} + +func (c *Criteria) GetSymKeyID() string { return c.criteria.SymKeyID } +func (c *Criteria) SetSymKeyID(symKeyID string) { c.criteria.SymKeyID = symKeyID } +func (c *Criteria) GetPrivateKeyID() string { return c.criteria.PrivateKeyID } +func (c *Criteria) SetPrivateKeyID(privateKeyID string) { c.criteria.PrivateKeyID = privateKeyID } +func (c *Criteria) GetSig() []byte { return c.criteria.Sig } +func (c *Criteria) SetSig(sig []byte) { c.criteria.Sig = common.CopyBytes(sig) } +func (c *Criteria) GetMinPow() float64 { return c.criteria.MinPow } +func (c *Criteria) SetMinPow(pow float64) { c.criteria.MinPow = pow } diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go b/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go index 0a554bbeb4..a130b5494f 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go @@ -162,7 +162,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) { var buckets [][]*Node for _, b := range &tab.buckets { if len(b.entries) > 0 { - buckets = append(buckets, b.entries[:]) + buckets = append(buckets, b.entries) } } if len(buckets) == 0 { diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go index b93c93d648..a6cabf0803 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go @@ -1228,7 +1228,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) { if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash { return nil, errors.New("topic hash mismatch") } - if int(data.Idx) < 0 || 
int(data.Idx) >= len(data.Topics) { + if data.Idx >= uint(len(data.Topics)) { return nil, errors.New("topic index out of range") } return pongpkt.data.(*pong), nil diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/table.go b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/table.go index c793be5082..4f4b2426f4 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/table.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/table.go @@ -123,7 +123,7 @@ func (tab *Table) readRandomNodes(buf []*Node) (n int) { var buckets [][]*Node for _, b := range &tab.buckets { if len(b.entries) > 0 { - buckets = append(buckets, b.entries[:]) + buckets = append(buckets, b.entries) } } if len(buckets) == 0 { diff --git a/vendor/github.com/ethereum/go-ethereum/params/config.go b/vendor/github.com/ethereum/go-ethereum/params/config.go index 70a1edead4..a9e631cde4 100644 --- a/vendor/github.com/ethereum/go-ethereum/params/config.go +++ b/vendor/github.com/ethereum/go-ethereum/params/config.go @@ -46,6 +46,15 @@ var ( Ethash: new(EthashConfig), } + // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. + MainnetTrustedCheckpoint = &TrustedCheckpoint{ + Name: "mainnet", + SectionIndex: 193, + SectionHead: common.HexToHash("0xc2d574295ecedc4d58530ae24c31a5a98be7d2b3327fba0dd0f4ed3913828a55"), + CHTRoot: common.HexToHash("0x5d1027dfae688c77376e842679ceada87fd94738feb9b32ef165473bfbbb317b"), + BloomRoot: common.HexToHash("0xd38be1a06aabd568e10957fee4fcc523bc64996bcf31bae3f55f86e0a583919f"), + } + // TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network. TestnetChainConfig = &ChainConfig{ ChainID: big.NewInt(3), @@ -61,6 +70,15 @@ var ( Ethash: new(EthashConfig), } + // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. 
+ TestnetTrustedCheckpoint = &TrustedCheckpoint{ + Name: "testnet", + SectionIndex: 123, + SectionHead: common.HexToHash("0xa372a53decb68ce453da12bea1c8ee7b568b276aa2aab94d9060aa7c81fc3dee"), + CHTRoot: common.HexToHash("0x6b02e7fada79cd2a80d4b3623df9c44384d6647fc127462e1c188ccd09ece87b"), + BloomRoot: common.HexToHash("0xf2d27490914968279d6377d42868928632573e823b5d1d4a944cba6009e16259"), + } + // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. RinkebyChainConfig = &ChainConfig{ ChainID: big.NewInt(4), @@ -79,24 +97,45 @@ var ( }, } + // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. + RinkebyTrustedCheckpoint = &TrustedCheckpoint{ + Name: "rinkeby", + SectionIndex: 91, + SectionHead: common.HexToHash("0x435b7b2d8a7922f3b9a522f2fb02730e95e0e1782f0f5443894d5415bba37154"), + CHTRoot: common.HexToHash("0x0664bf7ecccfb6775c4eca6f0f264fb5282a22754a2135a1ac4bff2ef02898dd"), + BloomRoot: common.HexToHash("0x2a64df2400c3a2cb6400639bb6ed29389abdb4d93e2e525aa7c21f38767cd96f"), + } + // AllEthashProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Ethash consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. 
// // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} TestRules = TestChainConfig.Rules(new(big.Int)) ) +// TrustedCheckpoint represents a set of post-processed trie roots (CHT and +// BloomTrie) associated with the appropriate section index and head hash. It is +// used to start light syncing from this checkpoint and avoid downloading the +// entire header chain while still being able to securely access old headers/logs. +type TrustedCheckpoint struct { + Name string `json:"-"` + SectionIndex uint64 `json:"sectionIndex"` + SectionHead common.Hash `json:"sectionHead"` + CHTRoot common.Hash `json:"chtRoot"` + BloomRoot common.Hash `json:"bloomRoot"` +} + // ChainConfig is the core config which determines the blockchain settings. // // ChainConfig is stored in the database on a per block basis. 
This means @@ -119,6 +158,7 @@ type ChainConfig struct { ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium) ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated) + EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated) // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` @@ -204,6 +244,11 @@ func (c *ChainConfig) IsConstantinople(num *big.Int) bool { return isForked(c.ConstantinopleBlock, num) } +// IsEWASM returns whether num represents a block number after the EWASM fork +func (c *ChainConfig) IsEWASM(num *big.Int) bool { + return isForked(c.EWASMBlock, num) +} + // GasTable returns the gas table corresponding to the current phase (homestead or homestead reprice). // // The returned GasTable's fields shouldn't, under any circumstances, be changed. @@ -269,6 +314,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) { return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock) } + if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) { + return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock) + } return nil } @@ -327,7 +375,7 @@ func (err *ConfigCompatError) Error() string { return fmt.Sprintf("mismatching %s in database (have %d, want %d, rewindto %d)", err.What, err.StoredConfig, err.NewConfig, err.RewindTo) } -// Rules wraps ChainConfig and is merely syntatic sugar or can be used for functions +// Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions // that do not have or require information about the block. 
// // Rules is a one time interface meaning that it shouldn't be used in between transition @@ -335,7 +383,7 @@ func (err *ConfigCompatError) Error() string { type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool - IsByzantium bool + IsByzantium, IsConstantinople bool } // Rules ensures c's ChainID is not nil. @@ -344,5 +392,13 @@ func (c *ChainConfig) Rules(num *big.Int) Rules { if chainID == nil { chainID = new(big.Int) } - return Rules{ChainID: new(big.Int).Set(chainID), IsHomestead: c.IsHomestead(num), IsEIP150: c.IsEIP150(num), IsEIP155: c.IsEIP155(num), IsEIP158: c.IsEIP158(num), IsByzantium: c.IsByzantium(num)} + return Rules{ + ChainID: new(big.Int).Set(chainID), + IsHomestead: c.IsHomestead(num), + IsEIP150: c.IsEIP150(num), + IsEIP155: c.IsEIP155(num), + IsEIP158: c.IsEIP158(num), + IsByzantium: c.IsByzantium(num), + IsConstantinople: c.IsConstantinople(num), + } } diff --git a/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go b/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go index 4b53b3320a..c8b6609afb 100644 --- a/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go +++ b/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go @@ -32,15 +32,26 @@ const ( TxGasContractCreation uint64 = 53000 // Per transaction that creates a contract. NOTE: Not payable on data of calls between transactions. TxDataZeroGas uint64 = 4 // Per byte of data attached to a transaction that equals zero. NOTE: Not payable on data of calls between transactions. QuadCoeffDiv uint64 = 512 // Divisor for the quadratic particle of the memory cost equation. - SstoreSetGas uint64 = 20000 // Once per SLOAD operation. LogDataGas uint64 = 8 // Per byte in a LOG* operation's data. CallStipend uint64 = 2300 // Free gas given at beginning of call. - Sha3Gas uint64 = 30 // Once per SHA3 operation. - Sha3WordGas uint64 = 6 // Once per word of the SHA3 operation's data. 
- SstoreResetGas uint64 = 5000 // Once per SSTORE operation if the zeroness changes from zero. - SstoreClearGas uint64 = 5000 // Once per SSTORE operation if the zeroness doesn't change. - SstoreRefundGas uint64 = 15000 // Once per SSTORE operation if the zeroness changes to zero. + Sha3Gas uint64 = 30 // Once per SHA3 operation. + Sha3WordGas uint64 = 6 // Once per word of the SHA3 operation's data. + + SstoreSetGas uint64 = 20000 // Once per SLOAD operation. + SstoreResetGas uint64 = 5000 // Once per SSTORE operation if the zeroness changes from zero. + SstoreClearGas uint64 = 5000 // Once per SSTORE operation if the zeroness doesn't change. + SstoreRefundGas uint64 = 15000 // Once per SSTORE operation if the zeroness changes to zero. + + NetSstoreNoopGas uint64 = 200 // Once per SSTORE operation if the value doesn't change. + NetSstoreInitGas uint64 = 20000 // Once per SSTORE operation from clean zero. + NetSstoreCleanGas uint64 = 5000 // Once per SSTORE operation from clean non-zero. + NetSstoreDirtyGas uint64 = 200 // Once per SSTORE operation from dirty. + + NetSstoreClearRefund uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot + NetSstoreResetRefund uint64 = 4800 // Once per SSTORE operation for resetting to the original non-zero value + NetSstoreResetClearRefund uint64 = 19800 // Once per SSTORE operation for resetting to the original zero value + JumpdestGas uint64 = 1 // Refunded gas, once per SSTORE operation if the zeroness changes to zero. EpochDuration uint64 = 30000 // Duration between proof-of-work epochs. CallGas uint64 = 40 // Once per CALL operation & message call transaction. 
diff --git a/vendor/github.com/ethereum/go-ethereum/params/version.go b/vendor/github.com/ethereum/go-ethereum/params/version.go index 12f4111507..cf7da5d3e0 100644 --- a/vendor/github.com/ethereum/go-ethereum/params/version.go +++ b/vendor/github.com/ethereum/go-ethereum/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 8 // Minor version component of the current release - VersionPatch = 15 // Patch version component of the current release + VersionPatch = 16 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go b/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go index 3df799e1ec..8c2dd518e2 100644 --- a/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go +++ b/vendor/github.com/ethereum/go-ethereum/rlp/typecache.go @@ -76,7 +76,7 @@ func cachedTypeInfo1(typ reflect.Type, tags tags) (*typeinfo, error) { // another goroutine got the write lock first return info, nil } - // put a dummmy value into the cache before generating. + // put a dummy value into the cache before generating. // if the generator tries to lookup itself, it will get // the dummy value and won't call itself recursively. 
typeCache[key] = new(typeinfo) diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/client.go b/vendor/github.com/ethereum/go-ethereum/rpc/client.go index a2ef2ed6b6..d96189a2d8 100644 --- a/vendor/github.com/ethereum/go-ethereum/rpc/client.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/client.go @@ -487,6 +487,7 @@ func (c *Client) write(ctx context.Context, msg interface{}) error { } c.writeConn.SetWriteDeadline(deadline) err := json.NewEncoder(c.writeConn).Encode(msg) + c.writeConn.SetWriteDeadline(time.Time{}) if err != nil { c.writeConn = nil } diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/websocket.go b/vendor/github.com/ethereum/go-ethereum/rpc/websocket.go index e7a86ddaed..eae8320e56 100644 --- a/vendor/github.com/ethereum/go-ethereum/rpc/websocket.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/websocket.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "crypto/tls" + "encoding/base64" "encoding/json" "fmt" "net" @@ -118,12 +119,7 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http return f } -// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server -// that is listening on the given endpoint. -// -// The context is used for the initial connection establishment. It does not -// affect subsequent interactions with the client. 
-func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) { +func wsGetConfig(endpoint, origin string) (*websocket.Config, error) { if origin == "" { var err error if origin, err = os.Hostname(); err != nil { @@ -140,6 +136,25 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error return nil, err } + if config.Location.User != nil { + b64auth := base64.StdEncoding.EncodeToString([]byte(config.Location.User.String())) + config.Header.Add("Authorization", "Basic "+b64auth) + config.Location.User = nil + } + return config, nil +} + +// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server +// that is listening on the given endpoint. +// +// The context is used for the initial connection establishment. It does not +// affect subsequent interactions with the client. +func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) { + config, err := wsGetConfig(endpoint, origin) + if err != nil { + return nil, err + } + return newClient(ctx, func(ctx context.Context) (net.Conn, error) { return wsDialContext(ctx, config) }) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go index b1a5947831..52d9098271 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go @@ -102,6 +102,7 @@ const AccessTypePass = AccessType("pass") const AccessTypePK = AccessType("pk") const AccessTypeACT = AccessType("act") +// NewAccessEntryPassword creates a manifest AccessEntry in order to create an ACT protected by a password func NewAccessEntryPassword(salt []byte, kdfParams *KdfParams) (*AccessEntry, error) { if len(salt) != 32 { return nil, fmt.Errorf("salt should be 32 bytes long") @@ -113,6 +114,7 @@ func NewAccessEntryPassword(salt []byte, kdfParams *KdfParams) (*AccessEntry, er }, nil } +// NewAccessEntryPK creates a manifest AccessEntry in order 
to create an ACT protected by a pair of Elliptic Curve keys func NewAccessEntryPK(publisher string, salt []byte) (*AccessEntry, error) { if len(publisher) != 66 { return nil, fmt.Errorf("publisher should be 66 characters long, got %d", len(publisher)) @@ -127,6 +129,7 @@ func NewAccessEntryPK(publisher string, salt []byte) (*AccessEntry, error) { }, nil } +// NewAccessEntryACT creates a manifest AccessEntry in order to create an ACT protected by a combination of EC keys and passwords func NewAccessEntryACT(publisher string, salt []byte, act string) (*AccessEntry, error) { if len(salt) != 32 { return nil, fmt.Errorf("salt should be 32 bytes long") @@ -140,15 +143,19 @@ func NewAccessEntryACT(publisher string, salt []byte, act string) (*AccessEntry, Publisher: publisher, Salt: salt, Act: act, + KdfParams: DefaultKdfParams, }, nil } +// NOOPDecrypt is a generic decrypt function that is passed into the API in places where real ACT decryption capabilities are +// either unwanted, or alternatively, cannot be implemented in the immediate scope func NOOPDecrypt(*ManifestEntry) error { return nil } var DefaultKdfParams = NewKdfParams(262144, 1, 8) +// NewKdfParams returns a KdfParams struct with the given scrypt params func NewKdfParams(n, p, r int) *KdfParams { return &KdfParams{ @@ -161,15 +168,20 @@ func NewKdfParams(n, p, r int) *KdfParams { // NewSessionKeyPassword creates a session key based on a shared secret (password) and the given salt // and kdf parameters in the access entry func NewSessionKeyPassword(password string, accessEntry *AccessEntry) ([]byte, error) { - if accessEntry.Type != AccessTypePass { + if accessEntry.Type != AccessTypePass && accessEntry.Type != AccessTypeACT { return nil, errors.New("incorrect access entry type") + } + return sessionKeyPassword(password, accessEntry.Salt, accessEntry.KdfParams) +} + +func sessionKeyPassword(password string, salt []byte, kdfParams *KdfParams) ([]byte, error) { return scrypt.Key( []byte(password), - 
accessEntry.Salt, - accessEntry.KdfParams.N, - accessEntry.KdfParams.R, - accessEntry.KdfParams.P, + salt, + kdfParams.N, + kdfParams.R, + kdfParams.P, 32, ) } @@ -188,9 +200,6 @@ func NewSessionKeyPK(private *ecdsa.PrivateKey, public *ecdsa.PublicKey, salt [] return sessionKey, nil } -func (a *API) NodeSessionKey(privateKey *ecdsa.PrivateKey, publicKey *ecdsa.PublicKey, salt []byte) ([]byte, error) { - return NewSessionKeyPK(privateKey, publicKey, salt) -} func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.PrivateKey) DecryptFunc { return func(m *ManifestEntry) error { if m.Access == nil { @@ -242,7 +251,7 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva if err != nil { return ErrDecrypt } - key, err := a.NodeSessionKey(pk, publisher, m.Access.Salt) + key, err := NewSessionKeyPK(pk, publisher, m.Access.Salt) if err != nil { return ErrDecrypt } @@ -261,6 +270,11 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva m.Access = nil return nil case "act": + var ( + sessionKey []byte + err error + ) + publisherBytes, err := hex.DecodeString(m.Access.Publisher) if err != nil { return ErrDecrypt @@ -270,40 +284,35 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva return ErrDecrypt } - sessionKey, err := a.NodeSessionKey(pk, publisher, m.Access.Salt) + sessionKey, err = NewSessionKeyPK(pk, publisher, m.Access.Salt) if err != nil { return ErrDecrypt } - hasher := sha3.NewKeccak256() - hasher.Write(append(sessionKey, 0)) - lookupKey := hasher.Sum(nil) - - hasher.Reset() - - hasher.Write(append(sessionKey, 1)) - accessKeyDecryptionKey := hasher.Sum(nil) - - lk := hex.EncodeToString(lookupKey) - list, err := a.GetManifestList(ctx, NOOPDecrypt, storage.Address(common.Hex2Bytes(m.Access.Act)), lk) - - found := "" - for _, v := range list.Entries { - if v.Path == lk { - found = v.Hash - } - } - - if found == "" { - return ErrDecrypt - } - - v, err := 
hex.DecodeString(found) + found, ciphertext, decryptionKey, err := a.getACTDecryptionKey(ctx, storage.Address(common.Hex2Bytes(m.Access.Act)), sessionKey) if err != nil { return err } - enc := NewRefEncryption(len(v) - 8) - decodedRef, err := enc.Decrypt(v, accessKeyDecryptionKey) + if !found { + // try to fall back to password + if credentials != "" { + sessionKey, err = NewSessionKeyPassword(credentials, m.Access) + if err != nil { + return err + } + found, ciphertext, decryptionKey, err = a.getACTDecryptionKey(ctx, storage.Address(common.Hex2Bytes(m.Access.Act)), sessionKey) + if err != nil { + return err + } + if !found { + return ErrDecrypt + } + } else { + return ErrDecrypt + } + } + enc := NewRefEncryption(len(ciphertext) - 8) + decodedRef, err := enc.Decrypt(ciphertext, decryptionKey) if err != nil { return ErrDecrypt } @@ -326,6 +335,33 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva } } +func (a *API) getACTDecryptionKey(ctx context.Context, actManifestAddress storage.Address, sessionKey []byte) (found bool, ciphertext, decryptionKey []byte, err error) { + hasher := sha3.NewKeccak256() + hasher.Write(append(sessionKey, 0)) + lookupKey := hasher.Sum(nil) + hasher.Reset() + + hasher.Write(append(sessionKey, 1)) + accessKeyDecryptionKey := hasher.Sum(nil) + hasher.Reset() + + lk := hex.EncodeToString(lookupKey) + list, err := a.GetManifestList(ctx, NOOPDecrypt, actManifestAddress, lk) + if err != nil { + return false, nil, nil, err + } + for _, v := range list.Entries { + if v.Path == lk { + cipherTextBytes, err := hex.DecodeString(v.Hash) + if err != nil { + return false, nil, nil, err + } + return true, cipherTextBytes, accessKeyDecryptionKey, nil + } + } + return false, nil, nil, nil +} + func GenerateAccessControlManifest(ctx *cli.Context, ref string, accessKey []byte, ae *AccessEntry) (*Manifest, error) { refBytes, err := hex.DecodeString(ref) if err != nil { @@ -352,7 +388,9 @@ func GenerateAccessControlManifest(ctx 
*cli.Context, ref string, accessKey []byt return m, nil } -func DoPKNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { +// DoPK is a helper function to the CLI API that handles the entire business logic for +// creating a session key and access entry given the cli context, ec keys and salt +func DoPK(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { if granteePublicKey == "" { return nil, nil, errors.New("need a grantee Public Key") } @@ -383,9 +421,11 @@ func DoPKNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey st return sessionKey, ae, nil } -func DoACTNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees []string) (accessKey []byte, ae *AccessEntry, actManifest *Manifest, err error) { - if len(grantees) == 0 { - return nil, nil, nil, errors.New("did not get any grantee public keys") +// DoACT is a helper function to the CLI API that handles the entire business logic for +// creating a access key, access entry and ACT manifest (including uploading it) given the cli context, ec keys, password grantees and salt +func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees []string, encryptPasswords []string) (accessKey []byte, ae *AccessEntry, actManifest *Manifest, err error) { + if len(grantees) == 0 && len(encryptPasswords) == 0 { + return nil, nil, nil, errors.New("did not get any grantee public keys or any encryption passwords") } publisherPub := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)) @@ -430,7 +470,31 @@ func DoACTNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grant enc := NewRefEncryption(len(accessKey)) encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey) + if err != nil { + return nil, nil, nil, err + } + 
lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey) + } + for _, pass := range encryptPasswords { + sessionKey, err := sessionKeyPassword(pass, salt, DefaultKdfParams) + if err != nil { + return nil, nil, nil, err + } + hasher := sha3.NewKeccak256() + hasher.Write(append(sessionKey, 0)) + lookupKey := hasher.Sum(nil) + + hasher.Reset() + hasher.Write(append(sessionKey, 1)) + + accessKeyEncryptionKey := hasher.Sum(nil) + + enc := NewRefEncryption(len(accessKey)) + encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey) + if err != nil { + return nil, nil, nil, err + } lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey) } @@ -454,7 +518,10 @@ func DoACTNew(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grant return accessKey, ae, m, nil } -func DoPasswordNew(ctx *cli.Context, password string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { +// DoPassword is a helper function to the CLI API that handles the entire business logic for +// creating a session key and an access entry given the cli context, password and salt. 
+// By default - DefaultKdfParams are used as the scrypt params +func DoPassword(ctx *cli.Context, password string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) { ae, err = NewAccessEntryPassword(salt, DefaultKdfParams) if err != nil { return nil, nil, err diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go index d733ad989c..d7b6d84190 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go @@ -250,13 +250,6 @@ func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Han return } -// Upload to be used only in TEST -func (a *API) Upload(ctx context.Context, uploadDir, index string, toEncrypt bool) (hash string, err error) { - fs := NewFileSystem(a) - hash, err = fs.Upload(uploadDir, index, toEncrypt) - return hash, err -} - // Retrieve FileStore reader API func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) { return a.fileStore.Retrieve(ctx, addr) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/config.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/config.go index 3044dc2e52..baa13105a6 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/config.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/config.go @@ -62,6 +62,7 @@ type Config struct { NetworkID uint64 SwapEnabled bool SyncEnabled bool + SyncingSkipCheck bool DeliverySkipCheck bool LightNodeEnabled bool SyncUpdateDelay time.Duration @@ -89,7 +90,8 @@ func NewConfig() (c *Config) { NetworkID: network.DefaultNetworkID, SwapEnabled: false, SyncEnabled: true, - DeliverySkipCheck: false, + SyncingSkipCheck: false, + DeliverySkipCheck: true, SyncUpdateDelay: 15 * time.Second, SwapAPI: "", } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go index 
9a2e369149..ffe6c16d2d 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go @@ -25,27 +25,27 @@ import ( ) type RefEncryption struct { - spanEncryption encryption.Encryption - dataEncryption encryption.Encryption - span []byte + refSize int + span []byte } func NewRefEncryption(refSize int) *RefEncryption { span := make([]byte, 8) binary.LittleEndian.PutUint64(span, uint64(refSize)) return &RefEncryption{ - spanEncryption: encryption.New(0, uint32(refSize/32), sha3.NewKeccak256), - dataEncryption: encryption.New(refSize, 0, sha3.NewKeccak256), - span: span, + refSize: refSize, + span: span, } } func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) { - encryptedSpan, err := re.spanEncryption.Encrypt(re.span, key) + spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256) + encryptedSpan, err := spanEncryption.Encrypt(re.span) if err != nil { return nil, err } - encryptedData, err := re.dataEncryption.Encrypt(ref, key) + dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256) + encryptedData, err := dataEncryption.Encrypt(ref) if err != nil { return nil, err } @@ -57,7 +57,8 @@ func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) { } func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) { - decryptedSpan, err := re.spanEncryption.Decrypt(ref[:8], key) + spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256) + decryptedSpan, err := spanEncryption.Decrypt(ref[:8]) if err != nil { return nil, err } @@ -67,7 +68,8 @@ func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) { return nil, errors.New("invalid span in encrypted reference") } - decryptedRef, err := re.dataEncryption.Decrypt(ref[8:], key) + dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256) + decryptedRef, err := dataEncryption.Decrypt(ref[8:]) if err != nil { 
return nil, err } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go index 2aa1963969..af1269b934 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go @@ -129,7 +129,7 @@ func NewServer(api *api.API, corsString string) *Server { }) mux.Handle("/bzz-immutable:/", methodHandler{ "GET": Adapt( - http.HandlerFunc(server.HandleGet), + http.HandlerFunc(server.HandleBzzGet), defaultMiddlewares..., ), }) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go index a1329a800f..d44ad2277c 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go @@ -69,9 +69,12 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, if err != nil { return nil, err } - key, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt) - wait(ctx) - return key, err + addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt) + if err != nil { + return nil, err + } + err = wait(ctx) + return addr, err } // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme @@ -87,8 +90,12 @@ func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (sto if err != nil { return nil, err } - key, _, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false) - return key, err + addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false) + if err != nil { + return nil, err + } + err = wait(ctx) + return addr, err } // ManifestWriter is used to add and remove entries from an underlying manifest @@ -106,21 +113,26 @@ func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC return &ManifestWriter{a, trie, quitC}, 
nil } -// AddEntry stores the given data and adds the resulting key to the manifest -func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (key storage.Address, err error) { +// AddEntry stores the given data and adds the resulting address to the manifest +func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (addr storage.Address, err error) { entry := newManifestTrieEntry(e, nil) if data != nil { - key, _, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted) + var wait func(context.Context) error + addr, wait, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted) + if err != nil { + return nil, err + } + err = wait(ctx) if err != nil { return nil, err } - entry.Hash = key.Hex() + entry.Hash = addr.Hex() } if entry.Hash == "" { - return key, errors.New("missing entry hash") + return addr, errors.New("missing entry hash") } m.trie.addEntry(entry, m.quitC) - return key, nil + return addr, nil } // RemoveEntry removes the given path from the manifest @@ -129,7 +141,7 @@ func (m *ManifestWriter) RemoveEntry(path string) error { return nil } -// Store stores the manifest, returning the resulting storage key +// Store stores the manifest, returning the resulting storage address func (m *ManifestWriter) Store() (storage.Address, error) { return m.trie.ref, m.trie.recalcAndStore() } @@ -211,51 +223,51 @@ type manifestTrieEntry struct { subtrie *manifestTrie } -func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand - log.Trace("manifest lookup", "key", hash) +func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand + log.Trace("manifest lookup", "addr", addr) // retrieve manifest via 
FileStore - manifestReader, isEncrypted := fileStore.Retrieve(ctx, hash) - log.Trace("reader retrieved", "key", hash) - return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC, decrypt) + manifestReader, isEncrypted := fileStore.Retrieve(ctx, addr) + log.Trace("reader retrieved", "addr", addr) + return readManifest(manifestReader, addr, fileStore, isEncrypted, quitC, decrypt) } -func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand // TODO check size for oversized manifests size, err := mr.Size(mr.Context(), quitC) if err != nil { // size == 0 // can't determine size means we don't have the root chunk - log.Trace("manifest not found", "key", hash) + log.Trace("manifest not found", "addr", addr) err = fmt.Errorf("Manifest not Found") return } if size > manifestSizeLimit { - log.Warn("manifest exceeds size limit", "key", hash, "size", size, "limit", manifestSizeLimit) + log.Warn("manifest exceeds size limit", "addr", addr, "size", size, "limit", manifestSizeLimit) err = fmt.Errorf("Manifest size of %v bytes exceeds the %v byte limit", size, manifestSizeLimit) return } manifestData := make([]byte, size) read, err := mr.Read(manifestData) if int64(read) < size { - log.Trace("manifest not found", "key", hash) + log.Trace("manifest not found", "addr", addr) if err == nil { err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size) } return } - log.Debug("manifest retrieved", "key", hash) + log.Debug("manifest retrieved", "addr", addr) var man struct { Entries []*manifestTrieEntry `json:"entries"` } err = 
json.Unmarshal(manifestData, &man) if err != nil { - err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err) - log.Trace("malformed manifest", "key", hash) + err = fmt.Errorf("Manifest %v is malformed: %v", addr.Log(), err) + log.Trace("malformed manifest", "addr", addr) return } - log.Trace("manifest entries", "key", hash, "len", len(man.Entries)) + log.Trace("manifest entries", "addr", addr, "len", len(man.Entries)) trie = &manifestTrie{ fileStore: fileStore, @@ -406,12 +418,12 @@ func (mt *manifestTrie) recalcAndStore() error { sr := bytes.NewReader(manifest) ctx := context.TODO() - key, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted) + addr, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted) if err2 != nil { return err2 } err2 = wait(ctx) - mt.ref = key + mt.ref = addr return err2 } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go index 55bf7c0332..3019594809 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go @@ -26,30 +26,30 @@ import ( // discovery bzz extension for requesting and relaying node address records -// discPeer wraps BzzPeer and embeds an Overlay connectivity driver -type discPeer struct { +// Peer wraps BzzPeer and embeds Kademlia overlay connectivity driver +type Peer struct { *BzzPeer - overlay Overlay - sentPeers bool // whether we already sent peer closer to this address - mtx sync.RWMutex + kad *Kademlia + sentPeers bool // whether we already sent peer closer to this address + mtx sync.RWMutex // peers map[string]bool // tracks node records sent to the peer depth uint8 // the proximity order advertised by remote as depth of saturation } -// NewDiscovery constructs a discovery peer -func newDiscovery(p *BzzPeer, o Overlay) *discPeer { - d := &discPeer{ - overlay: o, +// NewPeer 
constructs a discovery peer +func NewPeer(p *BzzPeer, kad *Kademlia) *Peer { + d := &Peer{ + kad: kad, BzzPeer: p, peers: make(map[string]bool), } // record remote as seen so we never send a peer its own record - d.seen(d) + d.seen(p.BzzAddr) return d } // HandleMsg is the message handler that delegates incoming messages -func (d *discPeer) HandleMsg(ctx context.Context, msg interface{}) error { +func (d *Peer) HandleMsg(ctx context.Context, msg interface{}) error { switch msg := msg.(type) { case *peersMsg: @@ -64,24 +64,18 @@ func (d *discPeer) HandleMsg(ctx context.Context, msg interface{}) error { } // NotifyDepth sends a message to all connections if depth of saturation is changed -func NotifyDepth(depth uint8, h Overlay) { - f := func(val OverlayConn, po int, _ bool) bool { - dp, ok := val.(*discPeer) - if ok { - dp.NotifyDepth(depth) - } +func NotifyDepth(depth uint8, kad *Kademlia) { + f := func(val *Peer, po int, _ bool) bool { + val.NotifyDepth(depth) return true } - h.EachConn(nil, 255, f) + kad.EachConn(nil, 255, f) } // NotifyPeer informs all peers about a newly added node -func NotifyPeer(p OverlayAddr, k Overlay) { - f := func(val OverlayConn, po int, _ bool) bool { - dp, ok := val.(*discPeer) - if ok { - dp.NotifyPeer(p, uint8(po)) - } +func NotifyPeer(p *BzzAddr, k *Kademlia) { + f := func(val *Peer, po int, _ bool) bool { + val.NotifyPeer(p, uint8(po)) return true } k.EachConn(p.Address(), 255, f) @@ -91,22 +85,20 @@ func NotifyPeer(p OverlayAddr, k Overlay) { // the peer's PO is within the recipients advertised depth // OR the peer is closer to the recipient than self // unless already notified during the connection session -func (d *discPeer) NotifyPeer(a OverlayAddr, po uint8) { +func (d *Peer) NotifyPeer(a *BzzAddr, po uint8) { // immediately return if (po < d.getDepth() && pot.ProxCmp(d.localAddr, d, a) != 1) || d.seen(a) { return } - // log.Trace(fmt.Sprintf("%08x peer %08x notified of peer %08x", d.localAddr.Over()[:4], d.Address()[:4], 
a.Address()[:4])) resp := &peersMsg{ - Peers: []*BzzAddr{ToAddr(a)}, + Peers: []*BzzAddr{a}, } go d.Send(context.TODO(), resp) } // NotifyDepth sends a subPeers Msg to the receiver notifying them about // a change in the depth of saturation -func (d *discPeer) NotifyDepth(po uint8) { - // log.Trace(fmt.Sprintf("%08x peer %08x notified of new depth %v", d.localAddr.Over()[:4], d.Address()[:4], po)) +func (d *Peer) NotifyDepth(po uint8) { go d.Send(context.TODO(), &subPeersMsg{Depth: po}) } @@ -141,7 +133,7 @@ func (msg peersMsg) String() string { // handlePeersMsg called by the protocol when receiving peerset (for target address) // list of nodes ([]PeerAddr in peersMsg) is added to the overlay db using the // Register interface method -func (d *discPeer) handlePeersMsg(msg *peersMsg) error { +func (d *Peer) handlePeersMsg(msg *peersMsg) error { // register all addresses if len(msg.Peers) == 0 { return nil @@ -149,12 +141,12 @@ func (d *discPeer) handlePeersMsg(msg *peersMsg) error { for _, a := range msg.Peers { d.seen(a) - NotifyPeer(a, d.overlay) + NotifyPeer(a, d.kad) } - return d.overlay.Register(toOverlayAddrs(msg.Peers...)) + return d.kad.Register(msg.Peers...) } -// subPeers msg is communicating the depth/sharpness/focus of the overlay table of a peer +// subPeers msg is communicating the depth of the overlay table of a peer type subPeersMsg struct { Depth uint8 } @@ -164,21 +156,20 @@ func (msg subPeersMsg) String() string { return fmt.Sprintf("%T: request peers > PO%02d. 
", msg, msg.Depth) } -func (d *discPeer) handleSubPeersMsg(msg *subPeersMsg) error { +func (d *Peer) handleSubPeersMsg(msg *subPeersMsg) error { if !d.sentPeers { d.setDepth(msg.Depth) var peers []*BzzAddr - d.overlay.EachConn(d.Over(), 255, func(p OverlayConn, po int, isproxbin bool) bool { + d.kad.EachConn(d.Over(), 255, func(p *Peer, po int, isproxbin bool) bool { if pob, _ := pof(d, d.localAddr, 0); pob > po { return false } - if !d.seen(p) { - peers = append(peers, ToAddr(p.Off())) + if !d.seen(p.BzzAddr) { + peers = append(peers, p.BzzAddr) } return true }) if len(peers) > 0 { - // log.Debug(fmt.Sprintf("%08x: %v peers sent to %v", d.overlay.BaseAddr(), len(peers), d)) go d.Send(context.TODO(), &peersMsg{Peers: peers}) } } @@ -186,9 +177,9 @@ func (d *discPeer) handleSubPeersMsg(msg *subPeersMsg) error { return nil } -// seen takes an Overlay peer and checks if it was sent to a peer already +// seen takes an peer address and checks if it was sent to a peer already // if not, marks the peer as sent -func (d *discPeer) seen(p OverlayPeer) bool { +func (d *Peer) seen(p *BzzAddr) bool { d.mtx.Lock() defer d.mtx.Unlock() k := string(p.Address()) @@ -199,12 +190,13 @@ func (d *discPeer) seen(p OverlayPeer) bool { return false } -func (d *discPeer) getDepth() uint8 { +func (d *Peer) getDepth() uint8 { d.mtx.RLock() defer d.mtx.RUnlock() return d.depth } -func (d *discPeer) setDepth(depth uint8) { + +func (d *Peer) setDepth(depth uint8) { d.mtx.Lock() defer d.mtx.Unlock() d.depth = depth diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/fetcher.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/fetcher.go new file mode 100644 index 0000000000..413b40cb5b --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/fetcher.go @@ -0,0 +1,305 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package network + +import ( + "context" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +var searchTimeout = 1 * time.Second + +// Time to consider peer to be skipped. +// Also used in stream delivery. +var RequestTimeout = 10 * time.Second + +type RequestFunc func(context.Context, *Request) (*discover.NodeID, chan struct{}, error) + +// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and +// keeps it alive until all active requests are completed. This can happen: +// 1. either because the chunk is delivered +// 2. or becuse the requestor cancelled/timed out +// Fetcher self destroys itself after it is completed. 
+// TODO: cancel all forward requests after termination +type Fetcher struct { + protoRequestFunc RequestFunc // request function fetcher calls to issue retrieve request for a chunk + addr storage.Address // the address of the chunk to be fetched + offerC chan *discover.NodeID // channel of sources (peer node id strings) + requestC chan struct{} + skipCheck bool +} + +type Request struct { + Addr storage.Address // chunk address + Source *discover.NodeID // nodeID of peer to request from (can be nil) + SkipCheck bool // whether to offer the chunk first or deliver directly + peersToSkip *sync.Map // peers not to request chunk from (only makes sense if source is nil) +} + +// NewRequest returns a new instance of Request based on chunk address skip check and +// a map of peers to skip. +func NewRequest(addr storage.Address, skipCheck bool, peersToSkip *sync.Map) *Request { + return &Request{ + Addr: addr, + SkipCheck: skipCheck, + peersToSkip: peersToSkip, + } +} + +// SkipPeer returns if the peer with nodeID should not be requested to deliver a chunk. +// Peers to skip are kept per Request and for a time period of RequestTimeout. +// This function is used in stream package in Delivery.RequestFromPeers to optimize +// requests for chunks. 
+func (r *Request) SkipPeer(nodeID string) bool { + val, ok := r.peersToSkip.Load(nodeID) + if !ok { + return false + } + t, ok := val.(time.Time) + if ok && time.Now().After(t.Add(RequestTimeout)) { + // deadine expired + r.peersToSkip.Delete(nodeID) + return false + } + return true +} + +// FetcherFactory is initialised with a request function and can create fetchers +type FetcherFactory struct { + request RequestFunc + skipCheck bool +} + +// NewFetcherFactory takes a request function and skip check parameter and creates a FetcherFactory +func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory { + return &FetcherFactory{ + request: request, + skipCheck: skipCheck, + } +} + +// New contructs a new Fetcher, for the given chunk. All peers in peersToSkip are not requested to +// deliver the given chunk. peersToSkip should always contain the peers which are actively requesting +// this chunk, to make sure we don't request back the chunks from them. +// The created Fetcher is started and returned. +func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peersToSkip *sync.Map) storage.NetFetcher { + fetcher := NewFetcher(source, f.request, f.skipCheck) + go fetcher.run(ctx, peersToSkip) + return fetcher +} + +// NewFetcher creates a new Fetcher for the given chunk address using the given request function. +func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher { + return &Fetcher{ + addr: addr, + protoRequestFunc: rf, + offerC: make(chan *discover.NodeID), + requestC: make(chan struct{}), + skipCheck: skipCheck, + } +} + +// Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg` and the node does not have the chunk locally. 
+func (f *Fetcher) Offer(ctx context.Context, source *discover.NodeID) { + // First we need to have this select to make sure that we return if context is done + select { + case <-ctx.Done(): + return + default: + } + + // This select alone would not guarantee that we return of context is done, it could potentially + // push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements) + select { + case f.offerC <- source: + case <-ctx.Done(): + } +} + +// Request is called when an upstream peer request the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally. +func (f *Fetcher) Request(ctx context.Context) { + // First we need to have this select to make sure that we return if context is done + select { + case <-ctx.Done(): + return + default: + } + + // This select alone would not guarantee that we return of context is done, it could potentially + // push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements) + select { + case f.requestC <- struct{}{}: + case <-ctx.Done(): + } +} + +// start prepares the Fetcher +// it keeps the Fetcher alive within the lifecycle of the passed context +func (f *Fetcher) run(ctx context.Context, peers *sync.Map) { + var ( + doRequest bool // determines if retrieval is initiated in the current iteration + wait *time.Timer // timer for search timeout + waitC <-chan time.Time // timer channel + sources []*discover.NodeID // known sources, ie. peers that offered the chunk + requested bool // true if the chunk was actually requested + ) + gone := make(chan *discover.NodeID) // channel to signal that a peer we requested from disconnected + + // loop that keeps the fetching process alive + // after every request a timer is set. 
If this goes off we request again from another peer + // note that the previous request is still alive and has the chance to deliver, so + // rerequesting extends the search. ie., + // if a peer we requested from is gone we issue a new request, so the number of active + // requests never decreases + for { + select { + + // incoming offer + case source := <-f.offerC: + log.Trace("new source", "peer addr", source, "request addr", f.addr) + // 1) the chunk is offered by a syncing peer + // add to known sources + sources = append(sources, source) + // launch a request to the source iff the chunk was requested (not just expected because its offered by a syncing peer) + doRequest = requested + + // incoming request + case <-f.requestC: + log.Trace("new request", "request addr", f.addr) + // 2) chunk is requested, set requested flag + // launch a request iff none been launched yet + doRequest = !requested + requested = true + + // peer we requested from is gone. fall back to another + // and remove the peer from the peers map + case id := <-gone: + log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr) + peers.Delete(id.String()) + doRequest = requested + + // search timeout: too much time passed since the last request, + // extend the search to a new peer if we can find one + case <-waitC: + log.Trace("search timed out: rerequesting", "request addr", f.addr) + doRequest = requested + + // all Fetcher context closed, can quit + case <-ctx.Done(): + log.Trace("terminate fetcher", "request addr", f.addr) + // TODO: send cancelations to all peers left over in peers map (i.e., those we requested from) + return + } + + // need to issue a new request + if doRequest { + var err error + sources, err = f.doRequest(ctx, gone, peers, sources) + if err != nil { + log.Info("unable to request", "request addr", f.addr, "err", err) + } + } + + // if wait channel is not set, set it to a timer + if requested { + if wait == nil { + wait = time.NewTimer(searchTimeout) + defer 
wait.Stop() + waitC = wait.C + } else { + // stop the timer and drain the channel if it was not drained earlier + if !wait.Stop() { + select { + case <-wait.C: + default: + } + } + // reset the timer to go off after searchTimeout + wait.Reset(searchTimeout) + } + } + doRequest = false + } +} + +// doRequest attempts at finding a peer to request the chunk from +// * first it tries to request explicitly from peers that are known to have offered the chunk +// * if there are no such peers (available) it tries to request it from a peer closest to the chunk address +// excluding those in the peersToSkip map +// * if no such peer is found an error is returned +// +// if a request is successful, +// * the peer's address is added to the set of peers to skip +// * the peer's address is removed from prospective sources, and +// * a go routine is started that reports on the gone channel if the peer is disconnected (or terminated their streamer) +func (f *Fetcher) doRequest(ctx context.Context, gone chan *discover.NodeID, peersToSkip *sync.Map, sources []*discover.NodeID) ([]*discover.NodeID, error) { + var i int + var sourceID *discover.NodeID + var quit chan struct{} + + req := &Request{ + Addr: f.addr, + SkipCheck: f.skipCheck, + peersToSkip: peersToSkip, + } + + foundSource := false + // iterate over known sources + for i = 0; i < len(sources); i++ { + req.Source = sources[i] + var err error + sourceID, quit, err = f.protoRequestFunc(ctx, req) + if err == nil { + // remove the peer from known sources + // Note: we can modify the source although we are looping on it, because we break from the loop immediately + sources = append(sources[:i], sources[i+1:]...) 
+ foundSource = true + break + } + } + + // if there are no known sources, or none available, we try request from a closest node + if !foundSource { + req.Source = nil + var err error + sourceID, quit, err = f.protoRequestFunc(ctx, req) + if err != nil { + // if no peers found to request from + return sources, err + } + } + // add peer to the set of peers to skip from now + peersToSkip.Store(sourceID.String(), time.Now()) + + // if the quit channel is closed, it indicates that the source peer we requested from + // disconnected or terminated its streamer + // here start a go routine that watches this channel and reports the source peer on the gone channel + // this go routine quits if the fetcher global context is done to prevent process leak + go func() { + select { + case <-quit: + gone <- sourceID + case <-ctx.Done(): + } + }() + return sources, nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go index 3660210883..425c1d5a1e 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go @@ -32,31 +32,10 @@ import ( Hive is the logistic manager of the swarm When the hive is started, a forever loop is launched that -asks the Overlay Topology driver (e.g., generic kademlia nodetable) +asks the kademlia nodetable to suggest peers to bootstrap connectivity */ -// Overlay is the interface for kademlia (or other topology drivers) -type Overlay interface { - // suggest peers to connect to - SuggestPeer() (OverlayAddr, int, bool) - // register and deregister peer connections - On(OverlayConn) (depth uint8, changed bool) - Off(OverlayConn) - // register peer addresses - Register([]OverlayAddr) error - // iterate over connected peers - EachConn([]byte, int, func(OverlayConn, int, bool) bool) - // iterate over known peers (address records) - EachAddr([]byte, int, func(OverlayAddr, int, bool) bool) - // 
pretty print the connectivity - String() string - // base Overlay address of the node itself - BaseAddr() []byte - // connectivity health check used for testing - Healthy(*PeerPot) *Health -} - // HiveParams holds the config options to hive type HiveParams struct { Discovery bool // if want discovery of not @@ -78,7 +57,7 @@ func NewHiveParams() *HiveParams { // Hive manages network connections of the swarm node type Hive struct { *HiveParams // settings - Overlay // the overlay connectiviy driver + *Kademlia // the overlay connectiviy driver Store state.Store // storage interface to save peers across sessions addPeer func(*discover.Node) // server callback to connect to a peer // bookkeeping @@ -88,12 +67,12 @@ type Hive struct { // NewHive constructs a new hive // HiveParams: config parameters -// Overlay: connectivity driver using a network topology +// Kademlia: connectivity driver using a network topology // StateStore: to save peers across sessions -func NewHive(params *HiveParams, overlay Overlay, store state.Store) *Hive { +func NewHive(params *HiveParams, kad *Kademlia, store state.Store) *Hive { return &Hive{ HiveParams: params, - Overlay: overlay, + Kademlia: kad, Store: store, } } @@ -133,7 +112,7 @@ func (h *Hive) Stop() error { } } log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4])) - h.EachConn(nil, 255, func(p OverlayConn, _ int, _ bool) bool { + h.EachConn(nil, 255, func(p *Peer, _ int, _ bool) bool { log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4])) p.Drop(nil) return true @@ -151,14 +130,14 @@ func (h *Hive) connect() { addr, depth, changed := h.SuggestPeer() if h.Discovery && changed { - NotifyDepth(uint8(depth), h) + NotifyDepth(uint8(depth), h.Kademlia) } if addr == nil { continue } log.Trace(fmt.Sprintf("%08x hive connect() suggested %08x", h.BaseAddr()[:4], addr.Address()[:4])) - under, err := discover.ParseNode(string(addr.(Addr).Under())) + under, err := 
discover.ParseNode(string(addr.Under())) if err != nil { log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err)) continue @@ -170,19 +149,19 @@ func (h *Hive) connect() { // Run protocol run function func (h *Hive) Run(p *BzzPeer) error { - dp := newDiscovery(p, h) + dp := NewPeer(p, h.Kademlia) depth, changed := h.On(dp) // if we want discovery, advertise change of depth if h.Discovery { if changed { // if depth changed, send to all peers - NotifyDepth(depth, h) + NotifyDepth(depth, h.Kademlia) } else { // otherwise just send depth to new peer dp.NotifyDepth(depth) } } - NotifyPeer(p.Off(), h) + NotifyPeer(p.BzzAddr, h.Kademlia) defer h.Off(dp) return dp.Run(dp.HandleMsg) } @@ -206,17 +185,6 @@ func (h *Hive) PeerInfo(id discover.NodeID) interface{} { } } -// ToAddr returns the serialisable version of u -func ToAddr(pa OverlayPeer) *BzzAddr { - if addr, ok := pa.(*BzzAddr); ok { - return addr - } - if p, ok := pa.(*discPeer); ok { - return p.BzzAddr - } - return pa.(*BzzPeer).BzzAddr -} - // loadPeers, savePeer implement persistence callback/ func (h *Hive) loadPeers() error { var as []*BzzAddr @@ -230,28 +198,19 @@ func (h *Hive) loadPeers() error { } log.Info(fmt.Sprintf("hive %08x: peers loaded", h.BaseAddr()[:4])) - return h.Register(toOverlayAddrs(as...)) -} - -// toOverlayAddrs transforms an array of BzzAddr to OverlayAddr -func toOverlayAddrs(as ...*BzzAddr) (oas []OverlayAddr) { - for _, a := range as { - oas = append(oas, OverlayAddr(a)) - } - return + return h.Register(as...) 
} // savePeers, savePeer implement persistence callback/ func (h *Hive) savePeers() error { var peers []*BzzAddr - h.Overlay.EachAddr(nil, 256, func(pa OverlayAddr, i int, _ bool) bool { + h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int, _ bool) bool { if pa == nil { log.Warn(fmt.Sprintf("empty addr: %v", i)) return true } - apa := ToAddr(pa) - log.Trace("saving peer", "peer", apa) - peers = append(peers, apa) + log.Trace("saving peer", "peer", pa) + peers = append(peers, pa) return true }) if err := h.Store.Put("peers", peers); err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go index 0177d449c4..55a0c6f13d 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go @@ -62,7 +62,7 @@ type KadParams struct { RetryExponent int // exponent to multiply retry intervals with MaxRetries int // maximum number of redial attempts // function to sanction or prevent suggesting a peer - Reachable func(OverlayAddr) bool + Reachable func(*BzzAddr) bool } // NewKadParams returns a params struct with default values @@ -106,45 +106,22 @@ func NewKademlia(addr []byte, params *KadParams) *Kademlia { } } -// OverlayPeer interface captures the common aspect of view of a peer from the Overlay -// topology driver -type OverlayPeer interface { - Address() []byte -} - -// OverlayConn represents a connected peer -type OverlayConn interface { - OverlayPeer - Drop(error) // call to indicate a peer should be expunged - Off() OverlayAddr // call to return a persitent OverlayAddr -} - -// OverlayAddr represents a kademlia peer record -type OverlayAddr interface { - OverlayPeer - Update(OverlayAddr) OverlayAddr // returns the updated version of the original -} - -// entry represents a Kademlia table entry (an extension of OverlayPeer) +// entry represents a Kademlia table entry (an extension of 
BzzAddr) type entry struct { - OverlayPeer + *BzzAddr + conn *Peer seenAt time.Time retries int } -// newEntry creates a kademlia peer from an OverlayPeer interface -func newEntry(p OverlayPeer) *entry { +// newEntry creates a kademlia peer from a *Peer +func newEntry(p *BzzAddr) *entry { return &entry{ - OverlayPeer: p, - seenAt: time.Now(), + BzzAddr: p, + seenAt: time.Now(), } } -// Bin is the binary (bitvector) serialisation of the entry address -func (e *entry) Bin() string { - return pot.ToBin(e.addr().Address()) -} - // Label is a short tag for the entry for debug func Label(e *entry) string { return fmt.Sprintf("%s (%d)", e.Hex()[:4], e.retries) @@ -152,29 +129,12 @@ func Label(e *entry) string { // Hex is the hexadecimal serialisation of the entry address func (e *entry) Hex() string { - return fmt.Sprintf("%x", e.addr().Address()) + return fmt.Sprintf("%x", e.Address()) } -// String is the short tag for the entry -func (e *entry) String() string { - return fmt.Sprintf("%s (%d)", e.Hex()[:8], e.retries) -} - -// addr returns the kad peer record (OverlayAddr) corresponding to the entry -func (e *entry) addr() OverlayAddr { - a, _ := e.OverlayPeer.(OverlayAddr) - return a -} - -// conn returns the connected peer (OverlayPeer) corresponding to the entry -func (e *entry) conn() OverlayConn { - c, _ := e.OverlayPeer.(OverlayConn) - return c -} - -// Register enters each OverlayAddr as kademlia peer record into the +// Register enters each address as kademlia peer record into the // database of known peer addresses -func (k *Kademlia) Register(peers []OverlayAddr) error { +func (k *Kademlia) Register(peers ...*BzzAddr) error { k.lock.Lock() defer k.lock.Unlock() var known, size int @@ -203,7 +163,6 @@ func (k *Kademlia) Register(peers []OverlayAddr) error { if k.addrCountC != nil && size-known > 0 { k.addrCountC <- k.addrs.Size() } - // log.Trace(fmt.Sprintf("%x registered %v peers, %v known, total: %v", k.BaseAddr()[:4], size, known, k.addrs.Size())) 
k.sendNeighbourhoodDepthChange() return nil @@ -212,7 +171,7 @@ func (k *Kademlia) Register(peers []OverlayAddr) error { // SuggestPeer returns a known peer for the lowest proximity bin for the // lowest bincount below depth // naturally if there is an empty row it returns a peer for that -func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) { +func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) { k.lock.Lock() defer k.lock.Unlock() minsize := k.MinBinSize @@ -224,15 +183,18 @@ func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) { if po < depth { return false } - a = k.callable(val) + e := val.(*entry) + c := k.callable(e) + if c { + a = e.BzzAddr + } ppo = po - return a == nil + return !c }) if a != nil { log.Trace(fmt.Sprintf("%08x candidate nearest neighbour found: %v (%v)", k.BaseAddr()[:4], a, ppo)) return a, 0, false } - // log.Trace(fmt.Sprintf("%08x no candidate nearest neighbours to connect to (Depth: %v, minProxSize: %v) %#v", k.BaseAddr()[:4], depth, k.MinProxBinSize, a)) var bpo []int prev := -1 @@ -250,7 +212,6 @@ func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) { }) // all buckets are full, ie., minsize == k.MinBinSize if len(bpo) == 0 { - // log.Debug(fmt.Sprintf("%08x: all bins saturated", k.BaseAddr()[:4])) return nil, 0, false } // as long as we got candidate peers to connect to @@ -264,8 +225,12 @@ func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) { return false } return f(func(val pot.Val, _ int) bool { - a = k.callable(val) - return a == nil + e := val.(*entry) + c := k.callable(e) + if c { + a = e.BzzAddr + } + return !c }) }) // found a candidate @@ -282,25 +247,26 @@ func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) { } // On inserts the peer as a kademlia peer into the live peers -func (k *Kademlia) On(p OverlayConn) (uint8, bool) { +func (k *Kademlia) On(p *Peer) (uint8, bool) { k.lock.Lock() defer k.lock.Unlock() - e := newEntry(p) var ins bool 
k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(v pot.Val) pot.Val { // if not found live if v == nil { ins = true // insert new online peer into conns - return e + return p } // found among live peers, do nothing return v }) if ins { + a := newEntry(p.BzzAddr) + a.conn = p // insert new online peer into addrs k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val { - return e + return a }) // send new address count value only if the peer is inserted if k.addrCountC != nil { @@ -324,6 +290,8 @@ func (k *Kademlia) On(p OverlayConn) (uint8, bool) { // Not receiving from the returned channel will block On function // when the neighbourhood depth is changed. func (k *Kademlia) NeighbourhoodDepthC() <-chan int { + k.lock.Lock() + defer k.lock.Unlock() if k.nDepthC == nil { k.nDepthC = make(chan int) } @@ -357,7 +325,7 @@ func (k *Kademlia) AddrCountC() <-chan int { } // Off removes a peer from among live peers -func (k *Kademlia) Off(p OverlayConn) { +func (k *Kademlia) Off(p *Peer) { k.lock.Lock() defer k.lock.Unlock() var del bool @@ -367,7 +335,7 @@ func (k *Kademlia) Off(p OverlayConn) { panic(fmt.Sprintf("connected peer not found %v", p)) } del = true - return newEntry(p.Off()) + return newEntry(p.BzzAddr) }) if del { @@ -383,7 +351,7 @@ func (k *Kademlia) Off(p OverlayConn) { } } -func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(conn OverlayConn, po int) bool) { +func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(conn *Peer, po int) bool) { k.lock.RLock() defer k.lock.RUnlock() @@ -403,7 +371,7 @@ func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(con for bin := startPo; bin <= endPo; bin++ { f(func(val pot.Val, _ int) bool { - return eachBinFunc(val.(*entry).conn(), bin) + return eachBinFunc(val.(*Peer), bin) }) } return true @@ -413,13 +381,13 @@ func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(con // EachConn is an iterator with args 
(base, po, f) applies f to each live peer // that has proximity order po or less as measured from the base // if base is nil, kademlia base address is used -func (k *Kademlia) EachConn(base []byte, o int, f func(OverlayConn, int, bool) bool) { +func (k *Kademlia) EachConn(base []byte, o int, f func(*Peer, int, bool) bool) { k.lock.RLock() defer k.lock.RUnlock() k.eachConn(base, o, f) } -func (k *Kademlia) eachConn(base []byte, o int, f func(OverlayConn, int, bool) bool) { +func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int, bool) bool) { if len(base) == 0 { base = k.base } @@ -428,20 +396,20 @@ func (k *Kademlia) eachConn(base []byte, o int, f func(OverlayConn, int, bool) b if po > o { return true } - return f(val.(*entry).conn(), po, po >= depth) + return f(val.(*Peer), po, po >= depth) }) } // EachAddr called with (base, po, f) is an iterator applying f to each known peer // that has proximity order po or less as measured from the base // if base is nil, kademlia base address is used -func (k *Kademlia) EachAddr(base []byte, o int, f func(OverlayAddr, int, bool) bool) { +func (k *Kademlia) EachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool) { k.lock.RLock() defer k.lock.RUnlock() k.eachAddr(base, o, f) } -func (k *Kademlia) eachAddr(base []byte, o int, f func(OverlayAddr, int, bool) bool) { +func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool) { if len(base) == 0 { base = k.base } @@ -450,7 +418,7 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(OverlayAddr, int, bool) b if po > o { return true } - return f(val.(*entry).addr(), po, po >= depth) + return f(val.(*entry).BzzAddr, po, po >= depth) }) } @@ -472,12 +440,11 @@ func (k *Kademlia) neighbourhoodDepth() (depth int) { return depth } -// callable when called with val, -func (k *Kademlia) callable(val pot.Val) OverlayAddr { - e := val.(*entry) +// callable decides if an address entry represents a callable peer +func (k *Kademlia) callable(e 
*entry) bool { // not callable if peer is live or exceeded maxRetries - if e.conn() != nil || e.retries > k.MaxRetries { - return nil + if e.conn != nil || e.retries > k.MaxRetries { + return false } // calculate the allowed number of retries based on time lapsed since last seen timeAgo := int64(time.Since(e.seenAt)) @@ -491,17 +458,17 @@ func (k *Kademlia) callable(val pot.Val) OverlayAddr { // peer can be retried again if retries < e.retries { log.Trace(fmt.Sprintf("%08x: %v long time since last try (at %v) needed before retry %v, wait only warrants %v", k.BaseAddr()[:4], e, timeAgo, e.retries, retries)) - return nil + return false } // function to sanction or prevent suggesting a peer - if k.Reachable != nil && !k.Reachable(e.addr()) { + if k.Reachable != nil && !k.Reachable(e.BzzAddr) { log.Trace(fmt.Sprintf("%08x: peer %v is temporarily not callable", k.BaseAddr()[:4], e)) - return nil + return false } e.retries++ log.Trace(fmt.Sprintf("%08x: peer %v is callable", k.BaseAddr()[:4], e)) - return e.addr() + return true } // BaseAddr return the kademlia base address @@ -516,7 +483,8 @@ func (k *Kademlia) String() string { return k.string() } -// String returns kademlia table + kaddb table displayed with ascii +// string returns kademlia table + kaddb table displayed with ascii +// caller must hold the lock func (k *Kademlia) string() string { wsrow := " " var rows []string @@ -538,7 +506,7 @@ func (k *Kademlia) string() string { row := []string{fmt.Sprintf("%2d", size)} rest -= size f(func(val pot.Val, vpo int) bool { - e := val.(*entry) + e := val.(*Peer) row = append(row, fmt.Sprintf("%x", e.Address()[:2])) rowlen++ return rowlen < 4 @@ -594,8 +562,9 @@ type PeerPot struct { EmptyBins []int } -// NewPeerPotMap creates a map of pot record of OverlayAddr with keys +// NewPeerPotMap creates a map of pot record of *BzzAddr with keys // as hexadecimal representations of the address. 
+// used for testing only func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot { // create a table of all nodes for health check np := pot.NewPot(nil, 0) @@ -640,6 +609,7 @@ func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot { // saturation returns the lowest proximity order that the bin for that order // has less than n peers +// It is used in Healthy function for testing only func (k *Kademlia) saturation(n int) int { prev := -1 k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { @@ -654,7 +624,7 @@ func (k *Kademlia) saturation(n int) int { } // full returns true if all required bins have connected peers. -// It is used in Healthy function. +// It is used in Healthy function for testing only func (k *Kademlia) full(emptyBins []int) (full bool) { prev := 0 e := len(emptyBins) @@ -688,10 +658,13 @@ func (k *Kademlia) full(emptyBins []int) (full bool) { return e == 0 } +// knowNearestNeighbours tests if all known nearest neighbours given as arguments +// are found in the addressbook +// It is used in Healthy function for testing only func (k *Kademlia) knowNearestNeighbours(peers [][]byte) bool { pm := make(map[string]bool) - k.eachAddr(nil, 255, func(p OverlayAddr, po int, nn bool) bool { + k.eachAddr(nil, 255, func(p *BzzAddr, po int, nn bool) bool { if !nn { return false } @@ -709,10 +682,13 @@ func (k *Kademlia) knowNearestNeighbours(peers [][]byte) bool { return true } +// gotNearestNeighbours tests if all known nearest neighbours given as arguments +// are connected peers +// It is used in Healthy function for testing only func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) { pm := make(map[string]bool) - k.eachConn(nil, 255, func(p OverlayConn, po int, nn bool) bool { + k.eachConn(nil, 255, func(p *Peer, po int, nn bool) bool { if !nn { return false } @@ -735,6 +711,7 @@ func (k *Kademlia) gotNearestNeighbours(peers 
[][]byte) (got bool, n int, missin } // Health state of the Kademlia +// used for testing only type Health struct { KnowNN bool // whether node knows all its nearest neighbours GotNN bool // whether node is connected to all its nearest neighbours @@ -746,6 +723,7 @@ type Health struct { // Healthy reports the health state of the kademlia connectivity // returns a Health struct +// used for testing only func (k *Kademlia) Healthy(pp *PeerPot) *Health { k.lock.RLock() defer k.lock.RUnlock() diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/priorityqueue/priorityqueue.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/priorityqueue/priorityqueue.go index fab638c9e2..5385026054 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/priorityqueue/priorityqueue.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/priorityqueue/priorityqueue.go @@ -28,10 +28,13 @@ package priorityqueue import ( "context" "errors" + + "github.com/ethereum/go-ethereum/log" ) var ( - errContention = errors.New("queue contention") + ErrContention = errors.New("contention") + errBadPriority = errors.New("bad priority") wakey = struct{}{} @@ -39,7 +42,7 @@ var ( // PriorityQueue is the basic structure type PriorityQueue struct { - queues []chan interface{} + Queues []chan interface{} wakeup chan struct{} } @@ -50,27 +53,29 @@ func New(n int, l int) *PriorityQueue { queues[i] = make(chan interface{}, l) } return &PriorityQueue{ - queues: queues, + Queues: queues, wakeup: make(chan struct{}, 1), } } // Run is a forever loop popping items from the queues func (pq *PriorityQueue) Run(ctx context.Context, f func(interface{})) { - top := len(pq.queues) - 1 + top := len(pq.Queues) - 1 p := top READ: for { - q := pq.queues[p] + q := pq.Queues[p] select { case <-ctx.Done(): return case x := <-q: + log.Trace("priority.queue f(x)", "p", p, "len(Queues[p])", len(pq.Queues[p])) f(x) p = top default: if p > 0 { p-- + log.Trace("priority.queue p > 0", "p", p) 
continue READ } p = top @@ -78,6 +83,7 @@ READ: case <-ctx.Done(): return case <-pq.wakeup: + log.Trace("priority.queue wakeup", "p", p) } } } @@ -85,23 +91,15 @@ READ: // Push pushes an item to the appropriate queue specified in the priority argument // if context is given it waits until either the item is pushed or the Context aborts -// otherwise returns errContention if the queue is full -func (pq *PriorityQueue) Push(ctx context.Context, x interface{}, p int) error { - if p < 0 || p >= len(pq.queues) { +func (pq *PriorityQueue) Push(x interface{}, p int) error { + if p < 0 || p >= len(pq.Queues) { return errBadPriority } - if ctx == nil { - select { - case pq.queues[p] <- x: - default: - return errContention - } - } else { - select { - case pq.queues[p] <- x: - case <-ctx.Done(): - return ctx.Err() - } + log.Trace("priority.queue push", "p", p, "len(Queues[p])", len(pq.Queues[p])) + select { + case pq.Queues[p] <- x: + default: + return ErrContention } select { case pq.wakeup <- wakey: diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go index 7f7ca5eed5..d509d157bb 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go @@ -44,7 +44,7 @@ const ( // BzzSpec is the spec of the generic swarm handshake var BzzSpec = &protocols.Spec{ Name: "bzz", - Version: 6, + Version: 7, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ HandshakeMsg{}, @@ -54,7 +54,7 @@ var BzzSpec = &protocols.Spec{ // DiscoverySpec is the spec for the bzz discovery subprotocols var DiscoverySpec = &protocols.Spec{ Name: "hive", - Version: 5, + Version: 6, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ peersMsg{}, @@ -62,32 +62,6 @@ var DiscoverySpec = &protocols.Spec{ }, } -// Addr interface that peerPool needs -type Addr interface { - OverlayPeer - Over() []byte - Under() []byte - String() string 
- Update(OverlayAddr) OverlayAddr -} - -// Peer interface represents an live peer connection -type Peer interface { - Addr // the address of a peer - Conn // the live connection (protocols.Peer) - LastActive() time.Time // last time active -} - -// Conn interface represents an live peer connection -type Conn interface { - ID() discover.NodeID // the key that uniquely identifies the Node for the peerPool - Handshake(context.Context, interface{}, func(interface{}) error) (interface{}, error) // can send messages - Send(context.Context, interface{}) error // can send messages - Drop(error) // disconnect this peer - Run(func(context.Context, interface{}) error) error // the run function to run a protocol - Off() OverlayAddr -} - // BzzConfig captures the config params used by the hive type BzzConfig struct { OverlayAddr []byte // base address of the overlay network @@ -114,7 +88,7 @@ type Bzz struct { // * bzz config // * overlay driver // * peer store -func NewBzz(config *BzzConfig, kad Overlay, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz { +func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz { return &Bzz{ Hive: NewHive(config.HiveParams, kad, store), NetworkID: config.NetworkID, @@ -131,7 +105,7 @@ func (b *Bzz) UpdateLocalAddr(byteaddr []byte) *BzzAddr { b.localAddr = b.localAddr.Update(&BzzAddr{ UAddr: byteaddr, OAddr: b.localAddr.OAddr, - }).(*BzzAddr) + }) return b.localAddr } @@ -274,7 +248,7 @@ type BzzPeer struct { LightNode bool } -func NewBzzTestPeer(p *protocols.Peer, addr *BzzAddr) *BzzPeer { +func NewBzzPeer(p *protocols.Peer, addr *BzzAddr) *BzzPeer { return &BzzPeer{ Peer: p, localAddr: addr, @@ -282,11 +256,6 @@ func NewBzzTestPeer(p *protocols.Peer, addr *BzzAddr) *BzzPeer { } } -// Off returns the overlay peer record for offline persistence -func (p *BzzPeer) Off() OverlayAddr { - return p.BzzAddr -} - // LastActive returns 
the time the peer was last active func (p *BzzPeer) LastActive() time.Time { return p.lastActive @@ -388,8 +357,8 @@ func (a *BzzAddr) ID() discover.NodeID { } // Update updates the underlay address of a peer record -func (a *BzzAddr) Update(na OverlayAddr) OverlayAddr { - return &BzzAddr{a.OAddr, na.(Addr).Under()} +func (a *BzzAddr) Update(na *BzzAddr) *BzzAddr { + return &BzzAddr{a.OAddr, na.UAddr} } // String pretty prints the address @@ -410,9 +379,9 @@ func RandomAddr() *BzzAddr { } // NewNodeIDFromAddr transforms the underlay address to an adapters.NodeID -func NewNodeIDFromAddr(addr Addr) discover.NodeID { - log.Info(fmt.Sprintf("uaddr=%s", string(addr.Under()))) - node := discover.MustParseNode(string(addr.Under())) +func NewNodeIDFromAddr(addr *BzzAddr) discover.NodeID { + log.Info(fmt.Sprintf("uaddr=%s", string(addr.UAddr))) + node := discover.MustParseNode(string(addr.UAddr)) return node.ID } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go index 74f9d98ee9..2c7a18b098 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go @@ -94,7 +94,7 @@ func New(services map[string]ServiceFunc) (s *Simulation) { } s.Net = simulations.NewNetwork( - adapters.NewSimAdapter(adapterServices), + adapters.NewTCPAdapter(adapterServices), &simulations.NetworkConfig{ID: "0"}, ) @@ -112,7 +112,7 @@ type Result struct { } // Run calls the RunFunc function while taking care of -// cancelation provided through the Context. +// cancellation provided through the Context. 
func (s *Simulation) Run(ctx context.Context, f RunFunc) (r Result) { //if the option is set to run a HTTP server with the simulation, //init the server and start it @@ -164,17 +164,6 @@ var maxParallelCleanups = 10 func (s *Simulation) Close() { close(s.done) - // Close all connections before calling the Network Shutdown. - // It is possible that p2p.Server.Stop will block if there are - // existing connections. - for _, c := range s.Net.Conns { - if c.Up { - s.Net.Disconnect(c.One, c.Other) - } - } - s.shutdownWG.Wait() - s.Net.Shutdown() - sem := make(chan struct{}, maxParallelCleanups) s.mu.RLock() cleanupFuncs := make([]func(), len(s.cleanupFuncs)) @@ -206,6 +195,9 @@ func (s *Simulation) Close() { } close(s.runC) } + + s.shutdownWG.Wait() + s.Net.Shutdown() } // Done returns a channel that is closed when the simulation diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go index 36040339d3..d0f27eebcf 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go @@ -19,12 +19,11 @@ package stream import ( "context" "errors" - "time" - "github.com/ethereum/go-ethereum/common" + "fmt" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/discover" - cp "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/spancontext" @@ -46,39 +45,34 @@ var ( ) type Delivery struct { - db *storage.DBAPI - overlay network.Overlay - receiveC chan *ChunkDeliveryMsg - getPeer func(discover.NodeID) *Peer + chunkStore storage.SyncChunkStore + kad *network.Kademlia + getPeer func(discover.NodeID) *Peer } -func NewDelivery(overlay network.Overlay, db *storage.DBAPI) *Delivery { - d := &Delivery{ - db: db, - overlay: overlay, - receiveC: 
make(chan *ChunkDeliveryMsg, deliveryCap), +func NewDelivery(kad *network.Kademlia, chunkStore storage.SyncChunkStore) *Delivery { + return &Delivery{ + chunkStore: chunkStore, + kad: kad, } - - go d.processReceivedChunks() - return d } // SwarmChunkServer implements Server type SwarmChunkServer struct { deliveryC chan []byte batchC chan []byte - db *storage.DBAPI + chunkStore storage.ChunkStore currentLen uint64 quit chan struct{} } // NewSwarmChunkServer is SwarmChunkServer constructor -func NewSwarmChunkServer(db *storage.DBAPI) *SwarmChunkServer { +func NewSwarmChunkServer(chunkStore storage.ChunkStore) *SwarmChunkServer { s := &SwarmChunkServer{ - deliveryC: make(chan []byte, deliveryCap), - batchC: make(chan []byte), - db: db, - quit: make(chan struct{}), + deliveryC: make(chan []byte, deliveryCap), + batchC: make(chan []byte), + chunkStore: chunkStore, + quit: make(chan struct{}), } go s.processDeliveries() return s @@ -123,13 +117,11 @@ func (s *SwarmChunkServer) Close() { // GetData retrives chunk data from db store func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) { - chunk, err := s.db.Get(ctx, storage.Address(key)) - if err == storage.ErrFetching { - <-chunk.ReqC - } else if err != nil { + chunk, err := s.chunkStore.Get(ctx, storage.Address(key)) + if err != nil { return nil, err } - return chunk.SData, nil + return chunk.Data(), nil } // RetrieveRequestMsg is the protocol msg for chunk retrieve requests @@ -153,57 +145,39 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * return err } streamer := s.Server.(*SwarmChunkServer) - chunk, created := d.db.GetOrCreateRequest(ctx, req.Addr) - if chunk.ReqC != nil { - if created { - if err := d.RequestFromPeers(ctx, chunk.Addr[:], true, sp.ID()); err != nil { - log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err) - chunk.SetErrored(storage.ErrChunkForward) - return nil - } + + var cancel func() + // TODO: 
do something with this hardcoded timeout, maybe use TTL in the future + ctx, cancel = context.WithTimeout(context.WithValue(ctx, "peer", sp.ID().String()), network.RequestTimeout) + + go func() { + select { + case <-ctx.Done(): + case <-streamer.quit: } - go func() { - var osp opentracing.Span - ctx, osp = spancontext.StartSpan( - ctx, - "waiting.delivery") - defer osp.Finish() - - t := time.NewTimer(10 * time.Minute) - defer t.Stop() - - log.Debug("waiting delivery", "peer", sp.ID(), "hash", req.Addr, "node", common.Bytes2Hex(d.overlay.BaseAddr()), "created", created) - start := time.Now() - select { - case <-chunk.ReqC: - log.Debug("retrieve request ReqC closed", "peer", sp.ID(), "hash", req.Addr, "time", time.Since(start)) - case <-t.C: - log.Debug("retrieve request timeout", "peer", sp.ID(), "hash", req.Addr) - chunk.SetErrored(storage.ErrChunkTimeout) - return - } - chunk.SetErrored(nil) - - if req.SkipCheck { - err := sp.Deliver(ctx, chunk, s.priority) - if err != nil { - log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err) - sp.Drop(err) - } + cancel() + }() + + go func() { + chunk, err := d.chunkStore.Get(ctx, req.Addr) + if err != nil { + log.Warn("ChunkStore.Get can not retrieve chunk", "err", err) + return + } + if req.SkipCheck { + err = sp.Deliver(ctx, chunk, s.priority) + if err != nil { + log.Warn("ERROR in handleRetrieveRequestMsg", "err", err) } - streamer.deliveryC <- chunk.Addr[:] - }() - return nil - } - // TODO: call the retrieve function of the outgoing syncer - if req.SkipCheck { - log.Trace("deliver", "peer", sp.ID(), "hash", chunk.Addr) - if length := len(chunk.SData); length < 9 { - log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr) + return } - return sp.Deliver(ctx, chunk, s.priority) - } - streamer.deliveryC <- chunk.Addr[:] + select { + case streamer.deliveryC <- chunk.Address()[:]: + case <-streamer.quit: + } + + }() + return nil } @@ -213,6 +187,7 @@ type 
ChunkDeliveryMsg struct { peer *Peer // set in handleChunkDeliveryMsg } +// TODO: Fix context SNAFU func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error { var osp opentracing.Span ctx, osp = spancontext.StartSpan( @@ -220,81 +195,63 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch "chunk.delivery") defer osp.Finish() - req.peer = sp - d.receiveC <- req - return nil -} + processReceivedChunksCount.Inc(1) -func (d *Delivery) processReceivedChunks() { -R: - for req := range d.receiveC { - processReceivedChunksCount.Inc(1) - - if len(req.SData) > cp.DefaultSize+8 { - log.Warn("received chunk is bigger than expected", "len", len(req.SData)) - continue R - } - - // this should be has locally - chunk, err := d.db.Get(context.TODO(), req.Addr) - if err == nil { - continue R - } - if err != storage.ErrFetching { - log.Error("processReceivedChunks db error", "addr", req.Addr, "err", err, "chunk", chunk) - continue R - } - select { - case <-chunk.ReqC: - log.Error("someone else delivered?", "hash", chunk.Addr.Hex()) - continue R - default: - } - - chunk.SData = req.SData - d.db.Put(context.TODO(), chunk) - - go func(req *ChunkDeliveryMsg) { - err := chunk.WaitToStore() + go func() { + req.peer = sp + err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData)) + if err != nil { if err == storage.ErrChunkInvalid { + // we removed this log because it spams the logs + // TODO: Enable this log line + // log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", req.Addr, ) req.peer.Drop(err) } - }(req) - } + } + }() + return nil } // RequestFromPeers sends a chunk retrieve request to -func (d *Delivery) RequestFromPeers(ctx context.Context, hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error { - var success bool - var err error +func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (*discover.NodeID, chan struct{}, error) { 
requestFromPeersCount.Inc(1) + var sp *Peer + spID := req.Source - d.overlay.EachConn(hash, 255, func(p network.OverlayConn, po int, nn bool) bool { - spId := p.(network.Peer).ID() - for _, p := range peersToSkip { - if p == spId { - log.Trace("Delivery.RequestFromPeers: skip peer", "peer", spId) + if spID != nil { + sp = d.getPeer(*spID) + if sp == nil { + return nil, nil, fmt.Errorf("source peer %v not found", spID.String()) + } + } else { + d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int, nn bool) bool { + id := p.ID() + // TODO: skip light nodes that do not accept retrieve requests + if req.SkipPeer(id.String()) { + log.Trace("Delivery.RequestFromPeers: skip peer", "peer id", id) return true } - } - sp := d.getPeer(spId) + sp = d.getPeer(id) + if sp == nil { + log.Warn("Delivery.RequestFromPeers: peer not found", "id", id) + return true + } + spID = &id + return false + }) if sp == nil { - log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId) - return true + return nil, nil, errors.New("no peer found") } - err = sp.SendPriority(ctx, &RetrieveRequestMsg{ - Addr: hash, - SkipCheck: skipCheck, - }, Top) - if err != nil { - return true - } - requestFromPeersEachCount.Inc(1) - success = true - return false - }) - if success { - return nil } - return errors.New("no peer found") + + err := sp.SendPriority(ctx, &RetrieveRequestMsg{ + Addr: req.Addr, + SkipCheck: req.SkipCheck, + }, Top) + if err != nil { + return nil, nil, err + } + requestFromPeersEachCount.Inc(1) + + return spID, sp.quit, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go index a19f635892..2e1a81e822 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go @@ -18,9 +18,7 @@ package stream import ( "context" - "errors" "fmt" - "sync" "time" 
"github.com/ethereum/go-ethereum/metrics" @@ -31,6 +29,8 @@ import ( opentracing "github.com/opentracing/opentracing-go" ) +var syncBatchTimeout = 30 * time.Second + // Stream defines a unique stream identifier. type Stream struct { // Name is used for Client and Server functions identification. @@ -117,8 +117,7 @@ func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err e go func() { if err := p.SendOfferedHashes(os, from, to); err != nil { - log.Warn("SendOfferedHashes dropping peer", "err", err) - p.Drop(err) + log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err) } }() @@ -135,8 +134,7 @@ func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err e } go func() { if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil { - log.Warn("SendOfferedHashes dropping peer", "err", err) - p.Drop(err) + log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err) } }() } @@ -202,38 +200,52 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg if err != nil { return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err) } - wg := sync.WaitGroup{} + + ctr := 0 + errC := make(chan error) + ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout) + + ctx = context.WithValue(ctx, "source", p.ID().String()) for i := 0; i < len(hashes); i += HashSize { hash := hashes[i : i+HashSize] if wait := c.NeedData(ctx, hash); wait != nil { + ctr++ want.Set(i/HashSize, true) - wg.Add(1) // create request and wait until the chunk data arrives and is stored - go func(w func()) { - w() - wg.Done() + go func(w func(context.Context) error) { + select { + case errC <- w(ctx): + case <-ctx.Done(): + } }(wait) } } - // done := make(chan bool) - // go func() { - // wg.Wait() - // close(done) - // }() - // go func() { - // select { - // case <-done: - // s.next <- s.batchDone(p, req, hashes) - // case <-time.After(1 * 
time.Second): - // p.Drop(errors.New("timeout waiting for batch to be delivered")) - // } - // }() + go func() { - wg.Wait() + defer cancel() + for i := 0; i < ctr; i++ { + select { + case err := <-errC: + if err != nil { + log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err) + p.Drop(err) + return + } + case <-ctx.Done(): + log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err()) + return + case <-c.quit: + log.Debug("client.handleOfferedHashesMsg() quit") + return + } + } select { case c.next <- c.batchDone(p, req, hashes): case <-c.quit: + log.Debug("client.handleOfferedHashesMsg() quit") + case <-ctx.Done(): + log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err()) } }() // only send wantedKeysMsg if all missing chunks of the previous batch arrived @@ -242,7 +254,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg c.sessionAt = req.From } from, to := c.nextBatch(req.To + 1) - log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To) + log.Trace("set next batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "addr", p.streamer.addr.ID()) if from == to { return nil } @@ -254,25 +266,25 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg To: to, } go func() { + log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To) select { - case <-time.After(120 * time.Second): - log.Warn("handleOfferedHashesMsg timeout, so dropping peer") - p.Drop(errors.New("handle offered hashes timeout")) - return case err := <-c.next: if err != nil { - log.Warn("c.next dropping peer", "err", err) + log.Warn("c.next error dropping peer", "err", err) p.Drop(err) return } case <-c.quit: + log.Debug("client.handleOfferedHashesMsg() quit") + return + case <-ctx.Done(): + 
log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err()) return } log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To) err := p.SendPriority(ctx, msg, c.priority) if err != nil { - log.Warn("SendPriority err, so dropping peer", "err", err) - p.Drop(err) + log.Warn("SendPriority error", "err", err) } }() return nil @@ -306,8 +318,7 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) // launch in go routine since GetBatch blocks until new hashes arrive go func() { if err := p.SendOfferedHashes(s, req.From, req.To); err != nil { - log.Warn("SendOfferedHashes dropping peer", "err", err) - p.Drop(err) + log.Warn("SendOfferedHashes error", "err", err) } }() // go p.SendOfferedHashes(s, req.From, req.To) @@ -327,11 +338,7 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) if err != nil { return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err) } - chunk := storage.NewChunk(hash, nil) - chunk.SData = data - if length := len(chunk.SData); length < 9 { - log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr) - } + chunk := storage.NewChunk(hash, data) if err := p.Deliver(ctx, chunk, s.priority); err != nil { return err } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go index 80b9ab711a..5fdaa7b878 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go @@ -33,8 +33,6 @@ import ( opentracing "github.com/opentracing/opentracing-go" ) -var sendTimeout = 30 * time.Second - type notFoundError struct { t string s Stream @@ -83,8 +81,40 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { ctx, cancel := context.WithCancel(context.Background()) go p.pq.Run(ctx, func(i interface{}) { 
wmsg := i.(WrappedPriorityMsg) - p.Send(wmsg.Context, wmsg.Msg) + err := p.Send(wmsg.Context, wmsg.Msg) + if err != nil { + log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err) + p.Drop(err) + } }) + + // basic monitoring for pq contention + go func(pq *pq.PriorityQueue) { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + var len_maxi int + var cap_maxi int + for k := range pq.Queues { + if len_maxi < len(pq.Queues[k]) { + len_maxi = len(pq.Queues[k]) + } + + if cap_maxi < cap(pq.Queues[k]) { + cap_maxi = cap(pq.Queues[k]) + } + } + + metrics.GetOrRegisterGauge(fmt.Sprintf("pq_len_%s", p.ID().TerminalString()), nil).Update(int64(len_maxi)) + metrics.GetOrRegisterGauge(fmt.Sprintf("pq_cap_%s", p.ID().TerminalString()), nil).Update(int64(cap_maxi)) + case <-p.quit: + return + } + } + }(p.pq) + go func() { <-p.quit cancel() @@ -93,7 +123,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { } // Deliver sends a storeRequestMsg protocol message to the peer -func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8) error { +func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8) error { var sp opentracing.Span ctx, sp = spancontext.StartSpan( ctx, @@ -101,8 +131,8 @@ func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8 defer sp.Finish() msg := &ChunkDeliveryMsg{ - Addr: chunk.Addr, - SData: chunk.SData, + Addr: chunk.Address(), + SData: chunk.Data(), } return p.SendPriority(ctx, msg, priority) } @@ -111,13 +141,16 @@ func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8 func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error { defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now()) metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1) - cctx, cancel 
:= context.WithTimeout(context.Background(), sendTimeout) - defer cancel() wmsg := WrappedPriorityMsg{ Context: ctx, Msg: msg, } - return p.pq.Push(cctx, wmsg, int(priority)) + err := p.pq.Push(wmsg, int(priority)) + if err == pq.ErrContention { + log.Warn("dropping peer on priority queue contention", "peer", p.ID()) + p.Drop(err) + } + return err } // SendOfferedHashes sends OfferedHashesMsg protocol msg @@ -132,7 +165,7 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error { if err != nil { return err } - // true only when quiting + // true only when quitting if len(hashes) == 0 { return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go index cd0580a0c0..319fc62c9f 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go @@ -32,10 +32,8 @@ import ( "github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network/stream/intervals" "github.com/ethereum/go-ethereum/swarm/pot" - "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" - opentracing "github.com/opentracing/opentracing-go" ) const ( @@ -43,8 +41,8 @@ const ( Mid High Top - PriorityQueue // number of queues - PriorityQueueCap = 32 // queue capacity + PriorityQueue = 4 // number of priority queues - Low, Mid, High, Top + PriorityQueueCap = 4096 // queue capacity HashSize = 32 ) @@ -73,7 +71,7 @@ type RegistryOptions struct { } // NewRegistry is Streamer constructor -func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, intervalsStore state.Store, options *RegistryOptions) *Registry { +func NewRegistry(addr *network.BzzAddr, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions) 
*Registry { if options == nil { options = &RegistryOptions{} } @@ -93,13 +91,13 @@ func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, i streamer.api = NewAPI(streamer) delivery.getPeer = streamer.getPeer streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) { - return NewSwarmChunkServer(delivery.db), nil + return NewSwarmChunkServer(delivery.chunkStore), nil }) streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) { - return NewSwarmSyncerClient(p, delivery.db, false, NewStream(swarmChunkServerStreamName, t, live)) + return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live)) }) - RegisterSwarmSyncerServer(streamer, db) - RegisterSwarmSyncerClient(streamer, db) + RegisterSwarmSyncerServer(streamer, syncChunkStore) + RegisterSwarmSyncerClient(streamer, syncChunkStore) if options.DoSync { // latestIntC function ensures that @@ -130,7 +128,7 @@ func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, i // wait for kademlia table to be healthy time.Sleep(options.SyncUpdateDelay) - kad := streamer.delivery.overlay.(*network.Kademlia) + kad := streamer.delivery.kad depthC := latestIntC(kad.NeighbourhoodDepthC()) addressBookSizeC := latestIntC(kad.AddrCountC()) @@ -325,16 +323,6 @@ func (r *Registry) Quit(peerId discover.NodeID, s Stream) error { return peer.Send(context.TODO(), msg) } -func (r *Registry) Retrieve(ctx context.Context, chunk *storage.Chunk) error { - var sp opentracing.Span - ctx, sp = spancontext.StartSpan( - ctx, - "registry.retrieve") - defer sp.Finish() - - return r.delivery.RequestFromPeers(ctx, chunk.Addr[:], r.skipCheck) -} - func (r *Registry) NodeInfo() interface{} { return nil } @@ -398,9 +386,7 @@ func (r *Registry) Run(p *network.BzzPeer) error { // and they are no longer required after iteration, request to Quit // them will be send to appropriate peers. 
func (r *Registry) updateSyncing() { - // if overlay in not Kademlia, panic - kad := r.delivery.overlay.(*network.Kademlia) - + kad := r.delivery.kad // map of all SYNC streams for all peers // used at the and of the function to remove servers // that are not needed anymore @@ -421,8 +407,7 @@ func (r *Registry) updateSyncing() { r.peersMu.RUnlock() // request subscriptions for all nodes and bins - kad.EachBin(r.addr.Over(), pot.DefaultPof(256), 0, func(conn network.OverlayConn, bin int) bool { - p := conn.(network.Peer) + kad.EachBin(r.addr.Over(), pot.DefaultPof(256), 0, func(p *network.Peer, bin int) bool { log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), p.ID(), bin)) // bin is always less then 256 and it is safe to convert it to type uint8 @@ -461,10 +446,11 @@ func (r *Registry) updateSyncing() { func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error { peer := protocols.NewPeer(p, rw, Spec) - bzzPeer := network.NewBzzTestPeer(peer, r.addr) - r.delivery.overlay.On(bzzPeer) - defer r.delivery.overlay.Off(bzzPeer) - return r.Run(bzzPeer) + bp := network.NewBzzPeer(peer, r.addr) + np := network.NewPeer(bp, r.delivery.kad) + r.delivery.kad.On(np) + defer r.delivery.kad.Off(np) + return r.Run(bp) } // HandleMsg is the message handler that delegates incoming messages @@ -559,7 +545,7 @@ func (c client) NextInterval() (start, end uint64, err error) { // Client interface for incoming peer Streamer type Client interface { - NeedData(context.Context, []byte) func() + NeedData(context.Context, []byte) func(context.Context) error BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) Close() } @@ -653,7 +639,7 @@ func (c *clientParams) clientCreated() { // Spec is the spec of the streamer protocol var Spec = &protocols.Spec{ Name: "stream", - Version: 5, + Version: 6, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ UnsubscribeMsg{}, diff --git 
a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go index d7febe4a3e..e9811a6785 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go @@ -28,7 +28,6 @@ import ( ) const ( - // BatchSize = 2 BatchSize = 128 ) @@ -38,35 +37,37 @@ const ( // * (live/non-live historical) chunk syncing per proximity bin type SwarmSyncerServer struct { po uint8 - db *storage.DBAPI + store storage.SyncChunkStore sessionAt uint64 start uint64 + live bool quit chan struct{} } // NewSwarmSyncerServer is contructor for SwarmSyncerServer -func NewSwarmSyncerServer(live bool, po uint8, db *storage.DBAPI) (*SwarmSyncerServer, error) { - sessionAt := db.CurrentBucketStorageIndex(po) +func NewSwarmSyncerServer(live bool, po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) { + sessionAt := syncChunkStore.BinIndex(po) var start uint64 if live { start = sessionAt } return &SwarmSyncerServer{ po: po, - db: db, + store: syncChunkStore, sessionAt: sessionAt, start: start, + live: live, quit: make(chan struct{}), }, nil } -func RegisterSwarmSyncerServer(streamer *Registry, db *storage.DBAPI) { +func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) { streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) { po, err := ParseSyncBinKey(t) if err != nil { return nil, err } - return NewSwarmSyncerServer(live, po, db) + return NewSwarmSyncerServer(live, po, syncChunkStore) }) // streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) { // return NewOutgoingProvableSwarmSyncer(po, db) @@ -78,27 +79,35 @@ func (s *SwarmSyncerServer) Close() { close(s.quit) } -// GetSection retrieves the actual chunk from localstore +// GetData retrieves the actual chunk from netstore func (s *SwarmSyncerServer) GetData(ctx 
context.Context, key []byte) ([]byte, error) { - chunk, err := s.db.Get(ctx, storage.Address(key)) - if err == storage.ErrFetching { - <-chunk.ReqC - } else if err != nil { + chunk, err := s.store.Get(ctx, storage.Address(key)) + if err != nil { return nil, err } - return chunk.SData, nil + return chunk.Data(), nil } // GetBatch retrieves the next batch of hashes from the dbstore func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { var batch []byte i := 0 - if from == 0 { - from = s.start - } - if to <= from || from >= s.sessionAt { - to = math.MaxUint64 + if s.live { + if from == 0 { + from = s.start + } + if to <= from || from >= s.sessionAt { + to = math.MaxUint64 + } + } else { + if (to < from && to != 0) || from > s.sessionAt { + return nil, 0, 0, nil, nil + } + if to == 0 || to > s.sessionAt { + to = s.sessionAt + } } + var ticker *time.Ticker defer func() { if ticker != nil { @@ -119,8 +128,8 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6 } metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1) - err := s.db.Iterator(from, to, s.po, func(addr storage.Address, idx uint64) bool { - batch = append(batch, addr[:]...) + err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool { + batch = append(batch, key[:]...) 
i++ to = idx return i < BatchSize @@ -134,7 +143,7 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6 wait = true } - log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.db.CurrentBucketStorageIndex(s.po)) + log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po)) return batch, from, to, nil, nil } @@ -146,28 +155,26 @@ type SwarmSyncerClient struct { sessionReader storage.LazySectionReader retrieveC chan *storage.Chunk storeC chan *storage.Chunk - db *storage.DBAPI + store storage.SyncChunkStore // chunker storage.Chunker - currentRoot storage.Address - requestFunc func(chunk *storage.Chunk) - end, start uint64 - peer *Peer - ignoreExistingRequest bool - stream Stream + currentRoot storage.Address + requestFunc func(chunk *storage.Chunk) + end, start uint64 + peer *Peer + stream Stream } // NewSwarmSyncerClient is a contructor for provable data exchange syncer -func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool, stream Stream) (*SwarmSyncerClient, error) { +func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) { return &SwarmSyncerClient{ - db: db, - peer: p, - ignoreExistingRequest: ignoreExistingRequest, - stream: stream, + store: store, + peer: p, + stream: stream, }, nil } // // NewIncomingProvableSwarmSyncer is a contructor for provable data exchange syncer -// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Key, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient { +// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient { // 
retrieveC := make(storage.Chunk, chunksCap) // RunChunkRequestor(p, retrieveC) // storeC := make(storage.Chunk, chunksCap) @@ -204,26 +211,15 @@ func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool // RegisterSwarmSyncerClient registers the client constructor function for // to handle incoming sync streams -func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) { +func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) { streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) { - return NewSwarmSyncerClient(p, db, true, NewStream("SYNC", t, live)) + return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live)) }) } // NeedData -func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func()) { - chunk, _ := s.db.GetOrCreateRequest(ctx, key) - // TODO: we may want to request from this peer anyway even if the request exists - - // ignoreExistingRequest is temporary commented out until its functionality is verified. - // For now, this optimization can be disabled. - if chunk.ReqC == nil { //|| (s.ignoreExistingRequest && !created) { - return nil - } - // create request and wait until the chunk data arrives and is stored - return func() { - chunk.WaitToStore() - } +func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) { + return s.store.FetchFunc(ctx, key) } // BatchDone diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pot/doc.go b/vendor/github.com/ethereum/go-ethereum/swarm/pot/doc.go index 4c0a03065d..cb6faea57b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pot/doc.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pot/doc.go @@ -27,11 +27,11 @@ OR distance over finite set of integers). Methods take a comparison operator (pof, proximity order function) to compare two value types. 
The default pof assumes Val to be or project to a byte slice using -the reverse rank on the MSB first XOR logarithmic disctance. +the reverse rank on the MSB first XOR logarithmic distance. If the address space if limited, equality is defined as the maximum proximity order. -The container offers applicative (funcional) style methods on PO trees: +The container offers applicative (functional) style methods on PO trees: * adding/removing en element * swap (value based add/remove) * merging two PO trees (union) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go index 8459211ddb..b96649fea1 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go @@ -110,10 +110,10 @@ func (params *PssParams) WithPrivateKey(privatekey *ecdsa.PrivateKey) *PssParams // // Implements node.Service type Pss struct { - network.Overlay // we can get the overlayaddress from this - privateKey *ecdsa.PrivateKey // pss can have it's own independent key - w *whisper.Whisper // key and encryption backend - auxAPIs []rpc.API // builtins (handshake, test) can add APIs + *network.Kademlia // we can get the Kademlia address from this + privateKey *ecdsa.PrivateKey // pss can have it's own independent key + w *whisper.Whisper // key and encryption backend + auxAPIs []rpc.API // builtins (handshake, test) can add APIs // sending and forwarding fwdPool map[string]*protocols.Peer // keep track of all peers sitting on the pssmsg routing layer @@ -151,9 +151,9 @@ func (p *Pss) String() string { // Creates a new Pss instance. // -// In addition to params, it takes a swarm network overlay +// In addition to params, it takes a swarm network Kademlia // and a FileStore storage for message cache storage. 
-func NewPss(k network.Overlay, params *PssParams) (*Pss, error) { +func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) { if params.privateKey == nil { return nil, errors.New("missing private key for pss") } @@ -162,7 +162,7 @@ func NewPss(k network.Overlay, params *PssParams) (*Pss, error) { Version: pssVersion, } ps := &Pss{ - Overlay: k, + Kademlia: k, privateKey: params.privateKey, w: whisper.New(&whisper.DefaultConfig), quitC: make(chan struct{}), @@ -290,9 +290,9 @@ func (p *Pss) addAPI(api rpc.API) { p.auxAPIs = append(p.auxAPIs, api) } -// Returns the swarm overlay address of the pss node +// Returns the swarm Kademlia address of the pss node func (p *Pss) BaseAddr() []byte { - return p.Overlay.BaseAddr() + return p.Kademlia.BaseAddr() } // Returns the pss node's public key @@ -356,11 +356,11 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error { } if int64(pssmsg.Expire) < time.Now().Unix() { metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1) - log.Warn("pss filtered expired message", "from", common.ToHex(p.Overlay.BaseAddr()), "to", common.ToHex(pssmsg.To)) + log.Warn("pss filtered expired message", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", common.ToHex(pssmsg.To)) return nil } if p.checkFwdCache(pssmsg) { - log.Trace("pss relay block-cache match (process)", "from", common.ToHex(p.Overlay.BaseAddr()), "to", (common.ToHex(pssmsg.To))) + log.Trace("pss relay block-cache match (process)", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", (common.ToHex(pssmsg.To))) return nil } p.addFwdCache(pssmsg) @@ -442,13 +442,13 @@ func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, asy // will return false if using partial address func (p *Pss) isSelfRecipient(msg *PssMsg) bool { - return bytes.Equal(msg.To, p.Overlay.BaseAddr()) + return bytes.Equal(msg.To, p.Kademlia.BaseAddr()) } -// test match of leftmost bytes in given message to node's overlay address +// test match of leftmost bytes in 
given message to node's Kademlia address func (p *Pss) isSelfPossibleRecipient(msg *PssMsg) bool { - local := p.Overlay.BaseAddr() - return bytes.Equal(msg.To[:], local[:len(msg.To)]) + local := p.Kademlia.BaseAddr() + return bytes.Equal(msg.To, local[:len(msg.To)]) } ///////////////////////////////////////////////////////////////////// @@ -816,14 +816,7 @@ func (p *Pss) forward(msg *PssMsg) error { // send with kademlia // find the closest peer to the recipient and attempt to send sent := 0 - p.Overlay.EachConn(to, 256, func(op network.OverlayConn, po int, isproxbin bool) bool { - // we need p2p.protocols.Peer.Send - // cast and resolve - sp, ok := op.(senderPeer) - if !ok { - log.Crit("Pss cannot use kademlia peer type") - return false - } + p.Kademlia.EachConn(to, 256, func(sp *network.Peer, po int, isproxbin bool) bool { info := sp.Info() // check if the peer is running pss @@ -840,7 +833,7 @@ func (p *Pss) forward(msg *PssMsg) error { } // get the protocol peer from the forwarding peer cache - sendMsg := fmt.Sprintf("MSG TO %x FROM %x VIA %x", to, p.BaseAddr(), op.Address()) + sendMsg := fmt.Sprintf("MSG TO %x FROM %x VIA %x", to, p.BaseAddr(), sp.Address()) p.fwdPoolMu.RLock() pp := p.fwdPool[sp.Info().ID] p.fwdPoolMu.RUnlock() @@ -859,11 +852,11 @@ func (p *Pss) forward(msg *PssMsg) error { // - if the peer is end recipient but the full address has not been disclosed // - if the peer address matches the partial address fully // - if the peer is in proxbin - if len(msg.To) < addressLength && bytes.Equal(msg.To, op.Address()[:len(msg.To)]) { + if len(msg.To) < addressLength && bytes.Equal(msg.To, sp.Address()[:len(msg.To)]) { log.Trace(fmt.Sprintf("Pss keep forwarding: Partial address + full partial match")) return true } else if isproxbin { - log.Trace(fmt.Sprintf("%x is in proxbin, keep forwarding", common.ToHex(op.Address()))) + log.Trace(fmt.Sprintf("%x is in proxbin, keep forwarding", common.ToHex(sp.Address()))) return true } // at this point we stop 
forwarding, and the state is as follows: diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go index 6d805b8e29..40292e88f9 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go @@ -22,10 +22,9 @@ import ( "fmt" "io" "sync" - "time" "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/swarm/chunk" + ch "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/spancontext" opentracing "github.com/opentracing/opentracing-go" @@ -67,7 +66,6 @@ The hashing itself does use extra copies and allocation though, since it does ne var ( errAppendOppNotSuported = errors.New("Append operation not supported") - errOperationTimedOut = errors.New("operation timed out") ) type ChunkerParams struct { @@ -133,7 +131,7 @@ type TreeChunker struct { func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader { jp := &JoinerParams{ ChunkerParams: ChunkerParams{ - chunkSize: chunk.DefaultSize, + chunkSize: ch.DefaultSize, hashSize: int64(len(addr)), }, addr: addr, @@ -153,7 +151,7 @@ func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) ( tsp := &TreeSplitterParams{ SplitterParams: SplitterParams{ ChunkerParams: ChunkerParams{ - chunkSize: chunk.DefaultSize, + chunkSize: ch.DefaultSize, hashSize: putter.RefSize(), }, reader: data, @@ -201,11 +199,6 @@ func NewTreeSplitter(params *TreeSplitterParams) *TreeChunker { return tc } -// String() for pretty printing -func (c *Chunk) String() string { - return fmt.Sprintf("Key: %v TreeSize: %v Chunksize: %v", c.Addr.Log(), c.Size, len(c.SData)) -} - type hashJob struct { key Address chunk []byte @@ -236,7 +229,7 @@ func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context. 
panic("chunker must be initialised") } - tc.runWorker() + tc.runWorker(ctx) depth := 0 treeSize := tc.chunkSize @@ -251,7 +244,7 @@ func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context. // this waitgroup member is released after the root hash is calculated tc.wg.Add(1) //launch actual recursive function passing the waitgroups - go tc.split(depth, treeSize/tc.branches, key, tc.dataSize, tc.wg) + go tc.split(ctx, depth, treeSize/tc.branches, key, tc.dataSize, tc.wg) // closes internal error channel if all subprocesses in the workgroup finished go func() { @@ -267,14 +260,14 @@ func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context. if err != nil { return nil, nil, err } - case <-time.NewTimer(splitTimeout).C: - return nil, nil, errOperationTimedOut + case <-ctx.Done(): + return nil, nil, ctx.Err() } return key, tc.putter.Wait, nil } -func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64, parentWg *sync.WaitGroup) { +func (tc *TreeChunker) split(ctx context.Context, depth int, treeSize int64, addr Address, size int64, parentWg *sync.WaitGroup) { // @@ -321,10 +314,10 @@ func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64 secSize = treeSize } // the hash of that data - subTreeKey := chunk[8+i*tc.hashSize : 8+(i+1)*tc.hashSize] + subTreeAddress := chunk[8+i*tc.hashSize : 8+(i+1)*tc.hashSize] childrenWg.Add(1) - tc.split(depth-1, treeSize/tc.branches, subTreeKey, secSize, childrenWg) + tc.split(ctx, depth-1, treeSize/tc.branches, subTreeAddress, secSize, childrenWg) i++ pos += treeSize @@ -336,7 +329,7 @@ func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64 worker := tc.getWorkerCount() if int64(len(tc.jobC)) > worker && worker < ChunkProcessors { - tc.runWorker() + tc.runWorker(ctx) } select { @@ -345,7 +338,7 @@ func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64 } } -func (tc *TreeChunker) runWorker() { 
+func (tc *TreeChunker) runWorker(ctx context.Context) { tc.incrementWorkerCount() go func() { defer tc.decrementWorkerCount() @@ -357,7 +350,7 @@ func (tc *TreeChunker) runWorker() { return } - h, err := tc.putter.Put(tc.ctx, job.chunk) + h, err := tc.putter.Put(ctx, job.chunk) if err != nil { tc.errC <- err return @@ -377,8 +370,8 @@ func (tc *TreeChunker) Append() (Address, func(), error) { // LazyChunkReader implements LazySectionReader type LazyChunkReader struct { - Ctx context.Context - key Address // root key + ctx context.Context + addr Address // root address chunkData ChunkData off int64 // offset chunkSize int64 // inherit from chunker @@ -390,18 +383,18 @@ type LazyChunkReader struct { func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader { return &LazyChunkReader{ - key: tc.addr, + addr: tc.addr, chunkSize: tc.chunkSize, branches: tc.branches, hashSize: tc.hashSize, depth: tc.depth, getter: tc.getter, - Ctx: tc.ctx, + ctx: tc.ctx, } } func (r *LazyChunkReader) Context() context.Context { - return r.Ctx + return r.ctx } // Size is meant to be called on the LazySectionReader @@ -415,23 +408,24 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e "lcr.size") defer sp.Finish() - log.Debug("lazychunkreader.size", "key", r.key) + log.Debug("lazychunkreader.size", "addr", r.addr) if r.chunkData == nil { - chunkData, err := r.getter.Get(cctx, Reference(r.key)) + chunkData, err := r.getter.Get(cctx, Reference(r.addr)) if err != nil { return 0, err } - if chunkData == nil { - select { - case <-quitC: - return 0, errors.New("aborted") - default: - return 0, fmt.Errorf("root chunk not found for %v", r.key.Hex()) - } - } r.chunkData = chunkData + s := r.chunkData.Size() + log.Debug("lazychunkreader.size", "key", r.addr, "size", s) + if s < 0 { + return 0, errors.New("corrupt size") + } + return int64(s), nil } - return r.chunkData.Size(), nil + s := r.chunkData.Size() + log.Debug("lazychunkreader.size", "key", r.addr, 
"size", s) + + return int64(s), nil } // read at can be called numerous times @@ -443,7 +437,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { var sp opentracing.Span var cctx context.Context cctx, sp = spancontext.StartSpan( - r.Ctx, + r.ctx, "lcr.read") defer sp.Finish() @@ -460,7 +454,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { quitC := make(chan bool) size, err := r.Size(cctx, quitC) if err != nil { - log.Error("lazychunkreader.readat.size", "size", size, "err", err) + log.Debug("lazychunkreader.readat.size", "size", size, "err", err) return 0, err } @@ -481,7 +475,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { length *= r.chunkSize } wg.Add(1) - go r.join(cctx, b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC) + go r.join(b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC) go func() { wg.Wait() close(errC) @@ -489,20 +483,22 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { err = <-errC if err != nil { - log.Error("lazychunkreader.readat.errc", "err", err) + log.Debug("lazychunkreader.readat.errc", "err", err) close(quitC) return 0, err } if off+int64(len(b)) >= size { + log.Debug("lazychunkreader.readat.return at end", "size", size, "off", off) return int(size - off), io.EOF } + log.Debug("lazychunkreader.readat.errc", "buff", len(b)) return len(b), nil } -func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { +func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { defer parentWg.Done() // find appropriate block level - for chunkData.Size() < treeSize && depth > r.depth { + for chunkData.Size() < uint64(treeSize) 
&& depth > r.depth { treeSize /= r.branches depth-- } @@ -545,19 +541,19 @@ func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff in } wg.Add(1) go func(j int64) { - childKey := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize] - chunkData, err := r.getter.Get(ctx, Reference(childKey)) + childAddress := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize] + chunkData, err := r.getter.Get(r.ctx, Reference(childAddress)) if err != nil { - log.Error("lazychunkreader.join", "key", fmt.Sprintf("%x", childKey), "err", err) + log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err) select { - case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childKey)): + case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)): case <-quitC: } return } if l := len(chunkData); l < 9 { select { - case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childKey), l): + case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childAddress), l): case <-quitC: } return @@ -565,26 +561,26 @@ func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff in if soff < off { soff = off } - r.join(ctx, b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC) + r.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC) }(i) } //for } // Read keeps a cursor so cannot be called simulateously, see ReadAt func (r *LazyChunkReader) Read(b []byte) (read int, err error) { - log.Debug("lazychunkreader.read", "key", r.key) + log.Debug("lazychunkreader.read", "key", r.addr) metrics.GetOrRegisterCounter("lazychunkreader.read", nil).Inc(1) read, err = r.ReadAt(b, r.off) if err != nil && err != io.EOF { - log.Error("lazychunkreader.readat", "read", 
read, "err", err) + log.Debug("lazychunkreader.readat", "read", read, "err", err) metrics.GetOrRegisterCounter("lazychunkreader.read.err", nil).Inc(1) } metrics.GetOrRegisterCounter("lazychunkreader.read.bytes", nil).Inc(int64(read)) r.off += int64(read) - return + return read, err } // completely analogous to standard SectionReader implementation @@ -592,7 +588,7 @@ var errWhence = errors.New("Seek: invalid whence") var errOffset = errors.New("Seek: invalid offset") func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) { - log.Debug("lazychunkreader.seek", "key", r.key, "offset", offset) + log.Debug("lazychunkreader.seek", "key", r.addr, "offset", offset) switch whence { default: return 0, errWhence @@ -607,7 +603,7 @@ func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) { return 0, fmt.Errorf("can't get size: %v", err) } } - offset += r.chunkData.Size() + offset += int64(r.chunkData.Size()) } if offset < 0 { diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunkstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunkstore.go deleted file mode 100644 index 3b4d97a7a7..0000000000 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunkstore.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "context" - "sync" -) - -/* -ChunkStore interface is implemented by : - -- MemStore: a memory cache -- DbStore: local disk/db store -- LocalStore: a combination (sequence of) memStore and dbStore -- NetStore: cloud storage abstraction layer -- FakeChunkStore: dummy store which doesn't store anything just implements the interface -*/ -type ChunkStore interface { - Put(context.Context, *Chunk) // effectively there is no error even if there is an error - Get(context.Context, Address) (*Chunk, error) - Close() -} - -// MapChunkStore is a very simple ChunkStore implementation to store chunks in a map in memory. -type MapChunkStore struct { - chunks map[string]*Chunk - mu sync.RWMutex -} - -func NewMapChunkStore() *MapChunkStore { - return &MapChunkStore{ - chunks: make(map[string]*Chunk), - } -} - -func (m *MapChunkStore) Put(ctx context.Context, chunk *Chunk) { - m.mu.Lock() - defer m.mu.Unlock() - m.chunks[chunk.Addr.Hex()] = chunk - chunk.markAsStored() -} - -func (m *MapChunkStore) Get(ctx context.Context, addr Address) (*Chunk, error) { - m.mu.RLock() - defer m.mu.RUnlock() - chunk := m.chunks[addr.Hex()] - if chunk == nil { - return nil, ErrChunkNotFound - } - return chunk, nil -} - -func (m *MapChunkStore) Close() { -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/dbapi.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/dbapi.go deleted file mode 100644 index dd71752eb2..0000000000 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/dbapi.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import "context" - -// wrapper of db-s to provide mockable custom local chunk store access to syncer -type DBAPI struct { - db *LDBStore - loc *LocalStore -} - -func NewDBAPI(loc *LocalStore) *DBAPI { - return &DBAPI{loc.DbStore, loc} -} - -// to obtain the chunks from address or request db entry only -func (d *DBAPI) Get(ctx context.Context, addr Address) (*Chunk, error) { - return d.loc.Get(ctx, addr) -} - -// current storage counter of chunk db -func (d *DBAPI) CurrentBucketStorageIndex(po uint8) uint64 { - return d.db.CurrentBucketStorageIndex(po) -} - -// iteration storage counter and proximity order -func (d *DBAPI) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error { - return d.db.SyncIterator(from, to, po, f) -} - -// to obtain the chunks from address or request db entry only -func (d *DBAPI) GetOrCreateRequest(ctx context.Context, addr Address) (*Chunk, bool) { - return d.loc.GetOrCreateRequest(ctx, addr) -} - -// to obtain the chunks from key or request db entry only -func (d *DBAPI) Put(ctx context.Context, chunk *Chunk) { - d.loc.Put(ctx, chunk) -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption.go index 
e50f2163db..6fbdab062b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "fmt" "hash" + "sync" ) const KeyLength = 32 @@ -28,84 +29,119 @@ const KeyLength = 32 type Key []byte type Encryption interface { - Encrypt(data []byte, key Key) ([]byte, error) - Decrypt(data []byte, key Key) ([]byte, error) + Encrypt(data []byte) ([]byte, error) + Decrypt(data []byte) ([]byte, error) } type encryption struct { - padding int - initCtr uint32 - hashFunc func() hash.Hash + key Key // the encryption key (hashSize bytes long) + keyLen int // length of the key = length of blockcipher block + padding int // encryption will pad the data upto this if > 0 + initCtr uint32 // initial counter used for counter mode blockcipher + hashFunc func() hash.Hash // hasher constructor function } -func New(padding int, initCtr uint32, hashFunc func() hash.Hash) *encryption { +// New constructs a new encryptor/decryptor +func New(key Key, padding int, initCtr uint32, hashFunc func() hash.Hash) *encryption { return &encryption{ + key: key, + keyLen: len(key), padding: padding, initCtr: initCtr, hashFunc: hashFunc, } } -func (e *encryption) Encrypt(data []byte, key Key) ([]byte, error) { +// Encrypt encrypts the data and does padding if specified +func (e *encryption) Encrypt(data []byte) ([]byte, error) { length := len(data) + outLength := length isFixedPadding := e.padding > 0 - if isFixedPadding && length > e.padding { - return nil, fmt.Errorf("Data length longer than padding, data length %v padding %v", length, e.padding) - } - - paddedData := data - if isFixedPadding && length < e.padding { - paddedData = make([]byte, e.padding) - copy(paddedData[:length], data) - rand.Read(paddedData[length:]) + if isFixedPadding { + if length > e.padding { + return nil, fmt.Errorf("Data length longer than padding, data length %v padding %v", 
length, e.padding) + } + outLength = e.padding } - return e.transform(paddedData, key), nil + out := make([]byte, outLength) + e.transform(data, out) + return out, nil } -func (e *encryption) Decrypt(data []byte, key Key) ([]byte, error) { +// Decrypt decrypts the data, if padding was used caller must know original length and truncate +func (e *encryption) Decrypt(data []byte) ([]byte, error) { length := len(data) if e.padding > 0 && length != e.padding { return nil, fmt.Errorf("Data length different than padding, data length %v padding %v", length, e.padding) } + out := make([]byte, length) + e.transform(data, out) + return out, nil +} - return e.transform(data, key), nil +// +func (e *encryption) transform(in, out []byte) { + inLength := len(in) + wg := sync.WaitGroup{} + wg.Add((inLength-1)/e.keyLen + 1) + for i := 0; i < inLength; i += e.keyLen { + l := min(e.keyLen, inLength-i) + // call transformations per segment (asyncronously) + go func(i int, x, y []byte) { + defer wg.Done() + e.Transcrypt(i, x, y) + }(i/e.keyLen, in[i:i+l], out[i:i+l]) + } + // pad the rest if out is longer + pad(out[inLength:]) + wg.Wait() } -func (e *encryption) transform(data []byte, key Key) []byte { - dataLength := len(data) - transformedData := make([]byte, dataLength) +// used for segmentwise transformation +// if in is shorter than out, padding is used +func (e *encryption) Transcrypt(i int, in []byte, out []byte) { + // first hash key with counter (initial counter + i) hasher := e.hashFunc() - ctr := e.initCtr - hashSize := hasher.Size() - for i := 0; i < dataLength; i += hashSize { - hasher.Write(key) + hasher.Write(e.key) - ctrBytes := make([]byte, 4) - binary.LittleEndian.PutUint32(ctrBytes, ctr) + ctrBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(ctrBytes, uint32(i)+e.initCtr) + hasher.Write(ctrBytes) - hasher.Write(ctrBytes) + ctrHash := hasher.Sum(nil) + hasher.Reset() - ctrHash := hasher.Sum(nil) - hasher.Reset() - hasher.Write(ctrHash) + // second round of 
hashing for selective disclosure + hasher.Write(ctrHash) + segmentKey := hasher.Sum(nil) + hasher.Reset() - segmentKey := hasher.Sum(nil) - - hasher.Reset() + // XOR bytes uptil length of in (out must be at least as long) + inLength := len(in) + for j := 0; j < inLength; j++ { + out[j] = in[j] ^ segmentKey[j] + } + // insert padding if out is longer + pad(out[inLength:]) +} - segmentSize := min(hashSize, dataLength-i) - for j := 0; j < segmentSize; j++ { - transformedData[i+j] = data[i+j] ^ segmentKey[j] - } - ctr++ +func pad(b []byte) { + l := len(b) + for total := 0; total < l; { + read, _ := rand.Read(b[total:]) + total += read } - return transformedData } -func GenerateRandomKey() (Key, error) { - key := make([]byte, KeyLength) - _, err := rand.Read(key) - return key, err +// GenerateRandomKey generates a random key of length l +func GenerateRandomKey(l int) Key { + key := make([]byte, l) + var total int + for total < l { + read, _ := rand.Read(key[total:]) + total += read + } + return key } func min(x, y int) int { diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go index bc23077c18..879622b9a0 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go @@ -19,57 +19,47 @@ package storage import ( "context" "fmt" - "sync" + "sync/atomic" "github.com/ethereum/go-ethereum/crypto/sha3" - "github.com/ethereum/go-ethereum/swarm/chunk" + ch "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/storage/encryption" ) -type chunkEncryption struct { - spanEncryption encryption.Encryption - dataEncryption encryption.Encryption -} - type hasherStore struct { - store ChunkStore - hashFunc SwarmHasher - chunkEncryption *chunkEncryption - hashSize int // content hash size - refSize int64 // reference size (content hash + possibly encryption key) - wg 
*sync.WaitGroup - closed chan struct{} -} - -func newChunkEncryption(chunkSize, refSize int64) *chunkEncryption { - return &chunkEncryption{ - spanEncryption: encryption.New(0, uint32(chunkSize/refSize), sha3.NewKeccak256), - dataEncryption: encryption.New(int(chunkSize), 0, sha3.NewKeccak256), - } + store ChunkStore + toEncrypt bool + hashFunc SwarmHasher + hashSize int // content hash size + refSize int64 // reference size (content hash + possibly encryption key) + nrChunks uint64 // number of chunks to store + errC chan error // global error channel + doneC chan struct{} // closed by Close() call to indicate that count is the final number of chunks + quitC chan struct{} // closed to quit unterminated routines } // NewHasherStore creates a hasherStore object, which implements Putter and Getter interfaces. // With the HasherStore you can put and get chunk data (which is just []byte) into a ChunkStore // and the hasherStore will take core of encryption/decryption of data if necessary -func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore { - var chunkEncryption *chunkEncryption - +func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore { hashSize := hashFunc().Size() refSize := int64(hashSize) if toEncrypt { refSize += encryption.KeyLength - chunkEncryption = newChunkEncryption(chunk.DefaultSize, refSize) } - return &hasherStore{ - store: chunkStore, - hashFunc: hashFunc, - chunkEncryption: chunkEncryption, - hashSize: hashSize, - refSize: refSize, - wg: &sync.WaitGroup{}, - closed: make(chan struct{}), + h := &hasherStore{ + store: store, + toEncrypt: toEncrypt, + hashFunc: hashFunc, + hashSize: hashSize, + refSize: refSize, + errC: make(chan error), + doneC: make(chan struct{}), + quitC: make(chan struct{}), } + + return h } // Put stores the chunkData into the ChunkStore of the hasherStore and returns the reference. 
@@ -77,38 +67,36 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) // Asynchronous function, the data will not necessarily be stored when it returns. func (h *hasherStore) Put(ctx context.Context, chunkData ChunkData) (Reference, error) { c := chunkData - size := chunkData.Size() var encryptionKey encryption.Key - if h.chunkEncryption != nil { + if h.toEncrypt { var err error c, encryptionKey, err = h.encryptChunkData(chunkData) if err != nil { return nil, err } } - chunk := h.createChunk(c, size) - + chunk := h.createChunk(c) h.storeChunk(ctx, chunk) - return Reference(append(chunk.Addr, encryptionKey...)), nil + return Reference(append(chunk.Address(), encryptionKey...)), nil } // Get returns data of the chunk with the given reference (retrieved from the ChunkStore of hasherStore). // If the data is encrypted and the reference contains an encryption key, it will be decrypted before // return. func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error) { - key, encryptionKey, err := parseReference(ref, h.hashSize) + addr, encryptionKey, err := parseReference(ref, h.hashSize) if err != nil { return nil, err } - toDecrypt := (encryptionKey != nil) - chunk, err := h.store.Get(ctx, key) + chunk, err := h.store.Get(ctx, addr) if err != nil { return nil, err } - chunkData := chunk.SData + chunkData := ChunkData(chunk.Data()) + toDecrypt := (encryptionKey != nil) if toDecrypt { var err error chunkData, err = h.decryptChunkData(chunkData, encryptionKey) @@ -122,16 +110,40 @@ func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error) // Close indicates that no more chunks will be put with the hasherStore, so the Wait // function can return when all the previously put chunks has been stored. 
func (h *hasherStore) Close() { - close(h.closed) + close(h.doneC) } // Wait returns when // 1) the Close() function has been called and // 2) all the chunks which has been Put has been stored func (h *hasherStore) Wait(ctx context.Context) error { - <-h.closed - h.wg.Wait() - return nil + defer close(h.quitC) + var nrStoredChunks uint64 // number of stored chunks + var done bool + doneC := h.doneC + for { + select { + // if context is done earlier, just return with the error + case <-ctx.Done(): + return ctx.Err() + // doneC is closed if all chunks have been submitted, from then we just wait until all of them are also stored + case <-doneC: + done = true + doneC = nil + // a chunk has been stored, if err is nil, then successfully, so increase the stored chunk counter + case err := <-h.errC: + if err != nil { + return err + } + nrStoredChunks++ + } + // if all the chunks have been submitted and all of them are stored, then we can return + if done { + if nrStoredChunks >= atomic.LoadUint64(&h.nrChunks) { + return nil + } + } + } } func (h *hasherStore) createHash(chunkData ChunkData) Address { @@ -141,12 +153,9 @@ func (h *hasherStore) createHash(chunkData ChunkData) Address { return hasher.Sum(nil) } -func (h *hasherStore) createChunk(chunkData ChunkData, chunkSize int64) *Chunk { +func (h *hasherStore) createChunk(chunkData ChunkData) *chunk { hash := h.createHash(chunkData) - chunk := NewChunk(hash, nil) - chunk.SData = chunkData - chunk.Size = chunkSize - + chunk := NewChunk(hash, chunkData) return chunk } @@ -155,23 +164,14 @@ func (h *hasherStore) encryptChunkData(chunkData ChunkData) (ChunkData, encrypti return nil, nil, fmt.Errorf("Invalid ChunkData, min length 8 got %v", len(chunkData)) } - encryptionKey, err := encryption.GenerateRandomKey() - if err != nil { - return nil, nil, err - } - - encryptedSpan, err := h.chunkEncryption.spanEncryption.Encrypt(chunkData[:8], encryptionKey) - if err != nil { - return nil, nil, err - } - encryptedData, err := 
h.chunkEncryption.dataEncryption.Encrypt(chunkData[8:], encryptionKey) + key, encryptedSpan, encryptedData, err := h.encrypt(chunkData) if err != nil { return nil, nil, err } c := make(ChunkData, len(encryptedSpan)+len(encryptedData)) copy(c[:8], encryptedSpan) copy(c[8:], encryptedData) - return c, encryptionKey, nil + return c, key, nil } func (h *hasherStore) decryptChunkData(chunkData ChunkData, encryptionKey encryption.Key) (ChunkData, error) { @@ -179,54 +179,82 @@ func (h *hasherStore) decryptChunkData(chunkData ChunkData, encryptionKey encryp return nil, fmt.Errorf("Invalid ChunkData, min length 8 got %v", len(chunkData)) } - decryptedSpan, err := h.chunkEncryption.spanEncryption.Decrypt(chunkData[:8], encryptionKey) - if err != nil { - return nil, err - } - - decryptedData, err := h.chunkEncryption.dataEncryption.Decrypt(chunkData[8:], encryptionKey) + decryptedSpan, decryptedData, err := h.decrypt(chunkData, encryptionKey) if err != nil { return nil, err } // removing extra bytes which were just added for padding length := ChunkData(decryptedSpan).Size() - for length > chunk.DefaultSize { - length = length + (chunk.DefaultSize - 1) - length = length / chunk.DefaultSize - length *= h.refSize + for length > ch.DefaultSize { + length = length + (ch.DefaultSize - 1) + length = length / ch.DefaultSize + length *= uint64(h.refSize) } c := make(ChunkData, length+8) copy(c[:8], decryptedSpan) copy(c[8:], decryptedData[:length]) - return c[:length+8], nil + return c, nil } func (h *hasherStore) RefSize() int64 { return h.refSize } -func (h *hasherStore) storeChunk(ctx context.Context, chunk *Chunk) { - h.wg.Add(1) +func (h *hasherStore) encrypt(chunkData ChunkData) (encryption.Key, []byte, []byte, error) { + key := encryption.GenerateRandomKey(encryption.KeyLength) + encryptedSpan, err := h.newSpanEncryption(key).Encrypt(chunkData[:8]) + if err != nil { + return nil, nil, nil, err + } + encryptedData, err := h.newDataEncryption(key).Encrypt(chunkData[8:]) + if err 
!= nil { + return nil, nil, nil, err + } + return key, encryptedSpan, encryptedData, nil +} + +func (h *hasherStore) decrypt(chunkData ChunkData, key encryption.Key) ([]byte, []byte, error) { + encryptedSpan, err := h.newSpanEncryption(key).Encrypt(chunkData[:8]) + if err != nil { + return nil, nil, err + } + encryptedData, err := h.newDataEncryption(key).Encrypt(chunkData[8:]) + if err != nil { + return nil, nil, err + } + return encryptedSpan, encryptedData, nil +} + +func (h *hasherStore) newSpanEncryption(key encryption.Key) encryption.Encryption { + return encryption.New(key, 0, uint32(ch.DefaultSize/h.refSize), sha3.NewKeccak256) +} + +func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryption { + return encryption.New(key, int(ch.DefaultSize), 0, sha3.NewKeccak256) +} + +func (h *hasherStore) storeChunk(ctx context.Context, chunk *chunk) { + atomic.AddUint64(&h.nrChunks, 1) go func() { - <-chunk.dbStoredC - h.wg.Done() + select { + case h.errC <- h.store.Put(ctx, chunk): + case <-h.quitC: + } }() - h.store.Put(ctx, chunk) } func parseReference(ref Reference, hashSize int) (Address, encryption.Key, error) { - encryptedKeyLength := hashSize + encryption.KeyLength + encryptedRefLength := hashSize + encryption.KeyLength switch len(ref) { - case KeyLength: + case AddressLength: return Address(ref), nil, nil - case encryptedKeyLength: + case encryptedRefLength: encKeyIdx := len(ref) - encryption.KeyLength return Address(ref[:encKeyIdx]), encryption.Key(ref[encKeyIdx:]), nil default: - return nil, nil, fmt.Errorf("Invalid reference length, expected %v or %v got %v", hashSize, encryptedKeyLength, len(ref)) + return nil, nil, fmt.Errorf("Invalid reference length, expected %v or %v got %v", hashSize, encryptedRefLength, len(ref)) } - } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go index b95aa13b09..bde627394e 100644 --- 
a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go @@ -28,6 +28,7 @@ import ( "context" "encoding/binary" "encoding/hex" + "errors" "fmt" "io" "io/ioutil" @@ -36,7 +37,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/swarm/chunk" + ch "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage/mock" "github.com/syndtr/goleveldb/leveldb" @@ -48,6 +49,10 @@ const ( maxGCitems = 5000 // max number of items to be gc'd per call to collectGarbage() ) +var ( + dbEntryCount = metrics.NewRegisteredCounter("ldbstore.entryCnt", nil) +) + var ( keyIndex = byte(0) keyOldData = byte(1) @@ -58,6 +63,10 @@ var ( keyDistanceCnt = byte(7) ) +var ( + ErrDBClosed = errors.New("LDBStore closed") +) + type gcItem struct { idx uint64 value uint64 @@ -76,7 +85,7 @@ func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams { return &LDBStoreParams{ StoreParams: storeparams, Path: path, - Po: func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey[:], k[:])) }, + Po: func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey, k[:])) }, } } @@ -95,18 +104,29 @@ type LDBStore struct { batchC chan bool batchesC chan struct{} - batch *leveldb.Batch + closed bool + batch *dbBatch lock sync.RWMutex quit chan struct{} // Functions encodeDataFunc is used to bypass // the default functionality of DbStore with // mock.NodeStore for testing purposes. - encodeDataFunc func(chunk *Chunk) []byte + encodeDataFunc func(chunk Chunk) []byte // If getDataFunc is defined, it will be used for // retrieving the chunk data instead from the local // LevelDB database. 
- getDataFunc func(addr Address) (data []byte, err error) + getDataFunc func(key Address) (data []byte, err error) +} + +type dbBatch struct { + *leveldb.Batch + err error + c chan struct{} +} + +func newBatch() *dbBatch { + return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})} } // TODO: Instead of passing the distance function, just pass the address from which distances are calculated @@ -117,10 +137,9 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) { s.hashfunc = params.Hash s.quit = make(chan struct{}) - s.batchC = make(chan bool) s.batchesC = make(chan struct{}, 1) go s.writeBatches() - s.batch = new(leveldb.Batch) + s.batch = newBatch() // associate encodeData with default functionality s.encodeDataFunc = encodeData @@ -139,17 +158,13 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) { k[1] = uint8(i) cnt, _ := s.db.Get(k) s.bucketCnt[i] = BytesToU64(cnt) - s.bucketCnt[i]++ } data, _ := s.db.Get(keyEntryCnt) s.entryCnt = BytesToU64(data) - s.entryCnt++ data, _ = s.db.Get(keyAccessCnt) s.accessCnt = BytesToU64(data) - s.accessCnt++ data, _ = s.db.Get(keyDataIdx) s.dataIdx = BytesToU64(data) - s.dataIdx++ return s, nil } @@ -201,14 +216,6 @@ func getIndexKey(hash Address) []byte { return key } -func getOldDataKey(idx uint64) []byte { - key := make([]byte, 9) - key[0] = keyOldData - binary.BigEndian.PutUint64(key[1:9], idx) - - return key -} - func getDataKey(idx uint64, po uint8) []byte { key := make([]byte, 10) key[0] = keyData @@ -223,12 +230,12 @@ func encodeIndex(index *dpaDBIndex) []byte { return data } -func encodeData(chunk *Chunk) []byte { +func encodeData(chunk Chunk) []byte { // Always create a new underlying array for the returned byte slice. - // The chunk.Key array may be used in the returned slice which + // The chunk.Address array may be used in the returned slice which // may be changed later in the code or by the LevelDB, resulting - // that the Key is changed as well. 
- return append(append([]byte{}, chunk.Addr[:]...), chunk.SData...) + // that the Address is changed as well. + return append(append([]byte{}, chunk.Address()[:]...), chunk.Data()...) } func decodeIndex(data []byte, index *dpaDBIndex) error { @@ -236,17 +243,13 @@ func decodeIndex(data []byte, index *dpaDBIndex) error { return dec.Decode(index) } -func decodeData(data []byte, chunk *Chunk) { - chunk.SData = data[32:] - chunk.Size = int64(binary.BigEndian.Uint64(data[32:40])) -} - -func decodeOldData(data []byte, chunk *Chunk) { - chunk.SData = data - chunk.Size = int64(binary.BigEndian.Uint64(data[0:8])) +func decodeData(addr Address, data []byte) (*chunk, error) { + return NewChunk(addr, data[32:]), nil } func (s *LDBStore) collectGarbage(ratio float32) { + log.Trace("collectGarbage", "ratio", ratio) + metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1) it := s.db.NewIterator() @@ -319,7 +322,7 @@ func (s *LDBStore) Export(out io.Writer) (int64, error) { log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po) data, err := s.db.Get(datakey) if err != nil { - log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err)) + log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key, err)) continue } @@ -344,44 +347,75 @@ func (s *LDBStore) Export(out io.Writer) (int64, error) { func (s *LDBStore) Import(in io.Reader) (int64, error) { tr := tar.NewReader(in) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + countC := make(chan int64) + errC := make(chan error) var count int64 - var wg sync.WaitGroup - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return count, err - } + go func() { + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } else if err != nil { + select { + case errC <- err: + case <-ctx.Done(): + } + } - if len(hdr.Name) != 64 { - log.Warn("ignoring non-chunk file", "name", hdr.Name) - 
continue - } + if len(hdr.Name) != 64 { + log.Warn("ignoring non-chunk file", "name", hdr.Name) + continue + } - keybytes, err := hex.DecodeString(hdr.Name) - if err != nil { - log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err) - continue + keybytes, err := hex.DecodeString(hdr.Name) + if err != nil { + log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err) + continue + } + + data, err := ioutil.ReadAll(tr) + if err != nil { + select { + case errC <- err: + case <-ctx.Done(): + } + } + key := Address(keybytes) + chunk := NewChunk(key, data[32:]) + + go func() { + select { + case errC <- s.Put(ctx, chunk): + case <-ctx.Done(): + } + }() + + count++ } + countC <- count + }() - data, err := ioutil.ReadAll(tr) - if err != nil { - return count, err + // wait for all chunks to be stored + i := int64(0) + var total int64 + for { + select { + case err := <-errC: + if err != nil { + return count, err + } + i++ + case total = <-countC: + case <-ctx.Done(): + return i, ctx.Err() + } + if total > 0 && i == total { + return total, nil } - key := Address(keybytes) - chunk := NewChunk(key, nil) - chunk.SData = data[32:] - s.Put(context.TODO(), chunk) - wg.Add(1) - go func() { - defer wg.Done() - <-chunk.dbStoredC - }() - count++ } - wg.Wait() - return count, nil } func (s *LDBStore) Cleanup() { @@ -421,21 +455,24 @@ func (s *LDBStore) Cleanup() { } if !found { - log.Warn(fmt.Sprintf("Chunk %x found but count not be accessed with any po", key[:])) + log.Warn(fmt.Sprintf("Chunk %x found but count not be accessed with any po", key)) errorsFound++ continue } } - c := &Chunk{} ck := data[:32] - decodeData(data, c) + c, err := decodeData(ck, data) + if err != nil { + log.Error("decodeData error", "err", err) + continue + } - cs := int64(binary.LittleEndian.Uint64(c.SData[:8])) - log.Trace("chunk", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), 
"len sdata", len(c.SData), "size", cs) + cs := int64(binary.LittleEndian.Uint64(c.sdata[:8])) + log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs) - if len(c.SData) > chunk.DefaultSize+8 { - log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs) + if len(c.sdata) > ch.DefaultSize+8 { + log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs) s.delete(index.Idx, getIndexKey(key[1:]), po) removed++ errorsFound++ @@ -488,6 +525,18 @@ func (s *LDBStore) ReIndex() { log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total)) } +func (s *LDBStore) Delete(addr Address) { + s.lock.Lock() + defer s.lock.Unlock() + + ikey := getIndexKey(addr) + + var indx dpaDBIndex + s.tryAccessIdx(ikey, &indx) + + s.delete(indx.Idx, ikey, s.po(addr)) +} + func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) { metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1) @@ -495,7 +544,7 @@ func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) { batch.Delete(idxKey) batch.Delete(getDataKey(idx, po)) s.entryCnt-- - s.bucketCnt[po]-- + dbEntryCount.Dec(1) cntKey := make([]byte, 2) cntKey[0] = keyDistanceCnt cntKey[1] = po @@ -504,16 +553,15 @@ func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) { s.db.Write(batch) } -func (s *LDBStore) CurrentBucketStorageIndex(po uint8) uint64 { +func (s *LDBStore) BinIndex(po uint8) uint64 { s.lock.RLock() defer s.lock.RUnlock() - return s.bucketCnt[po] } func (s *LDBStore) Size() uint64 { - s.lock.Lock() - defer s.lock.Unlock() + 
s.lock.RLock() + defer s.lock.RUnlock() return s.entryCnt } @@ -523,49 +571,60 @@ func (s *LDBStore) CurrentStorageIndex() uint64 { return s.dataIdx } -func (s *LDBStore) Put(ctx context.Context, chunk *Chunk) { +func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error { metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1) - log.Trace("ldbstore.put", "key", chunk.Addr) + log.Trace("ldbstore.put", "key", chunk.Address()) - ikey := getIndexKey(chunk.Addr) + ikey := getIndexKey(chunk.Address()) var index dpaDBIndex - po := s.po(chunk.Addr) + po := s.po(chunk.Address()) + s.lock.Lock() - defer s.lock.Unlock() - log.Trace("ldbstore.put: s.db.Get", "key", chunk.Addr, "ikey", fmt.Sprintf("%x", ikey)) + if s.closed { + s.lock.Unlock() + return ErrDBClosed + } + batch := s.batch + + log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey)) idata, err := s.db.Get(ikey) if err != nil { s.doPut(chunk, &index, po) - batchC := s.batchC - go func() { - <-batchC - chunk.markAsStored() - }() } else { - log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Addr) + log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Address) decodeIndex(idata, &index) - chunk.markAsStored() } index.Access = s.accessCnt s.accessCnt++ idata = encodeIndex(&index) s.batch.Put(ikey, idata) + + s.lock.Unlock() + select { case s.batchesC <- struct{}{}: default: } + + select { + case <-batch.c: + return batch.err + case <-ctx.Done(): + return ctx.Err() + } } // force putting into db, does not check access index -func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) { +func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) { data := s.encodeDataFunc(chunk) dkey := getDataKey(s.dataIdx, po) s.batch.Put(dkey, data) index.Idx = s.dataIdx s.bucketCnt[po] = s.dataIdx s.entryCnt++ + dbEntryCount.Inc(1) s.dataIdx++ cntKey := make([]byte, 2) @@ -575,56 +634,64 @@ func (s *LDBStore) 
doPut(chunk *Chunk, index *dpaDBIndex, po uint8) { } func (s *LDBStore) writeBatches() { -mainLoop: for { select { case <-s.quit: - break mainLoop + log.Debug("DbStore: quit batch write loop") + return case <-s.batchesC: - s.lock.Lock() - b := s.batch - e := s.entryCnt - d := s.dataIdx - a := s.accessCnt - c := s.batchC - s.batchC = make(chan bool) - s.batch = new(leveldb.Batch) - err := s.writeBatch(b, e, d, a) - // TODO: set this error on the batch, then tell the chunk + err := s.writeCurrentBatch() if err != nil { - log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", b.Len(), err)) - } - close(c) - for e > s.capacity { - // Collect garbage in a separate goroutine - // to be able to interrupt this loop by s.quit. - done := make(chan struct{}) - go func() { - s.collectGarbage(gcArrayFreeRatio) - close(done) - }() - - e = s.entryCnt - select { - case <-s.quit: - s.lock.Unlock() - break mainLoop - case <-done: - } + log.Debug("DbStore: quit batch write loop", "err", err.Error()) + return } - s.lock.Unlock() } } - log.Trace(fmt.Sprintf("DbStore: quit batch write loop")) + +} + +func (s *LDBStore) writeCurrentBatch() error { + s.lock.Lock() + defer s.lock.Unlock() + b := s.batch + l := b.Len() + if l == 0 { + return nil + } + e := s.entryCnt + d := s.dataIdx + a := s.accessCnt + s.batch = newBatch() + b.err = s.writeBatch(b, e, d, a) + close(b.c) + for e > s.capacity { + log.Trace("for >", "e", e, "s.capacity", s.capacity) + // Collect garbage in a separate goroutine + // to be able to interrupt this loop by s.quit. 
+ done := make(chan struct{}) + go func() { + s.collectGarbage(gcArrayFreeRatio) + log.Trace("collectGarbage closing done") + close(done) + }() + + select { + case <-s.quit: + return errors.New("CollectGarbage terminated due to quit") + case <-done: + } + e = s.entryCnt + } + return nil } // must be called non concurrently -func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uint64) error { +func (s *LDBStore) writeBatch(b *dbBatch, entryCnt, dataIdx, accessCnt uint64) error { b.Put(keyEntryCnt, U64ToBytes(entryCnt)) b.Put(keyDataIdx, U64ToBytes(dataIdx)) b.Put(keyAccessCnt, U64ToBytes(accessCnt)) l := b.Len() - if err := s.db.Write(b); err != nil { + if err := s.db.Write(b.Batch); err != nil { return fmt.Errorf("unable to write batch: %v", err) } log.Trace(fmt.Sprintf("batch write (%d entries)", l)) @@ -635,12 +702,12 @@ func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uin // to a mock store to bypass the default functionality encodeData. // The constructed function always returns the nil data, as DbStore does // not need to store the data, but still need to create the index. 
-func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk *Chunk) []byte { - return func(chunk *Chunk) []byte { - if err := mockStore.Put(chunk.Addr, encodeData(chunk)); err != nil { - log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Addr.Log(), err)) +func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte { + return func(chunk Chunk) []byte { + if err := mockStore.Put(chunk.Address(), encodeData(chunk)); err != nil { + log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Address().Log(), err)) } - return chunk.Addr[:] + return chunk.Address()[:] } } @@ -663,7 +730,7 @@ func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool { return true } -func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { +func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) { metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1) log.Trace("ldbstore.get", "key", addr) @@ -672,9 +739,11 @@ func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err err return s.get(addr) } -func (s *LDBStore) get(addr Address) (chunk *Chunk, err error) { +func (s *LDBStore) get(addr Address) (chunk *chunk, err error) { var indx dpaDBIndex - + if s.closed { + return nil, ErrDBClosed + } if s.tryAccessIdx(getIndexKey(addr), &indx) { var data []byte if s.getDataFunc != nil { @@ -697,9 +766,7 @@ func (s *LDBStore) get(addr Address) (chunk *Chunk, err error) { } } - chunk = NewChunk(addr, nil) - chunk.markAsStored() - decodeData(data, chunk) + return decodeData(addr, data) } else { err = ErrChunkNotFound } @@ -753,6 +820,12 @@ func (s *LDBStore) setCapacity(c uint64) { func (s *LDBStore) Close() { close(s.quit) + s.lock.Lock() + s.closed = true + s.lock.Unlock() + // force writing out current batch + s.writeCurrentBatch() + close(s.batchesC) s.db.Close() } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go 
b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go index 9e34749797..04701ee69a 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go @@ -18,8 +18,6 @@ package storage import ( "context" - "encoding/binary" - "fmt" "path/filepath" "sync" @@ -97,123 +95,89 @@ func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) { // when the chunk is stored in memstore. // After the LDBStore.Put, it is ensured that the MemStore // contains the chunk with the same data, but nil ReqC channel. -func (ls *LocalStore) Put(ctx context.Context, chunk *Chunk) { +func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error { valid := true // ls.Validators contains a list of one validator per chunk type. // if one validator succeeds, then the chunk is valid for _, v := range ls.Validators { - if valid = v.Validate(chunk.Addr, chunk.SData); valid { + if valid = v.Validate(chunk.Address(), chunk.Data()); valid { break } } if !valid { - log.Trace("invalid chunk", "addr", chunk.Addr, "len", len(chunk.SData)) - chunk.SetErrored(ErrChunkInvalid) - chunk.markAsStored() - return + return ErrChunkInvalid } - log.Trace("localstore.put", "addr", chunk.Addr) - + log.Trace("localstore.put", "key", chunk.Address()) ls.mu.Lock() defer ls.mu.Unlock() - chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - - memChunk, err := ls.memStore.Get(ctx, chunk.Addr) - switch err { - case nil: - if memChunk.ReqC == nil { - chunk.markAsStored() - return - } - case ErrChunkNotFound: - default: - chunk.SetErrored(err) - return + _, err := ls.memStore.Get(ctx, chunk.Address()) + if err == nil { + return nil } - - ls.DbStore.Put(ctx, chunk) - - // chunk is no longer a request, but a chunk with data, so replace it in memStore - newc := NewChunk(chunk.Addr, nil) - newc.SData = chunk.SData - newc.Size = chunk.Size - newc.dbStoredC = chunk.dbStoredC - - 
ls.memStore.Put(ctx, newc) - - if memChunk != nil && memChunk.ReqC != nil { - close(memChunk.ReqC) + if err != nil && err != ErrChunkNotFound { + return err } + ls.memStore.Put(ctx, chunk) + err = ls.DbStore.Put(ctx, chunk) + return err } // Get(chunk *Chunk) looks up a chunk in the local stores // This method is blocking until the chunk is retrieved // so additional timeout may be needed to wrap this call if // ChunkStores are remote and can have long latency -func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { +func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk Chunk, err error) { ls.mu.Lock() defer ls.mu.Unlock() return ls.get(ctx, addr) } -func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk *Chunk, err error) { +func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk Chunk, err error) { chunk, err = ls.memStore.Get(ctx, addr) + + if err != nil && err != ErrChunkNotFound { + metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1) + return nil, err + } + if err == nil { - if chunk.ReqC != nil { - select { - case <-chunk.ReqC: - default: - metrics.GetOrRegisterCounter("localstore.get.errfetching", nil).Inc(1) - return chunk, ErrFetching - } - } metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1) - return + return chunk, nil } + metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1) chunk, err = ls.DbStore.Get(ctx, addr) if err != nil { metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1) - return + return nil, err } - chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) + ls.memStore.Put(ctx, chunk) - return + return chunk, nil } -// retrieve logic common for local and network chunk retrieval requests -func (ls *LocalStore) GetOrCreateRequest(ctx context.Context, addr Address) (chunk *Chunk, created bool) { - metrics.GetOrRegisterCounter("localstore.getorcreaterequest", nil).Inc(1) - +func (ls *LocalStore) FetchFunc(ctx 
context.Context, addr Address) func(context.Context) error { ls.mu.Lock() defer ls.mu.Unlock() - var err error - chunk, err = ls.get(ctx, addr) - if err == nil && chunk.GetErrored() == nil { - metrics.GetOrRegisterCounter("localstore.getorcreaterequest.hit", nil).Inc(1) - log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v found locally", addr)) - return chunk, false + _, err := ls.get(ctx, addr) + if err == nil { + return nil } - if err == ErrFetching && chunk.GetErrored() == nil { - metrics.GetOrRegisterCounter("localstore.getorcreaterequest.errfetching", nil).Inc(1) - log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v hit on an existing request %v", addr, chunk.ReqC)) - return chunk, false + return func(context.Context) error { + return err } - // no data and no request status - metrics.GetOrRegisterCounter("localstore.getorcreaterequest.miss", nil).Inc(1) - log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v not found locally. open new request", addr)) - chunk = NewChunk(addr, make(chan bool)) - ls.memStore.Put(ctx, chunk) - return chunk, true } -// RequestsCacheLen returns the current number of outgoing requests stored in the cache -func (ls *LocalStore) RequestsCacheLen() int { - return ls.memStore.requests.Len() +func (ls *LocalStore) BinIndex(po uint8) uint64 { + return ls.DbStore.BinIndex(po) +} + +func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error { + return ls.DbStore.SyncIterator(from, to, po, f) } // Close the local store diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go index 55cfcbfeaf..36b1e00d9b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go @@ -20,24 +20,17 @@ package storage import ( "context" - "sync" lru "github.com/hashicorp/golang-lru" ) type MemStore struct { cache *lru.Cache - requests *lru.Cache 
- mu sync.RWMutex disabled bool } -//NewMemStore is instantiating a MemStore cache. We are keeping a record of all outgoing requests for chunks, that -//should later be delivered by peer nodes, in the `requests` LRU cache. We are also keeping all frequently requested +//NewMemStore is instantiating a MemStore cache keeping all frequently requested //chunks in the `cache` LRU cache. -// -//`requests` LRU cache capacity should ideally never be reached, this is why for the time being it should be initialised -//with the same value as the LDBStore capacity. func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) { if params.CacheCapacity == 0 { return &MemStore{ @@ -45,102 +38,48 @@ func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) { } } - onEvicted := func(key interface{}, value interface{}) { - v := value.(*Chunk) - <-v.dbStoredC - } - c, err := lru.NewWithEvict(int(params.CacheCapacity), onEvicted) - if err != nil { - panic(err) - } - - requestEvicted := func(key interface{}, value interface{}) { - // temporary remove of the error log, until we figure out the problem, as it is too spamy - //log.Error("evict called on outgoing request") - } - r, err := lru.NewWithEvict(int(params.ChunkRequestsCacheCapacity), requestEvicted) + c, err := lru.New(int(params.CacheCapacity)) if err != nil { panic(err) } return &MemStore{ - cache: c, - requests: r, + cache: c, } } -func (m *MemStore) Get(ctx context.Context, addr Address) (*Chunk, error) { +func (m *MemStore) Get(_ context.Context, addr Address) (Chunk, error) { if m.disabled { return nil, ErrChunkNotFound } - m.mu.RLock() - defer m.mu.RUnlock() - - r, ok := m.requests.Get(string(addr)) - // it is a request - if ok { - return r.(*Chunk), nil - } - - // it is not a request c, ok := m.cache.Get(string(addr)) if !ok { return nil, ErrChunkNotFound } - return c.(*Chunk), nil + return c.(*chunk), nil } -func (m *MemStore) Put(ctx context.Context, c *Chunk) { +func (m *MemStore) Put(_ context.Context, c 
Chunk) error { if m.disabled { - return + return nil } - m.mu.Lock() - defer m.mu.Unlock() - - // it is a request - if c.ReqC != nil { - select { - case <-c.ReqC: - if c.GetErrored() != nil { - m.requests.Remove(string(c.Addr)) - return - } - m.cache.Add(string(c.Addr), c) - m.requests.Remove(string(c.Addr)) - default: - m.requests.Add(string(c.Addr), c) - } - return - } - - // it is not a request - m.cache.Add(string(c.Addr), c) - m.requests.Remove(string(c.Addr)) + m.cache.Add(string(c.Address()), c) + return nil } func (m *MemStore) setCapacity(n int) { if n <= 0 { m.disabled = true } else { - onEvicted := func(key interface{}, value interface{}) { - v := value.(*Chunk) - <-v.dbStoredC - } - c, err := lru.NewWithEvict(n, onEvicted) - if err != nil { - panic(err) - } - - r, err := lru.New(defaultChunkRequestsCacheCapacity) + c, err := lru.New(n) if err != nil { panic(err) } - m = &MemStore{ - cache: c, - requests: r, + *m = MemStore{ + cache: c, } } } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/handler.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/handler.go index 57561fd14b..18c667f14e 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/handler.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/handler.go @@ -187,12 +187,12 @@ func (h *Handler) New(ctx context.Context, request *Request) error { return err } if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) || - request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Addr) { + request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Address()) { return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata") } request.metaHash = metaHash - request.rootAddr = chunk.Addr + request.rootAddr = chunk.Address() h.chunkStore.Put(ctx, chunk) log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, 
"owner", request.metadata.Owner) @@ -202,14 +202,14 @@ func (h *Handler) New(ctx context.Context, request *Request) error { resourceUpdate: resourceUpdate{ updateHeader: updateHeader{ UpdateLookup: UpdateLookup{ - rootAddr: chunk.Addr, + rootAddr: chunk.Address(), }, }, }, ResourceMetadata: request.metadata, updated: time.Now(), } - h.set(chunk.Addr, rsrc) + h.set(chunk.Address(), rsrc) return nil } @@ -348,7 +348,11 @@ func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit) } updateAddr := lp.UpdateAddr() - chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout) + + ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout) + defer cancel() + + chunk, err := h.chunkStore.Get(ctx, updateAddr) if err == nil { if specificversion { return h.updateIndex(rsrc, chunk) @@ -358,7 +362,11 @@ func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error for { newversion := lp.version + 1 updateAddr := lp.UpdateAddr() - newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout) + + ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout) + defer cancel() + + newchunk, err := h.chunkStore.Get(ctx, updateAddr) if err != nil { return h.updateIndex(rsrc, chunk) } @@ -380,7 +388,10 @@ func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error // Load retrieves the Mutable Resource metadata chunk stored at rootAddr // Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) { - chunk, err := h.chunkStore.GetWithTimeout(ctx, rootAddr, defaultRetrieveTimeout) + //TODO: Maybe add timeout to context, defaultRetrieveTimeout? 
+ ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() + chunk, err := h.chunkStore.Get(ctx, rootAddr) if err != nil { return nil, NewError(ErrNotFound, err.Error()) } @@ -388,11 +399,11 @@ func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource // create the index entry rsrc := &resource{} - if err := rsrc.ResourceMetadata.binaryGet(chunk.SData); err != nil { // Will fail if this is not really a metadata chunk + if err := rsrc.ResourceMetadata.binaryGet(chunk.Data()); err != nil { // Will fail if this is not really a metadata chunk return nil, err } - rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.SData) + rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.Data()) if !bytes.Equal(rsrc.rootAddr, rootAddr) { return nil, NewError(ErrCorruptData, "Corrupt metadata chunk") } @@ -402,17 +413,17 @@ func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource } // update mutable resource index map with specified content -func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) { +func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, error) { // retrieve metadata from chunk data and check that it matches this mutable resource var r SignedResourceUpdate - if err := r.fromChunk(chunk.Addr, chunk.SData); err != nil { + if err := r.fromChunk(chunk.Address(), chunk.Data()); err != nil { return nil, err } - log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Addr, "period", r.period, "version", r.version) + log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Address(), "period", r.period, "version", r.version) // update our rsrcs entry map - rsrc.lastKey = chunk.Addr + rsrc.lastKey = chunk.Address() rsrc.period = r.period rsrc.version = r.version rsrc.updated = time.Now() @@ -420,8 +431,8 @@ func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, 
rsrc.multihash = r.multihash copy(rsrc.data, r.data) rsrc.Reader = bytes.NewReader(rsrc.data) - log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Addr, "period", rsrc.period, "version", rsrc.version) - h.set(chunk.Addr, rsrc) + log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Address(), "period", rsrc.period, "version", rsrc.version) + h.set(chunk.Address(), rsrc) return rsrc, nil } @@ -457,7 +468,7 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAd // send the chunk h.chunkStore.Put(ctx, chunk) - log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.SData, "multihash", r.multihash) + log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.Data(), "multihash", r.multihash) // update our resources map entry if the new update is older than the one we have, if we have it. 
if rsrc != nil && (r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version)) { @@ -475,7 +486,7 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAd // Retrieves the resource index value for the given nameHash func (h *Handler) get(rootAddr storage.Address) *resource { - if len(rootAddr) < storage.KeyLength { + if len(rootAddr) < storage.AddressLength { log.Warn("Handler.get with invalid rootAddr") return nil } @@ -488,7 +499,7 @@ func (h *Handler) get(rootAddr storage.Address) *resource { // Sets the resource index value for the given nameHash func (h *Handler) set(rootAddr storage.Address, rsrc *resource) { - if len(rootAddr) < storage.KeyLength { + if len(rootAddr) < storage.AddressLength { log.Warn("Handler.set with invalid rootAddr") return } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/lookup.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/lookup.go index eb28336e12..b52cd5b4ff 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/lookup.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/lookup.go @@ -72,7 +72,7 @@ type UpdateLookup struct { // 4 bytes period // 4 bytes version // storage.Keylength for rootAddr -const updateLookupLength = 4 + 4 + storage.KeyLength +const updateLookupLength = 4 + 4 + storage.AddressLength // UpdateAddr calculates the resource update chunk address corresponding to this lookup key func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) { @@ -90,7 +90,7 @@ func (u *UpdateLookup) binaryPut(serializedData []byte) error { if len(serializedData) != updateLookupLength { return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. 
Expected %d, got %d", updateLookupLength, len(serializedData)) } - if len(u.rootAddr) != storage.KeyLength { + if len(u.rootAddr) != storage.AddressLength { return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set") } binary.LittleEndian.PutUint32(serializedData[:4], u.period) @@ -111,7 +111,7 @@ func (u *UpdateLookup) binaryGet(serializedData []byte) error { } u.period = binary.LittleEndian.Uint32(serializedData[:4]) u.version = binary.LittleEndian.Uint32(serializedData[4:8]) - u.rootAddr = storage.Address(make([]byte, storage.KeyLength)) + u.rootAddr = storage.Address(make([]byte, storage.AddressLength)) copy(u.rootAddr[:], serializedData[8:]) return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/metadata.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/metadata.go index 0ab0ed1d9e..5091148959 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/metadata.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/metadata.go @@ -142,7 +142,7 @@ func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkD } // creates a metadata chunk out of a resourceMetadata structure -func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []byte, err error) { +func (metadata *ResourceMetadata) newChunk() (chunk storage.Chunk, metaHash []byte, err error) { // the metadata chunk contains a timestamp of when the resource starts to be valid // and also how frequently it is expected to be updated // from this we know at what time we should look for updates, and how often @@ -157,9 +157,7 @@ func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []b } // make the chunk and send it to swarm - chunk = storage.NewChunk(rootAddr, nil) - chunk.SData = chunkData - chunk.Size = int64(len(chunkData)) + chunk = storage.NewChunk(rootAddr, chunkData) return chunk, metaHash, nil } diff --git 
a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/request.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/request.go index dd71f855d6..af2ccf5c76 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/request.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/request.go @@ -182,7 +182,7 @@ func (r *Request) fromJSON(j *updateRequestJSON) error { var declaredRootAddr storage.Address var declaredMetaHash []byte - declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.KeyLength, "rootAddr") + declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.AddressLength, "rootAddr") if err != nil { return err } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/signedupdate.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/signedupdate.go index 1c6d02e82a..41a5a5e631 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/signedupdate.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/signedupdate.go @@ -96,7 +96,7 @@ func (r *SignedResourceUpdate) Sign(signer Signer) error { } // create an update chunk. -func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) { +func (r *SignedResourceUpdate) toChunk() (storage.Chunk, error) { // Check that the update is signed and serialized // For efficiency, data is serialized during signature and cached in @@ -105,14 +105,11 @@ func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) { return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. 
Call .Sign() first.") } - chunk := storage.NewChunk(r.updateAddr, nil) resourceUpdateLength := r.resourceUpdate.binaryLength() - chunk.SData = r.binaryData - // signature is the last item in the chunk data - copy(chunk.SData[resourceUpdateLength:], r.signature[:]) + copy(r.binaryData[resourceUpdateLength:], r.signature[:]) - chunk.Size = int64(len(chunk.SData)) + chunk := storage.NewChunk(r.updateAddr, r.binaryData) return chunk, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/testutil.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/testutil.go index 6efcba9aba..a30baaa1d7 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/testutil.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/testutil.go @@ -17,8 +17,12 @@ package mru import ( + "context" "fmt" "path/filepath" + "sync" + + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -35,6 +39,17 @@ func (t *TestHandler) Close() { t.chunkStore.Close() } +type mockNetFetcher struct{} + +func (m *mockNetFetcher) Request(ctx context.Context) { +} +func (m *mockNetFetcher) Offer(ctx context.Context, source *discover.NodeID) { +} + +func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher { + return &mockNetFetcher{} +} + // NewTestHandler creates Handler object to be used for testing purposes. 
func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { path := filepath.Join(datadir, testDbDirName) @@ -47,7 +62,11 @@ func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) } localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm))) localStore.Validators = append(localStore.Validators, rh) - netStore := storage.NewNetStore(localStore, nil) + netStore, err := storage.NewNetStore(localStore, nil) + if err != nil { + return nil, err + } + netStore.NewNetFetcherFunc = newFakeNetFetcher rh.SetStore(netStore) return &TestHandler{rh}, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/updateheader.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/updateheader.go index 3ac20c1890..f0039eaf66 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/updateheader.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mru/updateheader.go @@ -27,7 +27,7 @@ type updateHeader struct { metaHash []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownerhsip of the resource. 
} -const metaHashLength = storage.KeyLength +const metaHashLength = storage.AddressLength // updateLookupLength bytes // 1 byte flags (multihash bool for now) @@ -76,7 +76,7 @@ func (h *updateHeader) binaryGet(serializedData []byte) error { } cursor := updateLookupLength h.metaHash = make([]byte, metaHashLength) - copy(h.metaHash[:storage.KeyLength], serializedData[cursor:cursor+storage.KeyLength]) + copy(h.metaHash[:storage.AddressLength], serializedData[cursor:cursor+storage.AddressLength]) cursor += metaHashLength flags := serializedData[cursor] diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore.go index 96a7e51f77..80ac6f1989 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore.go @@ -18,181 +18,277 @@ package storage import ( "context" + "encoding/hex" + "fmt" + "sync" + "sync/atomic" "time" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/spancontext" - opentracing "github.com/opentracing/opentracing-go" + + lru "github.com/hashicorp/golang-lru" ) -var ( - // NetStore.Get timeout for get and get retries - // This is the maximum period that the Get will block. - // If it is reached, Get will return ErrChunkNotFound. - netStoreRetryTimeout = 30 * time.Second - // Minimal period between calling get method on NetStore - // on retry. It protects calling get very frequently if - // it returns ErrChunkNotFound very fast. - netStoreMinRetryDelay = 3 * time.Second - // Timeout interval before retrieval is timed out. - // It is used in NetStore.get on waiting for ReqC to be - // closed on a single retrieve request. 
- searchTimeout = 10 * time.Second +type ( + NewNetFetcherFunc func(ctx context.Context, addr Address, peers *sync.Map) NetFetcher ) -// NetStore implements the ChunkStore interface, -// this chunk access layer assumed 2 chunk stores -// local storage eg. LocalStore and network storage eg., NetStore -// access by calling network is blocking with a timeout +type NetFetcher interface { + Request(ctx context.Context) + Offer(ctx context.Context, source *discover.NodeID) +} + +// NetStore is an extension of local storage +// it implements the ChunkStore interface +// on request it initiates remote cloud retrieval using a fetcher +// fetchers are unique to a chunk and are stored in fetchers LRU memory cache +// fetchFuncFactory is a factory object to create a fetch function for a specific chunk address type NetStore struct { - localStore *LocalStore - retrieve func(ctx context.Context, chunk *Chunk) error + mu sync.Mutex + store SyncChunkStore + fetchers *lru.Cache + NewNetFetcherFunc NewNetFetcherFunc + closeC chan struct{} } -func NewNetStore(localStore *LocalStore, retrieve func(ctx context.Context, chunk *Chunk) error) *NetStore { - return &NetStore{localStore, retrieve} +var fetcherTimeout = 2 * time.Minute // timeout to cancel the fetcher even if requests are coming in + +// NewNetStore creates a new NetStore object using the given local store. newFetchFunc is a +// constructor function that can create a fetch function for a specific chunk address. 
+func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error) { + fetchers, err := lru.New(defaultChunkRequestsCacheCapacity) + if err != nil { + return nil, err + } + return &NetStore{ + store: store, + fetchers: fetchers, + NewNetFetcherFunc: nnf, + closeC: make(chan struct{}), + }, nil } -// Get is the entrypoint for local retrieve requests -// waits for response or times out -// -// Get uses get method to retrieve request, but retries if the -// ErrChunkNotFound is returned by get, until the netStoreRetryTimeout -// is reached. -func (ns *NetStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { - - var sp opentracing.Span - ctx, sp = spancontext.StartSpan( - ctx, - "netstore.get.global") - defer sp.Finish() - - timer := time.NewTimer(netStoreRetryTimeout) - defer timer.Stop() - - // result and resultC provide results from the goroutine - // where NetStore.get is called. - type result struct { - chunk *Chunk - err error +// Put stores a chunk in localstore, and delivers to all requestor peers using the fetcher stored in +// the fetchers cache +func (n *NetStore) Put(ctx context.Context, ch Chunk) error { + n.mu.Lock() + defer n.mu.Unlock() + + // put to the chunk to the store, there should be no error + err := n.store.Put(ctx, ch) + if err != nil { + return err } - resultC := make(chan result) - - // quitC ensures that retring goroutine is terminated - // when this function returns. - quitC := make(chan struct{}) - defer close(quitC) - - // do retries in a goroutine so that the timer can - // force this method to return after the netStoreRetryTimeout. - go func() { - // limiter ensures that NetStore.get is not called more frequently - // then netStoreMinRetryDelay. If NetStore.get takes longer - // then netStoreMinRetryDelay, the next retry call will be - // without a delay. 
- limiter := time.NewTimer(netStoreMinRetryDelay) - defer limiter.Stop() - - for { - chunk, err := ns.get(ctx, addr, 0) - if err != ErrChunkNotFound { - // break retry only if the error is nil - // or other error then ErrChunkNotFound - select { - case <-quitC: - // Maybe NetStore.Get function has returned - // by the timer.C while we were waiting for the - // results. Terminate this goroutine. - case resultC <- result{chunk: chunk, err: err}: - // Send the result to the parrent goroutine. - } - return - - } - select { - case <-quitC: - // NetStore.Get function has returned, possibly - // by the timer.C, which makes this goroutine - // not needed. - return - case <-limiter.C: - } - // Reset the limiter for the next iteration. - limiter.Reset(netStoreMinRetryDelay) - log.Debug("NetStore.Get retry chunk", "key", addr) - } - }() - select { - case r := <-resultC: - return r.chunk, r.err - case <-timer.C: - return nil, ErrChunkNotFound + // if chunk is now put in the store, check if there was an active fetcher and call deliver on it + // (this delivers the chunk to requestors via the fetcher) + if f := n.getFetcher(ch.Address()); f != nil { + f.deliver(ctx, ch) } + return nil } -// GetWithTimeout makes a single retrieval attempt for a chunk with a explicit timeout parameter -func (ns *NetStore) GetWithTimeout(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) { - return ns.get(ctx, addr, timeout) +// Get retrieves the chunk from the NetStore DPA synchronously. 
+// It calls NetStore.get, and if the chunk is not in local Storage +// it calls fetch with the request, which blocks until the chunk +// arrived or context is done +func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error) { + chunk, fetch, err := n.get(rctx, ref) + if err != nil { + return nil, err + } + if chunk != nil { + return chunk, nil + } + return fetch(rctx) } -func (ns *NetStore) get(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) { - if timeout == 0 { - timeout = searchTimeout +func (n *NetStore) BinIndex(po uint8) uint64 { + return n.store.BinIndex(po) +} + +func (n *NetStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error { + return n.store.Iterator(from, to, po, f) +} + +// FetchFunc returns nil if the store contains the given address. Otherwise it returns a wait function, +// which returns after the chunk is available or the context is done +func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Context) error { + chunk, fetch, _ := n.get(ctx, ref) + if chunk != nil { + return nil } + return func(ctx context.Context) error { + _, err := fetch(ctx) + return err + } +} - var sp opentracing.Span - ctx, sp = spancontext.StartSpan( - ctx, - "netstore.get") - defer sp.Finish() +// Close chunk store +func (n *NetStore) Close() { + close(n.closeC) + n.store.Close() + // TODO: loop through fetchers to cancel them +} - if ns.retrieve == nil { - chunk, err = ns.localStore.Get(ctx, addr) - if err == nil { - return chunk, nil - } - if err != ErrFetching { - return nil, err - } - } else { - var created bool - chunk, created = ns.localStore.GetOrCreateRequest(ctx, addr) +// get attempts at retrieving the chunk from LocalStore +// If it is not found then using getOrCreateFetcher: +// 1. Either there is already a fetcher to retrieve it +// 2. 
A new fetcher is created and saved in the fetchers cache +// From here on, all Get will hit on this fetcher until the chunk is delivered +// or all fetcher contexts are done. +// It returns a chunk, a fetcher function and an error +// If chunk is nil, the returned fetch function needs to be called with a context to return the chunk. +func (n *NetStore) get(ctx context.Context, ref Address) (Chunk, func(context.Context) (Chunk, error), error) { + n.mu.Lock() + defer n.mu.Unlock() - if chunk.ReqC == nil { - return chunk, nil + chunk, err := n.store.Get(ctx, ref) + if err != nil { + if err != ErrChunkNotFound { + log.Debug("Received error from LocalStore other than ErrNotFound", "err", err) } + // The chunk is not available in the LocalStore, let's get the fetcher for it, or create a new one + // if it doesn't exist yet + f := n.getOrCreateFetcher(ref) + // If the caller needs the chunk, it has to use the returned fetch function to get it + return nil, f.Fetch, nil + } - if created { - err := ns.retrieve(ctx, chunk) - if err != nil { - // mark chunk request as failed so that we can retry it later - chunk.SetErrored(ErrChunkUnavailable) - return nil, err - } - } + return chunk, nil, nil +} + +// getOrCreateFetcher attempts at retrieving an existing fetchers +// if none exists, creates one and saves it in the fetchers cache +// caller must hold the lock +func (n *NetStore) getOrCreateFetcher(ref Address) *fetcher { + if f := n.getFetcher(ref); f != nil { + return f } - t := time.NewTicker(timeout) - defer t.Stop() + // no fetcher for the given address, we have to create a new one + key := hex.EncodeToString(ref) + // create the context during which fetching is kept alive + ctx, cancel := context.WithTimeout(context.Background(), fetcherTimeout) + // destroy is called when all requests finish + destroy := func() { + // remove fetcher from fetchers + n.fetchers.Remove(key) + // stop fetcher by cancelling context called when + // all requests cancelled/timedout or chunk is 
delivered + cancel() + } + // peers always stores all the peers which have an active request for the chunk. It is shared + // between fetcher and the NewFetchFunc function. It is needed by the NewFetchFunc because + // the peers which requested the chunk should not be requested to deliver it. + peers := &sync.Map{} - select { - case <-t.C: - // mark chunk request as failed so that we can retry - chunk.SetErrored(ErrChunkNotFound) - return nil, ErrChunkNotFound - case <-chunk.ReqC: + fetcher := newFetcher(ref, n.NewNetFetcherFunc(ctx, ref, peers), destroy, peers, n.closeC) + n.fetchers.Add(key, fetcher) + + return fetcher +} + +// getFetcher retrieves the fetcher for the given address from the fetchers cache if it exists, +// otherwise it returns nil +func (n *NetStore) getFetcher(ref Address) *fetcher { + key := hex.EncodeToString(ref) + f, ok := n.fetchers.Get(key) + if ok { + return f.(*fetcher) } - chunk.SetErrored(nil) - return chunk, nil + return nil } -// Put is the entrypoint for local store requests coming from storeLoop -func (ns *NetStore) Put(ctx context.Context, chunk *Chunk) { - ns.localStore.Put(ctx, chunk) +// RequestsCacheLen returns the current number of outgoing requests stored in the cache +func (n *NetStore) RequestsCacheLen() int { + return n.fetchers.Len() } -// Close chunk store -func (ns *NetStore) Close() { - ns.localStore.Close() +// One fetcher object is responsible to fetch one chunk for one address, and keep track of all the +// peers who have requested it and did not receive it yet. 
+type fetcher struct { + addr Address // address of chunk + chunk Chunk // fetcher can set the chunk on the fetcher + deliveredC chan struct{} // chan signalling chunk delivery to requests + cancelledC chan struct{} // chan signalling the fetcher has been cancelled (removed from fetchers in NetStore) + netFetcher NetFetcher // remote fetch function to be called with a request source taken from the context + cancel func() // cleanup function for the remote fetcher to call when all upstream contexts are called + peers *sync.Map // the peers which asked for the chunk + requestCnt int32 // number of requests on this chunk. If all the requests are done (delivered or context is done) the cancel function is called + deliverOnce *sync.Once // guarantees that we only close deliveredC once +} + +// newFetcher creates a new fetcher object for the given addr. fetch is the function which actually +// does the retrieval (in non-test cases this is coming from the network package). cancel function is +// called either +// 1. when the chunk has been fetched all peers have been either notified or their context has been done +// 2. the chunk has not been fetched but all context from all the requests has been done +// The peers map stores all the peers which have requested chunk. +func newFetcher(addr Address, nf NetFetcher, cancel func(), peers *sync.Map, closeC chan struct{}) *fetcher { + cancelOnce := &sync.Once{} // cancel should only be called once + return &fetcher{ + addr: addr, + deliveredC: make(chan struct{}), + deliverOnce: &sync.Once{}, + cancelledC: closeC, + netFetcher: nf, + cancel: func() { + cancelOnce.Do(func() { + cancel() + }) + }, + peers: peers, + } +} + +// Fetch fetches the chunk synchronously, it is called by NetStore.Get if the chunk is not available +// locally. 
+func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) { + atomic.AddInt32(&f.requestCnt, 1) + defer func() { + // if all the requests are done the fetcher can be cancelled + if atomic.AddInt32(&f.requestCnt, -1) == 0 { + f.cancel() + } + }() + + // The peer asking for the chunk. Store in the shared peers map, but delete after the request + // has been delivered + peer := rctx.Value("peer") + if peer != nil { + f.peers.Store(peer, time.Now()) + defer f.peers.Delete(peer) + } + + // If there is a source in the context then it is an offer, otherwise a request + sourceIF := rctx.Value("source") + if sourceIF != nil { + var source *discover.NodeID + id := discover.MustHexID(sourceIF.(string)) + source = &id + f.netFetcher.Offer(rctx, source) + } else { + f.netFetcher.Request(rctx) + } + + // wait until either the chunk is delivered or the context is done + select { + case <-rctx.Done(): + return nil, rctx.Err() + case <-f.deliveredC: + return f.chunk, nil + case <-f.cancelledC: + return nil, fmt.Errorf("fetcher cancelled") + } +} + +// deliver is called by NetStore.Put to notify all pending requests +func (f *fetcher) deliver(ctx context.Context, ch Chunk) { + f.deliverOnce.Do(func() { + f.chunk = ch + // closing the deliveredC channel will terminate ongoing requests + close(f.deliveredC) + }) } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go index 36ff66d045..f74eef06bc 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go @@ -25,7 +25,7 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/swarm/chunk" + ch "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/log" ) @@ -57,7 +57,7 @@ import ( When certain no of data chunks are created (defaultBranches), a signal is sent to create a tree entry. 
When the level 0 tree entries reaches certain threshold (defaultBranches), another signal is sent to a tree entry one level up.. and so on... until only the data is exhausted AND only one - tree entry is present in certain level. The key of tree entry is given out as the rootKey of the file. + tree entry is present in certain level. The key of tree entry is given out as the rootAddress of the file. */ @@ -98,15 +98,15 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get } /* - When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes. + When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Address), the root hash of the entire content will fill this once processing finishes. New chunks to store are store using the putter which the caller provides. */ func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { - return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize)).Split(ctx) + return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, ch.DefaultSize)).Split(ctx) } func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) { - return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize)).Append(ctx) + return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, ch.DefaultSize)).Append(ctx) } // Entry to create a tree node @@ -153,7 +153,7 @@ type PyramidChunker struct { wg *sync.WaitGroup errC chan error quitC chan bool - rootKey []byte + rootAddress []byte chunkLevel [][]*TreeEntry } @@ -171,14 +171,14 @@ func NewPyramidSplitter(params *PyramidSplitterParams) (pc *PyramidChunker) { pc.wg = 
&sync.WaitGroup{} pc.errC = make(chan error) pc.quitC = make(chan bool) - pc.rootKey = make([]byte, pc.hashSize) + pc.rootAddress = make([]byte, pc.hashSize) pc.chunkLevel = make([][]*TreeEntry, pc.branches) return } func (pc *PyramidChunker) Join(addr Address, getter Getter, depth int) LazySectionReader { return &LazyChunkReader{ - key: addr, + addr: addr, depth: depth, chunkSize: pc.chunkSize, branches: pc.branches, @@ -209,7 +209,7 @@ func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(conte log.Debug("pyramid.chunker: Split()") pc.wg.Add(1) - pc.prepareChunks(false) + pc.prepareChunks(ctx, false) // closes internal error channel if all subprocesses in the workgroup finished go func() { @@ -231,19 +231,21 @@ func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(conte if err != nil { return nil, nil, err } - case <-time.NewTimer(splitTimeout).C: + case <-ctx.Done(): + _ = pc.putter.Wait(ctx) //??? + return nil, nil, ctx.Err() } - return pc.rootKey, pc.putter.Wait, nil + return pc.rootAddress, pc.putter.Wait, nil } func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error) { log.Debug("pyramid.chunker: Append()") // Load the right most unfinished tree chunks in every level - pc.loadTree() + pc.loadTree(ctx) pc.wg.Add(1) - pc.prepareChunks(true) + pc.prepareChunks(ctx, true) // closes internal error channel if all subprocesses in the workgroup finished go func() { @@ -265,11 +267,11 @@ func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(cont case <-time.NewTimer(splitTimeout).C: } - return pc.rootKey, pc.putter.Wait, nil + return pc.rootAddress, pc.putter.Wait, nil } -func (pc *PyramidChunker) processor(id int64) { +func (pc *PyramidChunker) processor(ctx context.Context, id int64) { defer pc.decrementWorkerCount() for { select { @@ -278,19 +280,22 @@ func (pc *PyramidChunker) processor(id int64) { if !ok { return } - pc.processChunk(id, job) + 
pc.processChunk(ctx, id, job) case <-pc.quitC: return } } } -func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) { +func (pc *PyramidChunker) processChunk(ctx context.Context, id int64, job *chunkJob) { log.Debug("pyramid.chunker: processChunk()", "id", id) - ref, err := pc.putter.Put(context.TODO(), job.chunk) + ref, err := pc.putter.Put(ctx, job.chunk) if err != nil { - pc.errC <- err + select { + case pc.errC <- err: + case <-pc.quitC: + } } // report hash of this chunk one level up (keys corresponds to the proper subslice of the parent chunk) @@ -300,14 +305,14 @@ func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) { job.parentWg.Done() } -func (pc *PyramidChunker) loadTree() error { +func (pc *PyramidChunker) loadTree(ctx context.Context) error { log.Debug("pyramid.chunker: loadTree()") // Get the root chunk to get the total size - chunkData, err := pc.getter.Get(context.TODO(), Reference(pc.key)) + chunkData, err := pc.getter.Get(ctx, Reference(pc.key)) if err != nil { return errLoadingTreeRootChunk } - chunkSize := chunkData.Size() + chunkSize := int64(chunkData.Size()) log.Trace("pyramid.chunker: root chunk", "chunk.Size", chunkSize, "pc.chunkSize", pc.chunkSize) //if data size is less than a chunk... 
add a parent with update as pending @@ -356,7 +361,7 @@ func (pc *PyramidChunker) loadTree() error { branchCount = int64(len(ent.chunk)-8) / pc.hashSize for i := int64(0); i < branchCount; i++ { key := ent.chunk[8+(i*pc.hashSize) : 8+((i+1)*pc.hashSize)] - newChunkData, err := pc.getter.Get(context.TODO(), Reference(key)) + newChunkData, err := pc.getter.Get(ctx, Reference(key)) if err != nil { return errLoadingTreeChunk } @@ -365,7 +370,7 @@ func (pc *PyramidChunker) loadTree() error { newEntry := &TreeEntry{ level: lvl - 1, branchCount: bewBranchCount, - subtreeSize: uint64(newChunkSize), + subtreeSize: newChunkSize, chunk: newChunkData, key: key, index: 0, @@ -385,7 +390,7 @@ func (pc *PyramidChunker) loadTree() error { return nil } -func (pc *PyramidChunker) prepareChunks(isAppend bool) { +func (pc *PyramidChunker) prepareChunks(ctx context.Context, isAppend bool) { log.Debug("pyramid.chunker: prepareChunks", "isAppend", isAppend) defer pc.wg.Done() @@ -393,11 +398,11 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { pc.incrementWorkerCount() - go pc.processor(pc.workerCount) + go pc.processor(ctx, pc.workerCount) parent := NewTreeEntry(pc) var unfinishedChunkData ChunkData - var unfinishedChunkSize int64 + var unfinishedChunkSize uint64 if isAppend && len(pc.chunkLevel[0]) != 0 { lastIndex := len(pc.chunkLevel[0]) - 1 @@ -415,16 +420,16 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { } lastBranch := parent.branchCount - 1 - lastKey := parent.chunk[8+lastBranch*pc.hashSize : 8+(lastBranch+1)*pc.hashSize] + lastAddress := parent.chunk[8+lastBranch*pc.hashSize : 8+(lastBranch+1)*pc.hashSize] var err error - unfinishedChunkData, err = pc.getter.Get(context.TODO(), lastKey) + unfinishedChunkData, err = pc.getter.Get(ctx, lastAddress) if err != nil { pc.errC <- err } unfinishedChunkSize = unfinishedChunkData.Size() - if unfinishedChunkSize < pc.chunkSize { - parent.subtreeSize = parent.subtreeSize - uint64(unfinishedChunkSize) + if 
unfinishedChunkSize < uint64(pc.chunkSize) { + parent.subtreeSize = parent.subtreeSize - unfinishedChunkSize parent.branchCount = parent.branchCount - 1 } else { unfinishedChunkData = nil @@ -468,8 +473,8 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { if parent.branchCount == 1 && (pc.depth() == 0 || isAppend) { // Data is exactly one chunk.. pick the last chunk key as root chunkWG.Wait() - lastChunksKey := parent.chunk[8 : 8+pc.hashSize] - copy(pc.rootKey, lastChunksKey) + lastChunksAddress := parent.chunk[8 : 8+pc.hashSize] + copy(pc.rootAddress, lastChunksAddress) break } } else { @@ -502,7 +507,7 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { // No need to build the tree if the depth is 0 // or we are appending. // Just use the last key. - copy(pc.rootKey, pkey) + copy(pc.rootAddress, pkey) } else { // We need to build the tree and and provide the lonely // chunk key to replace the last tree chunk key. @@ -525,7 +530,7 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { workers := pc.getWorkerCount() if int64(len(pc.jobC)) > workers && workers < ChunkProcessors { pc.incrementWorkerCount() - go pc.processor(pc.workerCount) + go pc.processor(ctx, pc.workerCount) } } @@ -558,7 +563,7 @@ func (pc *PyramidChunker) buildTree(isAppend bool, ent *TreeEntry, chunkWG *sync lvlCount := int64(len(pc.chunkLevel[lvl])) if lvlCount == 1 && last { - copy(pc.rootKey, pc.chunkLevel[lvl][0].key) + copy(pc.rootAddress, pc.chunkLevel[lvl][0].key) return } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go index 53e3af485a..8c70f45848 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go @@ -25,16 +25,16 @@ import ( "fmt" "hash" "io" - "sync" + "io/ioutil" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/sha3" 
"github.com/ethereum/go-ethereum/swarm/bmt" - "github.com/ethereum/go-ethereum/swarm/chunk" + ch "github.com/ethereum/go-ethereum/swarm/chunk" ) const MaxPO = 16 -const KeyLength = 32 +const AddressLength = 32 type Hasher func() hash.Hash type SwarmHasher func() SwarmHash @@ -116,7 +116,7 @@ func MakeHashFunc(hash string) SwarmHasher { return func() SwarmHash { hasher := sha3.NewKeccak256 hasherSize := hasher().Size() - segmentCount := chunk.DefaultSize / hasherSize + segmentCount := ch.DefaultSize / hasherSize pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize) return bmt.New(pool) } @@ -136,7 +136,7 @@ func (a Address) Log() string { } func (a Address) String() string { - return fmt.Sprintf("%064x", []byte(a)[:]) + return fmt.Sprintf("%064x", []byte(a)) } func (a Address) MarshalJSON() (out []byte, err error) { @@ -169,88 +169,88 @@ func (c AddressCollection) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -// Chunk also serves as a request object passed to ChunkStores -// in case it is a retrieval request, Data is nil and Size is 0 -// Note that Size is not the size of the data chunk, which is Data.Size() -// but the size of the subtree encoded in the chunk -// 0 if request, to be supplied by the dpa -type Chunk struct { - Addr Address // always - SData []byte // nil if request, to be supplied by dpa - Size int64 // size of the data covered by the subtree encoded in this chunk - //Source Peer // peer - C chan bool // to signal data delivery by the dpa - ReqC chan bool // to signal the request done - dbStoredC chan bool // never remove a chunk from memStore before it is written to dbStore - dbStored bool - dbStoredMu *sync.Mutex - errored error // flag which is set when the chunk request has errored or timeouted - erroredMu sync.Mutex +// Chunk interface implemented by context.Contexts and data chunks +type Chunk interface { + Address() Address + Payload() []byte + SpanBytes() []byte + Span() int64 + Data() []byte } -func (c *Chunk) SetErrored(err error) { - 
c.erroredMu.Lock() - defer c.erroredMu.Unlock() +type chunk struct { + addr Address + sdata []byte + span int64 +} - c.errored = err +func NewChunk(addr Address, data []byte) *chunk { + return &chunk{ + addr: addr, + sdata: data, + span: -1, + } } -func (c *Chunk) GetErrored() error { - c.erroredMu.Lock() - defer c.erroredMu.Unlock() +func (c *chunk) Address() Address { + return c.addr +} - return c.errored +func (c *chunk) SpanBytes() []byte { + return c.sdata[:8] } -func NewChunk(addr Address, reqC chan bool) *Chunk { - return &Chunk{ - Addr: addr, - ReqC: reqC, - dbStoredC: make(chan bool), - dbStoredMu: &sync.Mutex{}, +func (c *chunk) Span() int64 { + if c.span == -1 { + c.span = int64(binary.LittleEndian.Uint64(c.sdata[:8])) } + return c.span } -func (c *Chunk) markAsStored() { - c.dbStoredMu.Lock() - defer c.dbStoredMu.Unlock() - - if !c.dbStored { - close(c.dbStoredC) - c.dbStored = true - } +func (c *chunk) Data() []byte { + return c.sdata } -func (c *Chunk) WaitToStore() error { - <-c.dbStoredC - return c.GetErrored() +func (c *chunk) Payload() []byte { + return c.sdata[8:] } -func GenerateRandomChunk(dataSize int64) *Chunk { - return GenerateRandomChunks(dataSize, 1)[0] +// String() for pretty printing +func (self *chunk) String() string { + return fmt.Sprintf("Address: %v TreeSize: %v Chunksize: %v", self.addr.Log(), self.span, len(self.sdata)) } -func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) { - var i int +func GenerateRandomChunk(dataSize int64) Chunk { hasher := MakeHashFunc(DefaultHash)() - if dataSize > chunk.DefaultSize { - dataSize = chunk.DefaultSize - } + sdata := make([]byte, dataSize+8) + rand.Read(sdata[8:]) + binary.LittleEndian.PutUint64(sdata[:8], uint64(dataSize)) + hasher.ResetWithLength(sdata[:8]) + hasher.Write(sdata[8:]) + return NewChunk(hasher.Sum(nil), sdata) +} - for i = 0; i < count; i++ { - chunks = append(chunks, NewChunk(nil, nil)) - chunks[i].SData = make([]byte, dataSize+8) - 
rand.Read(chunks[i].SData) - binary.LittleEndian.PutUint64(chunks[i].SData[:8], uint64(dataSize)) - hasher.ResetWithLength(chunks[i].SData[:8]) - hasher.Write(chunks[i].SData[8:]) - chunks[i].Addr = make([]byte, 32) - copy(chunks[i].Addr, hasher.Sum(nil)) +func GenerateRandomChunks(dataSize int64, count int) (chunks []Chunk) { + if dataSize > ch.DefaultSize { + dataSize = ch.DefaultSize + } + for i := 0; i < count; i++ { + ch := GenerateRandomChunk(ch.DefaultSize) + chunks = append(chunks, ch) } - return chunks } +func GenerateRandomData(l int) (r io.Reader, slice []byte) { + slice, err := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(l))) + if err != nil { + panic("rand error") + } + // log.Warn("generate random data", "len", len(slice), "data", common.Bytes2Hex(slice)) + r = io.LimitReader(bytes.NewReader(slice), int64(l)) + return r, slice +} + // Size, Seek, Read, ReadAt type LazySectionReader interface { Context() context.Context @@ -273,18 +273,17 @@ func (r *LazyTestSectionReader) Context() context.Context { } type StoreParams struct { - Hash SwarmHasher `toml:"-"` - DbCapacity uint64 - CacheCapacity uint - ChunkRequestsCacheCapacity uint - BaseKey []byte + Hash SwarmHasher `toml:"-"` + DbCapacity uint64 + CacheCapacity uint + BaseKey []byte } func NewDefaultStoreParams() *StoreParams { - return NewStoreParams(defaultLDBCapacity, defaultCacheCapacity, defaultChunkRequestsCacheCapacity, nil, nil) + return NewStoreParams(defaultLDBCapacity, defaultCacheCapacity, nil, nil) } -func NewStoreParams(ldbCap uint64, cacheCap uint, requestsCap uint, hash SwarmHasher, basekey []byte) *StoreParams { +func NewStoreParams(ldbCap uint64, cacheCap uint, hash SwarmHasher, basekey []byte) *StoreParams { if basekey == nil { basekey = make([]byte, 32) } @@ -292,11 +291,10 @@ func NewStoreParams(ldbCap uint64, cacheCap uint, requestsCap uint, hash SwarmHa hash = MakeHashFunc(DefaultHash) } return &StoreParams{ - Hash: hash, - DbCapacity: ldbCap, - CacheCapacity: cacheCap, - 
ChunkRequestsCacheCapacity: requestsCap, - BaseKey: basekey, + Hash: hash, + DbCapacity: ldbCap, + CacheCapacity: cacheCap, + BaseKey: basekey, } } @@ -321,8 +319,8 @@ type Getter interface { } // NOTE: this returns invalid data if chunk is encrypted -func (c ChunkData) Size() int64 { - return int64(binary.LittleEndian.Uint64(c[:8])) +func (c ChunkData) Size() uint64 { + return binary.LittleEndian.Uint64(c[:8]) } func (c ChunkData) Data() []byte { @@ -348,7 +346,8 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator { // Validate that the given key is a valid content address for the given data func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool { - if l := len(data); l < 9 || l > chunk.DefaultSize+8 { + if l := len(data); l < 9 || l > ch.DefaultSize+8 { + // log.Error("invalid chunk size", "chunk", addr.Hex(), "size", l) return false } @@ -359,3 +358,37 @@ func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool { return bytes.Equal(hash, addr[:]) } + +type ChunkStore interface { + Put(ctx context.Context, ch Chunk) (err error) + Get(rctx context.Context, ref Address) (ch Chunk, err error) + Close() +} + +// SyncChunkStore is a ChunkStore which supports syncing +type SyncChunkStore interface { + ChunkStore + BinIndex(po uint8) uint64 + Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error + FetchFunc(ctx context.Context, ref Address) func(context.Context) error +} + +// FakeChunkStore doesn't store anything, just implements the ChunkStore interface +// It can be used to inject into a hasherStore if you don't want to actually store data just do the +// hashing +type FakeChunkStore struct { +} + +// Put doesn't store anything it is just here to implement ChunkStore +func (f *FakeChunkStore) Put(_ context.Context, ch Chunk) error { + return nil +} + +// Get doesn't store anything it is just here to implement ChunkStore +func (f *FakeChunkStore) Get(_ context.Context, ref 
Address) (Chunk, error) { + panic("FakeChunkStore doesn't support Get") +} + +// Close doesn't store anything it is just here to implement ChunkStore +func (f *FakeChunkStore) Close() { +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go index 736cd37de8..13aa1125d3 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go @@ -75,8 +75,8 @@ type Swarm struct { privateKey *ecdsa.PrivateKey corsString string swapEnabled bool - lstore *storage.LocalStore // local store, needs to store for releasing resources after node stopped - sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit + netStore *storage.NetStore + sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit ps *pss.Pss tracerClose io.Closer @@ -164,37 +164,40 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e self.dns = resolver } - self.lstore, err = storage.NewLocalStore(config.LocalStoreParams, mockStore) + lstore, err := storage.NewLocalStore(config.LocalStoreParams, mockStore) if err != nil { - return + return nil, err + } + + self.netStore, err = storage.NewNetStore(lstore, nil) + if err != nil { + return nil, err } - db := storage.NewDBAPI(self.lstore) to := network.NewKademlia( common.FromHex(config.BzzKey), network.NewKadParams(), ) - delivery := stream.NewDelivery(to, db) + delivery := stream.NewDelivery(to, self.netStore) + self.netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, config.DeliverySkipCheck).New - self.streamer = stream.NewRegistry(addr, delivery, db, stateStore, &stream.RegistryOptions{ - SkipCheck: config.DeliverySkipCheck, + self.streamer = stream.NewRegistry(addr, delivery, self.netStore, stateStore, &stream.RegistryOptions{ + SkipCheck: config.SyncingSkipCheck, DoSync: config.SyncEnabled, DoRetrieve: true, SyncUpdateDelay: 
config.SyncUpdateDelay, }) - // set up NetStore, the cloud storage local access layer - netStore := storage.NewNetStore(self.lstore, self.streamer.Retrieve) // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage - self.fileStore = storage.NewFileStore(netStore, self.config.FileStoreParams) + self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams) var resourceHandler *mru.Handler rhparams := &mru.HandlerParams{} resourceHandler = mru.NewHandler(rhparams) - resourceHandler.SetStore(netStore) + resourceHandler.SetStore(self.netStore) - self.lstore.Validators = []storage.ChunkValidator{ + lstore.Validators = []storage.ChunkValidator{ storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)), resourceHandler, } @@ -356,7 +359,7 @@ func (self *Swarm) Start(srv *p2p.Server) error { log.Error("bzz failed", "err", err) return err } - log.Info("Swarm network started", "bzzaddr", fmt.Sprintf("%x", self.bzz.Hive.Overlay.BaseAddr())) + log.Info("Swarm network started", "bzzaddr", fmt.Sprintf("%x", self.bzz.Hive.BaseAddr())) if self.ps != nil { self.ps.Start(srv) @@ -399,7 +402,7 @@ func (self *Swarm) periodicallyUpdateGauges() { func (self *Swarm) updateGauges() { uptimeGauge.Update(time.Since(startTime).Nanoseconds()) - requestsCacheGauge.Update(int64(self.lstore.RequestsCacheLen())) + requestsCacheGauge.Update(int64(self.netStore.RequestsCacheLen())) } // implements the node.Service interface @@ -420,8 +423,8 @@ func (self *Swarm) Stop() error { ch.Save() } - if self.lstore != nil { - self.lstore.DbStore.Close() + if self.netStore != nil { + self.netStore.Close() } self.sfs.Stop() stopCounter.Inc(1) @@ -478,21 +481,6 @@ func (self *Swarm) APIs() []rpc.API { Service: self.sfs, Public: false, }, - // storage APIs - // DEPRECATED: Use the HTTP API instead - { - Namespace: "bzz", - Version: "0.1", - Service: api.NewStorage(self.api), - Public: true, - }, - { - Namespace: "bzz", - Version: "0.1", - Service: 
api.NewFileSystem(self.api), - Public: false, - }, - // {Namespace, Version, api.NewAdmin(self), false}, } apis = append(apis, self.bzz.APIs()...) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/common.go b/vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go similarity index 54% rename from vendor/github.com/ethereum/go-ethereum/swarm/storage/common.go rename to vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go index d6352820eb..ecb0d971ed 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/common.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go @@ -1,4 +1,4 @@ -// Copyright 2018 The go-ethereum Authors +// Copyright 2017 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -13,32 +13,32 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package storage -import ( - "context" - "sync" +package testutil - "github.com/ethereum/go-ethereum/swarm/log" +import ( + "io" + "io/ioutil" + "os" + "strings" + "testing" ) -// PutChunks adds chunks to localstore -// It waits for receive on the stored channel -// It logs but does not fail on delivery error -func PutChunks(store *LocalStore, chunks ...*Chunk) { - wg := sync.WaitGroup{} - wg.Add(len(chunks)) - go func() { - for _, c := range chunks { - <-c.dbStoredC - if err := c.GetErrored(); err != nil { - log.Error("chunk store fail", "err", err, "key", c.Addr) - } - wg.Done() - } - }() - for _, c := range chunks { - go store.Put(context.TODO(), c) +// TempFileWithContent is a helper function that creates a temp file that contains the following string content then closes the file handle +// it returns the complete file path +func TempFileWithContent(t *testing.T, content string) string { + tempFile, err := ioutil.TempFile("", "swarm-temp-file") + if err != nil { + t.Fatal(err) + } + + _, err = io.Copy(tempFile, strings.NewReader(content)) + if err != nil { + os.RemoveAll(tempFile.Name()) + t.Fatal(err) + } + if err = tempFile.Close(); err != nil { + t.Fatal(err) } - wg.Wait() + return tempFile.Name() } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go b/vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go index 7fd60fcc3d..0748230329 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go @@ -45,7 +45,7 @@ func (f *fakeTimeProvider) Now() mru.Timestamp { return mru.Timestamp{Time: f.currentTime} } -func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *TestSwarmServer { +func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer { dir, err := ioutil.TempDir("", "swarm-storage-test") if err != nil { t.Fatal(err) @@ -77,7 +77,7 @@ func NewTestSwarmServer(t 
*testing.T, serverFunc func(*api.API) TestServer) *Tes t.Fatal(err) } - a := api.NewAPI(fileStore, nil, rh.Handler, nil) + a := api.NewAPI(fileStore, resolver, rh.Handler, nil) srv := httptest.NewServer(serverFunc(a)) return &TestSwarmServer{ Server: srv, diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go index 4cd095fb7c..0beff6363b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release + VersionPatch = 4 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go b/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go index 2db47da57f..427a94958a 100644 --- a/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go +++ b/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go @@ -111,7 +111,7 @@ func (t *BlockTest) Run() error { return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6]) } - chain, err := core.NewBlockChain(db, nil, config, ethash.NewShared(), vm.Config{}) + chain, err := core.NewBlockChain(db, nil, config, ethash.NewShared(), vm.Config{}, nil) if err != nil { return err } diff --git a/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go b/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go index 84581fae18..5d2251e529 100644 --- a/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go +++ 
b/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go @@ -146,7 +146,18 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) { return statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs) } - root, _ := statedb.Commit(config.IsEIP158(block.Number())) + // Commit block + statedb.Commit(config.IsEIP158(block.Number())) + // Add 0-value mining reward. This only makes a difference in the cases + // where + // - the coinbase suicided, or + // - there are only 'bad' transactions, which aren't executed. In those cases, + // the coinbase gets no txfee, so isn't created, and thus needs to be touched + statedb.AddBalance(block.Coinbase(), new(big.Int)) + // And _now_ get the state root + root := statedb.IntermediateRoot(config.IsEIP158(block.Number())) + // N.B: We need to do this in a two-step process, because the first Commit takes care + // of suicides, and we need to touch the coinbase _after_ it has potentially suicided. 
if root != common.Hash(post.Root) { return statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root) } diff --git a/vendor/github.com/ethereum/go-ethereum/trie/sync.go b/vendor/github.com/ethereum/go-ethereum/trie/sync.go index 88d6eb7799..67dff5a8b6 100644 --- a/vendor/github.com/ethereum/go-ethereum/trie/sync.go +++ b/vendor/github.com/ethereum/go-ethereum/trie/sync.go @@ -21,8 +21,8 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/ethdb" - "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) // ErrNotRequested is returned by the trie sync when it's requested to process a @@ -84,7 +84,7 @@ func NewSync(root common.Hash, database DatabaseReader, callback LeafCallback) * database: database, membatch: newSyncMemBatch(), requests: make(map[common.Hash]*request), - queue: prque.New(), + queue: prque.New(nil), } ts.AddSubTrie(root, 0, common.Hash{}, callback) return ts @@ -242,7 +242,7 @@ func (s *Sync) schedule(req *request) { return } // Schedule the request for future retrieval - s.queue.Push(req.hash, float32(req.depth)) + s.queue.Push(req.hash, int64(req.depth)) s.requests[req.hash] = req } diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go b/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go index d32eaddec3..af9418d9f8 100644 --- a/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go +++ b/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go @@ -118,7 +118,7 @@ func (s *WMailServer) processRequest(peer *whisper.Peer, lower, upper uint32, bl var err error var zero common.Hash kl := NewDbKey(lower, zero) - ku := NewDbKey(upper, zero) + ku := NewDbKey(upper+1, zero) // LevelDB is exclusive, while the Whisper API is inclusive i := s.db.NewIterator(&util.Range{Start: kl.raw, Limit: ku.raw}, nil) defer i.Release() diff --git 
a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api.go index 8f06a9ca04..1df3b73dd0 100644 --- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api.go +++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/api.go @@ -195,14 +195,14 @@ func (api *PublicWhisperAPI) DeleteSymKey(ctx context.Context, id string) bool { // MakeLightClient turns the node into light client, which does not forward // any incoming messages, and sends only messages originated in this node. func (api *PublicWhisperAPI) MakeLightClient(ctx context.Context) bool { - api.w.lightClient = true - return api.w.lightClient + api.w.SetLightClientMode(true) + return api.w.LightClientMode() } // CancelLightClient cancels light client mode. func (api *PublicWhisperAPI) CancelLightClient(ctx context.Context) bool { - api.w.lightClient = false - return !api.w.lightClient + api.w.SetLightClientMode(false) + return !api.w.LightClientMode() } //go:generate gencodec -type NewMessage -field-override newMessageOverride -out gen_newmessage_json.go diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/config.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/config.go index 3c28263e59..213b83698c 100644 --- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/config.go +++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/config.go @@ -20,14 +20,16 @@ import "time" // Config represents the configuration state of a whisper node. type Config struct { - MaxMessageSize uint32 `toml:",omitempty"` - MinimumAcceptedPOW float64 `toml:",omitempty"` - TimeSource func() time.Time + MaxMessageSize uint32 `toml:",omitempty"` + MinimumAcceptedPOW float64 `toml:",omitempty"` + RestrictConnectionBetweenLightClients bool `toml:",omitempty"` + TimeSource func() time.Time } // DefaultConfig represents (shocker!) the default configuration. 
var DefaultConfig = Config{ - MaxMessageSize: DefaultMaxMessageSize, - MinimumAcceptedPOW: DefaultMinimumPoW, - TimeSource: time.Now, + MaxMessageSize: DefaultMaxMessageSize, + MinimumAcceptedPOW: DefaultMinimumPoW, + RestrictConnectionBetweenLightClients: true, + TimeSource: time.Now, } diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer.go index 4e2443d03b..2b7687e780 100644 --- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer.go +++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/peer.go @@ -79,11 +79,14 @@ func (peer *Peer) stop() { func (peer *Peer) handshake() error { // Send the handshake status message asynchronously errc := make(chan error, 1) + isLightNode := peer.host.LightClientMode() + isRestrictedLightNodeConnection := peer.host.LightClientModeConnectionRestricted() go func() { pow := peer.host.MinPow() powConverted := math.Float64bits(pow) bloom := peer.host.BloomFilter() - errc <- p2p.SendItems(peer.ws, statusCode, ProtocolVersion, powConverted, bloom) + + errc <- p2p.SendItems(peer.ws, statusCode, ProtocolVersion, powConverted, bloom, isLightNode) }() // Fetch the remote status packet and verify protocol match @@ -127,6 +130,11 @@ func (peer *Peer) handshake() error { } } + isRemotePeerLightNode, err := s.Bool() + if isRemotePeerLightNode && isLightNode && isRestrictedLightNodeConnection { + return fmt.Errorf("peer [%x] is useless: two light client communication restricted", peer.ID()) + } + if err := <-errc; err != nil { return fmt.Errorf("peer [%x] failed to send status packet: %v", peer.ID(), err) } diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/whisper.go b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/whisper.go index 749d1cce31..5463be7761 100644 --- a/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/whisper.go +++ b/vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/whisper.go @@ 
-56,12 +56,14 @@ type MailServerResponse struct { } const ( - maxMsgSizeIdx = iota // Maximal message length allowed by the whisper node - overflowIdx // Indicator of message queue overflow - minPowIdx // Minimal PoW required by the whisper node - minPowToleranceIdx // Minimal PoW tolerated by the whisper node for a limited time - bloomFilterIdx // Bloom filter for topics of interest for this node - bloomFilterToleranceIdx // Bloom filter tolerated by the whisper node for a limited time + maxMsgSizeIdx = iota // Maximal message length allowed by the whisper node + overflowIdx // Indicator of message queue overflow + minPowIdx // Minimal PoW required by the whisper node + minPowToleranceIdx // Minimal PoW tolerated by the whisper node for a limited time + bloomFilterIdx // Bloom filter for topics of interest for this node + bloomFilterToleranceIdx // Bloom filter tolerated by the whisper node for a limited time + lightClientModeIdx // Light client mode. (does not forward any messages) + restrictConnectionBetweenLightClientsIdx // Restrict connection between two light clients ) // Whisper represents a dark communication interface through the Ethereum @@ -89,8 +91,6 @@ type Whisper struct { syncAllowance int // maximum time in seconds allowed to process the whisper-related messages - lightClient bool // indicates is this node is pure light client (does not forward any messages) - statsMu sync.Mutex // guard stats stats Statistics // Statistics of whisper node @@ -125,6 +125,7 @@ func New(cfg *Config) *Whisper { whisper.settings.Store(minPowIdx, cfg.MinimumAcceptedPOW) whisper.settings.Store(maxMsgSizeIdx, cfg.MaxMessageSize) whisper.settings.Store(overflowIdx, false) + whisper.settings.Store(restrictConnectionBetweenLightClientsIdx, cfg.RestrictConnectionBetweenLightClients) // p2p whisper sub protocol handler whisper.protocol = p2p.Protocol{ @@ -299,6 +300,31 @@ func (whisper *Whisper) SetMinimumPowTest(val float64) { whisper.settings.Store(minPowToleranceIdx, val) } 
+//SetLightClientMode makes node light client (does not forward any messages) +func (whisper *Whisper) SetLightClientMode(v bool) { + whisper.settings.Store(lightClientModeIdx, v) +} + +//LightClientMode indicates is this node is light client (does not forward any messages) +func (whisper *Whisper) LightClientMode() bool { + val, exist := whisper.settings.Load(lightClientModeIdx) + if !exist || val == nil { + return false + } + v, ok := val.(bool) + return v && ok +} + +//LightClientModeConnectionRestricted indicates that connection to light client in light client mode not allowed +func (whisper *Whisper) LightClientModeConnectionRestricted() bool { + val, exist := whisper.settings.Load(restrictConnectionBetweenLightClientsIdx) + if !exist || val == nil { + return false + } + v, ok := val.(bool) + return v && ok +} + func (whisper *Whisper) notifyPeersAboutPowRequirementChange(pow float64) { arr := whisper.getPeers() for _, p := range arr { @@ -768,7 +794,7 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { trouble := false for _, env := range envelopes { - cached, err := whisper.add(env, whisper.lightClient) + cached, err := whisper.add(env, whisper.LightClientMode()) if err != nil { trouble = true log.Error("bad envelope received, peer will be disconnected", "peer", p.peer.ID(), "err", err) diff --git a/vendor/gopkg.in/karalabe/cookiejar.v2/LICENSE b/vendor/gopkg.in/karalabe/cookiejar.v2/LICENSE deleted file mode 100755 index 467d60878d..0000000000 --- a/vendor/gopkg.in/karalabe/cookiejar.v2/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 Péter Szilágyi. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Alternatively, the CookieJar toolbox may be used in accordance with the terms -and conditions contained in a signed written agreement between you and the -author(s). diff --git a/vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go b/vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go deleted file mode 100755 index 5c1967c658..0000000000 --- a/vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go +++ /dev/null @@ -1,66 +0,0 @@ -// CookieJar - A contestant's algorithm toolbox -// Copyright (c) 2013 Peter Szilagyi. All rights reserved. -// -// CookieJar is dual licensed: use of this source code is governed by a BSD -// license that can be found in the LICENSE file. Alternatively, the CookieJar -// toolbox may be used in accordance with the terms and conditions contained -// in a signed written agreement between you and the author(s). - -// Package prque implements a priority queue data structure supporting arbitrary -// value types and float priorities. 
-// -// The reasoning behind using floats for the priorities vs. ints or interfaces -// was larger flexibility without sacrificing too much performance or code -// complexity. -// -// If you would like to use a min-priority queue, simply negate the priorities. -// -// Internally the queue is based on the standard heap package working on a -// sortable version of the block based stack. -package prque - -import ( - "container/heap" -) - -// Priority queue data structure. -type Prque struct { - cont *sstack -} - -// Creates a new priority queue. -func New() *Prque { - return &Prque{newSstack()} -} - -// Pushes a value with a given priority into the queue, expanding if necessary. -func (p *Prque) Push(data interface{}, priority float32) { - heap.Push(p.cont, &item{data, priority}) -} - -// Pops the value with the greates priority off the stack and returns it. -// Currently no shrinking is done. -func (p *Prque) Pop() (interface{}, float32) { - item := heap.Pop(p.cont).(*item) - return item.value, item.priority -} - -// Pops only the item from the queue, dropping the associated priority value. -func (p *Prque) PopItem() interface{} { - return heap.Pop(p.cont).(*item).value -} - -// Checks whether the priority queue is empty. -func (p *Prque) Empty() bool { - return p.cont.Len() == 0 -} - -// Returns the number of element in the priority queue. -func (p *Prque) Size() int { - return p.cont.Len() -} - -// Clears the contents of the priority queue. -func (p *Prque) Reset() { - *p = *New() -} diff --git a/vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go b/vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go deleted file mode 100755 index 9f393196ef..0000000000 --- a/vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go +++ /dev/null @@ -1,91 +0,0 @@ -// CookieJar - A contestant's algorithm toolbox -// Copyright (c) 2013 Peter Szilagyi. All rights reserved. 
-// -// CookieJar is dual licensed: use of this source code is governed by a BSD -// license that can be found in the LICENSE file. Alternatively, the CookieJar -// toolbox may be used in accordance with the terms and conditions contained -// in a signed written agreement between you and the author(s). - -package prque - -// The size of a block of data -const blockSize = 4096 - -// A prioritized item in the sorted stack. -type item struct { - value interface{} - priority float32 -} - -// Internal sortable stack data structure. Implements the Push and Pop ops for -// the stack (heap) functionality and the Len, Less and Swap methods for the -// sortability requirements of the heaps. -type sstack struct { - size int - capacity int - offset int - - blocks [][]*item - active []*item -} - -// Creates a new, empty stack. -func newSstack() *sstack { - result := new(sstack) - result.active = make([]*item, blockSize) - result.blocks = [][]*item{result.active} - result.capacity = blockSize - return result -} - -// Pushes a value onto the stack, expanding it if necessary. Required by -// heap.Interface. -func (s *sstack) Push(data interface{}) { - if s.size == s.capacity { - s.active = make([]*item, blockSize) - s.blocks = append(s.blocks, s.active) - s.capacity += blockSize - s.offset = 0 - } else if s.offset == blockSize { - s.active = s.blocks[s.size/blockSize] - s.offset = 0 - } - s.active[s.offset] = data.(*item) - s.offset++ - s.size++ -} - -// Pops a value off the stack and returns it. Currently no shrinking is done. -// Required by heap.Interface. -func (s *sstack) Pop() (res interface{}) { - s.size-- - s.offset-- - if s.offset < 0 { - s.offset = blockSize - 1 - s.active = s.blocks[s.size/blockSize] - } - res, s.active[s.offset] = s.active[s.offset], nil - return -} - -// Returns the length of the stack. Required by sort.Interface. -func (s *sstack) Len() int { - return s.size -} - -// Compares the priority of two elements of the stack (higher is first). 
-// Required by sort.Interface. -func (s *sstack) Less(i, j int) bool { - return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority -} - -// Swaps two elements in the stack. Required by sort.Interface. -func (s *sstack) Swap(i, j int) { - ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize - s.blocks[ib][io], s.blocks[jb][jo] = s.blocks[jb][jo], s.blocks[ib][io] -} - -// Resets the stack, effectively clearing its contents. -func (s *sstack) Reset() { - *s = *newSstack() -} From f66f4404457f92d469f48ee90e0bece4018a2072 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Thu, 27 Sep 2018 15:54:27 +0200 Subject: [PATCH 7/7] clean up Makefile --- Makefile | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 7d72ea0abd..896f023559 100644 --- a/Makefile +++ b/Makefile @@ -269,21 +269,12 @@ clean: ##@other Cleanup deep-clean: clean rm -Rdf .ethereumtest/StatusChain -dep-ensure: ##@dependencies Dep ensure and apply all patches +dep-ensure: ##@dependencies Ensure all dependencies are in place with dep @dep ensure dep-install: ##@dependencies Install vendoring tool go get -u github.com/golang/dep/cmd/dep -patch-geth-vendor: ##@patching Apply all patches on ethereum in vendor/ - ./_assets/patches/patcher - -patch-geth-vendor-revert: ##@patching Revert all patches from ethereum in vendor/ - ./_assets/patches/patcher -r - -patch-geth-fork: ##@patching Apply patches to Status' go-ethereum fork - ./_assets/patches/update-fork-with-patches.sh - update-fleet-config: ##@other Update fleets configuration from fleets.status.im ./_assets/ci/update-fleet-config.sh @echo "Updating static assets..."