diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 4b88a52cef..6bbc01800d 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -24,6 +24,7 @@ import (
"io"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
)
// The ABI holds information about a contract's context and available
@@ -76,7 +77,7 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
// Pack up the method ID too if not a constructor and return
- return append(method.ID(), arguments...), nil
+ return append(method.ID, arguments...), nil
}
// Unpack output in v according to the abi specification
@@ -139,59 +140,17 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
for _, field := range fields {
switch field.Type {
case "constructor":
- abi.Constructor = Method{
- Inputs: field.Inputs,
-
- // Note for constructor the `StateMutability` can only
- // be payable or nonpayable according to the output of
- // compiler. So constant is always false.
- StateMutability: field.StateMutability,
-
- // Legacy fields, keep them for backward compatibility
- Constant: field.Constant,
- Payable: field.Payable,
- }
+ abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
case "function":
- name := field.Name
- _, ok := abi.Methods[name]
- for idx := 0; ok; idx++ {
- name = fmt.Sprintf("%s%d", field.Name, idx)
- _, ok = abi.Methods[name]
- }
- abi.Methods[name] = Method{
- Name: name,
- RawName: field.Name,
- StateMutability: field.StateMutability,
- Inputs: field.Inputs,
- Outputs: field.Outputs,
-
- // Legacy fields, keep them for backward compatibility
- Constant: field.Constant,
- Payable: field.Payable,
- }
+ name := abi.overloadedMethodName(field.Name)
+ abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
case "fallback":
// New introduced function type in v0.6.0, check more detail
// here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
if abi.HasFallback() {
return errors.New("only single fallback is allowed")
}
- abi.Fallback = Method{
- Name: "",
- RawName: "",
-
- // The `StateMutability` can only be payable or nonpayable,
- // so the constant is always false.
- StateMutability: field.StateMutability,
- IsFallback: true,
-
- // Fallback doesn't have any input or output
- Inputs: nil,
- Outputs: nil,
-
- // Legacy fields, keep them for backward compatibility
- Constant: field.Constant,
- Payable: field.Payable,
- }
+ abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "receive":
// New introduced function type in v0.6.0, check more detail
// here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
@@ -201,41 +160,47 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
if field.StateMutability != "payable" {
return errors.New("the statemutability of receive can only be payable")
}
- abi.Receive = Method{
- Name: "",
- RawName: "",
-
- // The `StateMutability` can only be payable, so constant
- // is always true while payable is always false.
- StateMutability: field.StateMutability,
- IsReceive: true,
-
- // Receive doesn't have any input or output
- Inputs: nil,
- Outputs: nil,
-
- // Legacy fields, keep them for backward compatibility
- Constant: field.Constant,
- Payable: field.Payable,
- }
+ abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event":
- name := field.Name
- _, ok := abi.Events[name]
- for idx := 0; ok; idx++ {
- name = fmt.Sprintf("%s%d", field.Name, idx)
- _, ok = abi.Events[name]
- }
- abi.Events[name] = Event{
- Name: name,
- RawName: field.Name,
- Anonymous: field.Anonymous,
- Inputs: field.Inputs,
- }
+ name := abi.overloadedEventName(field.Name)
+ abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
+ default:
+ return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
}
}
return nil
}
+// overloadedMethodName returns the next available name for a given function.
+// Needed since solidity allows for overloading functions.
+//
+// e.g. if the abi contains Methods send, send0
+// overloadedMethodName would return send1 for input send.
+func (abi *ABI) overloadedMethodName(rawName string) string {
+ name := rawName
+ _, ok := abi.Methods[name]
+ for idx := 0; ok; idx++ {
+ name = fmt.Sprintf("%s%d", rawName, idx)
+ _, ok = abi.Methods[name]
+ }
+ return name
+}
+
+// overloadedEventName returns the next available name for a given event.
+// Needed since solidity allows for overloading events.
+//
+// e.g. if the abi contains events received, received0
+// overloadedEventName would return received1 for input received.
+func (abi *ABI) overloadedEventName(rawName string) string {
+ name := rawName
+ _, ok := abi.Events[name]
+ for idx := 0; ok; idx++ {
+ name = fmt.Sprintf("%s%d", rawName, idx)
+ _, ok = abi.Events[name]
+ }
+ return name
+}
+
// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
@@ -243,7 +208,7 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods {
- if bytes.Equal(method.ID(), sigdata[:4]) {
+ if bytes.Equal(method.ID, sigdata[:4]) {
return &method, nil
}
}
@@ -254,7 +219,7 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
// ABI and returns nil if none found.
func (abi *ABI) EventByID(topic common.Hash) (*Event, error) {
for _, event := range abi.Events {
- if bytes.Equal(event.ID().Bytes(), topic.Bytes()) {
+ if bytes.Equal(event.ID.Bytes(), topic.Bytes()) {
return &event, nil
}
}
@@ -263,10 +228,32 @@ func (abi *ABI) EventByID(topic common.Hash) (*Event, error) {
// HasFallback returns an indicator whether a fallback function is included.
func (abi *ABI) HasFallback() bool {
- return abi.Fallback.IsFallback
+ return abi.Fallback.Type == Fallback
}
// HasReceive returns an indicator whether a receive function is included.
func (abi *ABI) HasReceive() bool {
- return abi.Receive.IsReceive
+ return abi.Receive.Type == Receive
+}
+
+// revertSelector is a special function selector for revert reason unpacking.
+var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
+
+// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
+// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
+// a revert reason is abi-encoded as if it were a call to the function
+// `Error(string)`, so this helper strips that 4-byte selector and decodes the string.
+func UnpackRevert(data []byte) (string, error) {
+ if len(data) < 4 {
+ return "", errors.New("invalid data for unpacking")
+ }
+ if !bytes.Equal(data[:4], revertSelector) {
+ return "", errors.New("invalid data for unpacking")
+ }
+ var reason string
+ typ, _ := NewType("string", "", nil)
+ if err := (Arguments{{Type: typ}}).Unpack(&reason, data[4:]); err != nil {
+ return "", err
+ }
+ return reason, nil
}
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 352006cf5f..f41c91aa78 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -19,6 +19,7 @@ package abi
import (
"bytes"
"encoding/hex"
+ "errors"
"fmt"
"math/big"
"reflect"
@@ -26,17 +27,13 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
)
const jsondata = `
[
- { "type" : "function", "name" : "balance", "stateMutability" : "view" },
- { "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
-]`
-
-const jsondata2 = `
-[
+ { "type" : "function", "name" : "", "stateMutability" : "view" },
{ "type" : "function", "name" : "balance", "stateMutability" : "view" },
{ "type" : "function", "name" : "send", "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "type" : "function", "name" : "test", "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
@@ -45,6 +42,7 @@ const jsondata2 = `
{ "type" : "function", "name" : "address", "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
{ "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
{ "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
+ { "type" : "function", "name" : "int8", "inputs" : [ { "name" : "inputs", "type" : "int8" } ] },
{ "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
{ "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
{ "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
@@ -53,30 +51,81 @@ const jsondata2 = `
{ "type" : "function", "name" : "sliceMultiAddress", "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray", "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray2", "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
- { "type" : "function", "name" : "nestedSlice", "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
+ { "type" : "function", "name" : "nestedSlice", "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] },
+ { "type" : "function", "name" : "receive", "inputs" : [ { "name" : "memo", "type" : "bytes" }], "outputs" : [], "payable" : true, "stateMutability" : "payable" },
+ { "type" : "function", "name" : "fixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
+ { "type" : "function", "name" : "fixedArrBytes", "stateMutability" : "view", "inputs" : [ { "name" : "bytes", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
+ { "type" : "function", "name" : "mixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" } ] },
+ { "type" : "function", "name" : "doubleFixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] },
+ { "type" : "function", "name" : "multipleMixedArrStr", "stateMutability" : "view", "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type" : "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] },
+ { "type" : "function", "name" : "overloadedNames", "stateMutability" : "view", "inputs": [ { "components": [ { "internalType": "uint256", "name": "_f", "type": "uint256" }, { "internalType": "uint256", "name": "__f", "type": "uint256"}, { "internalType": "uint256", "name": "f", "type": "uint256"}],"internalType": "struct Overloader.F", "name": "f","type": "tuple"}]}
]`
+var (
+ Uint256, _ = NewType("uint256", "", nil)
+ Uint32, _ = NewType("uint32", "", nil)
+ Uint16, _ = NewType("uint16", "", nil)
+ String, _ = NewType("string", "", nil)
+ Bool, _ = NewType("bool", "", nil)
+ Bytes, _ = NewType("bytes", "", nil)
+ Address, _ = NewType("address", "", nil)
+ Uint64Arr, _ = NewType("uint64[]", "", nil)
+ AddressArr, _ = NewType("address[]", "", nil)
+ Int8, _ = NewType("int8", "", nil)
+ // Special types for testing
+ Uint32Arr2, _ = NewType("uint32[2]", "", nil)
+ Uint64Arr2, _ = NewType("uint64[2]", "", nil)
+ Uint256Arr, _ = NewType("uint256[]", "", nil)
+ Uint256Arr2, _ = NewType("uint256[2]", "", nil)
+ Uint256Arr3, _ = NewType("uint256[3]", "", nil)
+ Uint256ArrNested, _ = NewType("uint256[2][2]", "", nil)
+ Uint8ArrNested, _ = NewType("uint8[][2]", "", nil)
+ Uint8SliceNested, _ = NewType("uint8[][]", "", nil)
+ TupleF, _ = NewType("tuple", "struct Overloader.F", []ArgumentMarshaling{
+ {Name: "_f", Type: "uint256"},
+ {Name: "__f", Type: "uint256"},
+ {Name: "f", Type: "uint256"}})
+)
+
+var methods = map[string]Method{
+ "": NewMethod("", "", Function, "view", false, false, nil, nil),
+ "balance": NewMethod("balance", "balance", Function, "view", false, false, nil, nil),
+ "send": NewMethod("send", "send", Function, "", false, false, []Argument{{"amount", Uint256, false}}, nil),
+ "test": NewMethod("test", "test", Function, "", false, false, []Argument{{"number", Uint32, false}}, nil),
+ "string": NewMethod("string", "string", Function, "", false, false, []Argument{{"inputs", String, false}}, nil),
+ "bool": NewMethod("bool", "bool", Function, "", false, false, []Argument{{"inputs", Bool, false}}, nil),
+ "address": NewMethod("address", "address", Function, "", false, false, []Argument{{"inputs", Address, false}}, nil),
+ "uint64[]": NewMethod("uint64[]", "uint64[]", Function, "", false, false, []Argument{{"inputs", Uint64Arr, false}}, nil),
+ "uint64[2]": NewMethod("uint64[2]", "uint64[2]", Function, "", false, false, []Argument{{"inputs", Uint64Arr2, false}}, nil),
+ "int8": NewMethod("int8", "int8", Function, "", false, false, []Argument{{"inputs", Int8, false}}, nil),
+ "foo": NewMethod("foo", "foo", Function, "", false, false, []Argument{{"inputs", Uint32, false}}, nil),
+ "bar": NewMethod("bar", "bar", Function, "", false, false, []Argument{{"inputs", Uint32, false}, {"string", Uint16, false}}, nil),
+ "slice": NewMethod("slice", "slice", Function, "", false, false, []Argument{{"inputs", Uint32Arr2, false}}, nil),
+ "slice256": NewMethod("slice256", "slice256", Function, "", false, false, []Argument{{"inputs", Uint256Arr2, false}}, nil),
+ "sliceAddress": NewMethod("sliceAddress", "sliceAddress", Function, "", false, false, []Argument{{"inputs", AddressArr, false}}, nil),
+ "sliceMultiAddress": NewMethod("sliceMultiAddress", "sliceMultiAddress", Function, "", false, false, []Argument{{"a", AddressArr, false}, {"b", AddressArr, false}}, nil),
+ "nestedArray": NewMethod("nestedArray", "nestedArray", Function, "", false, false, []Argument{{"a", Uint256ArrNested, false}, {"b", AddressArr, false}}, nil),
+ "nestedArray2": NewMethod("nestedArray2", "nestedArray2", Function, "", false, false, []Argument{{"a", Uint8ArrNested, false}}, nil),
+ "nestedSlice": NewMethod("nestedSlice", "nestedSlice", Function, "", false, false, []Argument{{"a", Uint8SliceNested, false}}, nil),
+ "receive": NewMethod("receive", "receive", Function, "payable", false, true, []Argument{{"memo", Bytes, false}}, []Argument{}),
+ "fixedArrStr": NewMethod("fixedArrStr", "fixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}}, nil),
+ "fixedArrBytes": NewMethod("fixedArrBytes", "fixedArrBytes", Function, "view", false, false, []Argument{{"bytes", Bytes, false}, {"fixedArr", Uint256Arr2, false}}, nil),
+ "mixedArrStr": NewMethod("mixedArrStr", "mixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}}, nil),
+ "doubleFixedArrStr": NewMethod("doubleFixedArrStr", "doubleFixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"fixedArr2", Uint256Arr3, false}}, nil),
+ "multipleMixedArrStr": NewMethod("multipleMixedArrStr", "multipleMixedArrStr", Function, "view", false, false, []Argument{{"str", String, false}, {"fixedArr1", Uint256Arr2, false}, {"dynArr", Uint256Arr, false}, {"fixedArr2", Uint256Arr3, false}}, nil),
+ "overloadedNames": NewMethod("overloadedNames", "overloadedNames", Function, "view", false, false, []Argument{{"f", TupleF, false}}, nil),
+}
+
func TestReader(t *testing.T) {
- Uint256, _ := NewType("uint256", "", nil)
- exp := ABI{
- Methods: map[string]Method{
- "balance": {
- "balance", "balance", "view", false, false, false, false, nil, nil,
- },
- "send": {
- "send", "send", "", false, false, false, false, []Argument{
- {"amount", Uint256, false},
- }, nil,
- },
- },
+ abi := ABI{
+ Methods: methods,
}
- abi, err := JSON(strings.NewReader(jsondata))
+ exp, err := JSON(strings.NewReader(jsondata))
if err != nil {
- t.Error(err)
+ t.Fatal(err)
}
- // deep equal fails for some reason
for name, expM := range exp.Methods {
gotM, exist := abi.Methods[name]
if !exist {
@@ -98,8 +147,58 @@ func TestReader(t *testing.T) {
}
}
+func TestInvalidABI(t *testing.T) {
+ json := `[{ "type" : "function", "name" : "", "constant" : fals }]`
+ _, err := JSON(strings.NewReader(json))
+ if err == nil {
+ t.Fatal("invalid json should produce error")
+ }
+ json2 := `[{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "typ" : "uint256" } ] }]`
+ _, err = JSON(strings.NewReader(json2))
+ if err == nil {
+ t.Fatal("invalid json should produce error")
+ }
+}
+
+// TestConstructor tests a constructor function.
+// The test is based on the following contract:
+// contract TestConstructor {
+// constructor(uint256 a, uint256 b) public{}
+// }
+func TestConstructor(t *testing.T) {
+ json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
+ method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil)
+ // Test from JSON
+ abi, err := JSON(strings.NewReader(json))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(abi.Constructor, method) {
+ t.Error("Missing expected constructor")
+ }
+ // Test pack/unpack
+ packed, err := abi.Pack("", big.NewInt(1), big.NewInt(2))
+ if err != nil {
+ t.Error(err)
+ }
+ v := struct {
+ A *big.Int
+ B *big.Int
+ }{new(big.Int), new(big.Int)}
+ //abi.Unpack(&v, "", packed)
+ if err := abi.Constructor.Inputs.Unpack(&v, packed); err != nil {
+ t.Error(err)
+ }
+ if !reflect.DeepEqual(v.A, big.NewInt(1)) {
+ t.Error("Unable to pack/unpack from constructor")
+ }
+ if !reflect.DeepEqual(v.B, big.NewInt(2)) {
+ t.Error("Unable to pack/unpack from constructor")
+ }
+}
+
func TestTestNumbers(t *testing.T) {
- abi, err := JSON(strings.NewReader(jsondata2))
+ abi, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Fatal(err)
}
@@ -135,60 +234,22 @@ func TestTestNumbers(t *testing.T) {
}
}
-func TestTestString(t *testing.T) {
- abi, err := JSON(strings.NewReader(jsondata2))
- if err != nil {
- t.Fatal(err)
- }
-
- if _, err := abi.Pack("string", "hello world"); err != nil {
- t.Error(err)
- }
-}
-
-func TestTestBool(t *testing.T) {
- abi, err := JSON(strings.NewReader(jsondata2))
- if err != nil {
- t.Fatal(err)
- }
-
- if _, err := abi.Pack("bool", true); err != nil {
- t.Error(err)
- }
-}
-
-func TestTestSlice(t *testing.T) {
- abi, err := JSON(strings.NewReader(jsondata2))
- if err != nil {
- t.Fatal(err)
- }
- slice := make([]uint64, 2)
- if _, err := abi.Pack("uint64[2]", slice); err != nil {
- t.Error(err)
- }
- if _, err := abi.Pack("uint64[]", slice); err != nil {
- t.Error(err)
- }
-}
-
func TestMethodSignature(t *testing.T) {
- String, _ := NewType("string", "", nil)
- m := Method{"foo", "foo", "", false, false, false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
+ m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil)
exp := "foo(string,string)"
- if m.Sig() != exp {
- t.Error("signature mismatch", exp, "!=", m.Sig())
+ if m.Sig != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig)
}
idexp := crypto.Keccak256([]byte(exp))[:4]
- if !bytes.Equal(m.ID(), idexp) {
- t.Errorf("expected ids to match %x != %x", m.ID(), idexp)
+ if !bytes.Equal(m.ID, idexp) {
+ t.Errorf("expected ids to match %x != %x", m.ID, idexp)
}
- uintt, _ := NewType("uint256", "", nil)
- m = Method{"foo", "foo", "", false, false, false, false, []Argument{{"bar", uintt, false}}, nil}
+ m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", Uint256, false}}, nil)
exp = "foo(uint256)"
- if m.Sig() != exp {
- t.Error("signature mismatch", exp, "!=", m.Sig())
+ if m.Sig != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig)
}
// Method with tuple arguments
@@ -204,10 +265,10 @@ func TestMethodSignature(t *testing.T) {
{Name: "y", Type: "int256"},
}},
})
- m = Method{"foo", "foo", "", false, false, false, false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
+ m = NewMethod("foo", "foo", Function, "", false, false, []Argument{{"s", s, false}, {"bar", String, false}}, nil)
exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
- if m.Sig() != exp {
- t.Error("signature mismatch", exp, "!=", m.Sig())
+ if m.Sig != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig)
}
}
@@ -219,12 +280,12 @@ func TestOverloadedMethodSignature(t *testing.T) {
}
check := func(name string, expect string, method bool) {
if method {
- if abi.Methods[name].Sig() != expect {
- t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig())
+ if abi.Methods[name].Sig != expect {
+ t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig)
}
} else {
- if abi.Events[name].Sig() != expect {
- t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig())
+ if abi.Events[name].Sig != expect {
+ t.Fatalf("The signature of overloaded event mismatch, want %s, have %s", expect, abi.Events[name].Sig)
}
}
}
@@ -235,7 +296,7 @@ func TestOverloadedMethodSignature(t *testing.T) {
}
func TestMultiPack(t *testing.T) {
- abi, err := JSON(strings.NewReader(jsondata2))
+ abi, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Fatal(err)
}
@@ -400,15 +461,7 @@ func TestInputVariableInputLength(t *testing.T) {
}
func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
- const definition = `[
- { "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
- { "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
- { "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] },
- { "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] },
- { "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }
- ]`
-
- abi, err := JSON(strings.NewReader(definition))
+ abi, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Error(err)
}
@@ -555,7 +608,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrin1value1 = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
fixedarrin1value2 = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
- dynarroffset = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
+ dynarroffset = math.U256Bytes(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
dynarrlength = make([]byte, 32)
dynarrlength[31] = byte(len(dynarrin))
dynarrinvalue1 = common.LeftPadBytes(dynarrin[0].Bytes(), 32)
@@ -602,8 +655,6 @@ func TestBareEvents(t *testing.T) {
{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
]`
- arg0, _ := NewType("uint256", "", nil)
- arg1, _ := NewType("address", "", nil)
tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
expectedEvents := map[string]struct {
@@ -613,12 +664,12 @@ func TestBareEvents(t *testing.T) {
"balance": {false, nil},
"anon": {true, nil},
"args": {false, []Argument{
- {Name: "arg0", Type: arg0, Indexed: false},
- {Name: "arg1", Type: arg1, Indexed: true},
+ {Name: "arg0", Type: Uint256, Indexed: false},
+ {Name: "arg1", Type: Address, Indexed: true},
}},
"tuple": {false, []Argument{
{Name: "t", Type: tuple, Indexed: false},
- {Name: "arg1", Type: arg1, Indexed: true},
+ {Name: "arg1", Type: Address, Indexed: true},
}},
}
@@ -891,45 +942,25 @@ func TestUnpackIntoMapNamingConflict(t *testing.T) {
}
func TestABI_MethodById(t *testing.T) {
- const abiJSON = `[
- {"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"},
- {"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]},
- {"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]},
- {"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]},
- {"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]},
- {"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]},
- {"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]},
- {"type":"function","name":"balance","constant":true},
- {"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]},
- {"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]},
- {"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]},
- {"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]},
- {"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]},
- {"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]},
- {"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]},
- {"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]},
- {"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]},
- {"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]},
- {"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]},
- {"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]},
- {"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]}
- ]
-`
- abi, err := JSON(strings.NewReader(abiJSON))
+ abi, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Fatal(err)
}
for name, m := range abi.Methods {
a := fmt.Sprintf("%v", m)
- m2, err := abi.MethodById(m.ID())
+ m2, err := abi.MethodById(m.ID)
if err != nil {
t.Fatalf("Failed to look up ABI method: %v", err)
}
b := fmt.Sprintf("%v", m2)
if a != b {
- t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID())
+ t.Errorf("Method %v (id %x) not 'findable' by id in ABI", name, m.ID)
}
}
+ // test unsuccessful lookups
+ if _, err = abi.MethodById(crypto.Keccak256()); err == nil {
+ t.Error("Expected error: no method with this id")
+ }
// Also test empty
if _, err := abi.MethodById([]byte{0x00}); err == nil {
t.Errorf("Expected error, too short to decode data")
@@ -995,8 +1026,8 @@ func TestABI_EventById(t *testing.T) {
t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum)
}
- if event.ID() != topicID {
- t.Errorf("Event id %s does not match topic %s, test #%d", event.ID().Hex(), topicID.Hex(), testnum)
+ if event.ID != topicID {
+ t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum)
}
unknowntopicID := crypto.Keccak256Hash([]byte("unknownEvent"))
@@ -1010,8 +1041,10 @@ func TestABI_EventById(t *testing.T) {
}
}
-func TestDuplicateMethodNames(t *testing.T) {
- abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
+// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
+// conflict and that the second transfer method will be renamed transfer1.
+func TestDoubleDuplicateMethodNames(t *testing.T) {
+ abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
contractAbi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
@@ -1030,24 +1063,86 @@ func TestDuplicateMethodNames(t *testing.T) {
}
}
-// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
-// conflict and that the second transfer method will be renamed transfer1.
-func TestDoubleDuplicateMethodNames(t *testing.T) {
- abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
+// TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name
+// conflict and that the second send event will be renamed send1.
+// The test runs the abi of the following contract.
+// contract DuplicateEvent {
+// event send(uint256 a);
+// event send0();
+// event send();
+// }
+func TestDoubleDuplicateEventNames(t *testing.T) {
+ abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]`
contractAbi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
- if _, ok := contractAbi.Methods["transfer"]; !ok {
- t.Fatalf("Could not find original method")
+ if _, ok := contractAbi.Events["send"]; !ok {
+ t.Fatalf("Could not find original event")
}
- if _, ok := contractAbi.Methods["transfer0"]; !ok {
- t.Fatalf("Could not find duplicate method")
+ if _, ok := contractAbi.Events["send0"]; !ok {
+ t.Fatalf("Could not find duplicate event")
}
- if _, ok := contractAbi.Methods["transfer1"]; !ok {
- t.Fatalf("Could not find duplicate method")
+ if _, ok := contractAbi.Events["send1"]; !ok {
+ t.Fatalf("Could not find duplicate event")
}
- if _, ok := contractAbi.Methods["transfer2"]; ok {
- t.Fatalf("Should not have found extra method")
+ if _, ok := contractAbi.Events["send2"]; ok {
+ t.Fatalf("Should not have found extra event")
+ }
+}
+
+// TestUnnamedEventParam checks that an event with unnamed parameters is
+// correctly handled.
+// The test runs the abi of the following contract.
+// contract TestEvent {
+// event send(uint256, uint256);
+// }
+func TestUnnamedEventParam(t *testing.T) {
+ abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]`
+ contractAbi, err := JSON(strings.NewReader(abiJSON))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ event, ok := contractAbi.Events["send"]
+ if !ok {
+ t.Fatalf("Could not find event")
+ }
+ if event.Inputs[0].Name != "arg0" {
+ t.Fatalf("Could not find input")
+ }
+ if event.Inputs[1].Name != "arg1" {
+ t.Fatalf("Could not find input")
+ }
+}
+
+func TestUnpackRevert(t *testing.T) {
+ t.Parallel()
+
+ var cases = []struct {
+ input string
+ expect string
+ expectErr error
+ }{
+ {"", "", errors.New("invalid data for unpacking")},
+ {"08c379a1", "", errors.New("invalid data for unpacking")},
+ {"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil},
+ }
+ for index, c := range cases {
+ t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
+ got, err := UnpackRevert(common.Hex2Bytes(c.input))
+ if c.expectErr != nil {
+ if err == nil {
+ t.Fatalf("Expected non-nil error")
+ }
+ if err.Error() != c.expectErr.Error() {
+ t.Fatalf("Expected error mismatch, want %v, got %v", c.expectErr, err)
+ }
+ return
+ }
+ if c.expect != got {
+ t.Fatalf("Output mismatch, want %v, got %v", c.expect, got)
+ }
+ })
}
}
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index 7f7f505865..5c1e391f51 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -59,18 +59,6 @@ func (argument *Argument) UnmarshalJSON(data []byte) error {
return nil
}
-// LengthNonIndexed returns the number of arguments when not counting 'indexed' ones. Only events
-// can ever have 'indexed' arguments, it should always be false on arguments for method input/output
-func (arguments Arguments) LengthNonIndexed() int {
- out := 0
- for _, arg := range arguments {
- if !arg.Indexed {
- out++
- }
- }
- return out
-}
-
// NonIndexed returns the arguments with indexed arguments filtered out
func (arguments Arguments) NonIndexed() Arguments {
var ret []Argument
@@ -92,9 +80,8 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if len(data) == 0 {
if len(arguments) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
- } else {
- return nil // Nothing to unmarshal, return
}
+ return nil // Nothing to unmarshal, return
}
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
@@ -104,6 +91,9 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if err != nil {
return err
}
+ if len(marshalledValues) == 0 {
+ return fmt.Errorf("abi: Unpack(no-values unmarshalled %T)", v)
+ }
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
@@ -112,18 +102,24 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
+ // Make sure map is not nil
+ if v == nil {
+ return fmt.Errorf("abi: cannot unpack into a nil map")
+ }
if len(data) == 0 {
if len(arguments) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
- } else {
- return nil // Nothing to unmarshal, return
}
+ return nil // Nothing to unmarshal, return
}
marshalledValues, err := arguments.UnpackValues(data)
if err != nil {
return err
}
- return arguments.unpackIntoMap(v, marshalledValues)
+ for i, arg := range arguments.NonIndexed() {
+ v[arg.Name] = marshalledValues[i]
+ }
+ return nil
}
// unpack sets the unmarshalled value to go format.
@@ -195,25 +191,13 @@ func unpack(t *Type, dst interface{}, src interface{}) error {
return nil
}
-// unpackIntoMap unpacks marshalledValues into the provided map[string]interface{}
-func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledValues []interface{}) error {
- // Make sure map is not nil
- if v == nil {
- return fmt.Errorf("abi: cannot unpack into a nil map")
- }
-
- for i, arg := range arguments.NonIndexed() {
- v[arg.Name] = marshalledValues[i]
- }
- return nil
-}
-
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
- if arguments.LengthNonIndexed() == 0 {
+ nonIndexedArgs := arguments.NonIndexed()
+ if len(nonIndexedArgs) == 0 {
return nil
}
- argument := arguments.NonIndexed()[0]
+ argument := nonIndexedArgs[0]
elem := reflect.ValueOf(v).Elem()
if elem.Kind() == reflect.Struct && argument.Type.T != TupleTy {
@@ -233,30 +217,28 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interfac
// unpackTuple unpacks ( hexdata -> go ) a batch of values.
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
- value = reflect.ValueOf(v).Elem()
- typ = value.Type()
- kind = value.Kind()
+ value = reflect.ValueOf(v).Elem()
+ typ = value.Type()
+ kind = value.Kind()
+ nonIndexedArgs = arguments.NonIndexed()
)
- if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
+ if err := requireUnpackKind(value, len(nonIndexedArgs), arguments); err != nil {
return err
}
// If the interface is a struct, get of abi->struct_field mapping
var abi2struct map[string]string
if kind == reflect.Struct {
- var (
- argNames []string
- err error
- )
- for _, arg := range arguments.NonIndexed() {
- argNames = append(argNames, arg.Name)
+ argNames := make([]string, len(nonIndexedArgs))
+ for i, arg := range nonIndexedArgs {
+ argNames[i] = arg.Name
}
- abi2struct, err = mapArgNamesToStructFields(argNames, value)
- if err != nil {
+ var err error
+ if abi2struct, err = mapArgNamesToStructFields(argNames, value); err != nil {
return err
}
}
- for i, arg := range arguments.NonIndexed() {
+ for i, arg := range nonIndexedArgs {
switch kind {
case reflect.Struct:
field := value.FieldByName(abi2struct[arg.Name])
@@ -289,10 +271,11 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
// without supplying a struct to unpack into. Instead, this method returns a list containing the
// values. An atomic argument will be a list with one element.
func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
- retval := make([]interface{}, 0, arguments.LengthNonIndexed())
+ nonIndexedArgs := arguments.NonIndexed()
+ retval := make([]interface{}, 0, len(nonIndexedArgs))
virtualArgs := 0
- for index, arg := range arguments.NonIndexed() {
- marshalledValue, err := ToGoType((index+virtualArgs)*32, arg.Type, data)
+ for index, arg := range nonIndexedArgs {
+ marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
@@ -329,7 +312,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
abiArgs := arguments
if len(args) != len(abiArgs) {
- return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
+ return nil, fmt.Errorf("argument count mismatch: got %d for %d", len(args), len(abiArgs))
}
// variable input is the output appended at the end of packed
// output. This is used for strings and bytes types input.
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index d90b7437ba..a8796eaf32 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
@@ -39,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/multitenancy"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
@@ -52,7 +54,6 @@ var (
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
errTransactionDoesNotExist = errors.New("transaction does not exist")
- errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
)
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
@@ -78,7 +79,7 @@ type SimulatedBackend struct {
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
- blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
backend := &SimulatedBackend{
database: database,
@@ -368,8 +369,11 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
if err != nil {
return nil, err
}
- rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state, state)
- return rval, err
+ res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state, state)
+ if err != nil {
+ return nil, err
+ }
+ return res.Return(), nil
}
// PendingCallContract executes a contract call on the pending state.
@@ -378,8 +382,11 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu
defer b.mu.Unlock()
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
- rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState, b.pendingState)
- return rval, err
+ res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState, b.pendingState)
+ if err != nil {
+ return nil, err
+ }
+ return res.Return(), nil
}
// PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving
@@ -414,25 +421,57 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
} else {
hi = b.pendingBlock.GasLimit()
}
+ // Recap the highest gas allowance with account's balance.
+ if call.GasPrice != nil && call.GasPrice.Uint64() != 0 {
+ balance := b.pendingState.GetBalance(call.From) // from can't be nil
+ available := new(big.Int).Set(balance)
+ if call.Value != nil {
+ if call.Value.Cmp(available) >= 0 {
+ return 0, errors.New("insufficient funds for transfer")
+ }
+ available.Sub(available, call.Value)
+ }
+ allowance := new(big.Int).Div(available, call.GasPrice)
+ if hi > allowance.Uint64() {
+ transfer := call.Value
+ if transfer == nil {
+ transfer = new(big.Int)
+ }
+ log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
+ "sent", transfer, "gasprice", call.GasPrice, "fundable", allowance)
+ hi = allowance.Uint64()
+ }
+ }
cap = hi
// Create a helper to check if a gas allowance results in an executable transaction
- executable := func(gas uint64) bool {
+ executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
call.Gas = gas
snapshot := b.pendingState.Snapshot()
- _, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState, b.pendingState)
+ res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState, b.pendingState)
b.pendingState.RevertToSnapshot(snapshot)
- if err != nil || failed {
- return false
+ if err != nil {
+ if err == core.ErrIntrinsicGas {
+ return true, nil, nil // Special case, raise gas limit
+ }
+ return true, nil, err // Bail out
}
- return true
+ return res.Failed(), res, nil
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
- if !executable(mid) {
+ failed, _, err := executable(mid)
+
+ // If the error is not nil (consensus error), it means the provided message
+ // call or transaction will never be accepted no matter how much gas it is
+ // assigned. Return the error directly, don't struggle any more
+ if err != nil {
+ return 0, err
+ }
+ if failed {
lo = mid
} else {
hi = mid
@@ -440,8 +479,25 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
- if !executable(hi) {
- return 0, errGasEstimationFailed
+ failed, result, err := executable(hi)
+ if err != nil {
+ return 0, err
+ }
+ if failed {
+ if result != nil && result.Err != vm.ErrOutOfGas {
+ errMsg := fmt.Sprintf("always failing transaction (%v)", result.Err)
+ if len(result.Revert()) > 0 {
+ ret, err := abi.UnpackRevert(result.Revert())
+ if err != nil {
+ errMsg += fmt.Sprintf(" (%#x)", result.Revert())
+ } else {
+ errMsg += fmt.Sprintf(" (%s)", ret)
+ }
+ }
+ return 0, errors.New(errMsg)
+ }
+ // Otherwise, the specified gas cap is too low
+ return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
}
}
return hi, nil
@@ -449,7 +505,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
-func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB, privateState *state.StateDB) ([]byte, uint64, bool, error) {
+func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB, privateState *state.StateDB) (*core.ExecutionResult, error) {
// Ensure message is initialized properly.
if call.GasPrice == nil {
call.GasPrice = big.NewInt(1)
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 8f6c1b686e..c471c0525f 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -19,6 +19,7 @@ package backends
import (
"bytes"
"context"
+ "errors"
"math/big"
"strings"
"testing"
@@ -356,25 +357,179 @@ func TestSimulatedBackend_TransactionByHash(t *testing.T) {
}
func TestSimulatedBackend_EstimateGas(t *testing.T) {
- sim := NewSimulatedBackend(
- core.GenesisAlloc{}, 10000000,
- )
+ /*
+ pragma solidity ^0.6.4;
+ contract GasEstimation {
+ function PureRevert() public { revert(); }
+ function Revert() public { revert("revert reason");}
+ function OOG() public { for (uint i = 0; ; i++) {}}
+ function Assert() public { assert(false);}
+ function Valid() public {}
+ }*/
+ const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
+ const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033"
+
+ key, _ := crypto.GenerateKey()
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ opts := bind.NewKeyedTransactor(key)
+
+ sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000)
defer sim.Close()
- bgCtx := context.Background()
- testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
- gas, err := sim.EstimateGas(bgCtx, ethereum.CallMsg{
- From: testAddr,
- To: &testAddr,
- Value: big.NewInt(1000),
- Data: []byte{},
- })
- if err != nil {
- t.Errorf("could not estimate gas: %v", err)
+ parsed, _ := abi.JSON(strings.NewReader(contractAbi))
+ contractAddr, _, _, _ := bind.DeployContract(opts, parsed, common.FromHex(contractBin), sim)
+ sim.Commit()
+
+ var cases = []struct {
+ name string
+ message ethereum.CallMsg
+ expect uint64
+ expectError error
+ }{
+ {"plain transfer(valid)", ethereum.CallMsg{
+ From: addr,
+ To: &addr,
+ Gas: 0,
+ GasPrice: big.NewInt(0),
+ Value: big.NewInt(1),
+ Data: nil,
+ }, params.TxGas, nil},
+
+ {"plain transfer(invalid)", ethereum.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Gas: 0,
+ GasPrice: big.NewInt(0),
+ Value: big.NewInt(1),
+ Data: nil,
+ }, 0, errors.New("always failing transaction (execution reverted)")},
+
+ {"Revert", ethereum.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Gas: 0,
+ GasPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("d8b98391"),
+ }, 0, errors.New("always failing transaction (execution reverted) (revert reason)")},
+
+ {"PureRevert", ethereum.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Gas: 0,
+ GasPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("aa8b1d30"),
+ }, 0, errors.New("always failing transaction (execution reverted)")},
+
+ {"OOG", ethereum.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Gas: 100000,
+ GasPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("50f6fe34"),
+ }, 0, errors.New("gas required exceeds allowance (100000)")},
+
+ {"Assert", ethereum.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Gas: 100000,
+ GasPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("b9b046f9"),
+ }, 0, errors.New("always failing transaction (invalid opcode: opcode 0xfe not defined)")},
+
+ {"Valid", ethereum.CallMsg{
+ From: addr,
+ To: &contractAddr,
+ Gas: 100000,
+ GasPrice: big.NewInt(0),
+ Value: nil,
+ Data: common.Hex2Bytes("e09fface"),
+ }, 21275, nil},
+ }
+ for _, c := range cases {
+ got, err := sim.EstimateGas(context.Background(), c.message)
+ if c.expectError != nil {
+ if err == nil {
+ t.Fatalf("Expect error, got nil")
+ }
+ if c.expectError.Error() != err.Error() {
+ t.Fatalf("Expect error, want %v, got %v", c.expectError, err)
+ }
+ continue
+ }
+ if got != c.expect {
+ t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got)
+ }
}
+}
+
+func TestSimulatedBackend_EstimateGasWithPrice(t *testing.T) {
+ key, _ := crypto.GenerateKey()
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+
+ sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether*2 + 2e17)}}, 10000000)
+ defer sim.Close()
- if gas != params.TxGas {
- t.Errorf("expected 21000 gas cost for a transaction got %v", gas)
+ receipant := common.HexToAddress("deadbeef")
+ var cases = []struct {
+ name string
+ message ethereum.CallMsg
+ expect uint64
+ expectError error
+ }{
+ {"EstimateWithoutPrice", ethereum.CallMsg{
+ From: addr,
+ To: &receipant,
+ Gas: 0,
+ GasPrice: big.NewInt(0),
+ Value: big.NewInt(1000),
+ Data: nil,
+ }, 21000, nil},
+
+ {"EstimateWithPrice", ethereum.CallMsg{
+ From: addr,
+ To: &receipant,
+ Gas: 0,
+ GasPrice: big.NewInt(1000),
+ Value: big.NewInt(1000),
+ Data: nil,
+ }, 21000, nil},
+
+ {"EstimateWithVeryHighPrice", ethereum.CallMsg{
+ From: addr,
+ To: &receipant,
+ Gas: 0,
+ GasPrice: big.NewInt(1e14), // gascost = 2.1ether
+ Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether
+ Data: nil,
+ }, 21000, nil},
+
+ {"EstimateWithSuperhighPrice", ethereum.CallMsg{
+ From: addr,
+ To: &receipant,
+ Gas: 0,
+ GasPrice: big.NewInt(2e14), // gascost = 4.2ether
+ Value: big.NewInt(1000),
+ Data: nil,
+ }, 21000, errors.New("gas required exceeds allowance (10999)")}, // 10999=(2.2ether-1000wei)/(2e14)
+ }
+ for _, c := range cases {
+ got, err := sim.EstimateGas(context.Background(), c.message)
+ if c.expectError != nil {
+ if err == nil {
+ t.Fatalf("Expect error, got nil")
+ }
+ if c.expectError.Error() != err.Error() {
+ t.Fatalf("Expect error, want %v, got %v", c.expectError, err)
+ }
+ continue
+ }
+ if got != c.expect {
+ t.Fatalf("Gas estimation mismatch, want %d, got %d", c.expect, got)
+ }
}
}
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 5246d5215e..1c66e17bcd 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -297,9 +297,9 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int
opts = new(FilterOpts)
}
// Append the event selector to the query parameters and construct the topic set
- query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...)
+ query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
- topics, err := makeTopics(query...)
+ topics, err := abi.MakeTopics(query...)
if err != nil {
return nil, nil, err
}
@@ -346,9 +346,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
opts = new(WatchOpts)
}
// Append the event selector to the query parameters and construct the topic set
- query = append([][]interface{}{{c.abi.Events[name].ID()}}, query...)
+ query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
- topics, err := makeTopics(query...)
+ topics, err := abi.MakeTopics(query...)
if err != nil {
return nil, nil, err
}
@@ -382,7 +382,7 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
indexed = append(indexed, arg)
}
}
- return parseTopics(out, indexed, log.Topics[1:])
+ return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
@@ -398,7 +398,7 @@ func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event strin
indexed = append(indexed, arg)
}
}
- return parseTopicsIntoMap(out, indexed, log.Topics[1:])
+ return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}
// Quorum
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
index 3ae685e00f..7d287850f4 100644
--- a/accounts/abi/bind/base_test.go
+++ b/accounts/abi/bind/base_test.go
@@ -17,9 +17,9 @@
package bind_test
import (
- "bytes"
"context"
"math/big"
+ "reflect"
"strings"
"testing"
@@ -34,8 +34,10 @@ import (
)
type mockCaller struct {
- codeAtBlockNumber *big.Int
- callContractBlockNumber *big.Int
+ codeAtBlockNumber *big.Int
+ callContractBlockNumber *big.Int
+ pendingCodeAtCalled bool
+ pendingCallContractCalled bool
}
func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
@@ -47,6 +49,16 @@ func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, b
mc.callContractBlockNumber = blockNumber
return nil, nil
}
+
+func (mc *mockCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
+ mc.pendingCodeAtCalled = true
+ return nil, nil
+}
+
+func (mc *mockCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
+ mc.pendingCallContractCalled = true
+ return nil, nil
+}
func TestPassingBlockNumber(t *testing.T) {
mc := &mockCaller{}
@@ -82,57 +94,39 @@ func TestPassingBlockNumber(t *testing.T) {
if mc.codeAtBlockNumber != nil {
t.Fatalf("CodeAt() was passed a block number when it should not have been")
}
+
+ bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, &ret, "something")
+
+ if !mc.pendingCallContractCalled {
+ t.Fatalf("CallContract() was not passed the block number")
+ }
+
+ if !mc.pendingCodeAtCalled {
+ t.Fatalf("CodeAt() was not passed the block number")
+ }
}
const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158"
func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("testName"))
- mockLog := types.Log{
- Address: common.HexToAddress("0x0"),
- Topics: []common.Hash{
- common.HexToHash("0x0"),
- hash,
- },
- Data: hexutil.MustDecode(hexData),
- BlockNumber: uint64(26),
- TxHash: common.HexToHash("0x0"),
- TxIndex: 111,
- BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
- Index: 7,
- Removed: false,
+ topics := []common.Hash{
+ common.HexToHash("0x0"),
+ hash,
}
+ mockLog := newMockLog(topics, common.HexToHash("0x0"))
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
- receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"name": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
- if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
- t.Error(err)
- }
-
- if len(receivedMap) != 4 {
- t.Fatal("unpacked map expected to have length 4")
- }
- if receivedMap["name"] != expectedReceivedMap["name"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["sender"] != expectedReceivedMap["sender"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
- t.Error("unpacked map does not match expected map")
- }
- if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
- t.Error("unpacked map does not match expected map")
- }
+ unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
}
func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
@@ -141,51 +135,23 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
t.Fatal(err)
}
hash := crypto.Keccak256Hash(sliceBytes)
- mockLog := types.Log{
- Address: common.HexToAddress("0x0"),
- Topics: []common.Hash{
- common.HexToHash("0x0"),
- hash,
- },
- Data: hexutil.MustDecode(hexData),
- BlockNumber: uint64(26),
- TxHash: common.HexToHash("0x0"),
- TxIndex: 111,
- BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
- Index: 7,
- Removed: false,
+ topics := []common.Hash{
+ common.HexToHash("0x0"),
+ hash,
}
+ mockLog := newMockLog(topics, common.HexToHash("0x0"))
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"names","type":"string[]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
- receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"names": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
- if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
- t.Error(err)
- }
-
- if len(receivedMap) != 4 {
- t.Fatal("unpacked map expected to have length 4")
- }
- if receivedMap["names"] != expectedReceivedMap["names"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["sender"] != expectedReceivedMap["sender"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
- t.Error("unpacked map does not match expected map")
- }
- if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
- t.Error("unpacked map does not match expected map")
- }
+ unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
}
func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
@@ -194,51 +160,23 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
t.Fatal(err)
}
hash := crypto.Keccak256Hash(arrBytes)
- mockLog := types.Log{
- Address: common.HexToAddress("0x0"),
- Topics: []common.Hash{
- common.HexToHash("0x0"),
- hash,
- },
- Data: hexutil.MustDecode(hexData),
- BlockNumber: uint64(26),
- TxHash: common.HexToHash("0x0"),
- TxIndex: 111,
- BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
- Index: 7,
- Removed: false,
+ topics := []common.Hash{
+ common.HexToHash("0x0"),
+ hash,
}
+ mockLog := newMockLog(topics, common.HexToHash("0x0"))
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"addresses","type":"address[2]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
- receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"addresses": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
- if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
- t.Error(err)
- }
-
- if len(receivedMap) != 4 {
- t.Fatal("unpacked map expected to have length 4")
- }
- if receivedMap["addresses"] != expectedReceivedMap["addresses"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["sender"] != expectedReceivedMap["sender"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
- t.Error("unpacked map does not match expected map")
- }
- if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
- t.Error("unpacked map does not match expected map")
- }
+ unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
}
func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
@@ -249,99 +187,72 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
functionTyBytes := append(addrBytes, functionSelector...)
var functionTy [24]byte
copy(functionTy[:], functionTyBytes[0:24])
- mockLog := types.Log{
- Address: common.HexToAddress("0x0"),
- Topics: []common.Hash{
- common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
- common.BytesToHash(functionTyBytes),
- },
- Data: hexutil.MustDecode(hexData),
- BlockNumber: uint64(26),
- TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"),
- TxIndex: 111,
- BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
- Index: 7,
- Removed: false,
+ topics := []common.Hash{
+ common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+ common.BytesToHash(functionTyBytes),
}
-
+ mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"function","type":"function"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
- receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"function": functionTy,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
- if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
- t.Error(err)
- }
-
- if len(receivedMap) != 4 {
- t.Fatal("unpacked map expected to have length 4")
- }
- if receivedMap["function"] != expectedReceivedMap["function"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["sender"] != expectedReceivedMap["sender"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
- t.Error("unpacked map does not match expected map")
- }
- if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
- t.Error("unpacked map does not match expected map")
- }
+ unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
}
func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
- byts := []byte{1, 2, 3, 4, 5}
- hash := crypto.Keccak256Hash(byts)
- mockLog := types.Log{
- Address: common.HexToAddress("0x0"),
- Topics: []common.Hash{
- common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
- hash,
- },
- Data: hexutil.MustDecode(hexData),
- BlockNumber: uint64(26),
- TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"),
- TxIndex: 111,
- BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
- Index: 7,
- Removed: false,
+ bytes := []byte{1, 2, 3, 4, 5}
+ hash := crypto.Keccak256Hash(bytes)
+ topics := []common.Hash{
+ common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+ hash,
}
+ mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"content","type":"bytes"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
- receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"content": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
- if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
+ unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
+}
+
+func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) {
+ received := make(map[string]interface{})
+ if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil {
t.Error(err)
}
- if len(receivedMap) != 4 {
- t.Fatal("unpacked map expected to have length 4")
- }
- if receivedMap["content"] != expectedReceivedMap["content"] {
- t.Error("unpacked map does not match expected map")
- }
- if receivedMap["sender"] != expectedReceivedMap["sender"] {
- t.Error("unpacked map does not match expected map")
+ if len(received) != len(expected) {
+ t.Fatalf("unpacked map length %v not equal expected length of %v", len(received), len(expected))
}
- if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
- t.Error("unpacked map does not match expected map")
+ for name, elem := range expected {
+ if !reflect.DeepEqual(elem, received[name]) {
+ t.Errorf("field %v does not match expected, want %v, got %v", name, elem, received[name])
+ }
}
- if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
- t.Error("unpacked map does not match expected map")
+}
+
+func newMockLog(topics []common.Hash, txHash common.Hash) types.Log {
+ return types.Log{
+ Address: common.HexToAddress("0x0"),
+ Topics: topics,
+ Data: hexutil.MustDecode(hexData),
+ BlockNumber: uint64(26),
+ TxHash: txHash,
+ TxIndex: 111,
+ BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
+ Index: 7,
+ Removed: false,
}
}
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index c98f8b4d4c..7aaa85bdc0 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -220,8 +220,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
"bindtype": bindType[lang],
"bindtopictype": bindTopicType[lang],
"namedtype": namedType[lang],
- "formatmethod": formatMethod,
- "formatevent": formatEvent,
"capitalise": capitalise,
"decapitalise": decapitalise,
}
@@ -537,9 +535,7 @@ var methodNormalizer = map[Lang]func(string) string{
}
// capitalise makes a camel-case string which starts with an upper case character.
-func capitalise(input string) string {
- return abi.ToCamelCase(input)
-}
+var capitalise = abi.ToCamelCase
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
@@ -588,74 +584,3 @@ func hasStruct(t abi.Type) bool {
return false
}
}
-
-// resolveArgName converts a raw argument representation into a user friendly format.
-func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string {
- var (
- prefix string
- embedded string
- typ = &arg.Type
- )
-loop:
- for {
- switch typ.T {
- case abi.SliceTy:
- prefix += "[]"
- case abi.ArrayTy:
- prefix += fmt.Sprintf("[%d]", typ.Size)
- default:
- embedded = typ.TupleRawName + typ.String()
- break loop
- }
- typ = typ.Elem
- }
- if s, exist := structs[embedded]; exist {
- return prefix + s.Name
- } else {
- return arg.Type.String()
- }
-}
-
-// formatMethod transforms raw method representation into a user friendly one.
-func formatMethod(method abi.Method, structs map[string]*tmplStruct) string {
- inputs := make([]string, len(method.Inputs))
- for i, input := range method.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name)
- }
- outputs := make([]string, len(method.Outputs))
- for i, output := range method.Outputs {
- outputs[i] = resolveArgName(output, structs)
- if len(output.Name) > 0 {
- outputs[i] += fmt.Sprintf(" %v", output.Name)
- }
- }
- // Extract meaningful state mutability of solidity method.
- // If it's default value, never print it.
- state := method.StateMutability
- if state == "nonpayable" {
- state = ""
- }
- if state != "" {
- state = state + " "
- }
- identity := fmt.Sprintf("function %v", method.RawName)
- if method.IsFallback {
- identity = "fallback"
- } else if method.IsReceive {
- identity = "receive"
- }
- return fmt.Sprintf("%s(%v) %sreturns(%v)", identity, strings.Join(inputs, ", "), state, strings.Join(outputs, ", "))
-}
-
-// formatEvent transforms raw event representation into a user friendly one.
-func formatEvent(event abi.Event, structs map[string]*tmplStruct) string {
- inputs := make([]string, len(event.Inputs))
- for i, input := range event.Inputs {
- if input.Indexed {
- inputs[i] = fmt.Sprintf("%v indexed %v", resolveArgName(input, structs), input.Name)
- } else {
- inputs[i] = fmt.Sprintf("%v %v", resolveArgName(input, structs), input.Name)
- }
- }
- return fmt.Sprintf("event %v(%v)", event.RawName, strings.Join(inputs, ", "))
-}
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 8753a1a174..56cdd372bf 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -199,7 +199,8 @@ var bindTests = []struct {
{"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]},
{"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]},
{"type":"event","name":"anonymous","anonymous":true,"inputs":[]},
- {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}
+ {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]},
+ {"type":"event","name":"unnamed","inputs":[{"name":"","type":"uint256","indexed": true},{"name":"","type":"uint256","indexed":true}]}
]
`},
`
@@ -249,6 +250,12 @@ var bindTests = []struct {
fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present
fmt.Println(res, str, dat, hash, err)
+
+ oit, err := e.FilterUnnamed(nil, []*big.Int{}, []*big.Int{})
+
+ arg0 := oit.Event.Arg0 // Make sure unnamed arguments are handled correctly
+ arg1 := oit.Event.Arg1 // Make sure unnamed arguments are handled correctly
+ fmt.Println(arg0, arg1)
}
// Run a tiny reflection test to ensure disallowed methods don't appear
if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok {
diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go
index 1492d0c3f9..16d17b2161 100644
--- a/accounts/abi/bind/template.go
+++ b/accounts/abi/bind/template.go
@@ -103,7 +103,6 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
@@ -300,7 +299,7 @@ var (
{{range .Calls}}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatmethod .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
{{if .Structured}}ret := new(struct{
{{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}}
@@ -319,14 +318,14 @@ var (
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatmethod .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatmethod .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
@@ -335,21 +334,21 @@ var (
{{range .Transacts}}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatmethod .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatmethod .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatmethod .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
@@ -358,21 +357,21 @@ var (
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
- // Solidity: {{formatmethod .Fallback.Original $structs}}
+ // Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
- // Solidity: {{formatmethod .Fallback.Original $structs}}
+ // Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
- // Solidity: {{formatmethod .Fallback.Original $structs}}
+ // Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
@@ -381,21 +380,21 @@ var (
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
- // Solidity: {{formatmethod .Receive.Original $structs}}
+ // Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
}
// Receive is a paid mutator transaction binding the contract receive function.
//
- // Solidity: {{formatmethod .Receive.Original $structs}}
+ // Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
// Receive is a paid mutator transaction binding the contract receive function.
//
- // Solidity: {{formatmethod .Receive.Original $structs}}
+ // Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
@@ -474,7 +473,7 @@ var (
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatevent .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
@@ -492,7 +491,7 @@ var (
var {{.Normalized.Name}}TopicHash = "0x{{printf "%x" .Original.ID}}"
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatevent .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
@@ -534,7 +533,7 @@ var (
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
- // Solidity: {{formatevent .Original $structs}}
+ // Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
@@ -628,7 +627,7 @@ import java.util.*;
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
- public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
+ public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
@@ -666,7 +665,7 @@ import java.util.*;
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
- // Solidity: {{formatmethod .Fallback.Original $structs}}
+ // Solidity: {{.Fallback.Original.String}}
public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception {
return this.Contract.rawTransact(opts, calldata);
}
@@ -675,7 +674,7 @@ import java.util.*;
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
- // Solidity: {{formatmethod .Receive.Original $structs}}
+ // Solidity: {{.Receive.Original.String}}
public Transaction Receive(TransactOpts opts) throws Exception {
return this.Contract.rawTransact(opts, null);
}
diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go
index d129993ca1..118abc59a7 100644
--- a/accounts/abi/bind/util.go
+++ b/accounts/abi/bind/util.go
@@ -18,7 +18,7 @@ package bind
import (
"context"
- "fmt"
+ "errors"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -56,14 +56,14 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
// contract address when it is mined. It stops waiting when ctx is canceled.
func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (common.Address, error) {
if tx.To() != nil {
- return common.Address{}, fmt.Errorf("tx is not contract creation")
+ return common.Address{}, errors.New("tx is not contract creation")
}
receipt, err := WaitMined(ctx, b, tx)
if err != nil {
return common.Address{}, err
}
if receipt.ContractAddress == (common.Address{}) {
- return common.Address{}, fmt.Errorf("zero address")
+ return common.Address{}, errors.New("zero address")
}
// Check that code has indeed been deployed at the address.
// This matters on pre-Homestead chains: OOG in the constructor
diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index ea7fc62469..d83cb0b412 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -18,6 +18,7 @@ package bind_test
import (
"context"
+ "errors"
"math/big"
"testing"
"time"
@@ -84,7 +85,7 @@ func TestWaitDeployed(t *testing.T) {
select {
case <-mined:
if err != test.wantErr {
- t.Errorf("test %q: error mismatch: got %q, want %q", name, err, test.wantErr)
+ t.Errorf("test %q: error mismatch: want %q, got %q", name, test.wantErr, err)
}
if address != test.wantAddress {
t.Errorf("test %q: unexpected contract address %s", name, address.Hex())
@@ -94,3 +95,40 @@ func TestWaitDeployed(t *testing.T) {
}
}
}
+
+func TestWaitDeployedCornerCases(t *testing.T) {
+ backend := backends.NewSimulatedBackend(
+ core.GenesisAlloc{
+ crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
+ },
+ 10000000,
+ )
+ defer backend.Close()
+
+ // Create a transaction to an account.
+ code := "6060604052600a8060106000396000f360606040526008565b00"
+ tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code))
+ tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ backend.SendTransaction(ctx, tx, bind.PrivateTxArgs{})
+ backend.Commit()
+ notContractCreation := errors.New("tx is not contract creation")
+ if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContractCreation.Error() {
+ t.Errorf("error mismatch: want %q, got %q", notContractCreation, err)
+ }
+
+ // Create a transaction that is not mined.
+ tx = types.NewContractCreation(1, big.NewInt(0), 3000000, big.NewInt(1), common.FromHex(code))
+ tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
+
+ go func() {
+ contextCanceled := errors.New("context canceled")
+ if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
+ t.Errorf("error mismatch: want %q, got %q", contextCanceled, err)
+ }
+ }()
+
+ backend.SendTransaction(ctx, tx, bind.PrivateTxArgs{})
+ cancel()
+}
diff --git a/accounts/abi/error.go b/accounts/abi/error.go
index 9d8674ad08..8d61d574c5 100644
--- a/accounts/abi/error.go
+++ b/accounts/abi/error.go
@@ -39,23 +39,21 @@ func formatSliceString(kind reflect.Kind, sliceSize int) string {
// type in t.
func sliceTypeCheck(t Type, val reflect.Value) error {
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
- return typeErr(formatSliceString(t.Kind, t.Size), val.Type())
+ return typeErr(formatSliceString(t.getType().Kind(), t.Size), val.Type())
}
if t.T == ArrayTy && val.Len() != t.Size {
- return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
+ return typeErr(formatSliceString(t.Elem.getType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
}
- if t.Elem.T == SliceTy {
+ if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
if val.Len() > 0 {
return sliceTypeCheck(*t.Elem, val.Index(0))
}
- } else if t.Elem.T == ArrayTy {
- return sliceTypeCheck(*t.Elem, val.Index(0))
}
- if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
- return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type())
+ if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.getType().Kind() {
+ return typeErr(formatSliceString(t.Elem.getType().Kind(), t.Size), val.Type())
}
return nil
}
@@ -68,10 +66,10 @@ func typeCheck(t Type, value reflect.Value) error {
}
// Check base type validity. Element types will be checked later on.
- if t.Kind != value.Kind() {
- return typeErr(t.Kind, value.Kind())
+ if t.getType().Kind() != value.Kind() {
+ return typeErr(t.getType().Kind(), value.Kind())
} else if t.T == FixedBytesTy && t.Size != value.Len() {
- return typeErr(t.Type, value.Type())
+ return typeErr(t.getType(), value.Type())
} else {
return nil
}
diff --git a/accounts/abi/event.go b/accounts/abi/event.go
index f1474813af..f1e5398f79 100644
--- a/accounts/abi/event.go
+++ b/accounts/abi/event.go
@@ -42,36 +42,59 @@ type Event struct {
RawName string
Anonymous bool
Inputs Arguments
+ str string
+ // Sig contains the string signature according to the ABI spec.
+ // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
+ // Please note that "int" is substitute for its canonical representation "int256"
+ Sig string
+ // ID returns the canonical representation of the event's signature used by the
+ // abi definition to identify event names and types.
+ ID common.Hash
}
-func (e Event) String() string {
- inputs := make([]string, len(e.Inputs))
- for i, input := range e.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
+// NewEvent creates a new Event.
+// It sanitizes the input arguments to remove unnamed arguments.
+// It also precomputes the id, signature and string representation
+// of the event.
+func NewEvent(name, rawName string, anonymous bool, inputs Arguments) Event {
+ // sanitize inputs to remove inputs without names
+ // and precompute string and sig representation.
+ names := make([]string, len(inputs))
+ types := make([]string, len(inputs))
+ for i, input := range inputs {
+ if input.Name == "" {
+ inputs[i] = Argument{
+ Name: fmt.Sprintf("arg%d", i),
+ Indexed: input.Indexed,
+ Type: input.Type,
+ }
+ } else {
+ inputs[i] = input
+ }
+ // string representation
+ names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
if input.Indexed {
- inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
+ names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
}
+ // sig representation
+ types[i] = input.Type.String()
}
- return fmt.Sprintf("event %v(%v)", e.RawName, strings.Join(inputs, ", "))
-}
-// Sig returns the event string signature according to the ABI spec.
-//
-// Example
-//
-// event foo(uint32 a, int b) = "foo(uint32,int256)"
-//
-// Please note that "int" is substitute for its canonical representation "int256"
-func (e Event) Sig() string {
- types := make([]string, len(e.Inputs))
- for i, input := range e.Inputs {
- types[i] = input.Type.String()
+ str := fmt.Sprintf("event %v(%v)", rawName, strings.Join(names, ", "))
+ sig := fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
+ id := common.BytesToHash(crypto.Keccak256([]byte(sig)))
+
+ return Event{
+ Name: name,
+ RawName: rawName,
+ Anonymous: anonymous,
+ Inputs: inputs,
+ str: str,
+ Sig: sig,
+ ID: id,
}
- return fmt.Sprintf("%v(%v)", e.RawName, strings.Join(types, ","))
}
-// ID returns the canonical representation of the event's signature used by the
-// abi definition to identify event names and types.
-func (e Event) ID() common.Hash {
- return common.BytesToHash(crypto.Keccak256([]byte(e.Sig())))
+func (e Event) String() string {
+ return e.str
}
diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go
index 090b9217db..28da4c5024 100644
--- a/accounts/abi/event_test.go
+++ b/accounts/abi/event_test.go
@@ -104,8 +104,8 @@ func TestEventId(t *testing.T) {
}
for name, event := range abi.Events {
- if event.ID() != test.expectations[name] {
- t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID())
+ if event.ID != test.expectations[name] {
+ t.Errorf("expected id to be %x, got %x", test.expectations[name], event.ID)
}
}
}
diff --git a/accounts/abi/method.go b/accounts/abi/method.go
index 217c3d2e68..37c7af65ea 100644
--- a/accounts/abi/method.go
+++ b/accounts/abi/method.go
@@ -23,6 +23,24 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)
+// FunctionType represents different types of functions a contract might have.
+type FunctionType int
+
+const (
+ // Constructor represents the constructor of the contract.
+ // The constructor function is called while deploying a contract.
+ Constructor FunctionType = iota
+ // Fallback represents the fallback function.
+ // This function is executed if no other function matches the given function
+ // signature and no receive function is specified.
+ Fallback
+ // Receive represents the receive function.
+ // This function is executed on plain Ether transfers.
+ Receive
+ // Function represents a normal function.
+ Function
+)
+
// Method represents a callable given a `Name` and whether the method is a constant.
// If the method is `Const` no transaction needs to be created for this
// particular Method call. It can easily be simulated using a local VM.
@@ -44,6 +62,10 @@ type Method struct {
Name string
RawName string // RawName is the raw method name parsed from ABI
+ // Type indicates whether the method is a
+ // special fallback introduced in solidity v0.6.0
+ Type FunctionType
+
// StateMutability indicates the mutability state of method,
// the default value is nonpayable. It can be empty if the abi
// is generated by legacy compiler.
@@ -53,69 +75,84 @@ type Method struct {
Constant bool
Payable bool
- // The following two flags indicates whether the method is a
- // special fallback introduced in solidity v0.6.0
- IsFallback bool
- IsReceive bool
-
Inputs Arguments
Outputs Arguments
+ str string
+ // Sig returns the methods string signature according to the ABI spec.
+ // e.g. function foo(uint32 a, int b) = "foo(uint32,int256)"
+ // Please note that "int" is substitute for its canonical representation "int256"
+ Sig string
+ // ID returns the canonical representation of the method's signature used by the
+ // abi definition to identify method names and types.
+ ID []byte
}
-// Sig returns the methods string signature according to the ABI spec.
-//
-// Example
-//
-// function foo(uint32 a, int b) = "foo(uint32,int256)"
-//
-// Please note that "int" is substitute for its canonical representation "int256"
-func (method Method) Sig() string {
- // Short circuit if the method is special. Fallback
- // and Receive don't have signature at all.
- if method.IsFallback || method.IsReceive {
- return ""
- }
- types := make([]string, len(method.Inputs))
- for i, input := range method.Inputs {
+// NewMethod creates a new Method.
+// A method should always be created using NewMethod.
+// It also precomputes the sig representation and the string representation
+// of the method.
+func NewMethod(name string, rawName string, funType FunctionType, mutability string, isConst, isPayable bool, inputs Arguments, outputs Arguments) Method {
+ var (
+ types = make([]string, len(inputs))
+ inputNames = make([]string, len(inputs))
+ outputNames = make([]string, len(outputs))
+ )
+ for i, input := range inputs {
+ inputNames[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
types[i] = input.Type.String()
}
- return fmt.Sprintf("%v(%v)", method.RawName, strings.Join(types, ","))
-}
-
-func (method Method) String() string {
- inputs := make([]string, len(method.Inputs))
- for i, input := range method.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
- }
- outputs := make([]string, len(method.Outputs))
- for i, output := range method.Outputs {
- outputs[i] = output.Type.String()
+ for i, output := range outputs {
+ outputNames[i] = output.Type.String()
if len(output.Name) > 0 {
- outputs[i] += fmt.Sprintf(" %v", output.Name)
+ outputNames[i] += fmt.Sprintf(" %v", output.Name)
}
}
+ // calculate the signature and method id. Note only function
+ // has meaningful signature and id.
+ var (
+ sig string
+ id []byte
+ )
+ if funType == Function {
+ sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
+ id = crypto.Keccak256([]byte(sig))[:4]
+ }
// Extract meaningful state mutability of solidity method.
// If it's default value, never print it.
- state := method.StateMutability
+ state := mutability
if state == "nonpayable" {
state = ""
}
if state != "" {
state = state + " "
}
- identity := fmt.Sprintf("function %v", method.RawName)
- if method.IsFallback {
+ identity := fmt.Sprintf("function %v", rawName)
+ if funType == Fallback {
identity = "fallback"
- } else if method.IsReceive {
+ } else if funType == Receive {
identity = "receive"
+ } else if funType == Constructor {
+ identity = "constructor"
+ }
+ str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
+
+ return Method{
+ Name: name,
+ RawName: rawName,
+ Type: funType,
+ StateMutability: mutability,
+ Constant: isConst,
+ Payable: isPayable,
+ Inputs: inputs,
+ Outputs: outputs,
+ str: str,
+ Sig: sig,
+ ID: id,
}
- return fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputs, ", "), state, strings.Join(outputs, ", "))
}
-// ID returns the canonical representation of the method's signature used by the
-// abi definition to identify method names and types.
-func (method Method) ID() []byte {
- return crypto.Keccak256([]byte(method.Sig()))[:4]
+func (method Method) String() string {
+ return method.str
}
// IsConstant returns the indicator whether the method is read-only.
diff --git a/accounts/abi/method_test.go b/accounts/abi/method_test.go
index ea176bf4e0..395a528965 100644
--- a/accounts/abi/method_test.go
+++ b/accounts/abi/method_test.go
@@ -137,7 +137,7 @@ func TestMethodSig(t *testing.T) {
}
for _, test := range cases {
- got := abi.Methods[test.method].Sig()
+ got := abi.Methods[test.method].Sig
if got != test.expect {
t.Errorf("expected string to be %s, got %s", test.expect, got)
}
diff --git a/accounts/abi/numbers.go b/accounts/abi/numbers.go
deleted file mode 100644
index 4d706846da..0000000000
--- a/accounts/abi/numbers.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "math/big"
- "reflect"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
-)
-
-var (
- bigT = reflect.TypeOf(&big.Int{})
- derefbigT = reflect.TypeOf(big.Int{})
- uint8T = reflect.TypeOf(uint8(0))
- uint16T = reflect.TypeOf(uint16(0))
- uint32T = reflect.TypeOf(uint32(0))
- uint64T = reflect.TypeOf(uint64(0))
- int8T = reflect.TypeOf(int8(0))
- int16T = reflect.TypeOf(int16(0))
- int32T = reflect.TypeOf(int32(0))
- int64T = reflect.TypeOf(int64(0))
- addressT = reflect.TypeOf(common.Address{})
-)
-
-// U256 converts a big Int into a 256bit EVM number.
-func U256(n *big.Int) []byte {
- return math.PaddedBigBytes(math.U256(n), 32)
-}
diff --git a/accounts/abi/numbers_test.go b/accounts/abi/numbers_test.go
deleted file mode 100644
index d25a5abcb5..0000000000
--- a/accounts/abi/numbers_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "bytes"
- "math/big"
- "testing"
-)
-
-func TestNumberTypes(t *testing.T) {
- ubytes := make([]byte, 32)
- ubytes[31] = 1
-
- unsigned := U256(big.NewInt(1))
- if !bytes.Equal(unsigned, ubytes) {
- t.Errorf("expected %x got %x", ubytes, unsigned)
- }
-}
diff --git a/accounts/abi/pack.go b/accounts/abi/pack.go
index dd1c9a5df8..096648be12 100644
--- a/accounts/abi/pack.go
+++ b/accounts/abi/pack.go
@@ -69,11 +69,11 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
func packNum(value reflect.Value) []byte {
switch kind := value.Kind(); kind {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return U256(new(big.Int).SetUint64(value.Uint()))
+ return math.U256Bytes(new(big.Int).SetUint64(value.Uint()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return U256(big.NewInt(value.Int()))
+ return math.U256Bytes(big.NewInt(value.Int()))
case reflect.Ptr:
- return U256(new(big.Int).Set(value.Interface().(*big.Int)))
+ return math.U256Bytes(new(big.Int).Set(value.Interface().(*big.Int)))
default:
panic("abi: fatal error")
}
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index cf649b4807..284215a7d7 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -18,623 +18,62 @@ package abi
import (
"bytes"
+ "encoding/hex"
+ "fmt"
"math"
"math/big"
"reflect"
+ "strconv"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
)
+// TestPack tests the general pack/unpack tests in packing_test.go
func TestPack(t *testing.T) {
- for i, test := range []struct {
- typ string
- components []ArgumentMarshaling
- input interface{}
- output []byte
- }{
- {
- "uint8",
- nil,
- uint8(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint8[]",
- nil,
- []uint8{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint16",
- nil,
- uint16(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint16[]",
- nil,
- []uint16{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint32",
- nil,
- uint32(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint32[]",
- nil,
- []uint32{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint64",
- nil,
- uint64(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint64[]",
- nil,
- []uint64{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint256",
- nil,
- big.NewInt(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "uint256[]",
- nil,
- []*big.Int{big.NewInt(1), big.NewInt(2)},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int8",
- nil,
- int8(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int8[]",
- nil,
- []int8{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int16",
- nil,
- int16(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int16[]",
- nil,
- []int16{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int32",
- nil,
- int32(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int32[]",
- nil,
- []int32{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int64",
- nil,
- int64(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int64[]",
- nil,
- []int64{1, 2},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int256",
- nil,
- big.NewInt(2),
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "int256[]",
- nil,
- []*big.Int{big.NewInt(1), big.NewInt(2)},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
- },
- {
- "bytes1",
- nil,
- [1]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes2",
- nil,
- [2]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes3",
- nil,
- [3]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes4",
- nil,
- [4]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes5",
- nil,
- [5]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes6",
- nil,
- [6]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes7",
- nil,
- [7]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes8",
- nil,
- [8]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes9",
- nil,
- [9]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes10",
- nil,
- [10]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes11",
- nil,
- [11]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes12",
- nil,
- [12]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes13",
- nil,
- [13]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes14",
- nil,
- [14]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes15",
- nil,
- [15]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes16",
- nil,
- [16]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes17",
- nil,
- [17]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes18",
- nil,
- [18]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes19",
- nil,
- [19]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes20",
- nil,
- [20]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes21",
- nil,
- [21]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes22",
- nil,
- [22]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes23",
- nil,
- [23]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes24",
- nil,
- [24]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes25",
- nil,
- [25]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes26",
- nil,
- [26]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes27",
- nil,
- [27]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes28",
- nil,
- [28]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes29",
- nil,
- [29]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes30",
- nil,
- [30]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes31",
- nil,
- [31]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes32",
- nil,
- [32]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "uint32[2][3][4]",
- nil,
- [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
- },
- {
- "address[]",
- nil,
- []common.Address{{1}, {2}},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
- },
- {
- "bytes32[]",
- nil,
- []common.Hash{{1}, {2}},
- common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "function",
- nil,
- [24]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "string",
- nil,
- "foobar",
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
- },
- {
- "string[]",
- nil,
- []string{"hello", "foobar"},
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
- "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
- "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
- "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
- "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
- "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
- "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
- },
- {
- "string[2]",
- nil,
- []string{"hello", "foobar"},
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
- "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
- "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
- "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
- "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
- "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
- },
- {
- "bytes32[][]",
- nil,
- [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
- "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
- "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
- "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
- "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
- "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
- "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
- "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
- "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
- "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
- },
-
- {
- "bytes32[][2]",
- nil,
- [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
- "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
- "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
- "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
- "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
- "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
- "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
- "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
- "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
- },
-
- {
- "bytes32[3][2]",
- nil,
- [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
- "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
- "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
- "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
- "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
- "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
- },
- {
- // static tuple
- "tuple",
- []ArgumentMarshaling{
- {Name: "a", Type: "int64"},
- {Name: "b", Type: "int256"},
- {Name: "c", Type: "int256"},
- {Name: "d", Type: "bool"},
- {Name: "e", Type: "bytes32[3][2]"},
- },
- struct {
- A int64
- B *big.Int
- C *big.Int
- D bool
- E [][]common.Hash
- }{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
- "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
- "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
- "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
- "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
- "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
- "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
- "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
- "0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
- },
- {
- // dynamic tuple
- "tuple",
- []ArgumentMarshaling{
- {Name: "a", Type: "string"},
- {Name: "b", Type: "int64"},
- {Name: "c", Type: "bytes"},
- {Name: "d", Type: "string[]"},
- {Name: "e", Type: "int256[]"},
- {Name: "f", Type: "address[]"},
- },
- struct {
- FieldA string `abi:"a"` // Test whether abi tag works
- FieldB int64 `abi:"b"`
- C []byte
- D []string
- E []*big.Int
- F []common.Address
- }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
- common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
- "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
- "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
- "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
- "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
- "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
- "0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
- "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
- "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
- "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
- "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
- "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
- "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
- "0000000000000000000000000000000000000000000000000000000000000003" + // foo length
- "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
- "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
- "6261720000000000000000000000000000000000000000000000000000000000" + // bar
- "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
- "0000000000000000000000000000000000000000000000000000000000000001" + // 1
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
- "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
- "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
- "0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
- },
- {
- // nested tuple
- "tuple",
- []ArgumentMarshaling{
- {Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
- {Name: "b", Type: "int256[]"},
- },
- struct {
- A struct {
- FieldA *big.Int `abi:"a"`
- B []*big.Int
+ for i, test := range packUnpackTests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ encb, err := hex.DecodeString(test.packed)
+ if err != nil {
+ t.Fatalf("invalid hex %s: %v", test.packed, err)
+ }
+ inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "inputs": %s}]`, test.def)
+ inAbi, err := JSON(strings.NewReader(inDef))
+ if err != nil {
+ t.Fatalf("invalid ABI definition %s, %v", inDef, err)
+ }
+ var packed []byte
+ if reflect.TypeOf(test.unpacked).Kind() != reflect.Struct {
+ packed, err = inAbi.Pack("method", test.unpacked)
+ } else {
+ // if want is a struct we need to use the components.
+ elem := reflect.ValueOf(test.unpacked)
+ var values []interface{}
+ for i := 0; i < elem.NumField(); i++ {
+ field := elem.Field(i)
+ values = append(values, field.Interface())
}
- B []*big.Int
- }{
- A: struct {
- FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
- B []*big.Int
- }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
- B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
- "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
- "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
- "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
- "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
- "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
- "0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
- "0000000000000000000000000000000000000000000000000000000000000002" + // b length
- "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
- "0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
- },
- {
- // tuple slice
- "tuple[]",
- []ArgumentMarshaling{
- {Name: "a", Type: "int256"},
- {Name: "b", Type: "int256[]"},
- },
- []struct {
- A *big.Int
- B []*big.Int
- }{
- {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
- {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
- },
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
- "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
- "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
- "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
- "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
- "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
- "0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
- "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
- "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
- "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
- "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
- },
- {
- // static tuple array
- "tuple[2]",
- []ArgumentMarshaling{
- {Name: "a", Type: "int256"},
- {Name: "b", Type: "int256"},
- },
- [2]struct {
- A *big.Int
- B *big.Int
- }{
- {big.NewInt(-1), big.NewInt(1)},
- {big.NewInt(1), big.NewInt(-1)},
- },
- common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
- "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
- "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
- },
- {
- // dynamic tuple array
- "tuple[2]",
- []ArgumentMarshaling{
- {Name: "a", Type: "int256[]"},
- },
- [2]struct {
- A []*big.Int
- }{
- {[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
- {[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
- },
- common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
- "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
- "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
- "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
- "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
- "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
- "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
- "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
- "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
- },
- } {
- typ, err := NewType(test.typ, "", test.components)
- if err != nil {
- t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
- }
- output, err := typ.pack(reflect.ValueOf(test.input))
- if err != nil {
- t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
- }
+ packed, err = inAbi.Pack("method", values...)
+ }
- if !bytes.Equal(output, test.output) {
- t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
- }
+ if err != nil {
+ t.Fatalf("test %d (%v) failed: %v", i, test.def, err)
+ }
+ if !reflect.DeepEqual(packed[4:], encb) {
+ t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, encb, packed[4:])
+ }
+ })
}
}
func TestMethodPack(t *testing.T) {
- abi, err := JSON(strings.NewReader(jsondata2))
+ abi, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Fatal(err)
}
- sig := abi.Methods["slice"].ID()
+ sig := abi.Methods["slice"].ID
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -648,7 +87,7 @@ func TestMethodPack(t *testing.T) {
}
var addrA, addrB = common.Address{1}, common.Address{2}
- sig = abi.Methods["sliceAddress"].ID()
+ sig = abi.Methods["sliceAddress"].ID
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
@@ -663,7 +102,7 @@ func TestMethodPack(t *testing.T) {
}
var addrC, addrD = common.Address{3}, common.Address{4}
- sig = abi.Methods["sliceMultiAddress"].ID()
+ sig = abi.Methods["sliceMultiAddress"].ID
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -681,7 +120,7 @@ func TestMethodPack(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
- sig = abi.Methods["slice256"].ID()
+ sig = abi.Methods["slice256"].ID
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -695,7 +134,7 @@ func TestMethodPack(t *testing.T) {
}
a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
- sig = abi.Methods["nestedArray"].ID()
+ sig = abi.Methods["nestedArray"].ID
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
@@ -712,7 +151,7 @@ func TestMethodPack(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
- sig = abi.Methods["nestedArray2"].ID()
+ sig = abi.Methods["nestedArray2"].ID
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
@@ -728,7 +167,7 @@ func TestMethodPack(t *testing.T) {
t.Errorf("expected %x got %x", sig, packed)
}
- sig = abi.Methods["nestedSlice"].ID()
+ sig = abi.Methods["nestedSlice"].ID
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
diff --git a/accounts/abi/packing_test.go b/accounts/abi/packing_test.go
new file mode 100644
index 0000000000..16b4dc43d7
--- /dev/null
+++ b/accounts/abi/packing_test.go
@@ -0,0 +1,988 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type packUnpackTest struct {
+ def string
+ unpacked interface{}
+ packed string
+}
+
+var packUnpackTests = []packUnpackTest{
+ // Booleans
+ {
+ def: `[{ "type": "bool" }]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: true,
+ },
+ {
+ def: `[{ "type": "bool" }]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000000",
+ unpacked: false,
+ },
+ // Integers
+ {
+ def: `[{ "type": "uint8" }]`,
+ unpacked: uint8(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{ "type": "uint8[]" }]`,
+ unpacked: []uint8{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{ "type": "uint16" }]`,
+ unpacked: uint16(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{ "type": "uint16[]" }]`,
+ unpacked: []uint16{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "uint17"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: big.NewInt(1),
+ },
+ {
+ def: `[{"type": "uint32"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: uint32(1),
+ },
+ {
+ def: `[{"type": "uint32[]"}]`,
+ unpacked: []uint32{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "uint64"}]`,
+ unpacked: uint64(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "uint64[]"}]`,
+ unpacked: []uint64{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "uint256"}]`,
+ unpacked: big.NewInt(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "uint256[]"}]`,
+ unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int8"}]`,
+ unpacked: int8(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int8[]"}]`,
+ unpacked: []int8{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int16"}]`,
+ unpacked: int16(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int16[]"}]`,
+ unpacked: []int16{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int17"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: big.NewInt(1),
+ },
+ {
+ def: `[{"type": "int32"}]`,
+ unpacked: int32(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int32"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: int32(1),
+ },
+ {
+ def: `[{"type": "int32[]"}]`,
+ unpacked: []int32{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int64"}]`,
+ unpacked: int64(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int64[]"}]`,
+ unpacked: []int64{1, 2},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int256"}]`,
+ unpacked: big.NewInt(2),
+ packed: "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ {
+ def: `[{"type": "int256"}]`,
+ packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ unpacked: big.NewInt(-1),
+ },
+ {
+ def: `[{"type": "int256[]"}]`,
+ unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ },
+ // Address
+ {
+ def: `[{"type": "address"}]`,
+ packed: "0000000000000000000000000100000000000000000000000000000000000000",
+ unpacked: common.Address{1},
+ },
+ {
+ def: `[{"type": "address[]"}]`,
+ unpacked: []common.Address{{1}, {2}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000100000000000000000000000000000000000000" +
+ "0000000000000000000000000200000000000000000000000000000000000000",
+ },
+ // Bytes
+ {
+ def: `[{"type": "bytes1"}]`,
+ unpacked: [1]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes2"}]`,
+ unpacked: [2]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes3"}]`,
+ unpacked: [3]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes4"}]`,
+ unpacked: [4]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes5"}]`,
+ unpacked: [5]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes6"}]`,
+ unpacked: [6]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes7"}]`,
+ unpacked: [7]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes8"}]`,
+ unpacked: [8]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes9"}]`,
+ unpacked: [9]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes10"}]`,
+ unpacked: [10]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes11"}]`,
+ unpacked: [11]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes12"}]`,
+ unpacked: [12]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes13"}]`,
+ unpacked: [13]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes14"}]`,
+ unpacked: [14]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes15"}]`,
+ unpacked: [15]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes16"}]`,
+ unpacked: [16]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes17"}]`,
+ unpacked: [17]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes18"}]`,
+ unpacked: [18]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes19"}]`,
+ unpacked: [19]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes20"}]`,
+ unpacked: [20]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes21"}]`,
+ unpacked: [21]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes22"}]`,
+ unpacked: [22]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes23"}]`,
+ unpacked: [23]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes24"}]`,
+ unpacked: [24]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes25"}]`,
+ unpacked: [25]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes26"}]`,
+ unpacked: [26]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes27"}]`,
+ unpacked: [27]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes28"}]`,
+ unpacked: [28]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes29"}]`,
+ unpacked: [29]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes30"}]`,
+ unpacked: [30]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes31"}]`,
+ unpacked: [31]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes32"}]`,
+ unpacked: [32]byte{1},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "bytes32"}]`,
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ },
+ {
+ def: `[{"type": "bytes"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ unpacked: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
+ },
+ {
+ def: `[{"type": "bytes32"}]`,
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ unpacked: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ },
+ // Functions
+ {
+ def: `[{"type": "function"}]`,
+ packed: "0100000000000000000000000000000000000000000000000000000000000000",
+ unpacked: [24]byte{1},
+ },
+ // Slice and Array
+ {
+ def: `[{"type": "uint8[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []uint8{1, 2},
+ },
+ {
+ def: `[{"type": "uint8[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ unpacked: []uint8{},
+ },
+ {
+ def: `[{"type": "uint256[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ unpacked: []*big.Int{},
+ },
+ {
+ def: `[{"type": "uint8[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]uint8{1, 2},
+ },
+ {
+ def: `[{"type": "int8[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]int8{1, 2},
+ },
+ {
+ def: `[{"type": "int16[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []int16{1, 2},
+ },
+ {
+ def: `[{"type": "int16[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]int16{1, 2},
+ },
+ {
+ def: `[{"type": "int32[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []int32{1, 2},
+ },
+ {
+ def: `[{"type": "int32[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]int32{1, 2},
+ },
+ {
+ def: `[{"type": "int64[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []int64{1, 2},
+ },
+ {
+ def: `[{"type": "int64[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]int64{1, 2},
+ },
+ {
+ def: `[{"type": "int256[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
+ },
+ {
+ def: `[{"type": "int256[3]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000003",
+ unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
+ },
+ // multi dimensional, if these pass, all types that don't require length prefix should pass
+ {
+ def: `[{"type": "uint8[][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ unpacked: [][]uint8{},
+ },
+ {
+ def: `[{"type": "uint8[][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "00000000000000000000000000000000000000000000000000000000000000a0" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [][]uint8{{1, 2}, {1, 2}},
+ },
+ {
+ def: `[{"type": "uint8[][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "00000000000000000000000000000000000000000000000000000000000000a0" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000003" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000003",
+ unpacked: [][]uint8{{1, 2}, {1, 2, 3}},
+ },
+ {
+ def: `[{"type": "uint8[2][2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2][2]uint8{{1, 2}, {1, 2}},
+ },
+ {
+ def: `[{"type": "uint8[][2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "0000000000000000000000000000000000000000000000000000000000000060" +
+ "0000000000000000000000000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ unpacked: [2][]uint8{{}, {}},
+ },
+ {
+ def: `[{"type": "uint8[][2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "0000000000000000000000000000000000000000000000000000000000000080" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: [2][]uint8{{1}, {1}},
+ },
+ {
+ def: `[{"type": "uint8[2][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ unpacked: [][2]uint8{},
+ },
+ {
+ def: `[{"type": "uint8[2][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [][2]uint8{{1, 2}},
+ },
+ {
+ def: `[{"type": "uint8[2][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [][2]uint8{{1, 2}, {1, 2}},
+ },
+ {
+ def: `[{"type": "uint16[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []uint16{1, 2},
+ },
+ {
+ def: `[{"type": "uint16[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]uint16{1, 2},
+ },
+ {
+ def: `[{"type": "uint32[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []uint32{1, 2},
+ },
+ {
+ def: `[{"type": "uint32[2][3][4]"}]`,
+ unpacked: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000003" +
+ "0000000000000000000000000000000000000000000000000000000000000004" +
+ "0000000000000000000000000000000000000000000000000000000000000005" +
+ "0000000000000000000000000000000000000000000000000000000000000006" +
+ "0000000000000000000000000000000000000000000000000000000000000007" +
+ "0000000000000000000000000000000000000000000000000000000000000008" +
+ "0000000000000000000000000000000000000000000000000000000000000009" +
+ "000000000000000000000000000000000000000000000000000000000000000a" +
+ "000000000000000000000000000000000000000000000000000000000000000b" +
+ "000000000000000000000000000000000000000000000000000000000000000c" +
+ "000000000000000000000000000000000000000000000000000000000000000d" +
+ "000000000000000000000000000000000000000000000000000000000000000e" +
+ "000000000000000000000000000000000000000000000000000000000000000f" +
+ "0000000000000000000000000000000000000000000000000000000000000010" +
+ "0000000000000000000000000000000000000000000000000000000000000011" +
+ "0000000000000000000000000000000000000000000000000000000000000012" +
+ "0000000000000000000000000000000000000000000000000000000000000013" +
+ "0000000000000000000000000000000000000000000000000000000000000014" +
+ "0000000000000000000000000000000000000000000000000000000000000015" +
+ "0000000000000000000000000000000000000000000000000000000000000016" +
+ "0000000000000000000000000000000000000000000000000000000000000017" +
+ "0000000000000000000000000000000000000000000000000000000000000018",
+ },
+
+ {
+ def: `[{"type": "bytes32[]"}]`,
+ unpacked: []common.Hash{{1}, {2}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0100000000000000000000000000000000000000000000000000000000000000" +
+ "0200000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "uint32[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]uint32{1, 2},
+ },
+ {
+ def: `[{"type": "uint64[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []uint64{1, 2},
+ },
+ {
+ def: `[{"type": "uint64[2]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: [2]uint64{1, 2},
+ },
+ {
+ def: `[{"type": "uint256[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: []*big.Int{big.NewInt(1), big.NewInt(2)},
+ },
+ {
+ def: `[{"type": "uint256[3]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000003",
+ unpacked: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
+ },
+ {
+ def: `[{"type": "string[4]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000080" +
+ "00000000000000000000000000000000000000000000000000000000000000c0" +
+ "0000000000000000000000000000000000000000000000000000000000000100" +
+ "0000000000000000000000000000000000000000000000000000000000000140" +
+ "0000000000000000000000000000000000000000000000000000000000000005" +
+ "48656c6c6f000000000000000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000000000000000000000000000000000000005" +
+ "576f726c64000000000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000b" +
+ "476f2d657468657265756d000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000000000000000000000000000000000000008" +
+ "457468657265756d000000000000000000000000000000000000000000000000",
+ unpacked: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
+ },
+ {
+ def: `[{"type": "string[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "0000000000000000000000000000000000000000000000000000000000000080" +
+ "0000000000000000000000000000000000000000000000000000000000000008" +
+ "457468657265756d000000000000000000000000000000000000000000000000" +
+ "000000000000000000000000000000000000000000000000000000000000000b" +
+ "676f2d657468657265756d000000000000000000000000000000000000000000",
+ unpacked: []string{"Ethereum", "go-ethereum"},
+ },
+ {
+ def: `[{"type": "bytes[]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "0000000000000000000000000000000000000000000000000000000000000080" +
+ "0000000000000000000000000000000000000000000000000000000000000003" +
+ "f0f0f00000000000000000000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000000000000000000000000000000000000003" +
+ "f0f0f00000000000000000000000000000000000000000000000000000000000",
+ unpacked: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
+ },
+ {
+ def: `[{"type": "uint256[2][][]"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000040" +
+ "00000000000000000000000000000000000000000000000000000000000000e0" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "00000000000000000000000000000000000000000000000000000000000000c8" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "00000000000000000000000000000000000000000000000000000000000003e8" +
+ "0000000000000000000000000000000000000000000000000000000000000002" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "00000000000000000000000000000000000000000000000000000000000000c8" +
+ "0000000000000000000000000000000000000000000000000000000000000001" +
+ "00000000000000000000000000000000000000000000000000000000000003e8",
+ unpacked: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
+ },
+ // struct outputs
+ {
+ def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: struct {
+ Int1 *big.Int
+ Int2 *big.Int
+ }{big.NewInt(1), big.NewInt(2)},
+ },
+ {
+ def: `[{"name":"int_one","type":"int256"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int__one","type":"int256"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one_","type":"int256"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001",
+ unpacked: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ unpacked: struct {
+ IntOne *big.Int
+ Intone *big.Int
+ }{big.NewInt(1), big.NewInt(2)},
+ },
+ {
+ def: `[{"type": "string"}]`,
+ unpacked: "foobar",
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000006" +
+ "666f6f6261720000000000000000000000000000000000000000000000000000",
+ },
+ {
+ def: `[{"type": "string[]"}]`,
+ unpacked: []string{"hello", "foobar"},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000", // str[1]
+ },
+ {
+ def: `[{"type": "string[2]"}]`,
+ unpacked: [2]string{"hello", "foobar"},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000", // str[1]
+ },
+ {
+ def: `[{"type": "bytes32[][]"}]`,
+ unpacked: [][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
+ },
+ {
+ def: `[{"type": "bytes32[][2]"}]`,
+ unpacked: [2][][32]byte{{{1}, {2}}, {{3}, {4}, {5}}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
+ },
+ {
+ def: `[{"type": "bytes32[3][2]"}]`,
+ unpacked: [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
+ packed: "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000", // array[1][2]
+ },
+ {
+ // static tuple
+ def: `[{"name":"a","type":"int64"},
+ {"name":"b","type":"int256"},
+ {"name":"c","type":"int256"},
+ {"name":"d","type":"bool"},
+ {"name":"e","type":"bytes32[3][2]"}]`,
+ unpacked: struct {
+ A int64
+ B *big.Int
+ C *big.Int
+ D bool
+ E [2][3][32]byte
+ }{1, big.NewInt(1), big.NewInt(-1), true, [2][3][32]byte{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
+ "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000", // struct[e] array[1][2]
+ },
+ {
+ def: `[{"name":"a","type":"string"},
+ {"name":"b","type":"int64"},
+ {"name":"c","type":"bytes"},
+ {"name":"d","type":"string[]"},
+ {"name":"e","type":"int256[]"},
+ {"name":"f","type":"address[]"}]`,
+ unpacked: struct {
+ FieldA string `abi:"a"` // Test whether abi tag works
+ FieldB int64 `abi:"b"`
+ C []byte
+ D []string
+ E []*big.Int
+ F []common.Address
+ }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
+ packed: "00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+ "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
+ "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
+ "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
+ "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
+ "0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
+ "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
+ "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
+ "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
+ "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
+ "0000000000000000000000000000000000000000000000000000000000000003" + // foo length
+ "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
+ "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
+ "6261720000000000000000000000000000000000000000000000000000000000" + // bar
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // 1
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
+ "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
+ "0000000000000000000000000200000000000000000000000000000000000000", // common.Address{2}
+ },
+ {
+ def: `[{"components": [{"name": "a","type": "uint256"},
+ {"name": "b","type": "uint256[]"}],
+ "name": "a","type": "tuple"},
+ {"name": "b","type": "uint256[]"}]`,
+ unpacked: struct {
+ A struct {
+ FieldA *big.Int `abi:"a"`
+ B []*big.Int
+ }
+ B []*big.Int
+ }{
+ A: struct {
+ FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
+ B []*big.Int
+ }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(2)}},
+ B: []*big.Int{big.NewInt(1), big.NewInt(2)}},
+ packed: "0000000000000000000000000000000000000000000000000000000000000040" + // a offset
+ "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
+ "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
+ "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
+ "0000000000000000000000000000000000000000000000000000000000000002" + // a.b[1] value
+ "0000000000000000000000000000000000000000000000000000000000000002" + // b length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
+ "0000000000000000000000000000000000000000000000000000000000000002", // b[1] value
+ },
+
+ {
+ def: `[{"components": [{"name": "a","type": "int256"},
+ {"name": "b","type": "int256[]"}],
+ "name": "a","type": "tuple[]"}]`,
+ unpacked: []struct {
+ A *big.Int
+ B []*big.Int
+ }{
+ {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(3)}},
+ {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
+ },
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+ "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
+ "0000000000000000000000000000000000000000000000000000000000000003" + // tuple[0].B[1] value
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].B[1] value
+ },
+ {
+ def: `[{"components": [{"name": "a","type": "int256"},
+ {"name": "b","type": "int256"}],
+ "name": "a","type": "tuple[2]"}]`,
+ unpacked: [2]struct {
+ A *big.Int
+ B *big.Int
+ }{
+ {big.NewInt(-1), big.NewInt(1)},
+ {big.NewInt(1), big.NewInt(-1)},
+ },
+ packed: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].b
+ },
+ {
+ def: `[{"components": [{"name": "a","type": "int256[]"}],
+ "name": "a","type": "tuple[2]"}]`,
+ unpacked: [2]struct {
+ A []*big.Int
+ }{
+ {[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
+ {[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
+ },
+ packed: "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+ "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
+ "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
+ "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // tuple[1].A[1]
+ },
+}
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 73ca8fa2bd..4bb6c4fa09 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -18,6 +18,7 @@ package abi
import (
"fmt"
+ "math/big"
"reflect"
"strings"
)
@@ -25,7 +26,7 @@ import (
// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
func indirect(v reflect.Value) reflect.Value {
- if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbigT {
+ if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeOf(big.Int{}) {
return indirect(v.Elem())
}
return v
@@ -39,32 +40,32 @@ func indirectInterfaceOrPtr(v reflect.Value) reflect.Value {
return v
}
-// reflectIntKind returns the reflect using the given size and
+// reflectIntType returns the reflect type of an integer with the given size and
// unsignedness.
-func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type) {
+func reflectIntType(unsigned bool, size int) reflect.Type {
+ if unsigned {
+ switch size {
+ case 8:
+ return reflect.TypeOf(uint8(0))
+ case 16:
+ return reflect.TypeOf(uint16(0))
+ case 32:
+ return reflect.TypeOf(uint32(0))
+ case 64:
+ return reflect.TypeOf(uint64(0))
+ }
+ }
switch size {
case 8:
- if unsigned {
- return reflect.Uint8, uint8T
- }
- return reflect.Int8, int8T
+ return reflect.TypeOf(int8(0))
case 16:
- if unsigned {
- return reflect.Uint16, uint16T
- }
- return reflect.Int16, int16T
+ return reflect.TypeOf(int16(0))
case 32:
- if unsigned {
- return reflect.Uint32, uint32T
- }
- return reflect.Int32, int32T
+ return reflect.TypeOf(int32(0))
case 64:
- if unsigned {
- return reflect.Uint64, uint64T
- }
- return reflect.Int64, int64T
+ return reflect.TypeOf(int64(0))
}
- return reflect.Ptr, bigT
+ return reflect.TypeOf(&big.Int{})
}
// mustArrayToBytesSlice creates a new byte slice with the exact same size as value
@@ -84,12 +85,12 @@ func set(dst, src reflect.Value) error {
switch {
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid():
return set(dst.Elem(), src)
- case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
+ case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
return set(dst.Elem(), src)
case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
- case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
- return setSlice(dst, src)
+ case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice && dst.CanSet():
+ setSlice(dst, src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
@@ -98,15 +99,13 @@ func set(dst, src reflect.Value) error {
// setSlice attempts to assign src to dst when slices are not assignable by default
// e.g. src: [][]byte -> dst: [][15]byte
-func setSlice(dst, src reflect.Value) error {
+// setSlice ignores it if we cannot copy all of src's elements.
+func setSlice(dst, src reflect.Value) {
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
- v := src.Index(i)
- reflect.Copy(slice.Index(i), v)
+ reflect.Copy(slice.Index(i), src.Index(i))
}
-
dst.Set(slice)
- return nil
}
// requireAssignable assures that `dest` is a pointer and it's not an interface.
@@ -118,18 +117,16 @@ func requireAssignable(dst, src reflect.Value) error {
}
// requireUnpackKind verifies preconditions for unpacking `args` into `kind`
-func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
- args Arguments) error {
-
- switch k {
+func requireUnpackKind(v reflect.Value, minLength int, args Arguments) error {
+ switch v.Kind() {
case reflect.Struct:
case reflect.Slice, reflect.Array:
- if minLen := args.LengthNonIndexed(); v.Len() < minLen {
+ if v.Len() < minLength {
return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d",
- minLen, v.Len())
+ minLength, v.Len())
}
default:
- return fmt.Errorf("abi: cannot unmarshal tuple into %v", t)
+ return fmt.Errorf("abi: cannot unmarshal tuple into %v", v.Type())
}
return nil
}
@@ -156,9 +153,8 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
continue
}
// skip fields that have no abi:"" tag.
- var ok bool
- var tagName string
- if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
+ tagName, ok := typ.Field(i).Tag.Lookup("abi")
+ if !ok {
continue
}
// check if tag is empty.
diff --git a/accounts/abi/bind/topics.go b/accounts/abi/topics.go
similarity index 84%
rename from accounts/abi/bind/topics.go
rename to accounts/abi/topics.go
index 7b64f03347..474b9a4a7b 100644
--- a/accounts/abi/bind/topics.go
+++ b/accounts/abi/topics.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package bind
+package abi
import (
"encoding/binary"
@@ -23,13 +23,12 @@ import (
"math/big"
"reflect"
- "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
-// makeTopics converts a filter query argument list into a filter topic set.
-func makeTopics(query ...[]interface{}) ([][]common.Hash, error) {
+// MakeTopics converts a filter query argument list into a filter topic set.
+func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
topics := make([][]common.Hash, len(query))
for i, filter := range query {
for _, rule := range filter {
@@ -112,19 +111,19 @@ func genIntType(rule int64, size uint) []byte {
return topic[:]
}
-// parseTopics converts the indexed topic fields into actual log field values.
-func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error {
+// ParseTopics converts the indexed topic fields into actual log field values.
+func ParseTopics(out interface{}, fields Arguments, topics []common.Hash) error {
return parseTopicWithSetter(fields, topics,
- func(arg abi.Argument, reconstr interface{}) {
- field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name))
+ func(arg Argument, reconstr interface{}) {
+ field := reflect.ValueOf(out).Elem().FieldByName(ToCamelCase(arg.Name))
field.Set(reflect.ValueOf(reconstr))
})
}
-// parseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs
-func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics []common.Hash) error {
+// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs
+func ParseTopicsIntoMap(out map[string]interface{}, fields Arguments, topics []common.Hash) error {
return parseTopicWithSetter(fields, topics,
- func(arg abi.Argument, reconstr interface{}) {
+ func(arg Argument, reconstr interface{}) {
out[arg.Name] = reconstr
})
}
@@ -134,7 +133,7 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics
//
// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256
// hashes as the topic value!
-func parseTopicWithSetter(fields abi.Arguments, topics []common.Hash, setter func(abi.Argument, interface{})) error {
+func parseTopicWithSetter(fields Arguments, topics []common.Hash, setter func(Argument, interface{})) error {
// Sanity check that the fields and topics match up
if len(fields) != len(topics) {
return errors.New("topic/field count mismatch")
@@ -146,13 +145,13 @@ func parseTopicWithSetter(fields abi.Arguments, topics []common.Hash, setter fun
}
var reconstr interface{}
switch arg.Type.T {
- case abi.TupleTy:
+ case TupleTy:
return errors.New("tuple type in topic reconstruction")
- case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy:
+ case StringTy, BytesTy, SliceTy, ArrayTy:
// Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash
// whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash
reconstr = topics[i]
- case abi.FunctionTy:
+ case FunctionTy:
if garbage := binary.BigEndian.Uint64(topics[i][0:8]); garbage != 0 {
return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[i].Bytes())
}
@@ -161,7 +160,7 @@ func parseTopicWithSetter(fields abi.Arguments, topics []common.Hash, setter fun
reconstr = tmp
default:
var err error
- reconstr, err = abi.ToGoType(0, arg.Type, topics[i].Bytes())
+ reconstr, err = toGoType(0, arg.Type, topics[i].Bytes())
if err != nil {
return err
}
diff --git a/accounts/abi/bind/topics_test.go b/accounts/abi/topics_test.go
similarity index 67%
rename from accounts/abi/bind/topics_test.go
rename to accounts/abi/topics_test.go
index 627e43316e..4a539a7116 100644
--- a/accounts/abi/bind/topics_test.go
+++ b/accounts/abi/topics_test.go
@@ -14,14 +14,13 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package bind
+package abi
import (
"math/big"
"reflect"
"testing"
- "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -119,7 +118,7 @@ func TestMakeTopics(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := makeTopics(tt.args.query...)
+ got, err := MakeTopics(tt.args.query...)
if (err != nil) != tt.wantErr {
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -135,7 +134,7 @@ type args struct {
createObj func() interface{}
resultObj func() interface{}
resultMap func() map[string]interface{}
- fields abi.Arguments
+ fields Arguments
topics []common.Hash
}
@@ -149,6 +148,14 @@ type int256Struct struct {
Int256Value *big.Int
}
+type hashStruct struct {
+ HashValue common.Hash
+}
+
+type funcStruct struct {
+ FuncValue [24]byte
+}
+
type topicTest struct {
name string
args args
@@ -156,10 +163,12 @@ type topicTest struct {
}
func setupTopicsTests() []topicTest {
- bytesType, _ := abi.NewType("bytes5", "", nil)
- int8Type, _ := abi.NewType("int8", "", nil)
- int256Type, _ := abi.NewType("int256", "", nil)
- tupleType, _ := abi.NewType("tuple(int256,int8)", "", nil)
+ bytesType, _ := NewType("bytes5", "", nil)
+ int8Type, _ := NewType("int8", "", nil)
+ int256Type, _ := NewType("int256", "", nil)
+ tupleType, _ := NewType("tuple(int256,int8)", "", nil)
+ stringType, _ := NewType("string", "", nil)
+ funcType, _ := NewType("function", "", nil)
tests := []topicTest{
{
@@ -170,7 +179,7 @@ func setupTopicsTests() []topicTest {
resultMap: func() map[string]interface{} {
return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}}
},
- fields: abi.Arguments{abi.Argument{
+ fields: Arguments{Argument{
Name: "staticBytes",
Type: bytesType,
Indexed: true,
@@ -189,7 +198,7 @@ func setupTopicsTests() []topicTest {
resultMap: func() map[string]interface{} {
return map[string]interface{}{"int8Value": int8(-1)}
},
- fields: abi.Arguments{abi.Argument{
+ fields: Arguments{Argument{
Name: "int8Value",
Type: int8Type,
Indexed: true,
@@ -209,7 +218,7 @@ func setupTopicsTests() []topicTest {
resultMap: func() map[string]interface{} {
return map[string]interface{}{"int256Value": big.NewInt(-1)}
},
- fields: abi.Arguments{abi.Argument{
+ fields: Arguments{Argument{
Name: "int256Value",
Type: int256Type,
Indexed: true,
@@ -222,12 +231,55 @@ func setupTopicsTests() []topicTest {
wantErr: false,
},
{
- name: "tuple(int256, int8)",
+ name: "hash type",
+ args: args{
+ createObj: func() interface{} { return &hashStruct{} },
+ resultObj: func() interface{} { return &hashStruct{crypto.Keccak256Hash([]byte("stringtopic"))} },
+ resultMap: func() map[string]interface{} {
+ return map[string]interface{}{"hashValue": crypto.Keccak256Hash([]byte("stringtopic"))}
+ },
+ fields: Arguments{Argument{
+ Name: "hashValue",
+ Type: stringType,
+ Indexed: true,
+ }},
+ topics: []common.Hash{
+ crypto.Keccak256Hash([]byte("stringtopic")),
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "function type",
+ args: args{
+ createObj: func() interface{} { return &funcStruct{} },
+ resultObj: func() interface{} {
+ return &funcStruct{[24]byte{255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ },
+ resultMap: func() map[string]interface{} {
+ return map[string]interface{}{"funcValue": [24]byte{255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ },
+ fields: Arguments{Argument{
+ Name: "funcValue",
+ Type: funcType,
+ Indexed: true,
+ }},
+ topics: []common.Hash{
+ {0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "error on topic/field count mismatch",
args: args{
createObj: func() interface{} { return nil },
resultObj: func() interface{} { return nil },
resultMap: func() map[string]interface{} { return make(map[string]interface{}) },
- fields: abi.Arguments{abi.Argument{
+ fields: Arguments{Argument{
Name: "tupletype",
Type: tupleType,
Indexed: true,
@@ -236,6 +288,59 @@ func setupTopicsTests() []topicTest {
},
wantErr: true,
},
+ {
+ name: "error on unindexed arguments",
+ args: args{
+ createObj: func() interface{} { return &int256Struct{} },
+ resultObj: func() interface{} { return &int256Struct{} },
+ resultMap: func() map[string]interface{} { return make(map[string]interface{}) },
+ fields: Arguments{Argument{
+ Name: "int256Value",
+ Type: int256Type,
+ Indexed: false,
+ }},
+ topics: []common.Hash{
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "error on tuple in topic reconstruction",
+ args: args{
+ createObj: func() interface{} { return &tupleType },
+ resultObj: func() interface{} { return &tupleType },
+ resultMap: func() map[string]interface{} { return make(map[string]interface{}) },
+ fields: Arguments{Argument{
+ Name: "tupletype",
+ Type: tupleType,
+ Indexed: true,
+ }},
+ topics: []common.Hash{{0}},
+ },
+ wantErr: true,
+ },
+ {
+ name: "error on improper encoded function",
+ args: args{
+ createObj: func() interface{} { return &funcStruct{} },
+ resultObj: func() interface{} { return &funcStruct{} },
+ resultMap: func() map[string]interface{} {
+ return make(map[string]interface{})
+ },
+ fields: Arguments{Argument{
+ Name: "funcValue",
+ Type: funcType,
+ Indexed: true,
+ }},
+ topics: []common.Hash{
+ {0, 0, 0, 0, 0, 0, 0, 128, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ wantErr: true,
+ },
}
return tests
@@ -247,7 +352,7 @@ func TestParseTopics(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
createObj := tt.args.createObj()
- if err := parseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
+ if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
}
resultObj := tt.args.resultObj()
@@ -264,7 +369,7 @@ func TestParseTopicsIntoMap(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
outMap := make(map[string]interface{})
- if err := parseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
+ if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
}
resultMap := tt.args.resultMap()
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 4792283ee8..5de8bc9ff4 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -23,6 +23,8 @@ import (
"regexp"
"strconv"
"strings"
+
+ "github.com/ethereum/go-ethereum/common"
)
// Type enumerator
@@ -45,17 +47,16 @@ const (
// Type is the reflection of the supported argument type
type Type struct {
Elem *Type
- Kind reflect.Kind
- Type reflect.Type
Size int
T byte // Our own type checking
stringKind string // holds the unparsed string for deriving signatures
// Tuple relative fields
- TupleRawName string // Raw struct name defined in source code, may be empty.
- TupleElems []*Type // Type information of all tuple fields
- TupleRawNames []string // Raw field name of all tuple fields
+ TupleRawName string // Raw struct name defined in source code, may be empty.
+ TupleElems []*Type // Type information of all tuple fields
+ TupleRawNames []string // Raw field name of all tuple fields
+ TupleType reflect.Type // Underlying struct of the tuple
}
var (
@@ -94,20 +95,16 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
if len(intz) == 0 {
// is a slice
typ.T = SliceTy
- typ.Kind = reflect.Slice
typ.Elem = &embeddedType
- typ.Type = reflect.SliceOf(embeddedType.Type)
typ.stringKind = embeddedType.stringKind + sliced
} else if len(intz) == 1 {
// is a array
typ.T = ArrayTy
- typ.Kind = reflect.Array
typ.Elem = &embeddedType
typ.Size, err = strconv.Atoi(intz[0])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
- typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
typ.stringKind = embeddedType.stringKind + sliced
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
@@ -139,36 +136,24 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
// varType is the parsed abi type
switch varType := parsedType[1]; varType {
case "int":
- typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
typ.Size = varSize
typ.T = IntTy
case "uint":
- typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
typ.Size = varSize
typ.T = UintTy
case "bool":
- typ.Kind = reflect.Bool
typ.T = BoolTy
- typ.Type = reflect.TypeOf(bool(false))
case "address":
- typ.Kind = reflect.Array
- typ.Type = addressT
typ.Size = 20
typ.T = AddressTy
case "string":
- typ.Kind = reflect.String
- typ.Type = reflect.TypeOf("")
typ.T = StringTy
case "bytes":
if varSize == 0 {
typ.T = BytesTy
- typ.Kind = reflect.Slice
- typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
} else {
typ.T = FixedBytesTy
- typ.Kind = reflect.Array
typ.Size = varSize
- typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
case "tuple":
var (
@@ -178,17 +163,20 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
expression string // canonical parameter expression
)
expression += "("
+ overloadedNames := make(map[string]string)
for idx, c := range components {
cType, err := NewType(c.Type, c.InternalType, c.Components)
if err != nil {
return Type{}, err
}
- if ToCamelCase(c.Name) == "" {
- return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
+ fieldName, err := overloadedArgName(c.Name, overloadedNames)
+ if err != nil {
+ return Type{}, err
}
+ overloadedNames[fieldName] = fieldName
fields = append(fields, reflect.StructField{
- Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any exported field.
- Type: cType.Type,
+ Name: fieldName, // reflect.StructOf will panic for any exported field.
+ Type: cType.getType(),
Tag: reflect.StructTag("json:\"" + c.Name + "\""),
})
elems = append(elems, &cType)
@@ -199,8 +187,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
}
}
expression += ")"
- typ.Kind = reflect.Struct
- typ.Type = reflect.StructOf(fields)
+
+ typ.TupleType = reflect.StructOf(fields)
typ.TupleElems = elems
typ.TupleRawNames = names
typ.T = TupleTy
@@ -217,10 +205,8 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
}
case "function":
- typ.Kind = reflect.Array
typ.T = FunctionTy
typ.Size = 24
- typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
@@ -228,6 +214,55 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
return
}
+func (t Type) getType() reflect.Type {
+ switch t.T {
+ case IntTy:
+ return reflectIntType(false, t.Size)
+ case UintTy:
+ return reflectIntType(true, t.Size)
+ case BoolTy:
+ return reflect.TypeOf(false)
+ case StringTy:
+ return reflect.TypeOf("")
+ case SliceTy:
+ return reflect.SliceOf(t.Elem.getType())
+ case ArrayTy:
+ return reflect.ArrayOf(t.Size, t.Elem.getType())
+ case TupleTy:
+ return t.TupleType
+ case AddressTy:
+ return reflect.TypeOf(common.Address{})
+ case FixedBytesTy:
+ return reflect.ArrayOf(t.Size, reflect.TypeOf(byte(0)))
+ case BytesTy:
+ return reflect.SliceOf(reflect.TypeOf(byte(0)))
+ case HashTy:
+ // hashtype currently not used
+ return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
+ case FixedPointTy:
+ // fixedpoint type currently not used
+ return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
+ case FunctionTy:
+ return reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
+ default:
+ panic("Invalid type")
+ }
+}
+
+func overloadedArgName(rawName string, names map[string]string) (string, error) {
+ fieldName := ToCamelCase(rawName)
+ if fieldName == "" {
+ return "", errors.New("abi: purely anonymous or underscored field is not supported")
+ }
+ // Handle overloaded fieldNames
+ _, ok := names[fieldName]
+ for idx := 0; ok; idx++ {
+ fieldName = fmt.Sprintf("%s%d", ToCamelCase(rawName), idx)
+ _, ok = names[fieldName]
+ }
+ return fieldName, nil
+}
+
// String implements Stringer
func (t Type) String() (out string) {
return t.stringKind
diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go
index a2c78dc2e0..566f991c54 100644
--- a/accounts/abi/type_test.go
+++ b/accounts/abi/type_test.go
@@ -36,58 +36,58 @@ func TestTypeRegexp(t *testing.T) {
components []ArgumentMarshaling
kind Type
}{
- {"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
- {"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
- {"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
- {"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
- {"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
- {"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
- {"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
- {"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
- {"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
- {"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
- {"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
- {"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
- {"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
- {"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
- {"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
- {"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
- {"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
- {"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
- {"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
- {"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
- {"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
- {"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
- {"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
- {"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
- {"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
- {"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
- {"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
- {"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
- {"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
- {"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
- {"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
- {"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
- {"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
- {"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
- {"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
- {"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
- {"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
- {"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
- {"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
- {"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
- {"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
- {"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
- {"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
- {"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
- {"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
- {"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
- {"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
- {"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
- {"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
- {"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
- {"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
- {"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
+ {"bool", nil, Type{T: BoolTy, stringKind: "bool"}},
+ {"bool[]", nil, Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}},
+ {"bool[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}},
+ {"bool[2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
+ {"bool[][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
+ {"bool[][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
+ {"bool[2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
+ {"bool[2][][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
+ {"bool[2][2][2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
+ {"bool[][][]", nil, Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
+ {"bool[][2][]", nil, Type{T: SliceTy, Elem: &Type{T: ArrayTy, Size: 2, Elem: &Type{T: SliceTy, Elem: &Type{T: BoolTy, stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
+ {"int8", nil, Type{Size: 8, T: IntTy, stringKind: "int8"}},
+ {"int16", nil, Type{Size: 16, T: IntTy, stringKind: "int16"}},
+ {"int32", nil, Type{Size: 32, T: IntTy, stringKind: "int32"}},
+ {"int64", nil, Type{Size: 64, T: IntTy, stringKind: "int64"}},
+ {"int256", nil, Type{Size: 256, T: IntTy, stringKind: "int256"}},
+ {"int8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
+ {"int8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
+ {"int16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
+ {"int16[2]", nil, Type{Size: 2, T: ArrayTy, Elem: &Type{Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
+ {"int32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
+ {"int32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
+ {"int64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
+ {"int64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
+ {"int256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
+ {"int256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
+ {"uint8", nil, Type{Size: 8, T: UintTy, stringKind: "uint8"}},
+ {"uint16", nil, Type{Size: 16, T: UintTy, stringKind: "uint16"}},
+ {"uint32", nil, Type{Size: 32, T: UintTy, stringKind: "uint32"}},
+ {"uint64", nil, Type{Size: 64, T: UintTy, stringKind: "uint64"}},
+ {"uint256", nil, Type{Size: 256, T: UintTy, stringKind: "uint256"}},
+ {"uint8[]", nil, Type{T: SliceTy, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
+ {"uint8[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
+ {"uint16[]", nil, Type{T: SliceTy, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
+ {"uint16[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
+ {"uint32[]", nil, Type{T: SliceTy, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
+ {"uint32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
+ {"uint64[]", nil, Type{T: SliceTy, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
+ {"uint64[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
+ {"uint256[]", nil, Type{T: SliceTy, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
+ {"uint256[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
+ {"bytes32", nil, Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}},
+ {"bytes[]", nil, Type{T: SliceTy, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
+ {"bytes[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[2]"}},
+ {"bytes32[]", nil, Type{T: SliceTy, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
+ {"bytes32[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
+ {"string", nil, Type{T: StringTy, stringKind: "string"}},
+ {"string[]", nil, Type{T: SliceTy, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
+ {"string[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{T: StringTy, stringKind: "string"}, stringKind: "string[2]"}},
+ {"address", nil, Type{Size: 20, T: AddressTy, stringKind: "address"}},
+ {"address[]", nil, Type{T: SliceTy, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
+ {"address[2]", nil, Type{T: ArrayTy, Size: 2, Elem: &Type{Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
// {"fixed", nil, Type{}},
// {"fixed128x128", nil, Type{}},
@@ -95,14 +95,14 @@ func TestTypeRegexp(t *testing.T) {
// {"fixed[2]", nil, Type{}},
// {"fixed128x128[]", nil, Type{}},
// {"fixed128x128[2]", nil, Type{}},
- {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct {
+ {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct {
A int64 `json:"a"`
}{}), stringKind: "(int64)",
- TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
- {"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct {
+ TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
+ {"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{T: TupleTy, TupleType: reflect.TypeOf(struct {
ATypicalParamName int64 `json:"aTypicalParamName"`
}{}), stringKind: "(int64)",
- TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}},
+ TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}},
}
for _, tt := range tests {
@@ -306,3 +306,27 @@ func TestTypeCheck(t *testing.T) {
}
}
}
+
+func TestInternalType(t *testing.T) {
+ components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
+ internalType := "struct a.b[]"
+ kind := Type{
+ T: TupleTy,
+ TupleType: reflect.TypeOf(struct {
+ A int64 `json:"a"`
+ }{}),
+ stringKind: "(int64)",
+ TupleRawName: "ab[]",
+ TupleElems: []*Type{{T: IntTy, Size: 64, stringKind: "int64"}},
+ TupleRawNames: []string{"a"},
+ }
+
+ blob := "tuple"
+ typ, err := NewType(blob, internalType, components)
+ if err != nil {
+ t.Errorf("type %q: failed to parse type string: %v", blob, err)
+ }
+ if !reflect.DeepEqual(typ, kind) {
+ t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(kind)))
+ }
+}
diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go
index 01aac707f1..c206c6ab25 100644
--- a/accounts/abi/unpack.go
+++ b/accounts/abi/unpack.go
@@ -33,33 +33,37 @@ var (
)
// ReadInteger reads the integer based on its kind and returns the appropriate value
-func ReadInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
- switch kind {
- case reflect.Uint8:
- return b[len(b)-1]
- case reflect.Uint16:
- return binary.BigEndian.Uint16(b[len(b)-2:])
- case reflect.Uint32:
- return binary.BigEndian.Uint32(b[len(b)-4:])
- case reflect.Uint64:
- return binary.BigEndian.Uint64(b[len(b)-8:])
- case reflect.Int8:
+func ReadInteger(typ Type, b []byte) interface{} {
+ if typ.T == UintTy {
+ switch typ.Size {
+ case 8:
+ return b[len(b)-1]
+ case 16:
+ return binary.BigEndian.Uint16(b[len(b)-2:])
+ case 32:
+ return binary.BigEndian.Uint32(b[len(b)-4:])
+ case 64:
+ return binary.BigEndian.Uint64(b[len(b)-8:])
+ default:
+ // the only case left for unsigned integer is uint256.
+ return new(big.Int).SetBytes(b)
+ }
+ }
+ switch typ.Size {
+ case 8:
return int8(b[len(b)-1])
- case reflect.Int16:
+ case 16:
return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
- case reflect.Int32:
+ case 32:
return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
- case reflect.Int64:
+ case 64:
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
default:
- // the only case left for integer is int256/uint256.
- ret := new(big.Int).SetBytes(b)
- if typ == UintTy {
- return ret
- }
+ // the only case left for integer is int256
// big.SetBytes can't tell if a number is negative or positive in itself.
// On EVM, if the returned number > max int256, it is negative.
// A number is > max int256 if the bit at position 255 is set.
+ ret := new(big.Int).SetBytes(b)
if ret.Bit(255) == 1 {
ret.Add(MaxUint256, new(big.Int).Neg(ret))
ret.Add(ret, common.Big1)
@@ -106,7 +110,7 @@ func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
}
// convert
- array := reflect.New(t.Type).Elem()
+ array := reflect.New(t.getType()).Elem()
reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
return array.Interface(), nil
@@ -127,10 +131,10 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
if t.T == SliceTy {
// declare our slice
- refSlice = reflect.MakeSlice(t.Type, size, size)
+ refSlice = reflect.MakeSlice(t.getType(), size, size)
} else if t.T == ArrayTy {
// declare our array
- refSlice = reflect.New(t.Type).Elem()
+ refSlice = reflect.New(t.getType()).Elem()
} else {
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
}
@@ -140,7 +144,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
elemSize := getTypeSize(*t.Elem)
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
- inter, err := ToGoType(i, *t.Elem, output)
+ inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
}
@@ -154,10 +158,10 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
}
func forTupleUnpack(t Type, output []byte) (interface{}, error) {
- retval := reflect.New(t.Type).Elem()
+ retval := reflect.New(t.getType()).Elem()
virtualArgs := 0
for index, elem := range t.TupleElems {
- marshalledValue, err := ToGoType((index+virtualArgs)*32, *elem, output)
+ marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
@@ -183,9 +187,9 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
return retval.Interface(), nil
}
-// ToGoType parses the output bytes and recursively assigns the value of these bytes
+// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type with accordance with the ABI spec.
-func ToGoType(index int, t Type, output []byte) (interface{}, error) {
+func toGoType(index int, t Type, output []byte) (interface{}, error) {
if index+32 > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
}
@@ -214,9 +218,8 @@ func ToGoType(index int, t Type, output []byte) (interface{}, error) {
return nil, err
}
return forTupleUnpack(t, output[begin:])
- } else {
- return forTupleUnpack(t, output[index:])
}
+ return forTupleUnpack(t, output[index:])
case SliceTy:
return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
@@ -228,7 +231,7 @@ func ToGoType(index int, t Type, output []byte) (interface{}, error) {
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+length]), nil
case IntTy, UintTy:
- return ReadInteger(t.T, t.Kind, returnOutput), nil
+ return ReadInteger(t, returnOutput), nil
case BoolTy:
return readBool(returnOutput)
case AddressTy:
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 0622e0adce..7a590b4b4b 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -30,6 +30,34 @@ import (
"github.com/stretchr/testify/require"
)
+// TestUnpack tests the general pack/unpack tests in packing_test.go
+func TestUnpack(t *testing.T) {
+ for i, test := range packUnpackTests {
+ t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) {
+ //Unpack
+ def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
+ abi, err := JSON(strings.NewReader(def))
+ if err != nil {
+ t.Fatalf("invalid ABI definition %s: %v", def, err)
+ }
+ encb, err := hex.DecodeString(test.packed)
+ if err != nil {
+ t.Fatalf("invalid hex %s: %v", test.packed, err)
+ }
+ outptr := reflect.New(reflect.TypeOf(test.unpacked))
+ err = abi.Unpack(outptr.Interface(), "method", encb)
+ if err != nil {
+ t.Errorf("test %d (%v) failed: %v", i, test.def, err)
+ return
+ }
+ out := outptr.Elem().Interface()
+ if !reflect.DeepEqual(test.unpacked, out) {
+ t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.unpacked, out)
+ }
+ })
+ }
+}
+
type unpackTest struct {
def string // ABI definition JSON
enc string // evm return data
@@ -52,16 +80,6 @@ func (test unpackTest) checkError(err error) error {
var unpackTests = []unpackTest{
// Bools
- {
- def: `[{ "type": "bool" }]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000001",
- want: true,
- },
- {
- def: `[{ "type": "bool" }]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000000",
- want: false,
- },
{
def: `[{ "type": "bool" }]`,
enc: "0000000000000000000000000000000000000000000000000001000000000001",
@@ -75,11 +93,6 @@ var unpackTests = []unpackTest{
err: "abi: improperly encoded boolean value",
},
// Integers
- {
- def: `[{"type": "uint32"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000001",
- want: uint32(1),
- },
{
def: `[{"type": "uint32"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000001",
@@ -92,16 +105,6 @@ var unpackTests = []unpackTest{
want: uint16(0),
err: "abi: cannot unmarshal *big.Int in to uint16",
},
- {
- def: `[{"type": "uint17"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000001",
- want: big.NewInt(1),
- },
- {
- def: `[{"type": "int32"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000001",
- want: int32(1),
- },
{
def: `[{"type": "int32"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000001",
@@ -114,33 +117,6 @@ var unpackTests = []unpackTest{
want: int16(0),
err: "abi: cannot unmarshal *big.Int in to int16",
},
- {
- def: `[{"type": "int17"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000001",
- want: big.NewInt(1),
- },
- {
- def: `[{"type": "int256"}]`,
- enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
- want: big.NewInt(-1),
- },
- // Address
- {
- def: `[{"type": "address"}]`,
- enc: "0000000000000000000000000100000000000000000000000000000000000000",
- want: common.Address{1},
- },
- // Bytes
- {
- def: `[{"type": "bytes32"}]`,
- enc: "0100000000000000000000000000000000000000000000000000000000000000",
- want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- },
- {
- def: `[{"type": "bytes"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
- want: common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
{
def: `[{"type": "bytes"}]`,
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200100000000000000000000000000000000000000000000000000000000000000",
@@ -153,237 +129,6 @@ var unpackTests = []unpackTest{
want: []byte(nil),
err: "abi: cannot unmarshal [32]uint8 in to []uint8",
},
- {
- def: `[{"type": "bytes32"}]`,
- enc: "0100000000000000000000000000000000000000000000000000000000000000",
- want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- },
- // Functions
- {
- def: `[{"type": "function"}]`,
- enc: "0100000000000000000000000000000000000000000000000000000000000000",
- want: [24]byte{1},
- },
- // Slice and Array
- {
- def: `[{"type": "uint8[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []uint8{1, 2},
- },
- {
- def: `[{"type": "uint8[]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
- want: []uint8{},
- },
- {
- def: `[{"type": "uint256[]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
- want: []*big.Int{},
- },
- {
- def: `[{"type": "uint8[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]uint8{1, 2},
- },
- // multi dimensional, if these pass, all types that don't require length prefix should pass
- {
- def: `[{"type": "uint8[][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
- want: [][]uint8{},
- },
- {
- def: `[{"type": "uint8[][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [][]uint8{{1, 2}, {1, 2}},
- },
- {
- def: `[{"type": "uint8[][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
- want: [][]uint8{{1, 2}, {1, 2, 3}},
- },
- {
- def: `[{"type": "uint8[2][2]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2][2]uint8{{1, 2}, {1, 2}},
- },
- {
- def: `[{"type": "uint8[][2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- want: [2][]uint8{{}, {}},
- },
- {
- def: `[{"type": "uint8[][2]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
- want: [2][]uint8{{1}, {1}},
- },
- {
- def: `[{"type": "uint8[2][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
- want: [][2]uint8{},
- },
- {
- def: `[{"type": "uint8[2][]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [][2]uint8{{1, 2}},
- },
- {
- def: `[{"type": "uint8[2][]"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [][2]uint8{{1, 2}, {1, 2}},
- },
- {
- def: `[{"type": "uint16[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []uint16{1, 2},
- },
- {
- def: `[{"type": "uint16[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]uint16{1, 2},
- },
- {
- def: `[{"type": "uint32[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []uint32{1, 2},
- },
- {
- def: `[{"type": "uint32[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]uint32{1, 2},
- },
- {
- def: `[{"type": "uint32[2][3][4]"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018",
- want: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
- },
- {
- def: `[{"type": "uint64[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []uint64{1, 2},
- },
- {
- def: `[{"type": "uint64[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]uint64{1, 2},
- },
- {
- def: `[{"type": "uint256[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []*big.Int{big.NewInt(1), big.NewInt(2)},
- },
- {
- def: `[{"type": "uint256[3]"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
- want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
- },
- {
- def: `[{"type": "string[4]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
- want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
- },
- {
- def: `[{"type": "string[]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
- want: []string{"Ethereum", "go-ethereum"},
- },
- {
- def: `[{"type": "bytes[]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
- want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
- },
- {
- def: `[{"type": "uint256[2][][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
- want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
- },
- {
- def: `[{"type": "int8[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []int8{1, 2},
- },
- {
- def: `[{"type": "int8[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]int8{1, 2},
- },
- {
- def: `[{"type": "int16[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []int16{1, 2},
- },
- {
- def: `[{"type": "int16[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]int16{1, 2},
- },
- {
- def: `[{"type": "int32[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []int32{1, 2},
- },
- {
- def: `[{"type": "int32[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]int32{1, 2},
- },
- {
- def: `[{"type": "int64[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []int64{1, 2},
- },
- {
- def: `[{"type": "int64[2]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: [2]int64{1, 2},
- },
- {
- def: `[{"type": "int256[]"}]`,
- enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: []*big.Int{big.NewInt(1), big.NewInt(2)},
- },
- {
- def: `[{"type": "int256[3]"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
- want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
- },
- // struct outputs
- {
- def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: struct {
- Int1 *big.Int
- Int2 *big.Int
- }{big.NewInt(1), big.NewInt(2)},
- },
- {
- def: `[{"name":"int_one","type":"int256"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: struct {
- IntOne *big.Int
- }{big.NewInt(1)},
- },
- {
- def: `[{"name":"int__one","type":"int256"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: struct {
- IntOne *big.Int
- }{big.NewInt(1)},
- },
- {
- def: `[{"name":"int_one_","type":"int256"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: struct {
- IntOne *big.Int
- }{big.NewInt(1)},
- },
- {
- def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
- want: struct {
- IntOne *big.Int
- Intone *big.Int
- }{big.NewInt(1), big.NewInt(2)},
- },
{
def: `[{"name":"___","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -438,11 +183,36 @@ var unpackTests = []unpackTest{
}{},
err: "abi: purely underscored output cannot unpack to struct",
},
+ // Make sure only the first argument is consumed
+ {
+ def: `[{"name":"int_one","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int__one","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one_","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
}
-func TestUnpack(t *testing.T) {
+// TestLocalUnpackTests runs test specially designed only for unpacking.
+// All test cases that can be used to test packing and unpacking should move to packing_test.go
+func TestLocalUnpackTests(t *testing.T) {
for i, test := range unpackTests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
+ // Unpack the encoded output using the ABI definition built below.
def := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, test.def)
abi, err := JSON(strings.NewReader(def))
if err != nil {
diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go
index 46117ad967..b9519420f3 100644
--- a/accounts/keystore/keystore.go
+++ b/accounts/keystore/keystore.go
@@ -24,7 +24,6 @@ import (
"crypto/ecdsa"
crand "crypto/rand"
"errors"
- "fmt"
"math/big"
"os"
"path/filepath"
@@ -68,7 +67,8 @@ type KeyStore struct {
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
updating bool // Whether the event notification loop is running
- mu sync.RWMutex
+ mu sync.RWMutex
+ importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing
}
type unlocked struct {
@@ -454,14 +454,21 @@ func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (ac
if err != nil {
return accounts.Account{}, err
}
+ ks.importMu.Lock()
+ defer ks.importMu.Unlock()
+ if ks.cache.hasAddress(key.Address) {
+ return accounts.Account{}, errors.New("account already exists")
+ }
return ks.importKey(key, newPassphrase)
}
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) {
key := newKeyFromECDSA(priv)
+ ks.importMu.Lock()
+ defer ks.importMu.Unlock()
if ks.cache.hasAddress(key.Address) {
- return accounts.Account{}, fmt.Errorf("account already exists")
+ return accounts.Account{}, errors.New("account already exists")
}
return ks.importKey(key, passphrase)
}
diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go
index a691c50627..29c251d7c1 100644
--- a/accounts/keystore/keystore_test.go
+++ b/accounts/keystore/keystore_test.go
@@ -23,11 +23,14 @@ import (
"runtime"
"sort"
"strings"
+ "sync"
+ "sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
)
@@ -338,6 +341,88 @@ func TestWalletNotifications(t *testing.T) {
checkEvents(t, wantEvents, events)
}
+// TestImportECDSA tests the import functionality of a keystore.
+func TestImportECDSA(t *testing.T) {
+ dir, ks := tmpKeyStore(t, true)
+ defer os.RemoveAll(dir)
+ key, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatalf("failed to generate key: %v", err)
+ }
+ if _, err = ks.ImportECDSA(key, "old"); err != nil {
+ t.Errorf("importing failed: %v", err)
+ }
+ if _, err = ks.ImportECDSA(key, "old"); err == nil {
+ t.Errorf("importing same key twice succeeded")
+ }
+ if _, err = ks.ImportECDSA(key, "new"); err == nil {
+ t.Errorf("importing same key twice succeeded")
+ }
+}
+
+// TestImportExport tests the import and export functionality of a keystore.
+func TestImportExport(t *testing.T) {
+ dir, ks := tmpKeyStore(t, true)
+ defer os.RemoveAll(dir)
+ acc, err := ks.NewAccount("old")
+ if err != nil {
+ t.Fatalf("failed to create account: %v", err)
+ }
+ json, err := ks.Export(acc, "old", "new")
+ if err != nil {
+ t.Fatalf("failed to export account: %v", err)
+ }
+ dir2, ks2 := tmpKeyStore(t, true)
+ defer os.RemoveAll(dir2)
+ if _, err = ks2.Import(json, "old", "old"); err == nil {
+ t.Errorf("importing with invalid password succeeded")
+ }
+ acc2, err := ks2.Import(json, "new", "new")
+ if err != nil {
+ t.Errorf("importing failed: %v", err)
+ }
+ if acc.Address != acc2.Address {
+ t.Error("imported account does not match exported account")
+ }
+ if _, err = ks2.Import(json, "new", "new"); err == nil {
+ t.Errorf("importing a key twice succeeded")
+ }
+
+}
+
+// TestImportRace tests the keystore on races.
+// This test should fail under -race if importing races.
+func TestImportRace(t *testing.T) {
+ dir, ks := tmpKeyStore(t, true)
+ defer os.RemoveAll(dir)
+ acc, err := ks.NewAccount("old")
+ if err != nil {
+ t.Fatalf("failed to create account: %v", err)
+ }
+ json, err := ks.Export(acc, "old", "new")
+ if err != nil {
+ t.Fatalf("failed to export account: %v", err)
+ }
+ dir2, ks2 := tmpKeyStore(t, true)
+ defer os.RemoveAll(dir2)
+ var atom uint32
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ if _, err := ks2.Import(json, "new", "new"); err != nil {
+ atomic.AddUint32(&atom, 1)
+ }
+
+ }()
+ }
+ wg.Wait()
+ if atom != 1 {
+ t.Errorf("Import is racy")
+ }
+}
+
// checkAccounts checks that all known live accounts are present in the wallet list.
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
if len(live) != len(wallets) {
diff --git a/build/ci.go b/build/ci.go
index 0fa3781b5a..43893f68f0 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -329,7 +329,7 @@ func doTest(cmdline []string) {
// Test a single package at a time. CI builders are slow
// and some tests run into timeouts under load.
gotest := goTool("test", buildFlags(env)...)
- gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m", "--short")
+ gotest.Args = append(gotest.Args, "-p", "1", "--short")
if *coverage {
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
}
diff --git a/build/nsis.install.nsh b/build/nsis.install.nsh
index 57ef5a37c6..9b73148a44 100644
--- a/build/nsis.install.nsh
+++ b/build/nsis.install.nsh
@@ -19,9 +19,9 @@ Section "Geth" GETH_IDX
# Create start menu launcher
createDirectory "$SMPROGRAMS\${APPNAME}"
- createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" "--fast" "--cache=512"
- createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" "" ""
- createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "" ""
+ createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe"
+ createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach"
+ createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe"
# Firewall - remove rules (if exists)
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
diff --git a/cmd/clef/main.go b/cmd/clef/main.go
index ca31f1f408..5af2f14be2 100644
--- a/cmd/clef/main.go
+++ b/cmd/clef/main.go
@@ -232,11 +232,11 @@ func init() {
utils.LightKDFFlag,
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
- utils.RPCListenAddrFlag,
- utils.RPCVirtualHostsFlag,
+ utils.HTTPListenAddrFlag,
+ utils.HTTPVirtualHostsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
- utils.RPCEnabledFlag,
+ utils.HTTPEnabledFlag,
rpcPortFlag,
signerSecretFlag,
customDBFlag,
@@ -610,9 +610,9 @@ func signer(c *cli.Context) error {
}
//
- if c.GlobalBool(utils.RPCEnabledFlag.Name) {
- vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
- cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
+ if c.GlobalBool(utils.HTTPEnabledFlag.Name) {
+ vhosts := splitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name))
+ cors := splitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name))
srv := rpc.NewServer()
err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false)
@@ -622,16 +622,17 @@ func signer(c *cli.Context) error {
handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
// start http server
- httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
- listener, _, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler, nil)
+ httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
+ httpServer, addr, _, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler, nil)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
- extapiURL = fmt.Sprintf("http://%v/", listener.Addr())
+ extapiURL = fmt.Sprintf("http://%v/", addr)
log.Info("HTTP endpoint opened", "url", extapiURL)
defer func() {
- listener.Close()
+ // Don't bother imposing a timeout here.
+ httpServer.Shutdown(context.Background())
log.Info("HTTP endpoint closed", "url", extapiURL)
}()
}
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 52c1eca715..6f9e47cf50 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -96,7 +96,7 @@ func stateTestCmd(ctx *cli.Context) error {
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
- state, err := test.Run(st, cfg, false)
+ _, state, err := test.Run(st, cfg, false)
// print state root for evmlab tracing
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go
index 45c3720b6c..32070e11bf 100644
--- a/cmd/geth/accountcmd_test.go
+++ b/cmd/geth/accountcmd_test.go
@@ -100,18 +100,23 @@ Path of the secret key file: .*UTC--.+--[0-9a-f]{40}
}
func TestAccountImport(t *testing.T) {
- tests := []struct{ key, output string }{
+ tests := []struct{ name, key, output string }{
{
+ name: "correct account",
key: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
output: "Address: {fcad0b19bb29d4674531d6f115237e16afce377c}\n",
},
{
+ name: "invalid character",
key: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef1",
output: "Fatal: Failed to load the private key: invalid character '1' at end of key file\n",
},
}
for _, test := range tests {
- importAccountWithExpect(t, test.key, test.output)
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ importAccountWithExpect(t, test.key, test.output)
+ })
}
}
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 59317bf455..54a9469ba0 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -92,6 +92,7 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBTagsFlag,
+ utils.TxLookupLimitFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -159,6 +160,7 @@ The export-preimages command export hash preimages to an RLP encoded stream`,
utils.FakePoWFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
+ utils.TxLookupLimitFlag,
utils.GoerliFlag,
utils.LegacyTestnetFlag,
},
@@ -304,7 +306,7 @@ func importChain(ctx *cli.Context) error {
stack := makeFullNode(ctx)
defer stack.Close()
- chain, db := utils.MakeChain(ctx, stack, true)
+ chain, db := utils.MakeChain(ctx, stack, false, true)
defer db.Close()
// Start periodically gathering memory profiles
@@ -394,7 +396,7 @@ func exportChain(ctx *cli.Context) error {
stack := makeFullNode(ctx)
defer stack.Close()
- chain, _ := utils.MakeChain(ctx, stack, true)
+ chain, _ := utils.MakeChain(ctx, stack, true, true)
start := time.Now()
var err error
@@ -469,7 +471,7 @@ func copyDb(ctx *cli.Context) error {
stack := makeFullNode(ctx)
defer stack.Close()
- chain, chainDb := utils.MakeChain(ctx, stack, false)
+ chain, chainDb := utils.MakeChain(ctx, stack, false, false)
syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
var syncBloom *trie.SyncBloom
@@ -577,7 +579,7 @@ func dump(ctx *cli.Context) error {
stack := makeFullNode(ctx)
defer stack.Close()
- chain, chainDb := utils.MakeChain(ctx, stack, false)
+ chain, chainDb := utils.MakeChain(ctx, stack, true, false)
defer chainDb.Close()
for _, arg := range ctx.Args() {
var block *types.Block
@@ -616,7 +618,7 @@ func inspect(ctx *cli.Context) error {
node, _ := makeConfigNode(ctx)
defer node.Close()
- _, chainDb := utils.MakeChain(ctx, node, false)
+ _, chainDb := utils.MakeChain(ctx, node, true, false)
defer chainDb.Close()
return rawdb.InspectDatabase(chainDb)
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 3c27360b7d..5ba1dc9cab 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -20,7 +20,6 @@ import (
"bufio"
"errors"
"fmt"
- "math/big"
"os"
"reflect"
"unicode"
@@ -150,14 +149,6 @@ func enableWhisper(ctx *cli.Context) bool {
func makeFullNode(ctx *cli.Context) *node.Node {
stack, cfg := makeConfigNode(ctx)
- if ctx.GlobalIsSet(utils.OverrideIstanbulFlag.Name) {
- cfg.Eth.OverrideIstanbul = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideIstanbulFlag.Name))
- }
-
- if ctx.GlobalIsSet(utils.OverrideMuirGlacierFlag.Name) {
- cfg.Eth.OverrideMuirGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideMuirGlacierFlag.Name))
- }
-
ethChan := utils.RegisterEthService(stack, &cfg.Eth)
// plugin service must be after eth service so that eth service will be stopped gradually if any of the plugin
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 6d9c56dd15..e40273170c 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -67,16 +67,14 @@ var (
utils.UnlockedAccountFlag,
utils.PasswordFileFlag,
utils.BootnodesFlag,
- utils.BootnodesV4Flag,
- utils.BootnodesV5Flag,
+ utils.LegacyBootnodesV4Flag,
+ utils.LegacyBootnodesV5Flag,
utils.DataDirFlag,
utils.AncientFlag,
utils.KeyStoreDirFlag,
utils.ExternalSignerFlag,
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
- utils.OverrideIstanbulFlag,
- utils.OverrideMuirGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
@@ -100,12 +98,13 @@ var (
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.SnapshotFlag,
+ utils.TxLookupLimitFlag,
utils.LightServeFlag,
- utils.LightLegacyServFlag,
+ utils.LegacyLightServFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
utils.LightMaxPeersFlag,
- utils.LightLegacyPeersFlag,
+ utils.LegacyLightPeersFlag,
utils.LightKDFFlag,
utils.UltraLightServersFlag,
utils.UltraLightFractionFlag,
@@ -122,17 +121,17 @@ var (
utils.MaxPendingPeersFlag,
utils.MiningEnabledFlag,
utils.MinerThreadsFlag,
- utils.MinerLegacyThreadsFlag,
+ utils.LegacyMinerThreadsFlag,
utils.MinerNotifyFlag,
utils.MinerGasTargetFlag,
- utils.MinerLegacyGasTargetFlag,
+ utils.LegacyMinerGasTargetFlag,
utils.MinerGasLimitFlag,
utils.MinerGasPriceFlag,
- utils.MinerLegacyGasPriceFlag,
+ utils.LegacyMinerGasPriceFlag,
utils.MinerEtherbaseFlag,
- utils.MinerLegacyEtherbaseFlag,
+ utils.LegacyMinerEtherbaseFlag,
utils.MinerExtraDataFlag,
- utils.MinerLegacyExtraDataFlag,
+ utils.LegacyMinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag,
utils.NATFlag,
@@ -154,7 +153,9 @@ var (
utils.FakePoWFlag,
utils.NoCompactionFlag,
utils.GpoBlocksFlag,
+ utils.LegacyGpoBlocksFlag,
utils.GpoPercentileFlag,
+ utils.LegacyGpoPercentileFlag,
utils.EWASMInterpreterFlag,
utils.EVMInterpreterFlag,
configFileFlag,
@@ -192,22 +193,32 @@ var (
}
rpcFlags = []cli.Flag{
- utils.RPCEnabledFlag,
- utils.RPCListenAddrFlag,
- utils.RPCPortFlag,
- utils.RPCCORSDomainFlag,
- utils.RPCVirtualHostsFlag,
+ utils.HTTPEnabledFlag,
+ utils.HTTPListenAddrFlag,
+ utils.HTTPPortFlag,
+ utils.HTTPCORSDomainFlag,
+ utils.HTTPVirtualHostsFlag,
+ utils.LegacyRPCEnabledFlag,
+ utils.LegacyRPCListenAddrFlag,
+ utils.LegacyRPCPortFlag,
+ utils.LegacyRPCCORSDomainFlag,
+ utils.LegacyRPCVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLListenAddrFlag,
utils.GraphQLPortFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
- utils.RPCApiFlag,
+ utils.HTTPApiFlag,
+ utils.LegacyRPCApiFlag,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
+ utils.LegacyWSListenAddrFlag,
utils.WSPortFlag,
+ utils.LegacyWSPortFlag,
utils.WSApiFlag,
+ utils.LegacyWSApiFlag,
utils.WSAllowedOriginsFlag,
+ utils.LegacyWSAllowedOriginsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
utils.InsecureUnlockAllowedFlag,
@@ -266,6 +277,8 @@ func init() {
dumpConfigCommand,
// See retesteth.go
retestethCommand,
+ // See cmd/utils/flags_legacy.go
+ utils.ShowDeprecated,
}
sort.Sort(cli.CommandsByName(app.Commands))
@@ -273,6 +286,7 @@ func init() {
app.Flags = append(app.Flags, rpcFlags...)
app.Flags = append(app.Flags, consoleFlags...)
app.Flags = append(app.Flags, debug.Flags...)
+ app.Flags = append(app.Flags, debug.DeprecatedFlags...)
app.Flags = append(app.Flags, whisperFlags...)
app.Flags = append(app.Flags, metricsFlags...)
@@ -362,6 +376,10 @@ func prepare(ctx *cli.Context) {
// Check https://github.com/elastic/gosigar#supported-platforms
if runtime.GOOS != "openbsd" {
if err := mem.Get(); err == nil {
+ if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 {
+ log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024)
+ mem.Total = 2 * 1024 * 1024 * 1024
+ }
allowance := int(mem.Total / 1024 / 1024 / 3)
if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance {
log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance)
@@ -446,7 +464,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
setContractAuthzProviderFunc := ethService.SetContractAuthorizationProvider
// Set contract backend for ethereum service if local node
// is serving LES requests.
- if ctx.GlobalInt(utils.LightLegacyServFlag.Name) > 0 || ctx.GlobalInt(utils.LightServeFlag.Name) > 0 {
+ if ctx.GlobalInt(utils.LegacyLightServFlag.Name) > 0 || ctx.GlobalInt(utils.LightServeFlag.Name) > 0 {
var ethService *eth.Ethereum
if err := stack.Service(ðService); err != nil {
utils.Fatalf("Failed to retrieve ethereum service: %v", err)
@@ -558,16 +576,18 @@ func startNode(ctx *cli.Context, stack *node.Node) {
utils.Fatalf("Ethereum service not running: %v", err)
}
// Set the gas price to the limits from the CLI and start mining
- gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
- if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
- gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
+ gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
+ if ctx.GlobalIsSet(utils.LegacyMinerGasPriceFlag.Name) && !ctx.GlobalIsSet(utils.MinerGasPriceFlag.Name) {
+ gasprice = utils.GlobalBig(ctx, utils.LegacyMinerGasPriceFlag.Name)
}
ethereum.TxPool().SetGasPrice(gasprice)
- threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
- if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
- threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
+ threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name)
+ if ctx.GlobalIsSet(utils.LegacyMinerThreadsFlag.Name) && !ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
+ threads = ctx.GlobalInt(utils.LegacyMinerThreadsFlag.Name)
+ log.Warn("The flag --minerthreads is deprecated and will be removed in the future, please use --miner.threads")
}
+
if err := ethereum.StartMining(threads); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go
index bc7dcda917..9d8ce515e6 100644
--- a/cmd/geth/retesteth.go
+++ b/cmd/geth/retesteth.go
@@ -404,7 +404,7 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
}
engine := &NoRewardEngine{inner: inner, rewardsOn: chainParams.SealEngine != "NoReward"}
- blockchain, err := core.NewBlockChain(ethDb, nil, chainConfig, engine, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(ethDb, nil, chainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
return false, err
}
@@ -682,7 +682,7 @@ func (api *RetestethAPI) AccountRange(ctx context.Context,
context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil)
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, statedb, pvtst, api.blockchain.Config(), vm.Config{})
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
return AccountRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
@@ -792,7 +792,7 @@ func (api *RetestethAPI) StorageRangeAt(ctx context.Context,
context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil)
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, statedb, pvtstdb, api.blockchain.Config(), vm.Config{})
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
return StorageRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
@@ -891,8 +891,8 @@ func retesteth(ctx *cli.Context) error {
Version: "1.0",
},
}
- vhosts := splitAndTrim(ctx.GlobalString(utils.RPCVirtualHostsFlag.Name))
- cors := splitAndTrim(ctx.GlobalString(utils.RPCCORSDomainFlag.Name))
+ vhosts := splitAndTrim(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name))
+ cors := splitAndTrim(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name))
// register apis and create handler stack
srv := rpc.NewServer()
@@ -908,8 +908,8 @@ func retesteth(ctx *cli.Context) error {
WriteTimeout: 120 * time.Second,
IdleTimeout: 120 * time.Second,
}
- httpEndpoint := fmt.Sprintf("%s:%d", ctx.GlobalString(utils.RPCListenAddrFlag.Name), ctx.Int(rpcPortFlag.Name))
- listener, _, err := node.StartHTTPEndpoint(httpEndpoint, RetestethHTTPTimeouts, handler, nil)
+ httpEndpoint := fmt.Sprintf("%s:%d", ctx.GlobalString(utils.HTTPListenAddrFlag.Name), ctx.Int(rpcPortFlag.Name))
+ httpServer, _, _, err := node.StartHTTPEndpoint(httpEndpoint, RetestethHTTPTimeouts, handler, nil)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
@@ -917,7 +917,8 @@ func retesteth(ctx *cli.Context) error {
log.Info("HTTP endpoint opened", "url", extapiURL)
defer func() {
- listener.Close()
+ // Don't bother imposing a timeout here.
+ httpServer.Shutdown(context.Background())
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index bad89e3632..c94213a438 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -80,6 +80,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.SyncModeFlag,
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
+ utils.TxLookupLimitFlag,
utils.EthStatsURLFlag,
utils.IdentityFlag,
utils.LightKDFFlag,
@@ -159,13 +160,12 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.IPCDisabledFlag,
utils.IPCPathFlag,
- utils.RPCEnabledFlag,
- utils.RPCListenAddrFlag,
- utils.RPCPortFlag,
- utils.RPCApiFlag,
- utils.RPCGlobalGasCap,
- utils.RPCCORSDomainFlag,
- utils.RPCVirtualHostsFlag,
+ utils.HTTPEnabledFlag,
+ utils.HTTPListenAddrFlag,
+ utils.HTTPPortFlag,
+ utils.HTTPApiFlag,
+ utils.HTTPCORSDomainFlag,
+ utils.HTTPVirtualHostsFlag,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
utils.WSPortFlag,
@@ -176,6 +176,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.GraphQLPortFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
+ utils.RPCGlobalGasCap,
utils.JSpathFlag,
utils.ExecFlag,
utils.PreloadJSFlag,
@@ -190,8 +191,8 @@ var AppHelpFlagGroups = []flagGroup{
Name: "NETWORKING",
Flags: []cli.Flag{
utils.BootnodesFlag,
- utils.BootnodesV4Flag,
- utils.BootnodesV5Flag,
+ utils.LegacyBootnodesV4Flag,
+ utils.LegacyBootnodesV5Flag,
utils.DNSDiscoveryFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
@@ -252,17 +253,21 @@ var AppHelpFlagGroups = []flagGroup{
Flags: whisperFlags,
},
{
- Name: "DEPRECATED",
- Flags: []cli.Flag{
- utils.LegacyTestnetFlag,
- utils.LightLegacyServFlag,
- utils.LightLegacyPeersFlag,
- utils.MinerLegacyThreadsFlag,
- utils.MinerLegacyGasTargetFlag,
- utils.MinerLegacyGasPriceFlag,
- utils.MinerLegacyEtherbaseFlag,
- utils.MinerLegacyExtraDataFlag,
- },
+ Name: "ALIASED (deprecated)",
+ Flags: append([]cli.Flag{
+ utils.LegacyRPCEnabledFlag,
+ utils.LegacyRPCListenAddrFlag,
+ utils.LegacyRPCPortFlag,
+ utils.LegacyRPCCORSDomainFlag,
+ utils.LegacyRPCVirtualHostsFlag,
+ utils.LegacyRPCApiFlag,
+ utils.LegacyWSListenAddrFlag,
+ utils.LegacyWSPortFlag,
+ utils.LegacyWSAllowedOriginsFlag,
+ utils.LegacyWSApiFlag,
+ utils.LegacyGpoBlocksFlag,
+ utils.LegacyGpoPercentileFlag,
+ }, debug.DeprecatedFlags...),
},
// QUORUM
{
@@ -321,6 +326,10 @@ var AppHelpFlagGroups = []flagGroup{
// END QUORUM
{
Name: "MISC",
+ Flags: []cli.Flag{
+ utils.SnapshotFlag,
+ cli.HelpFlag,
+ },
},
}
@@ -378,10 +387,17 @@ func init() {
categorized[flag.String()] = struct{}{}
}
}
+ deprecated := make(map[string]struct{})
+ for _, flag := range utils.DeprecatedFlags {
+ deprecated[flag.String()] = struct{}{}
+ }
+ // Only add uncategorized flags if they are not deprecated
var uncategorized []cli.Flag
for _, flag := range data.(*cli.App).Flags {
if _, ok := categorized[flag.String()]; !ok {
- uncategorized = append(uncategorized, flag)
+ if _, ok := deprecated[flag.String()]; !ok {
+ uncategorized = append(uncategorized, flag)
+ }
}
}
if len(uncategorized) > 0 {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index bf6c5e2919..912d43779a 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -177,10 +177,6 @@ var (
Usage: "Network identifier (integer, 1=Frontier, 3=Ropsten, 4=Rinkeby, 5=Görli)",
Value: eth.DefaultConfig.NetworkId,
}
- LegacyTestnetFlag = cli.BoolFlag{ // TODO(q9f): Remove after Ropsten is discontinued.
- Name: "testnet",
- Usage: "Pre-configured test network (Deprecated: Please choose one of --goerli, --rinkeby, or --ropsten.)",
- }
GoerliFlag = cli.BoolFlag{
Name: "goerli",
Usage: "Görli network: pre-configured proof-of-authority test network",
@@ -245,6 +241,11 @@ var (
Name: "snapshot",
Usage: `Enables snapshot-database mode -- experimental work in progress feature`,
}
+ TxLookupLimitFlag = cli.Int64Flag{
+ Name: "txlookuplimit",
+ Usage: "Number of recent blocks to maintain transactions index by-hash for (default = index all blocks)",
+ Value: 0,
+ }
LightKDFFlag = cli.BoolFlag{
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
@@ -253,20 +254,7 @@ var (
Name: "whitelist",
Usage: "Comma separated block number-to-hash mappings to enforce (=)",
}
- OverrideIstanbulFlag = cli.Uint64Flag{
- Name: "override.istanbul",
- Usage: "Manually specify Istanbul fork-block, overriding the bundled setting",
- }
- OverrideMuirGlacierFlag = cli.Uint64Flag{
- Name: "override.muirglacier",
- Usage: "Manually specify Muir Glacier fork-block, overriding the bundled setting",
- }
// Light server and client settings
- LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021
- Name: "lightserv",
- Usage: "Maximum percentage of time allowed for serving LES requests (deprecated, use --light.serve)",
- Value: eth.DefaultConfig.LightServ,
- }
LightServeFlag = cli.IntFlag{
Name: "light.serve",
Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)",
@@ -282,11 +270,6 @@ var (
Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
Value: eth.DefaultConfig.LightEgress,
}
- LightLegacyPeersFlag = cli.IntFlag{ // Deprecated in favor of light.maxpeers, remove in 2021
- Name: "lightpeers",
- Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated, use --light.maxpeers)",
- Value: eth.DefaultConfig.LightPeers,
- }
LightMaxPeersFlag = cli.IntFlag{
Name: "light.maxpeers",
Usage: "Maximum number of light clients to serve, or light servers to attach to",
@@ -438,11 +421,6 @@ var (
Usage: "Number of CPU threads to use for mining",
Value: 0,
}
- MinerLegacyThreadsFlag = cli.IntFlag{
- Name: "minerthreads",
- Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)",
- Value: 0,
- }
MinerNotifyFlag = cli.StringFlag{
Name: "miner.notify",
Usage: "Comma separated HTTP URL list to notify of new work packages",
@@ -452,11 +430,6 @@ var (
Usage: "Target gas floor for mined blocks",
Value: eth.DefaultConfig.Miner.GasFloor,
}
- MinerLegacyGasTargetFlag = cli.Uint64Flag{
- Name: "targetgaslimit",
- Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
- Value: eth.DefaultConfig.Miner.GasFloor,
- }
MinerGasLimitFlag = cli.Uint64Flag{
Name: "miner.gaslimit",
Usage: "Target gas ceiling for mined blocks",
@@ -467,29 +440,15 @@ var (
Usage: "Minimum gas price for mining a transaction",
Value: eth.DefaultConfig.Miner.GasPrice,
}
- MinerLegacyGasPriceFlag = BigFlag{
- Name: "gasprice",
- Usage: "Minimum gas price for mining a transaction (deprecated, use --miner.gasprice)",
- Value: eth.DefaultConfig.Miner.GasPrice,
- }
MinerEtherbaseFlag = cli.StringFlag{
Name: "miner.etherbase",
Usage: "Public address for block mining rewards (default = first account)",
Value: "0",
}
- MinerLegacyEtherbaseFlag = cli.StringFlag{
- Name: "etherbase",
- Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)",
- Value: "0",
- }
MinerExtraDataFlag = cli.StringFlag{
Name: "miner.extradata",
Usage: "Block extra data set by the miner (default = client version)",
}
- MinerLegacyExtraDataFlag = cli.StringFlag{
- Name: "extradata",
- Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)",
- }
MinerRecommitIntervalFlag = cli.DurationFlag{
Name: "miner.recommit",
Usage: "Time interval to recreate the block being mined",
@@ -570,32 +529,32 @@ var (
Name: "ipcpath",
Usage: "Filename for IPC socket/pipe within the datadir (explicit paths escape it)",
}
- RPCEnabledFlag = cli.BoolFlag{
- Name: "rpc",
+ HTTPEnabledFlag = cli.BoolFlag{
+ Name: "http",
Usage: "Enable the HTTP-RPC server",
}
- RPCListenAddrFlag = cli.StringFlag{
- Name: "rpcaddr",
+ HTTPListenAddrFlag = cli.StringFlag{
+ Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: node.DefaultHTTPHost,
}
- RPCPortFlag = cli.IntFlag{
- Name: "rpcport",
+ HTTPPortFlag = cli.IntFlag{
+ Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: node.DefaultHTTPPort,
}
- RPCCORSDomainFlag = cli.StringFlag{
- Name: "rpccorsdomain",
+ HTTPCORSDomainFlag = cli.StringFlag{
+ Name: "http.corsdomain",
Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
Value: "",
}
- RPCVirtualHostsFlag = cli.StringFlag{
- Name: "rpcvhosts",
+ HTTPVirtualHostsFlag = cli.StringFlag{
+ Name: "http.vhosts",
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
}
- RPCApiFlag = cli.StringFlag{
- Name: "rpcapi",
+ HTTPApiFlag = cli.StringFlag{
+ Name: "http.api",
Usage: "API's offered over the HTTP-RPC interface",
Value: "",
}
@@ -604,22 +563,22 @@ var (
Usage: "Enable the WS-RPC server",
}
WSListenAddrFlag = cli.StringFlag{
- Name: "wsaddr",
+ Name: "ws.addr",
Usage: "WS-RPC server listening interface",
Value: node.DefaultWSHost,
}
WSPortFlag = cli.IntFlag{
- Name: "wsport",
+ Name: "ws.port",
Usage: "WS-RPC server listening port",
Value: node.DefaultWSPort,
}
WSApiFlag = cli.StringFlag{
- Name: "wsapi",
+ Name: "ws.api",
Usage: "API's offered over the WS-RPC interface",
Value: "",
}
WSAllowedOriginsFlag = cli.StringFlag{
- Name: "wsorigins",
+ Name: "ws.origins",
Usage: "Origins from which to accept websockets requests",
Value: "",
}
@@ -674,17 +633,7 @@ var (
}
BootnodesFlag = cli.StringFlag{
Name: "bootnodes",
- Usage: "Comma separated enode URLs for P2P discovery bootstrap (set v4+v5 instead for light servers)",
- Value: "",
- }
- BootnodesV4Flag = cli.StringFlag{
- Name: "bootnodesv4",
- Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes)",
- Value: "",
- }
- BootnodesV5Flag = cli.StringFlag{
- Name: "bootnodesv5",
- Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes)",
+ Usage: "Comma separated enode URLs for P2P discovery bootstrap",
Value: "",
}
NodeKeyFileFlag = cli.StringFlag{
@@ -726,12 +675,12 @@ var (
// Gas price oracle settings
GpoBlocksFlag = cli.IntFlag{
- Name: "gpoblocks",
+ Name: "gpo.blocks",
Usage: "Number of recent blocks to check for gas prices",
Value: eth.DefaultConfig.GPO.Blocks,
}
GpoPercentileFlag = cli.IntFlag{
- Name: "gpopercentile",
+ Name: "gpo.percentile",
Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices",
Value: eth.DefaultConfig.GPO.Percentile,
}
@@ -1020,9 +969,9 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls := params.MainnetBootnodes
switch {
- case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
- if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
- urls = splitAndTrim(ctx.GlobalString(BootnodesV4Flag.Name))
+ case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name):
+ if ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name) {
+ urls = splitAndTrim(ctx.GlobalString(LegacyBootnodesV4Flag.Name))
} else {
urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
}
@@ -1052,14 +1001,16 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
// setBootstrapNodesV5 creates a list of bootstrap nodes from the command line
// flags, reverting to pre-configured ones if none have been specified.
func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
- urls := params.DiscoveryV5Bootnodes
+ urls := params.MainnetBootnodes
switch {
- case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
- if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
- urls = splitAndTrim(ctx.GlobalString(BootnodesV5Flag.Name))
+ case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name):
+ if ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name) {
+ urls = splitAndTrim(ctx.GlobalString(LegacyBootnodesV5Flag.Name))
} else {
urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
}
+ case ctx.GlobalBool(RopstenFlag.Name):
+ urls = params.RopstenBootnodes
case ctx.GlobalBool(RinkebyFlag.Name):
urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name):
@@ -1113,23 +1064,51 @@ func splitAndTrim(input string) []string {
// setHTTP creates the HTTP RPC listener interface string from the set
// command line flags, returning empty if the HTTP endpoint is disabled.
func setHTTP(ctx *cli.Context, cfg *node.Config) {
- if ctx.GlobalBool(RPCEnabledFlag.Name) && cfg.HTTPHost == "" {
+ if ctx.GlobalBool(LegacyRPCEnabledFlag.Name) && cfg.HTTPHost == "" {
+ log.Warn("The flag --rpc is deprecated and will be removed in the future, please use --http")
cfg.HTTPHost = "127.0.0.1"
- if ctx.GlobalIsSet(RPCListenAddrFlag.Name) {
- cfg.HTTPHost = ctx.GlobalString(RPCListenAddrFlag.Name)
+ if ctx.GlobalIsSet(LegacyRPCListenAddrFlag.Name) {
+ cfg.HTTPHost = ctx.GlobalString(LegacyRPCListenAddrFlag.Name)
+ log.Warn("The flag --rpcaddr is deprecated and will be removed in the future, please use --http.addr")
}
}
- if ctx.GlobalIsSet(RPCPortFlag.Name) {
- cfg.HTTPPort = ctx.GlobalInt(RPCPortFlag.Name)
+ if ctx.GlobalBool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" {
+ cfg.HTTPHost = "127.0.0.1"
+ if ctx.GlobalIsSet(HTTPListenAddrFlag.Name) {
+ cfg.HTTPHost = ctx.GlobalString(HTTPListenAddrFlag.Name)
+ }
+ }
+
+ if ctx.GlobalIsSet(LegacyRPCPortFlag.Name) {
+ cfg.HTTPPort = ctx.GlobalInt(LegacyRPCPortFlag.Name)
+ log.Warn("The flag --rpcport is deprecated and will be removed in the future, please use --http.port")
+ }
+ if ctx.GlobalIsSet(HTTPPortFlag.Name) {
+ cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
+ }
+
+ if ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) {
+ cfg.HTTPCors = splitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
+ log.Warn("The flag --rpccorsdomain is deprecated and will be removed in the future, please use --http.corsdomain")
+ }
+ if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
+ cfg.HTTPCors = splitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
+ }
+
+ if ctx.GlobalIsSet(LegacyRPCApiFlag.Name) {
+ cfg.HTTPModules = splitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name))
+ log.Warn("The flag --rpcapi is deprecated and will be removed in the future, please use --http.api")
}
- if ctx.GlobalIsSet(RPCCORSDomainFlag.Name) {
- cfg.HTTPCors = splitAndTrim(ctx.GlobalString(RPCCORSDomainFlag.Name))
+ if ctx.GlobalIsSet(HTTPApiFlag.Name) {
+ cfg.HTTPModules = splitAndTrim(ctx.GlobalString(HTTPApiFlag.Name))
}
- if ctx.GlobalIsSet(RPCApiFlag.Name) {
- cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name))
+
+ if ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) {
+ cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name))
+ log.Warn("The flag --rpcvhosts is deprecated and will be removed in the future, please use --http.vhosts")
}
- if ctx.GlobalIsSet(RPCVirtualHostsFlag.Name) {
- cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
+ if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) {
+ cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
}
}
@@ -1156,16 +1135,34 @@ func setGraphQL(ctx *cli.Context, cfg *node.Config) {
func setWS(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalBool(WSEnabledFlag.Name) && cfg.WSHost == "" {
cfg.WSHost = "127.0.0.1"
+ if ctx.GlobalIsSet(LegacyWSListenAddrFlag.Name) {
+ cfg.WSHost = ctx.GlobalString(LegacyWSListenAddrFlag.Name)
+ log.Warn("The flag --wsaddr is deprecated and will be removed in the future, please use --ws.addr")
+ }
if ctx.GlobalIsSet(WSListenAddrFlag.Name) {
cfg.WSHost = ctx.GlobalString(WSListenAddrFlag.Name)
}
}
+ if ctx.GlobalIsSet(LegacyWSPortFlag.Name) {
+ cfg.WSPort = ctx.GlobalInt(LegacyWSPortFlag.Name)
+ log.Warn("The flag --wsport is deprecated and will be removed in the future, please use --ws.port")
+ }
if ctx.GlobalIsSet(WSPortFlag.Name) {
cfg.WSPort = ctx.GlobalInt(WSPortFlag.Name)
}
+
+ if ctx.GlobalIsSet(LegacyWSAllowedOriginsFlag.Name) {
+ cfg.WSOrigins = splitAndTrim(ctx.GlobalString(LegacyWSAllowedOriginsFlag.Name))
+ log.Warn("The flag --wsorigins is deprecated and will be removed in the future, please use --ws.origins")
+ }
if ctx.GlobalIsSet(WSAllowedOriginsFlag.Name) {
cfg.WSOrigins = splitAndTrim(ctx.GlobalString(WSAllowedOriginsFlag.Name))
}
+
+ if ctx.GlobalIsSet(LegacyWSApiFlag.Name) {
+ cfg.WSModules = splitAndTrim(ctx.GlobalString(LegacyWSApiFlag.Name))
+ log.Warn("The flag --wsapi is deprecated and will be removed in the future, please use --ws.api")
+ }
if ctx.GlobalIsSet(WSApiFlag.Name) {
cfg.WSModules = splitAndTrim(ctx.GlobalString(WSApiFlag.Name))
}
@@ -1185,8 +1182,9 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
// setLes configures the les server and ultra light client settings from the command line flags.
func setLes(ctx *cli.Context, cfg *eth.Config) {
- if ctx.GlobalIsSet(LightLegacyServFlag.Name) {
- cfg.LightServ = ctx.GlobalInt(LightLegacyServFlag.Name)
+ if ctx.GlobalIsSet(LegacyLightServFlag.Name) {
+ cfg.LightServ = ctx.GlobalInt(LegacyLightServFlag.Name)
+ log.Warn("The flag --lightserv is deprecated and will be removed in the future, please use --light.serve")
}
if ctx.GlobalIsSet(LightServeFlag.Name) {
cfg.LightServ = ctx.GlobalInt(LightServeFlag.Name)
@@ -1197,8 +1195,9 @@ func setLes(ctx *cli.Context, cfg *eth.Config) {
if ctx.GlobalIsSet(LightEgressFlag.Name) {
cfg.LightEgress = ctx.GlobalInt(LightEgressFlag.Name)
}
- if ctx.GlobalIsSet(LightLegacyPeersFlag.Name) {
- cfg.LightPeers = ctx.GlobalInt(LightLegacyPeersFlag.Name)
+ if ctx.GlobalIsSet(LegacyLightPeersFlag.Name) {
+ cfg.LightPeers = ctx.GlobalInt(LegacyLightPeersFlag.Name)
+ log.Warn("The flag --lightpeers is deprecated and will be removed in the future, please use --light.maxpeers")
}
if ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
cfg.LightPeers = ctx.GlobalInt(LightMaxPeersFlag.Name)
@@ -1262,8 +1261,10 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
// Extract the current etherbase, new flag overriding legacy one
var etherbase string
- if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) {
- etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name)
+ if ctx.GlobalIsSet(LegacyMinerEtherbaseFlag.Name) {
+ etherbase = ctx.GlobalString(LegacyMinerEtherbaseFlag.Name)
+ log.Warn("The flag --etherbase is deprecated and will be removed in the future, please use --miner.etherbase")
+
}
if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) {
etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name)
@@ -1308,27 +1309,27 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setBootstrapNodesV5(ctx, cfg)
lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light"
- lightServer := (ctx.GlobalInt(LightLegacyServFlag.Name) != 0 || ctx.GlobalInt(LightServeFlag.Name) != 0)
+ lightServer := (ctx.GlobalInt(LegacyLightServFlag.Name) != 0 || ctx.GlobalInt(LightServeFlag.Name) != 0)
- lightPeers := ctx.GlobalInt(LightLegacyPeersFlag.Name)
+ lightPeers := ctx.GlobalInt(LegacyLightPeersFlag.Name)
if ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
lightPeers = ctx.GlobalInt(LightMaxPeersFlag.Name)
}
- if lightClient && !ctx.GlobalIsSet(LightLegacyPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
+ if lightClient && !ctx.GlobalIsSet(LegacyLightPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
// dynamic default - for clients we use 1/10th of the default for servers
lightPeers /= 10
}
if ctx.GlobalIsSet(MaxPeersFlag.Name) {
cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name)
- if lightServer && !ctx.GlobalIsSet(LightLegacyPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
+ if lightServer && !ctx.GlobalIsSet(LegacyLightPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) {
cfg.MaxPeers += lightPeers
}
} else {
if lightServer {
cfg.MaxPeers += lightPeers
}
- if lightClient && (ctx.GlobalIsSet(LightLegacyPeersFlag.Name) || ctx.GlobalIsSet(LightMaxPeersFlag.Name)) && cfg.MaxPeers < lightPeers {
+ if lightClient && (ctx.GlobalIsSet(LegacyLightPeersFlag.Name) || ctx.GlobalIsSet(LightMaxPeersFlag.Name)) && cfg.MaxPeers < lightPeers {
cfg.MaxPeers = lightPeers
}
}
@@ -1499,9 +1500,18 @@ func urlReader(u *url.URL) (io.ReadCloser, error) {
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
+ if ctx.GlobalIsSet(LegacyGpoBlocksFlag.Name) {
+ cfg.Blocks = ctx.GlobalInt(LegacyGpoBlocksFlag.Name)
+ log.Warn("The flag --gpoblocks is deprecated and will be removed in the future, please use --gpo.blocks")
+ }
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
}
+
+ if ctx.GlobalIsSet(LegacyGpoPercentileFlag.Name) {
+ cfg.Percentile = ctx.GlobalInt(LegacyGpoPercentileFlag.Name)
+ log.Warn("The flag --gpopercentile is deprecated and will be removed in the future, please use --gpo.percentile")
+ }
if ctx.GlobalIsSet(GpoPercentileFlag.Name) {
cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name)
}
@@ -1581,14 +1591,16 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
cfg.Notify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
}
- if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) {
- cfg.ExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name))
+ if ctx.GlobalIsSet(LegacyMinerExtraDataFlag.Name) {
+ cfg.ExtraData = []byte(ctx.GlobalString(LegacyMinerExtraDataFlag.Name))
+ log.Warn("The flag --extradata is deprecated and will be removed in the future, please use --miner.extradata")
}
if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name))
}
- if ctx.GlobalIsSet(MinerLegacyGasTargetFlag.Name) {
- cfg.GasFloor = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name)
+ if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) {
+ cfg.GasFloor = ctx.GlobalUint64(LegacyMinerGasTargetFlag.Name)
+ log.Warn("The flag --targetgaslimit is deprecated and will be removed in the future, please use --miner.gastarget")
}
if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
cfg.GasFloor = ctx.GlobalUint64(MinerGasTargetFlag.Name)
@@ -1596,8 +1608,9 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
if ctx.GlobalIsSet(MinerGasLimitFlag.Name) {
cfg.GasCeil = ctx.GlobalUint64(MinerGasLimitFlag.Name)
}
- if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
- cfg.GasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name)
+ if ctx.GlobalIsSet(LegacyMinerGasPriceFlag.Name) {
+ cfg.GasPrice = GlobalBig(ctx, LegacyMinerGasPriceFlag.Name)
+ log.Warn("The flag --gasprice is deprecated and will be removed in the future, please use --miner.gasprice")
}
if ctx.GlobalIsSet(MinerGasPriceFlag.Name) {
cfg.GasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name)
@@ -1715,9 +1728,13 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, DeveloperFlag, LegacyTestnetFlag, RopstenFlag, RinkebyFlag, GoerliFlag)
- CheckExclusive(ctx, LightLegacyServFlag, LightServeFlag, SyncModeFlag, "light")
+ CheckExclusive(ctx, LegacyLightServFlag, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
-
+ CheckExclusive(ctx, GCModeFlag, "archive", TxLookupLimitFlag)
+ // todo(rjl493456442) make it available for les server
+ // Ancient tx indices pruning is not available for les server now
+ // since light client relies on the server for transaction status query.
+ CheckExclusive(ctx, LegacyLightServFlag, LightServeFlag, TxLookupLimitFlag)
var ks *keystore.KeyStore
if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 {
ks = keystores[0].(*keystore.KeyStore)
@@ -1756,6 +1773,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) {
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
}
+ if ctx.GlobalIsSet(TxLookupLimitFlag.Name) {
+ cfg.TxLookupLimit = ctx.GlobalUint64(TxLookupLimitFlag.Name)
+ }
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
@@ -1841,7 +1861,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
log.Info("Using developer account", "address", developer.Address)
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
- if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
+ if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(LegacyMinerGasPriceFlag.Name) {
cfg.Miner.GasPrice = big.NewInt(1)
}
default:
@@ -2101,7 +2121,7 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
}
// MakeChain creates a chain manager from set command line flags.
-func MakeChain(ctx *cli.Context, stack *node.Node, useExist bool) (chain *core.BlockChain, chainDb ethdb.Database) {
+func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool, useExist bool) (chain *core.BlockChain, chainDb ethdb.Database) {
var (
config *params.ChainConfig
err error
@@ -2172,7 +2192,12 @@ func MakeChain(ctx *cli.Context, stack *node.Node, useExist bool) (chain *core.B
cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
- chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)
+ var limit *uint64
+ if ctx.GlobalIsSet(TxLookupLimitFlag.Name) && !readOnly {
+ l := ctx.GlobalUint64(TxLookupLimitFlag.Name)
+ limit = &l
+ }
+ chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil, limit)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
new file mode 100644
index 0000000000..c39664d249
--- /dev/null
+++ b/cmd/utils/flags_legacy.go
@@ -0,0 +1,173 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/node"
+ "gopkg.in/urfave/cli.v1"
+)
+
+var ShowDeprecated = cli.Command{
+ Action: showDeprecated,
+ Name: "show-deprecated-flags",
+ Usage: "Show flags that have been deprecated",
+ ArgsUsage: " ",
+ Category: "MISCELLANEOUS COMMANDS",
+ Description: "Show flags that have been deprecated and will soon be removed",
+}
+
+var DeprecatedFlags = []cli.Flag{
+ LegacyTestnetFlag,
+ LegacyLightServFlag,
+ LegacyLightPeersFlag,
+ LegacyMinerThreadsFlag,
+ LegacyMinerGasTargetFlag,
+ LegacyMinerGasPriceFlag,
+ LegacyMinerEtherbaseFlag,
+ LegacyMinerExtraDataFlag,
+}
+
+var (
+ // (Deprecated April 2018)
+ LegacyMinerThreadsFlag = cli.IntFlag{
+ Name: "minerthreads",
+ Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)",
+ Value: 0,
+ }
+ LegacyMinerGasTargetFlag = cli.Uint64Flag{
+ Name: "targetgaslimit",
+ Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
+ Value: eth.DefaultConfig.Miner.GasFloor,
+ }
+ LegacyMinerGasPriceFlag = BigFlag{
+ Name: "gasprice",
+ Usage: "Minimum gas price for mining a transaction (deprecated, use --miner.gasprice)",
+ Value: eth.DefaultConfig.Miner.GasPrice,
+ }
+ LegacyMinerEtherbaseFlag = cli.StringFlag{
+ Name: "etherbase",
+ Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)",
+ Value: "0",
+ }
+ LegacyMinerExtraDataFlag = cli.StringFlag{
+ Name: "extradata",
+ Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)",
+ }
+
+ // (Deprecated June 2019)
+ LegacyLightServFlag = cli.IntFlag{
+ Name: "lightserv",
+ Usage: "Maximum percentage of time allowed for serving LES requests (deprecated, use --light.serve)",
+ Value: eth.DefaultConfig.LightServ,
+ }
+ LegacyLightPeersFlag = cli.IntFlag{
+ Name: "lightpeers",
+ Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated, use --light.maxpeers)",
+ Value: eth.DefaultConfig.LightPeers,
+ }
+
+ // (Deprecated April 2020)
+ LegacyTestnetFlag = cli.BoolFlag{ // TODO(q9f): Remove after Ropsten is discontinued.
+ Name: "testnet",
+ Usage: "Pre-configured test network (Deprecated: Please choose one of --goerli, --rinkeby, or --ropsten.)",
+ }
+ LegacyRPCEnabledFlag = cli.BoolFlag{
+ Name: "rpc",
+ Usage: "Enable the HTTP-RPC server (deprecated, use --http)",
+ }
+ LegacyRPCListenAddrFlag = cli.StringFlag{
+ Name: "rpcaddr",
+ Usage: "HTTP-RPC server listening interface (deprecated, use --http.addr)",
+ Value: node.DefaultHTTPHost,
+ }
+ LegacyRPCPortFlag = cli.IntFlag{
+ Name: "rpcport",
+ Usage: "HTTP-RPC server listening port (deprecated, use --http.port)",
+ Value: node.DefaultHTTPPort,
+ }
+ LegacyRPCCORSDomainFlag = cli.StringFlag{
+ Name: "rpccorsdomain",
+ Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated, use --http.corsdomain)",
+ Value: "",
+ }
+ LegacyRPCVirtualHostsFlag = cli.StringFlag{
+ Name: "rpcvhosts",
+ Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (deprecated, use --http.vhosts)",
+ Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
+ }
+ LegacyRPCApiFlag = cli.StringFlag{
+ Name: "rpcapi",
+ Usage: "API's offered over the HTTP-RPC interface (deprecated, use --http.api)",
+ Value: "",
+ }
+ LegacyWSListenAddrFlag = cli.StringFlag{
+ Name: "wsaddr",
+ Usage: "WS-RPC server listening interface (deprecated, use --ws.addr)",
+ Value: node.DefaultWSHost,
+ }
+ LegacyWSPortFlag = cli.IntFlag{
+ Name: "wsport",
+ Usage: "WS-RPC server listening port (deprecated, use --ws.port)",
+ Value: node.DefaultWSPort,
+ }
+ LegacyWSApiFlag = cli.StringFlag{
+ Name: "wsapi",
+ Usage: "API's offered over the WS-RPC interface (deprecated, use --ws.api)",
+ Value: "",
+ }
+ LegacyWSAllowedOriginsFlag = cli.StringFlag{
+ Name: "wsorigins",
+ Usage: "Origins from which to accept websockets requests (deprecated, use --ws.origins)",
+ Value: "",
+ }
+ LegacyGpoBlocksFlag = cli.IntFlag{
+ Name: "gpoblocks",
+ Usage: "Number of recent blocks to check for gas prices (deprecated, use --gpo.blocks)",
+ Value: eth.DefaultConfig.GPO.Blocks,
+ }
+ LegacyGpoPercentileFlag = cli.IntFlag{
+ Name: "gpopercentile",
+ Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices (deprecated, use --gpo.percentile)",
+ Value: eth.DefaultConfig.GPO.Percentile,
+ }
+ LegacyBootnodesV4Flag = cli.StringFlag{
+ Name: "bootnodesv4",
+ Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes) (deprecated, use --bootnodes)",
+ Value: "",
+ }
+ LegacyBootnodesV5Flag = cli.StringFlag{
+ Name: "bootnodesv5",
+ Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes) (deprecated, use --bootnodes)",
+ Value: "",
+ }
+)
+
+// showDeprecated displays deprecated flags that will be soon removed from the codebase.
+func showDeprecated(*cli.Context) {
+ fmt.Println("--------------------------------------------------------------------")
+ fmt.Println("The following flags are deprecated and will be removed in the future!")
+ fmt.Println("--------------------------------------------------------------------")
+ fmt.Println()
+
+ for _, flag := range DeprecatedFlags {
+ fmt.Println(flag.String())
+ }
+}
diff --git a/common/math/big.go b/common/math/big.go
index d31c59af10..17a57df9dc 100644
--- a/common/math/big.go
+++ b/common/math/big.go
@@ -184,6 +184,12 @@ func U256(x *big.Int) *big.Int {
return x.And(x, tt256m1)
}
+// U256Bytes converts a big Int into a 256bit EVM number.
+// This operation is destructive.
+func U256Bytes(n *big.Int) []byte {
+ return PaddedBigBytes(U256(n), 32)
+}
+
// S256 interprets x as a two's complement number.
// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
//
diff --git a/common/math/big_test.go b/common/math/big_test.go
index be9810dc8c..f896ec65be 100644
--- a/common/math/big_test.go
+++ b/common/math/big_test.go
@@ -212,6 +212,16 @@ func TestU256(t *testing.T) {
}
}
+func TestU256Bytes(t *testing.T) {
+ ubytes := make([]byte, 32)
+ ubytes[31] = 1
+
+ unsigned := U256Bytes(big.NewInt(1))
+ if !bytes.Equal(unsigned, ubytes) {
+ t.Errorf("expected %x got %x", ubytes, unsigned)
+ }
+}
+
func TestBigEndianByteAt(t *testing.T) {
tests := []struct {
x string
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index 710f448055..49313374fc 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -54,7 +54,7 @@ func TestReimportMirroredState(t *testing.T) {
genesis := genspec.MustCommit(db)
// Generate a batch of blocks, each properly signed
- chain, _ := core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
defer chain.Stop()
blocks, _ := core.GenerateChain(params.AllCliqueProtocolChanges, genesis, engine, db, 3, func(i int, block *core.BlockGen) {
@@ -88,7 +88,7 @@ func TestReimportMirroredState(t *testing.T) {
db = rawdb.NewMemoryDatabase()
genspec.MustCommit(db)
- chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil)
+ chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[:2]); err != nil {
@@ -101,7 +101,7 @@ func TestReimportMirroredState(t *testing.T) {
// Simulate a crash by creating a new chain on top of the database, without
// flushing the dirty states out. Insert the last block, trigerring a sidechain
// reimport.
- chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil)
+ chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[2:]); err != nil {
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index fc08722efd..3890fc51dd 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -448,7 +448,7 @@ func TestClique(t *testing.T) {
batches[len(batches)-1] = append(batches[len(batches)-1], block)
}
// Pass all the headers through clique and ensure tallying succeeds
- chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil)
+ chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil, nil)
if err != nil {
t.Errorf("test %d: failed to create test chain: %v", i, err)
continue
diff --git a/consensus/istanbul/backend/engine_test.go b/consensus/istanbul/backend/engine_test.go
index 8d3bd6966c..410cdc6d9d 100644
--- a/consensus/istanbul/backend/engine_test.go
+++ b/consensus/istanbul/backend/engine_test.go
@@ -47,7 +47,7 @@ func newBlockChain(n int) (*core.BlockChain, *backend) {
// Use the first key as private key
b, _ := New(config, nodeKeys[0], memDB).(*backend)
genesis.MustCommit(memDB)
- blockchain, err := core.NewBlockChain(memDB, nil, genesis.Config, b, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(memDB, nil, genesis.Config, b, vm.Config{}, nil, nil)
if err != nil {
panic(err)
}
diff --git a/consensus/istanbul/backend/snapshot_test.go b/consensus/istanbul/backend/snapshot_test.go
index 8e3b247b4f..fe1fade8f4 100644
--- a/consensus/istanbul/backend/snapshot_test.go
+++ b/consensus/istanbul/backend/snapshot_test.go
@@ -347,7 +347,7 @@ func TestVoting(t *testing.T) {
config.Epoch = tt.epoch
}
engine := New(config, accounts.accounts[tt.validators[0]], db).(*backend)
- chain, _ := core.NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil, nil)
// Assemble a chain of headers from the cast votes
headers := make([]*types.Header, len(tt.votes))
diff --git a/console/bridge.go b/console/bridge.go
index 2625c481d5..93655c24d3 100644
--- a/console/bridge.go
+++ b/console/bridge.go
@@ -229,6 +229,9 @@ func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) {
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
// the RPC call.
func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {
+ if nArgs := len(call.Arguments); nArgs < 2 {
+ return nil, fmt.Errorf("usage: unlockAccount(account, [ password, duration ])")
+ }
// Make sure we have an account specified to unlock.
if call.Argument(0).ExportType().Kind() != reflect.String {
return nil, fmt.Errorf("first argument must be the account to unlock")
@@ -272,6 +275,9 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {
// prompt to acquire the passphrase and executes the original RPC method (saved in
// jeth.sign) with it to actually execute the RPC call.
func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
+ if nArgs := len(call.Arguments); nArgs < 2 {
+ return nil, fmt.Errorf("usage: sign(message, account, [ password ])")
+ }
var (
message = call.Argument(0)
account = call.Argument(1)
@@ -307,6 +313,9 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
// Sleep will block the console for the specified number of seconds.
func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) {
+ if nArgs := len(call.Arguments); nArgs < 1 {
+ return nil, fmt.Errorf("usage: sleep()")
+ }
if !isNumber(call.Argument(0)) {
return nil, fmt.Errorf("usage: sleep()")
}
@@ -334,7 +343,7 @@ func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) {
blocks = call.Argument(0).ToInteger()
}
if nArgs >= 2 {
- if isNumber(call.Argument(1)) {
+ if !isNumber(call.Argument(1)) {
return nil, fmt.Errorf("expected number as second argument")
}
sleep = call.Argument(1).ToInteger()
diff --git a/contracts/checkpointoracle/contract/oracle.go b/contracts/checkpointoracle/contract/oracle.go
index 3bb351792f..998ccb93c2 100644
--- a/contracts/checkpointoracle/contract/oracle.go
+++ b/contracts/checkpointoracle/contract/oracle.go
@@ -20,7 +20,6 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/core/bench_test.go b/core/bench_test.go
index 1181b0549b..e69ad656fc 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -176,7 +176,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
- chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -288,7 +288,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
+ chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 06e2ba1a4f..dfb37b88cf 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -42,7 +42,7 @@ func TestHeaderVerification(t *testing.T) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@@ -106,11 +106,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
var results <-chan error
if valid {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
} else {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
}
@@ -173,7 +173,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
defer runtime.GOMAXPROCS(old)
// Start the verifications and immediately abort
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil)
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
defer chain.Stop()
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
diff --git a/core/blockchain.go b/core/blockchain.go
index aac22f0825..0916f5b00b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -152,6 +152,13 @@ type BlockChain struct {
triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
+ // txLookupLimit is the maximum number of blocks from head whose tx indices
+ // are reserved:
+ // * 0: means no limit and regenerate any missing indexes
+ // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes
+ // * nil: disable tx reindexer/deleter, but still index new blocks
+ txLookupLimit uint64
+
hc *HeaderChain
rmLogsFeed event.Feed
chainFeed event.Feed
@@ -211,7 +218,7 @@ func (bc *BlockChain) CheckAndSetPrivateState(txLogs []*types.Log, privateState
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
-func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieCleanLimit: 256,
@@ -267,8 +274,16 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
bc.currentFastBlock.Store(nilBlock)
// Initialize the chain with ancient data if it isn't empty.
+ var txIndexBlock uint64
+
if bc.empty() {
rawdb.InitDatabaseFromFreezer(bc.db)
+ // If ancient database is not empty, reconstruct all missing
+ // indices in the background.
+ frozen, _ := bc.db.Ancients()
+ if frozen > 0 {
+ txIndexBlock = frozen
+ }
}
if err := bc.loadLastState(); err != nil {
@@ -331,11 +346,15 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
// Take ownership of this particular state
go bc.update()
+ if txLookupLimit != nil {
+ bc.txLookupLimit = *txLookupLimit
+ go bc.maintainTxIndex(txIndexBlock)
+ }
return bc, nil
}
-func NewMultitenantBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
- bc, err := NewBlockChain(db, cacheConfig, chainConfig, engine, vmConfig, shouldPreserve)
+func NewMultitenantBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
+ bc, err := NewBlockChain(db, cacheConfig, chainConfig, engine, vmConfig, shouldPreserve, txLookupLimit)
if err != nil {
return nil, err
}
@@ -1214,8 +1233,23 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
// Flush data into ancient database.
size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
- rawdb.WriteTxLookupEntries(batch, block)
+ // Write tx indices if any condition is satisfied:
+ // * If user requires to reserve all tx indices (txlookuplimit=0)
+ // * If all ancient tx indices are required to be reserved (txlookuplimit is even higher than ancientlimit)
+ // * If block number is large enough to be regarded as a recent block
+ // It means blocks below the ancientLimit-txlookupLimit won't be indexed.
+ //
+ // But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
+ // an external ancient database, during the setup, blockchain will start
+ // a background routine to re-index all indices in the [ancients - txlookupLimit, ancients)
+ // range. In this case, all tx indices of newly imported blocks should be
+ // generated.
+ if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
+ rawdb.WriteTxLookupEntries(batch, block)
+ } else if rawdb.ReadTxIndexTail(bc.db) != nil {
+ rawdb.WriteTxLookupEntries(batch, block)
+ }
stats.processed++
}
// Flush all tx-lookup index data.
@@ -1289,7 +1323,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Write all the data out into the database
rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
- rawdb.WriteTxLookupEntries(batch, block)
+ rawdb.WriteTxLookupEntries(batch, block) // Always write tx indices for live blocks, we assume they are needed
// Write everything belongs to the blocks into the database. So that
// we can ensure all components of body is completed(body, receipts,
@@ -1315,7 +1349,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
updateHead(blockChain[len(blockChain)-1])
return 0, nil
}
- // Write downloaded chain data and corresponding receipt chain data.
+ // Write downloaded chain data and corresponding receipt chain data
if len(ancientBlocks) > 0 {
if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
if err == errInsertionInterrupted {
@@ -1324,6 +1358,19 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return n, err
}
}
+ // Write the tx index tail (block number from where we index) before write any live blocks
+ if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
+ // The tx index tail can only be one of the following two options:
+ // * 0: all ancient blocks have been indexed
+ // * ancient-limit: the indices of blocks before ancient-limit are ignored
+ if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
+ if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
+ rawdb.WriteTxIndexTail(bc.db, 0)
+ } else {
+ rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
+ }
+ }
+ }
if len(liveBlocks) > 0 {
if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
if err == errInsertionInterrupted {
@@ -1347,6 +1394,18 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, nil
}
+// SetTxLookupLimit is responsible for updating the txlookup limit to the
+// original one stored in db if the new mismatches with the old one.
+func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
+ bc.txLookupLimit = limit
+}
+
+// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
+// stale transaction indices.
+func (bc *BlockChain) TxLookupLimit() uint64 {
+ return bc.txLookupLimit
+}
+
var lastWrite uint64
// writeBlockWithoutState writes only the block and its metadata to the database,
@@ -1801,6 +1860,20 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
"root", block.Root())
+ // Special case. Commit the empty receipt slice if we meet the known
+ // block in the middle. It can only happen in the clique chain. Whenever
+ // we insert blocks via `insertSideChain`, we only commit `td`, `header`
+ // and `body` if it's non-existent. Since we don't have receipts without
+ // reexecution, so nothing to commit. But if the sidechain will be adopted
+ // as the canonical chain eventually, it needs to be reexecuted for missing
+ // state, but if it's this special case here (skip re-execution) we will lose
+ // the empty receipt entry.
+ if len(block.Transactions()) == 0 {
+ rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil)
+ } else {
+ log.Error("Please file an issue, skip known block execution without receipt",
+ "hash", block.Hash(), "number", block.NumberU64())
+ }
if err := bc.writeKnownBlock(block); err != nil {
return it.index, err
}
@@ -2260,6 +2333,86 @@ func (bc *BlockChain) update() {
}
}
+// maintainTxIndex is responsible for the construction and deletion of the
+// transaction index.
+//
+// User can use flag `txlookuplimit` to specify a "recentness" block, below
+// which ancient tx indices get deleted. If `txlookuplimit` is 0, it means
+// all tx indices will be reserved.
+//
+// The user can adjust the txlookuplimit value for each launch after fast
+// sync, Geth will automatically construct the missing indices and delete
+// the extra indices.
+func (bc *BlockChain) maintainTxIndex(ancients uint64) {
+ // Before starting the actual maintenance, we need to handle a special case,
+ // where user might init Geth with an external ancient database. If so, we
+ // need to reindex all necessary transactions before starting to process any
+ // pruning requests.
+ if ancients > 0 {
+ var from = uint64(0)
+ if bc.txLookupLimit != 0 && ancients > bc.txLookupLimit {
+ from = ancients - bc.txLookupLimit
+ }
+ rawdb.IndexTransactions(bc.db, from, ancients)
+ }
+ // indexBlocks reindexes or unindexes transactions depending on user configuration
+ indexBlocks := func(tail *uint64, head uint64, done chan struct{}) {
+ defer func() { done <- struct{}{} }()
+
+ // If the user just upgraded Geth to a new version which supports transaction
+ // index pruning, write the new tail and remove anything older.
+ if tail == nil {
+ if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
+ // Nothing to delete, write the tail and return
+ rawdb.WriteTxIndexTail(bc.db, 0)
+ } else {
+ // Prune all stale tx indices and record the tx index tail
+ rawdb.UnindexTransactions(bc.db, 0, head-bc.txLookupLimit+1)
+ }
+ return
+ }
+ // If a previous indexing existed, make sure that we fill in any missing entries
+ if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
+ if *tail > 0 {
+ rawdb.IndexTransactions(bc.db, 0, *tail)
+ }
+ return
+ }
+ // Update the transaction index to the new chain state
+ if head-bc.txLookupLimit+1 < *tail {
+ // Reindex a part of missing indices and rewind index tail to HEAD-limit
+ rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail)
+ } else {
+ // Unindex a part of stale indices and forward index tail to HEAD-limit
+ rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1)
+ }
+ }
+ // Any reindexing done, start listening to chain events and moving the index window
+ var (
+ done chan struct{} // Non-nil if background unindexing or reindexing routine is active.
+ headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
+ )
+ sub := bc.SubscribeChainHeadEvent(headCh)
+ if sub == nil {
+ return
+ }
+ defer sub.Unsubscribe()
+
+ for {
+ select {
+ case head := <-headCh:
+ if done == nil {
+ done = make(chan struct{})
+ go indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
+ }
+ case <-done:
+ done = nil
+ case <-bc.quit:
+ return
+ }
+ }
+}
+
// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
blocks := make([]*types.Block, 0, bc.badBlocks.Len())
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index e16ff41906..399d74b2ae 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -22,7 +22,6 @@ import (
"math/big"
"math/rand"
"os"
- "reflect"
"sync"
"testing"
"time"
@@ -55,7 +54,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *B
)
// Initialize a fresh chain with only a genesis block
- blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@@ -510,7 +509,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
blockchain.Stop()
// Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{}, nil)
+ ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -622,7 +621,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks); err != nil {
@@ -631,7 +630,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -655,7 +654,7 @@ func TestFastVsFullChains(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -751,7 +750,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as an archive node and ensure all pointers are updated
archiveDb, delfn := makeDb()
defer delfn()
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@@ -764,7 +763,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, delfn := makeDb()
defer delfn()
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -784,7 +783,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a ancient-first node and ensure all pointers are updated
ancientDb, delfn := makeDb()
defer delfn()
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -803,7 +802,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
- light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -872,7 +871,7 @@ func TestChainTxReorgs(t *testing.T) {
}
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -942,7 +941,7 @@ func TestLogReorgs(t *testing.T) {
signer = types.NewEIP155Signer(gspec.Config.ChainID)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@@ -981,189 +980,108 @@ func TestLogReorgs(t *testing.T) {
}
}
+// This EVM code generates a log when the contract is created.
+var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+
+// This test checks that log events and RemovedLogsEvent are sent
+// when the chain reorganizes.
func TestLogRebirth(t *testing.T) {
var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- db = rawdb.NewMemoryDatabase()
-
- // this code generates a log
- code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
- genesis = gspec.MustCommit(db)
- signer = types.NewEIP155Signer(gspec.Config.ChainID)
- newLogCh = make(chan bool)
- removeLogCh = make(chan bool)
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ db = rawdb.NewMemoryDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+ genesis = gspec.MustCommit(db)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ engine = ethash.NewFaker()
+ blockchain, _ = NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil)
)
- // validateLogEvent checks whether the received logs number is equal with expected.
- validateLogEvent := func(sink interface{}, result chan bool, expect int) {
- chanval := reflect.ValueOf(sink)
- chantyp := chanval.Type()
- if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.RecvDir == 0 {
- t.Fatalf("invalid channel, given type %v", chantyp)
- }
- cnt := 0
- var recv []reflect.Value
- timeout := time.After(1 * time.Second)
- cases := []reflect.SelectCase{{Chan: chanval, Dir: reflect.SelectRecv}, {Chan: reflect.ValueOf(timeout), Dir: reflect.SelectRecv}}
- for {
- chose, v, _ := reflect.Select(cases)
- if chose == 1 {
- // Not enough event received
- result <- false
- return
- }
- cnt += 1
- recv = append(recv, v)
- if cnt == expect {
- break
- }
- }
- done := time.After(50 * time.Millisecond)
- cases = cases[:1]
- cases = append(cases, reflect.SelectCase{Chan: reflect.ValueOf(done), Dir: reflect.SelectRecv})
- chose, _, _ := reflect.Select(cases)
- // If chose equal 0, it means receiving redundant events.
- if chose == 1 {
- result <- true
- } else {
- result <- false
- }
- }
-
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
- logsCh := make(chan []*types.Log)
- blockchain.SubscribeLogsEvent(logsCh)
-
- rmLogsCh := make(chan RemovedLogsEvent)
+ // The event channels.
+ newLogCh := make(chan []*types.Log, 10)
+ rmLogsCh := make(chan RemovedLogsEvent, 10)
+ blockchain.SubscribeLogsEvent(newLogCh)
blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
- chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+ // This chain contains a single log.
+ chain, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2, func(i int, gen *BlockGen) {
if i == 1 {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), logCode), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
}
})
-
- // Spawn a goroutine to receive log events
- go validateLogEvent(logsCh, newLogCh, 1)
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
- if !<-newLogCh {
- t.Fatal("failed to receive new log event")
- }
+ checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
- // Generate long reorg chain
- forkChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+ // Generate long reorg chain containing another log. Inserting the
+ // chain removes one log and adds one.
+ forkChain, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2, func(i int, gen *BlockGen) {
if i == 1 {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), logCode), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
gen.AddTx(tx)
- // Higher block difficulty
- gen.OffsetTime(-9)
+ gen.OffsetTime(-9) // higher block difficulty
}
})
-
- // Spawn a goroutine to receive log events
- go validateLogEvent(logsCh, newLogCh, 1)
- go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(forkChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- if !<-newLogCh {
- t.Fatal("failed to receive new log event")
- }
- if !<-removeLogCh {
- t.Fatal("failed to receive removed log event")
- }
+ checkLogEvents(t, newLogCh, rmLogsCh, 1, 1)
- newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
- go validateLogEvent(logsCh, newLogCh, 1)
- go validateLogEvent(rmLogsCh, removeLogCh, 1)
+ // This chain segment is rooted in the original chain, but doesn't contain any logs.
+ // When inserting it, the canonical chain switches away from forkChain and re-emits
+ // the log event for the old chain, as well as a RemovedLogsEvent for forkChain.
+ newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], engine, db, 1, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- // Rebirth logs should omit a newLogEvent
- if !<-newLogCh {
- t.Fatal("failed to receive new log event")
- }
- // Ensure removedLog events received
- if !<-removeLogCh {
- t.Fatal("failed to receive removed log event")
- }
+ checkLogEvents(t, newLogCh, rmLogsCh, 1, 1)
}
+// This test is a variation of TestLogRebirth. It verifies that log events are emitted
+// when a side chain containing log events overtakes the canonical chain.
func TestSideLogRebirth(t *testing.T) {
var (
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- db = rawdb.NewMemoryDatabase()
-
- // this code generates a log
- code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
- genesis = gspec.MustCommit(db)
- signer = types.NewEIP155Signer(gspec.Config.ChainID)
- newLogCh = make(chan bool)
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ db = rawdb.NewMemoryDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+ genesis = gspec.MustCommit(db)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
)
- // listenNewLog checks whether the received logs number is equal with expected.
- listenNewLog := func(sink chan []*types.Log, expect int) {
- cnt := 0
- for {
- select {
- case logs := <-sink:
- cnt += len(logs)
- case <-time.NewTimer(5 * time.Second).C:
- // new logs timeout
- newLogCh <- false
- return
- }
- if cnt == expect {
- break
- } else if cnt > expect {
- // redundant logs received
- newLogCh <- false
- return
- }
- }
- select {
- case <-sink:
- // redundant logs received
- newLogCh <- false
- case <-time.NewTimer(100 * time.Millisecond).C:
- newLogCh <- true
- }
- }
-
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
- logsCh := make(chan []*types.Log)
- blockchain.SubscribeLogsEvent(logsCh)
+ newLogCh := make(chan []*types.Log, 10)
+ rmLogsCh := make(chan RemovedLogsEvent, 10)
+ blockchain.SubscribeLogsEvent(newLogCh)
+ blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
+
chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
- // Higher block difficulty
- gen.OffsetTime(-9)
+ gen.OffsetTime(-9) // higher block difficulty
+
}
})
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
+ checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
// Generate side chain with lower difficulty
sideChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
- tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), logCode), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
@@ -1173,16 +1091,31 @@ func TestSideLogRebirth(t *testing.T) {
if _, err := blockchain.InsertChain(sideChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
+ checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
- // Generate a new block based on side chain
+ // Generate a new block based on side chain.
newBlocks, _ := GenerateChain(params.TestChainConfig, sideChain[len(sideChain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
- go listenNewLog(logsCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- // Rebirth logs should omit a newLogEvent
- if !<-newLogCh {
- t.Fatalf("failed to receive new log event")
+ checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
+}
+
+func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
+ t.Helper()
+
+ if len(logsCh) != wantNew {
+ t.Fatalf("wrong number of log events: got %d, want %d", len(logsCh), wantNew)
+ }
+ if len(rmLogsCh) != wantRemoved {
+ t.Fatalf("wrong number of removed log events: got %d, want %d", len(rmLogsCh), wantRemoved)
+ }
+ // Drain events.
+ for i := 0; i < len(logsCh); i++ {
+ <-logsCh
+ }
+ for i := 0; i < len(rmLogsCh); i++ {
+ <-rmLogsCh
}
}
@@ -1199,7 +1132,7 @@ func TestReorgSideEvent(t *testing.T) {
signer = types.NewEIP155Signer(gspec.Config.ChainID)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
@@ -1331,7 +1264,7 @@ func TestEIP155Transition(t *testing.T) {
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
@@ -1439,7 +1372,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, block *BlockGen) {
@@ -1514,7 +1447,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1558,7 +1491,7 @@ func TestTrieForkGC(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1597,7 +1530,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1657,7 +1590,7 @@ func TestBlockchainRecovery(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -1676,7 +1609,7 @@ func TestBlockchainRecovery(t *testing.T) {
rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
// Reopen broken blockchain again
- ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if num := ancient.CurrentBlock().NumberU64(); num != 0 {
t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
@@ -1713,7 +1646,7 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
headers := make([]*types.Header, len(blocks))
@@ -1770,7 +1703,7 @@ func TestLowDiffLongChain(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1817,7 +1750,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1914,7 +1847,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
new(Genesis).MustCommit(chaindb)
defer os.RemoveAll(dir)
- chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2030,7 +1963,7 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
}
@@ -2134,6 +2067,219 @@ func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
}
}
+func TestTransactionIndices(t *testing.T) {
+ // Configure and generate a sample block chain
+ var (
+ gendb = rawdb.NewMemoryDatabase()
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ address = crypto.PubkeyToAddress(key.PublicKey)
+ funds = big.NewInt(1000000000)
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
+ genesis = gspec.MustCommit(gendb)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ )
+ height := uint64(128)
+ blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), func(i int, block *BlockGen) {
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil), signer, key)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ })
+ blocks2, _ := GenerateChain(gspec.Config, blocks[len(blocks)-1], ethash.NewFaker(), gendb, 10, nil)
+
+ check := func(tail *uint64, chain *BlockChain) {
+ stored := rawdb.ReadTxIndexTail(chain.db)
+ if tail == nil && stored != nil {
+ t.Fatalf("Oldest indexed block mismatch, want nil, have %d", *stored)
+ }
+ if tail != nil && *stored != *tail {
+ t.Fatalf("Oldest indexed block mismatch, want %d, have %d", *tail, *stored)
+ }
+ if tail != nil {
+ for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
+ t.Fatalf("Missing transaction index, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ for i := uint64(0); i < *tail; i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
+ t.Fatalf("Transaction index should be deleted, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ }
+ }
+ frdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer dir: %v", err)
+ }
+ defer os.Remove(frdir)
+ ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+
+ // Import all blocks into ancient db
+ l := uint64(0)
+ chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ headers := make([]*types.Header, len(blocks))
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ }
+ if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
+ t.Fatalf("failed to insert header %d: %v", n, err)
+ }
+ if n, err := chain.InsertReceiptChain(blocks, receipts, 128); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+ }
+ chain.Stop()
+ ancientDb.Close()
+
+ // Init block chain with external ancients, check all needed indices has been indexed.
+ limit := []uint64{0, 32, 64, 128}
+ for _, l := range limit {
+ ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+ chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
+ var tail uint64
+ if l != 0 {
+ tail = uint64(128) - l + 1
+ }
+ check(&tail, chain)
+ chain.Stop()
+ ancientDb.Close()
+ }
+
+ // Reconstruct a block chain which only reserves HEAD-64 tx indices
+ ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+
+ limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
+ tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
+ for i, l := range limit {
+ chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ chain.InsertChain(blocks2[i : i+1]) // Feed chain a higher block to trigger indices updater.
+ time.Sleep(50 * time.Millisecond) // Wait for indices initialisation
+ check(&tails[i], chain)
+ chain.Stop()
+ }
+}
+
+func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
+ // Configure and generate a sample block chain
+ var (
+ gendb = rawdb.NewMemoryDatabase()
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ address = crypto.PubkeyToAddress(key.PublicKey)
+ funds = big.NewInt(1000000000)
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
+ genesis = gspec.MustCommit(gendb)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ )
+ height := uint64(128)
+ blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), func(i int, block *BlockGen) {
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil), signer, key)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ })
+
+ check := func(tail *uint64, chain *BlockChain) {
+ stored := rawdb.ReadTxIndexTail(chain.db)
+ if tail == nil && stored != nil {
+ t.Fatalf("Oldest indexed block mismatch, want nil, have %d", *stored)
+ }
+ if tail != nil && *stored != *tail {
+ t.Fatalf("Oldest indexed block mismatch, want %d, have %d", *tail, *stored)
+ }
+ if tail != nil {
+ for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
+ t.Fatalf("Missing transaction index, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ for i := uint64(0); i < *tail; i++ {
+ block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
+ if block.Transactions().Len() == 0 {
+ continue
+ }
+ for _, tx := range block.Transactions() {
+ if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
+ t.Fatalf("Transaction index should be deleted, number %d hash %s", i, tx.Hash().Hex())
+ }
+ }
+ }
+ }
+ }
+
+ frdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer dir: %v", err)
+ }
+ defer os.Remove(frdir)
+ ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
+ if err != nil {
+ t.Fatalf("failed to create temp freezer db: %v", err)
+ }
+ gspec.MustCommit(ancientDb)
+
+ // Import all blocks into ancient db, only HEAD-32 indices are kept.
+ l := uint64(32)
+ chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ headers := make([]*types.Header, len(blocks))
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ }
+ if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
+ t.Fatalf("failed to insert header %d: %v", n, err)
+ }
+ // The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
+ if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+ }
+ tail := uint64(32)
+ check(&tail, chain)
+}
+
// Benchmarks large blocks with value transfers to non-existing accounts
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
var (
@@ -2179,7 +2325,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("failed to create tester chain: %v", err)
}
@@ -2261,7 +2407,7 @@ func TestSideImportPrunedBlocks(t *testing.T) {
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2355,7 +2501,7 @@ func TestDeleteCreateRevert(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2469,7 +2615,7 @@ func TestDeleteRecreateSlots(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
Debug: true,
Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2549,7 +2695,7 @@ func TestDeleteRecreateAccount(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
Debug: true,
Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2722,7 +2868,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2856,7 +3002,7 @@ func TestInitThenFailCreateContract(t *testing.T) {
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil)
+ }, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
diff --git a/core/call_helper.go b/core/call_helper.go
index 38a61165c9..46532b0c10 100644
--- a/core/call_helper.go
+++ b/core/call_helper.go
@@ -68,7 +68,7 @@ func (cg *callHelper) MakeCall(private bool, key *ecdsa.PrivateKey, to common.Ad
privateState = publicState
}
// TODO(joel): can we just pass nil instead of bc?
- bc, _ := NewBlockChain(cg.db, nil, params.QuorumTestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
+ bc, _ := NewBlockChain(cg.db, nil, params.QuorumTestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
context := NewEVMContext(msg, &cg.header, bc, &from)
vmenv := vm.NewEVM(context, publicState, privateState, params.QuorumTestChainConfig, vm.Config{})
sender := vm.AccountRef(msg.From())
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index c0c2c4f7f9..1bff3aee74 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -439,6 +439,9 @@ func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
// AddChildIndexer adds a child ChainIndexer that can use the output of this one
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
+ if indexer == c {
+ panic("can't add indexer as a child of itself")
+ }
c.lock.Lock()
defer c.lock.Unlock()
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 432ea47491..8ec2091c2f 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -79,7 +79,7 @@ func ExampleGenerateChain() {
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil {
diff --git a/core/dao_test.go b/core/dao_test.go
index 4e8dba9e84..89e1d83d7a 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -45,7 +45,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
proConf.DAOForkBlock = forkBlock
proConf.DAOForkSupport = true
- proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
+ proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil, nil)
defer proBc.Stop()
conDb := rawdb.NewMemoryDatabase()
@@ -55,7 +55,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
conConf.DAOForkBlock = forkBlock
conConf.DAOForkSupport = false
- conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
+ conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil, nil)
defer conBc.Stop()
if _, err := proBc.InsertChain(prefix); err != nil {
@@ -69,7 +69,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
+ bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -94,7 +94,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
+ bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
@@ -120,7 +120,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
+ bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -140,7 +140,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
+ bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
diff --git a/core/error.go b/core/error.go
index d58c273403..3051da66c7 100644
--- a/core/error.go
+++ b/core/error.go
@@ -22,21 +22,62 @@ var (
// ErrKnownBlock is returned when a block to import is already known locally.
ErrKnownBlock = errors.New("block already known")
- // ErrGasLimitReached is returned by the gas pool if the amount of gas required
- // by a transaction is higher than what's left in the block.
- ErrGasLimitReached = errors.New("gas limit reached")
-
// ErrBlacklistedHash is returned if a block to import is on the blacklist.
ErrBlacklistedHash = errors.New("blacklisted hash")
+ // ErrNoGenesis is returned when there is no Genesis Block.
+ ErrNoGenesis = errors.New("genesis not found in chain")
+)
+
+// List of evm-call-message pre-checking errors. All state transition messages will
+// be pre-checked before execution. If any invalidation detected, the corresponding
+// error should be returned which is defined here.
+//
+// - If the pre-checking happens in the miner, then the transaction won't be packed.
+// - If the pre-checking happens in the block processing procedure, then a "BAD BLOCK"
+// error should be emitted.
+var (
+ // ErrNonceTooLow is returned if the nonce of a transaction is lower than the
+ // one present in the local chain.
+ ErrNonceTooLow = errors.New("nonce too low")
+
// ErrNonceTooHigh is returned if the nonce of a transaction is higher than the
// next one expected based on the local chain.
ErrNonceTooHigh = errors.New("nonce too high")
- // ErrNoGenesis is returned when there is no Genesis Block.
- ErrNoGenesis = errors.New("genesis not found in chain")
+ // ErrGasLimitReached is returned by the gas pool if the amount of gas required
+ // by a transaction is higher than what's left in the block.
+ ErrGasLimitReached = errors.New("gas limit reached")
+
+ // ErrInsufficientFundsForTransfer is returned if the transaction sender doesn't
+ // have enough funds for transfer(topmost call only).
+ ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer")
+
+ // ErrInsufficientFunds is returned if the total cost of executing a transaction
+ // is higher than the balance of the user's account.
+ ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
+
+ // ErrGasUintOverflow is returned when calculating gas usage.
+ ErrGasUintOverflow = errors.New("gas uint64 overflow")
+
+ // ErrIntrinsicGas is returned if the transaction is specified to use less gas
+ // than required to start the invocation.
+ ErrIntrinsicGas = errors.New("intrinsic gas too low")
// Quorum
// ErrAbortBlocksProcessing is returned if bc.insertChain is interrupted under raft mode
ErrAbortBlocksProcessing = errors.New("abort during blocks processing")
+
+ // ErrContractManagedPartiesCheckFailed is returned if managed parties check has failed for contract
+ ErrContractManagedPartiesCheckFailed = errors.New("managed parties check has failed for contract")
+
+ // ErrPrivacyMetadataInvalidMerkleRoot is returned if there is an empty MR during the pmh.prepare(...)
+ ErrPrivacyMetadataInvalidMerkleRoot = errors.New("privacy metadata has empty MR for stateValidation flag")
+
+ // ErrPrivacyEnhancedReceivedWhenDisabled is returned if privacy enhanced transaction received while privacy enhancements are disabled
+ ErrPrivacyEnhancedReceivedWhenDisabled = errors.New("privacy enhanced transaction received while privacy enhancements are disabled")
+
+ // ErrPrivateContractInteractionVerificationFailed is returned if the verification of contract interaction differs from the one returned by Tessera (check pmh.verify(...))
+ ErrPrivateContractInteractionVerificationFailed = errors.New("verification of contract interaction differs from the one returned by Tessera")
+ // End Quorum
)
diff --git a/core/genesis.go b/core/genesis.go
index 1f4bfdfc4f..841e006272 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -152,10 +152,6 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
-}
-
-func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul, overrideMuirGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -219,12 +215,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
- if overrideIstanbul != nil {
- newcfg.IstanbulBlock = overrideIstanbul
- }
- if overrideMuirGlacier != nil {
- newcfg.MuirGlacierBlock = overrideMuirGlacier
- }
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
diff --git a/core/headerchain.go b/core/headerchain.go
index f21dcf537e..a6028d8b9a 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -149,7 +149,8 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
if ptd == nil {
return NonStatTy, consensus.ErrUnknownAncestor
}
- localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
+ head := hc.CurrentHeader().Number.Uint64()
+ localTd := hc.GetTd(hc.currentHeaderHash, head)
externTd := new(big.Int).Add(header.Difficulty, ptd)
// Irrelevant of the canonical status, write the td and header to the database
@@ -165,7 +166,15 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
- if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
+ reorg := externTd.Cmp(localTd) > 0
+ if !reorg && externTd.Cmp(localTd) == 0 {
+ if header.Number.Uint64() < head {
+ reorg = true
+ } else if header.Number.Uint64() == head {
+ reorg = mrand.Float64() < 0.5
+ }
+ }
+ if reorg {
// If the header can be added into canonical chain, adjust the
// header chain markers(canonical indexes and head header flag).
//
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 9fa327062a..2290e87d52 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -172,6 +172,43 @@ func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
}
}
+// ReadTxIndexTail retrieves the number of oldest indexed block
+// whose transaction indices have been indexed. If the corresponding entry
+// is non-existent in database it means the indexing has been finished.
+func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(txIndexTailKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteTxIndexTail stores the number of oldest indexed block
+// into database.
+func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store the transaction index tail", "err", err)
+ }
+}
+
+// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
+func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(fastTxLookupLimitKey)
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into database.
+func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
+ }
+}
+
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash
@@ -290,6 +327,25 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
return nil // Can't find the data anywhere.
}
+// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
+// block at number, in RLP encoding.
+func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
+ // If it's an ancient one, we don't need the canonical hash
+ data, _ := db.Ancient(freezerBodiesTable, number)
+ if len(data) == 0 {
+ // Need to get the hash
+ data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
+ // In the background freezer is moving data from leveldb to flat files.
+ // So during the first check for ancient db, the data is not yet in there,
+ // but when we reach into leveldb, the data was already moved. That would
+ // result in a not found error.
+ if len(data) == 0 {
+ data, _ = db.Ancient(freezerBodiesTable, number)
+ }
+ }
+ return data
+}
+
// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go
index 38f8fe10ea..c7f3df2ad7 100644
--- a/core/rawdb/accessors_indexes.go
+++ b/core/rawdb/accessors_indexes.go
@@ -63,9 +63,31 @@ func WriteTxLookupEntries(db ethdb.KeyValueWriter, block *types.Block) {
}
}
+// WriteTxLookupEntriesByHash is identical to WriteTxLookupEntries, but does not
+// require a full types.Block as input.
+func WriteTxLookupEntriesByHash(db ethdb.KeyValueWriter, number uint64, hashes []common.Hash) {
+ numberBytes := new(big.Int).SetUint64(number).Bytes()
+ for _, hash := range hashes {
+ if err := db.Put(txLookupKey(hash), numberBytes); err != nil {
+ log.Crit("Failed to store transaction lookup entry", "err", err)
+ }
+ }
+}
+
// DeleteTxLookupEntry removes all transaction data associated with a hash.
func DeleteTxLookupEntry(db ethdb.KeyValueWriter, hash common.Hash) {
- db.Delete(txLookupKey(hash))
+ if err := db.Delete(txLookupKey(hash)); err != nil {
+ log.Crit("Failed to delete transaction lookup entry", "err", err)
+ }
+}
+
+// DeleteTxLookupEntriesByHash removes all transaction lookups for a given block.
+func DeleteTxLookupEntriesByHash(db ethdb.KeyValueWriter, hashes []common.Hash) {
+ for _, hash := range hashes {
+ if err := db.Delete(txLookupKey(hash)); err != nil {
+ log.Crit("Failed to delete transaction lookup entry", "err", err)
+ }
+ }
}
// ReadTransaction retrieves a specific transaction from the database, along with
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
new file mode 100644
index 0000000000..3cd7a3214d
--- /dev/null
+++ b/core/rawdb/chain_iterator.go
@@ -0,0 +1,304 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see &lt;http://www.gnu.org/licenses/&gt;.
+
+package rawdb
+
+import (
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
+// of frozen ancient blocks. The method iterates over all the frozen blocks and
+// injects into the database the block hash->number mappings.
+func InitDatabaseFromFreezer(db ethdb.Database) {
+ // If we can't access the freezer or it's empty, abort
+ frozen, err := db.Ancients()
+ if err != nil || frozen == 0 {
+ return
+ }
+ var (
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
+ hash common.Hash
+ )
+ for i := uint64(0); i < frozen; i++ {
+ // Since the freezer has all data in sequential order on a file,
+ // it would be 'neat' to read more data in one go, and let the
+ // freezerdb return N items (e.g up to 1000 items per go)
+ // That would require an API change in Ancients though
+ if h, err := db.Ancient(freezerHashTable, i); err != nil {
+ log.Crit("Failed to init database from freezer", "err", err)
+ } else {
+ hash = common.BytesToHash(h)
+ }
+ WriteHeaderNumber(batch, hash, i)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write data to db", "err", err)
+ }
+ batch.Reset()
+
+ WriteHeadHeaderHash(db, hash)
+ WriteHeadFastBlockHash(db, hash)
+ log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+type blockTxHashes struct {
+ number uint64
+ hashes []common.Hash
+}
+
+// iterateTransactions iterates over all transactions in the (canon) block
+// number(s) given, and yields the hashes on a channel
+func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool) (chan *blockTxHashes, chan struct{}) {
+ // One thread sequentially reads data from db
+ type numberRlp struct {
+ number uint64
+ rlp rlp.RawValue
+ }
+ if to == from {
+ return nil, nil
+ }
+ threads := to - from
+ if cpus := runtime.NumCPU(); threads > uint64(cpus) {
+ threads = uint64(cpus)
+ }
+ var (
+ rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel
+ hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh
+ abortCh = make(chan struct{})
+ )
+ // lookup runs in one instance
+ lookup := func() {
+ n, end := from, to
+ if reverse {
+ n, end = to-1, from-1
+ }
+ defer close(rlpCh)
+ for n != end {
+ data := ReadCanonicalBodyRLP(db, n)
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case rlpCh <- &numberRlp{n, data}:
+ case <-abortCh:
+ return
+ }
+ if reverse {
+ n--
+ } else {
+ n++
+ }
+ }
+ }
+ // process runs in parallel
+ nThreadsAlive := int32(threads)
+ process := func() {
+ defer func() {
+ // Last processor closes the result channel
+ if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+ close(hashesCh)
+ }
+ }()
+
+ var hasher = sha3.NewLegacyKeccak256()
+ for data := range rlpCh {
+ it, err := rlp.NewListIterator(data.rlp)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ it.Next()
+ txs := it.Value()
+ txIt, err := rlp.NewListIterator(txs)
+ if err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var hashes []common.Hash
+ for txIt.Next() {
+ if err := txIt.Err(); err != nil {
+ log.Warn("tx iteration error", "error", err)
+ return
+ }
+ var txHash common.Hash
+ hasher.Reset()
+ hasher.Write(txIt.Value())
+ hasher.Sum(txHash[:0])
+ hashes = append(hashes, txHash)
+ }
+ result := &blockTxHashes{
+ hashes: hashes,
+ number: data.number,
+ }
+ // Feed the block to the aggregator, or abort on interrupt
+ select {
+ case hashesCh <- result:
+ case <-abortCh:
+ return
+ }
+ }
+ }
+ go lookup() // start the sequential db accessor
+ for i := 0; i < int(threads); i++ {
+ go process()
+ }
+ return hashesCh, abortCh
+}
+
+// IndexTransactions creates txlookup indices of the specified block range.
+//
+// This function iterates canonical chain in reverse order, it has one main advantage:
+// We can write tx index tail flag periodically even without the whole indexing
+// procedure is finished. So that we can resume indexing procedure next time quickly.
+func IndexTransactions(db ethdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, true)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ // Since we iterate in reverse, we expect the first number to come
+ // in to be [to-1]. Therefore, setting lastNum to [to] means that the
+ // prqueue gap-evaluation will work correctly
+ lastNum = to
+ queue = prque.New(nil)
+ // for stats reporting
+ blocks, txs = 0, 0
+ )
+ defer close(abortCh)
+
+ for chanDelivery := range hashesCh {
+ // Push the delivery into the queue and process contiguous ranges.
+ // Since we iterate in reverse, so lower numbers have lower prio, and
+ // we can use the number directly as prio marker
+ queue.Push(chanDelivery, int64(chanDelivery.number))
+ for !queue.Empty() {
+ // If the next available item is gapped, return
+ if _, priority := queue.Peek(); priority != int64(lastNum-1) {
+ break
+ }
+ // Next block available, pop it off and index it
+ delivery := queue.PopItem().(*blockTxHashes)
+ lastNum = delivery.number
+ WriteTxLookupEntriesByHash(batch, delivery.number, delivery.hashes)
+ blocks++
+ txs += len(delivery.hashes)
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ // Also write the tail there
+ WriteTxIndexTail(batch, lastNum)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
+ if lastNum < to {
+ WriteTxIndexTail(batch, lastNum)
+ // No need to write the batch if we never entered the loop above...
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ }
+ log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+}
+
+// UnindexTransactions removes txlookup indices of the specified block range.
+func UnindexTransactions(db ethdb.Database, from uint64, to uint64) {
+ // short circuit for invalid range
+ if from >= to {
+ return
+ }
+ // Write flag first and then unindex the transaction indices. Some indices
+ // will be left in the database if crash happens but it's fine.
+ WriteTxIndexTail(db, to)
+ // If only one block is unindexed, do it directly
+ //if from+1 == to {
+ // data := ReadCanonicalBodyRLP(db, uint64(from))
+ // DeleteTxLookupEntries(db, ReadBlock(db, ReadCanonicalHash(db, from), from))
+ // log.Info("Unindexed transactions", "blocks", 1, "tail", to)
+ // return
+ //}
+ // TODO @holiman, add this back (if we want it)
+ var (
+ hashesCh, abortCh = iterateTransactions(db, from, to, false)
+ batch = db.NewBatch()
+ start = time.Now()
+ logged = start.Add(-7 * time.Second)
+ )
+ defer close(abortCh)
+ // Otherwise spin up the concurrent iterator and unindexer
+ blocks, txs := 0, 0
+ for delivery := range hashesCh {
+ DeleteTxLookupEntriesByHash(batch, delivery.hashes)
+ txs += len(delivery.hashes)
+ blocks++
+
+ // If enough data was accumulated in memory or we're at the last block, dump to disk
+ // A batch counts the size of deletion as '1', so we need to flush more
+ // often than that.
+ if blocks%1000 == 0 {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ batch.Reset()
+ }
+ // If we've spent too much time already, notify the user of what we're doing
+ if time.Since(logged) > 8*time.Second {
+ log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed writing batch to db", "error", err)
+ return
+ }
+ log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+}
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
new file mode 100644
index 0000000000..c99a97c5f8
--- /dev/null
+++ b/core/rawdb/chain_iterator_test.go
@@ -0,0 +1,82 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see &lt;http://www.gnu.org/licenses/&gt;.
+
+package rawdb
+
+import (
+ "math/big"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+func TestChainIterator(t *testing.T) {
+ // Construct test chain db
+ chainDb := NewMemoryDatabase()
+
+ var block *types.Block
+ var txs []*types.Transaction
+ for i := uint64(0); i <= 10; i++ {
+ if i == 0 {
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, nil, nil, nil) // Empty genesis block
+ } else {
+ tx := types.NewTransaction(i, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11})
+ txs = append(txs, tx)
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil)
+ }
+ WriteBlock(chainDb, block)
+ WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
+ }
+
+ var cases = []struct {
+ from, to uint64
+ reverse bool
+ expect []int
+ }{
+ {0, 11, true, []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}},
+ {0, 0, true, nil},
+ {0, 5, true, []int{4, 3, 2, 1, 0}},
+ {10, 11, true, []int{10}},
+ {0, 11, false, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
+ {0, 0, false, nil},
+ {10, 11, false, []int{10}},
+ }
+ for i, c := range cases {
+ var numbers []int
+ hashCh, _ := iterateTransactions(chainDb, c.from, c.to, c.reverse)
+ if hashCh != nil {
+ for h := range hashCh {
+ numbers = append(numbers, int(h.number))
+ if len(h.hashes) > 0 {
+ if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp {
+ t.Fatalf("hash wrong, got %x exp %x", got, exp)
+ }
+ }
+ }
+ }
+ if !c.reverse {
+ sort.Ints(numbers)
+ } else {
+ sort.Sort(sort.Reverse(sort.IntSlice(numbers)))
+ }
+ if !reflect.DeepEqual(numbers, c.expect) {
+ t.Fatalf("Case %d failed, visit element mismatch, want %v, got %v", i, c.expect, numbers)
+ }
+ }
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index cc05491b84..583573407e 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -41,10 +41,10 @@ type freezerdb struct {
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
var errs []error
- if err := frdb.KeyValueStore.Close(); err != nil {
+ if err := frdb.AncientStore.Close(); err != nil {
errs = append(errs, err)
}
- if err := frdb.AncientStore.Close(); err != nil {
+ if err := frdb.KeyValueStore.Close(); err != nil {
errs = append(errs, err)
}
if len(errs) != 0 {
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index a6950e0c3c..cfb681d3c4 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -73,6 +73,7 @@ type freezer struct {
tables map[string]*freezerTable // Data tables for storing everything
instanceLock fileutil.Releaser // File-system lock to prevent double opens
+ quit chan struct{}
}
// newFreezer creates a chain freezer that moves ancient chain data into
@@ -101,6 +102,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
freezer := &freezer{
tables: make(map[string]*freezerTable),
instanceLock: lock,
+ quit: make(chan struct{}),
}
for name, disableSnappy := range freezerNoSnappy {
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
@@ -126,6 +128,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
// Close terminates the chain freezer, unmapping all the data files.
func (f *freezer) Close() error {
+ f.quit <- struct{}{}
var errs []error
for _, table := range f.tables {
if err := table.Close(); err != nil {
@@ -254,35 +257,50 @@ func (f *freezer) Sync() error {
func (f *freezer) freeze(db ethdb.KeyValueStore) {
nfdb := &nofreezedb{KeyValueStore: db}
+ backoff := false
for {
+ select {
+ case <-f.quit:
+ log.Info("Freezer shutting down")
+ return
+ default:
+ }
+ if backoff {
+ select {
+ case <-time.NewTimer(freezerRecheckInterval).C:
+ backoff = false
+ case <-f.quit:
+ return
+ }
+ }
// Retrieve the freezing threshold.
hash := ReadHeadBlockHash(nfdb)
if hash == (common.Hash{}) {
log.Debug("Current full block hash unavailable") // new chain, empty database
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
number := ReadHeaderNumber(nfdb, hash)
switch {
case number == nil:
log.Error("Current full block number unavailable", "hash", hash)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
case *number < uint64(params.GetImmutabilityThreshold()):
log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.GetImmutabilityThreshold())
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
case *number-uint64(params.GetImmutabilityThreshold()) <= f.frozen:
log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
head := ReadHeader(nfdb, hash, *number)
if head == nil {
log.Error("Current full block unavailable", "number", *number, "hash", hash)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
// Seems we have data ready to be frozen, process in usable batches
@@ -369,7 +387,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
// Avoid database thrashing with tiny writes
if f.frozen-first < freezerBatchLimit {
- time.Sleep(freezerRecheckInterval)
+ backoff = true
}
}
}
diff --git a/core/rawdb/freezer_reinit.go b/core/rawdb/freezer_reinit.go
deleted file mode 100644
index d6bf9ab1dd..0000000000
--- a/core/rawdb/freezer_reinit.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see &lt;http://www.gnu.org/licenses/&gt;.
-
-package rawdb
-
-import (
- "errors"
- "runtime"
- "sync/atomic"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/prque"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/log"
-)
-
-// InitDatabaseFromFreezer reinitializes an empty database from a previous batch
-// of frozen ancient blocks. The method iterates over all the frozen blocks and
-// injects into the database the block hash->number mappings and the transaction
-// lookup entries.
-func InitDatabaseFromFreezer(db ethdb.Database) error {
- // If we can't access the freezer or it's empty, abort
- frozen, err := db.Ancients()
- if err != nil || frozen == 0 {
- return err
- }
- // Blocks previously frozen, iterate over- and hash them concurrently
- var (
- number = ^uint64(0) // -1
- results = make(chan *types.Block, 4*runtime.NumCPU())
- )
- abort := make(chan struct{})
- defer close(abort)
-
- for i := 0; i < runtime.NumCPU(); i++ {
- go func() {
- for {
- // Fetch the next task number, terminating if everything's done
- n := atomic.AddUint64(&number, 1)
- if n >= frozen {
- return
- }
- // Retrieve the block from the freezer. If successful, pre-cache
- // the block hash and the individual transaction hashes for storing
- // into the database.
- block := ReadBlock(db, ReadCanonicalHash(db, n), n)
- if block != nil {
- block.Hash()
- for _, tx := range block.Transactions() {
- tx.Hash()
- }
- }
- // Feed the block to the aggregator, or abort on interrupt
- select {
- case results <- block:
- case <-abort:
- return
- }
- }
- }()
- }
- // Reassemble the blocks into a contiguous stream and push them out to disk
- var (
- queue = prque.New(nil)
- next = int64(0)
-
- batch = db.NewBatch()
- start = time.Now()
- logged time.Time
- )
- for i := uint64(0); i < frozen; i++ {
- // Retrieve the next result and bail if it's nil
- block := <-results
- if block == nil {
- return errors.New("broken ancient database")
- }
- // Push the block into the import queue and process contiguous ranges
- queue.Push(block, -int64(block.NumberU64()))
- for !queue.Empty() {
- // If the next available item is gapped, return
- if _, priority := queue.Peek(); -priority != next {
- break
- }
- // Next block available, pop it off and index it
- block = queue.PopItem().(*types.Block)
- next++
-
- // Inject hash<->number mapping and txlookup indexes
- WriteHeaderNumber(batch, block.Hash(), block.NumberU64())
- WriteTxLookupEntries(batch, block)
-
- // If enough data was accumulated in memory or we're at the last block, dump to disk
- if batch.ValueSize() > ethdb.IdealBatchSize || uint64(next) == frozen {
- if err := batch.Write(); err != nil {
- return err
- }
- batch.Reset()
- }
- // If we've spent too much time already, notify the user of what we're doing
- if time.Since(logged) > 8*time.Second {
- log.Info("Initializing chain from ancient data", "number", block.Number(), "hash", block.Hash(), "total", frozen-1, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
- }
- }
- }
- hash := ReadCanonicalHash(db, frozen-1)
- WriteHeadHeaderHash(db, hash)
- WriteHeadFastBlockHash(db, hash)
-
- log.Info("Initialized chain from ancient data", "number", frozen-1, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
- return nil
-}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 2c20df200b..5a41199a7c 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -47,6 +47,12 @@ var (
// snapshotJournalKey tracks the in-memory diff layers across restarts.
snapshotJournalKey = []byte("SnapshotJournal")
+ // txIndexTailKey tracks the oldest block whose transactions have been indexed.
+ txIndexTailKey = []byte("TransactionIndexTail")
+
+ // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
+ fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")
+
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/core/rlp_test.go b/core/rlp_test.go
new file mode 100644
index 0000000000..04daf2fc67
--- /dev/null
+++ b/core/rlp_test.go
@@ -0,0 +1,201 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+func getBlock(transactions int, uncles int, dataSize int) *types.Block {
+ var (
+ aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
+ // Generate a canonical chain to act as the main dataset
+ engine = ethash.NewFaker()
+ db = rawdb.NewMemoryDatabase()
+ // A sender who makes transactions, has some funds
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ address = crypto.PubkeyToAddress(key.PublicKey)
+ funds = big.NewInt(1000000000)
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{address: {Balance: funds}},
+ }
+ genesis = gspec.MustCommit(db)
+ )
+
+ // We need to generate as many blocks +1 as uncles
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1,
+ func(n int, b *BlockGen) {
+ if n == uncles {
+ // Add transactions and stuff on the last block
+ for i := 0; i < transactions; i++ {
+ tx, _ := types.SignTx(types.NewTransaction(uint64(i), aa,
+ big.NewInt(0), 50000, big.NewInt(1), make([]byte, dataSize)), types.HomesteadSigner{}, key)
+ b.AddTx(tx)
+ }
+ for i := 0; i < uncles; i++ {
+ b.AddUncle(&types.Header{ParentHash: b.PrevBlock(n - 1 - i).Hash(), Number: big.NewInt(int64(n - i))})
+ }
+ }
+ })
+ block := blocks[len(blocks)-1]
+ return block
+}
+
+// TestRlpIterator tests that individual transactions can be picked out
+// from blocks without full unmarshalling/marshalling
+func TestRlpIterator(t *testing.T) {
+ for _, tt := range []struct {
+ txs int
+ uncles int
+ datasize int
+ }{
+ {0, 0, 0},
+ {0, 2, 0},
+ {10, 0, 0},
+ {10, 2, 0},
+ {10, 2, 50},
+ } {
+ testRlpIterator(t, tt.txs, tt.uncles, tt.datasize)
+ }
+}
+
+func testRlpIterator(t *testing.T, txs, uncles, datasize int) {
+ desc := fmt.Sprintf("%d txs [%d datasize] and %d uncles", txs, datasize, uncles)
+ bodyRlp, _ := rlp.EncodeToBytes(getBlock(txs, uncles, datasize).Body())
+ it, err := rlp.NewListIterator(bodyRlp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Check that txs exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got zero")
+ }
+ txdata := it.Value()
+ // Check that uncles exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got one")
+ }
+ // No more after that
+ if it.Next() {
+ t.Fatal("expected only two elems, got more")
+ }
+ txIt, err := rlp.NewListIterator(txdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var gotHashes []common.Hash
+ var expHashes []common.Hash
+ for txIt.Next() {
+ gotHashes = append(gotHashes, crypto.Keccak256Hash(txIt.Value()))
+ }
+
+ var expBody types.Body
+ err = rlp.DecodeBytes(bodyRlp, &expBody)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, tx := range expBody.Transactions {
+ expHashes = append(expHashes, tx.Hash())
+ }
+ if gotLen, expLen := len(gotHashes), len(expHashes); gotLen != expLen {
+ t.Fatalf("testcase %v: length wrong, got %d exp %d", desc, gotLen, expLen)
+ }
+ // also sanity check against input
+ if gotLen := len(gotHashes); gotLen != txs {
+ t.Fatalf("testcase %v: length wrong, got %d exp %d", desc, gotLen, txs)
+ }
+ for i, got := range gotHashes {
+ if exp := expHashes[i]; got != exp {
+ t.Errorf("testcase %v: hash wrong, got %x, exp %x", desc, got, exp)
+ }
+ }
+}
+
+// BenchmarkHashing compares the speeds of hashing a rlp raw data directly
+// without the unmarshalling/marshalling step
+func BenchmarkHashing(b *testing.B) {
+ // Make a pretty fat block
+ var (
+ bodyRlp []byte
+ blockRlp []byte
+ )
+ {
+ block := getBlock(200, 2, 50)
+ bodyRlp, _ = rlp.EncodeToBytes(block.Body())
+ blockRlp, _ = rlp.EncodeToBytes(block)
+ }
+ var got common.Hash
+ var hasher = sha3.NewLegacyKeccak256()
+ b.Run("iteratorhashing", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var hash common.Hash
+ it, err := rlp.NewListIterator(bodyRlp)
+ if err != nil {
+ b.Fatal(err)
+ }
+ it.Next()
+ txs := it.Value()
+ txIt, err := rlp.NewListIterator(txs)
+ if err != nil {
+ b.Fatal(err)
+ }
+ for txIt.Next() {
+ hasher.Reset()
+ hasher.Write(txIt.Value())
+ hasher.Sum(hash[:0])
+ got = hash
+ }
+ }
+ })
+ var exp common.Hash
+ b.Run("fullbodyhashing", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var body types.Body
+ rlp.DecodeBytes(bodyRlp, &body)
+ for _, tx := range body.Transactions {
+ exp = tx.Hash()
+ }
+ }
+ })
+ b.Run("fullblockhashing", func(b *testing.B) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var block types.Block
+ rlp.DecodeBytes(blockRlp, &block)
+ for _, tx := range block.Transactions() {
+ tx.Hash()
+ }
+ }
+ })
+ if got != exp {
+ b.Fatalf("hash wrong, got %x exp %x", got, exp)
+ }
+}
diff --git a/core/state/dump.go b/core/state/dump.go
index 3f1d5f9cd9..eb12392f37 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -103,7 +103,6 @@ func (d iterativeDump) onRoot(root common.Hash) {
}
func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) (nextKey []byte) {
- emptyAddress := (common.Address{})
missingPreimages := 0
c.onRoot(s.trie.Hash())
@@ -114,15 +113,14 @@ func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingP
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
panic(err)
}
- addr := common.BytesToAddress(s.trie.GetKey(it.Key))
- obj := newObject(nil, addr, data)
account := DumpAccount{
Balance: data.Balance.String(),
Nonce: data.Nonce,
Root: common.Bytes2Hex(data.Root[:]),
CodeHash: common.Bytes2Hex(data.CodeHash),
}
- if emptyAddress == addr {
+ addrBytes := s.trie.GetKey(it.Key)
+ if addrBytes == nil {
// Preimage missing
missingPreimages++
if excludeMissingPreimages {
@@ -130,6 +128,8 @@ func (s *StateDB) dump(c collector, excludeCode, excludeStorage, excludeMissingP
}
account.SecureKey = it.Key
}
+ addr := common.BytesToAddress(addrBytes)
+ obj := newObject(nil, addr, data)
if !excludeCode {
account.Code = common.Bytes2Hex(obj.Code(s.db))
}
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
index 1068dc2a01..b92e942950 100644
--- a/core/state/snapshot/account.go
+++ b/core/state/snapshot/account.go
@@ -24,8 +24,10 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-// Account is a slim version of a state.Account, where the root and code hash
-// are replaced with a nil byte slice for empty accounts.
+// Account is a modified version of a state.Account, where the root is replaced
+// with a byte slice. This format can be used to represent full-consensus format
+// or slim-snapshot format which replaces the empty root and code hash as nil
+// byte slice.
type Account struct {
Nonce uint64
Balance *big.Int
@@ -33,9 +35,8 @@ type Account struct {
CodeHash []byte
}
-// AccountRLP converts a state.Account content into a slim snapshot version RLP
-// encoded.
-func AccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
+// SlimAccount converts a state.Account content into a slim snapshot account
+func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account {
slim := Account{
Nonce: nonce,
Balance: balance,
@@ -46,9 +47,40 @@ func AccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byt
if !bytes.Equal(codehash, emptyCode[:]) {
slim.CodeHash = codehash
}
- data, err := rlp.EncodeToBytes(slim)
+ return slim
+}
+
+// SlimAccountRLP converts a state.Account content into a slim snapshot
+// version RLP encoded.
+func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
+ data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash))
if err != nil {
panic(err)
}
return data
}
+
+// FullAccount decodes the data on the 'slim RLP' format and return
+// the consensus format account.
+func FullAccount(data []byte) (Account, error) {
+ var account Account
+ if err := rlp.DecodeBytes(data, &account); err != nil {
+ return Account{}, err
+ }
+ if len(account.Root) == 0 {
+ account.Root = emptyRoot[:]
+ }
+ if len(account.CodeHash) == 0 {
+ account.CodeHash = emptyCode[:]
+ }
+ return account, nil
+}
+
+// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index d9c86e5163..dee9ff0bf2 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -17,6 +17,8 @@
package snapshot
import (
+ "bytes"
+ "fmt"
"sync"
"time"
@@ -27,80 +29,239 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
-// conversionAccount is used for converting between full and slim format. When
-// doing this, we can consider 'balance' as a byte array, as it has already
-// been converted from big.Int into an rlp-byteslice.
-type conversionAccount struct {
- Nonce uint64
- Balance []byte
- Root []byte
- CodeHash []byte
+// trieKV represents a trie key-value pair
+type trieKV struct {
+ key common.Hash
+ value []byte
}
-// SlimToFull converts data on the 'slim RLP' format into the full RLP-format
-func SlimToFull(data []byte) ([]byte, error) {
- acc := &conversionAccount{}
- if err := rlp.DecodeBytes(data, acc); err != nil {
- return nil, err
- }
- if len(acc.Root) == 0 {
- acc.Root = emptyRoot[:]
- }
- if len(acc.CodeHash) == 0 {
- acc.CodeHash = emptyCode[:]
+type (
+ // trieGeneratorFn is the interface of trie generation which can
+ // be implemented by different trie algorithm.
+ trieGeneratorFn func(in chan (trieKV), out chan (common.Hash))
+
+ // leafCallbackFn is the callback invoked at the leaves of the trie,
+ // returns the subtrie root with the specified subtrie identifier.
+ leafCallbackFn func(hash common.Hash, stat *generateStats) common.Hash
+)
+
+// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
+func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
+ return generateTrieRoot(it, common.Hash{}, stdGenerate, nil, &generateStats{start: time.Now()}, true)
+}
+
+// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
+func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
+ return generateTrieRoot(it, account, stdGenerate, nil, &generateStats{start: time.Now()}, true)
+}
+
+// VerifyState takes the whole snapshot tree as the input, traverses all the accounts
+// as well as the corresponding storages and compares the re-computed hash with the
+// original one(state root and the storage root).
+func VerifyState(snaptree *Tree, root common.Hash) error {
+ acctIt, err := snaptree.AccountIterator(root, common.Hash{})
+ if err != nil {
+ return err
}
- fullData, err := rlp.EncodeToBytes(acc)
+ defer acctIt.Release()
+
+ got, err := generateTrieRoot(acctIt, common.Hash{}, stdGenerate, func(account common.Hash, stat *generateStats) common.Hash {
+ storageIt, err := snaptree.StorageIterator(root, account, common.Hash{})
+ if err != nil {
+ return common.Hash{}
+ }
+ defer storageIt.Release()
+
+ hash, err := generateTrieRoot(storageIt, account, stdGenerate, nil, stat, false)
+ if err != nil {
+ return common.Hash{}
+ }
+ return hash
+ }, &generateStats{start: time.Now()}, true)
+
if err != nil {
- return nil, err
+ return err
+ }
+ if got != root {
+ return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
}
- return fullData, nil
+ return nil
}
-// trieKV represents a trie key-value pair
-type trieKV struct {
- key common.Hash
- value []byte
+// generateStats is a collection of statistics gathered by the trie generator
+// for logging purposes.
+type generateStats struct {
+ accounts uint64
+ slots uint64
+ curAccount common.Hash
+ curSlot common.Hash
+ start time.Time
+ lock sync.RWMutex
+}
+
+// progress records the progress trie generator made recently.
+func (stat *generateStats) progress(accounts, slots uint64, curAccount common.Hash, curSlot common.Hash) {
+ stat.lock.Lock()
+ defer stat.lock.Unlock()
+
+ stat.accounts += accounts
+ stat.slots += slots
+ stat.curAccount = curAccount
+ stat.curSlot = curSlot
}
-type trieGeneratorFn func(in chan (trieKV), out chan (common.Hash))
+// report prints the cumulative progress statistic smartly.
+func (stat *generateStats) report() {
+ stat.lock.RLock()
+ defer stat.lock.RUnlock()
-// GenerateTrieRoot takes an account iterator and reproduces the root hash.
-func GenerateTrieRoot(it AccountIterator) common.Hash {
- return generateTrieRoot(it, stdGenerate)
+ var ctx []interface{}
+ if stat.curSlot != (common.Hash{}) {
+ ctx = append(ctx, []interface{}{
+ "in", stat.curAccount,
+ "at", stat.curSlot,
+ }...)
+ } else {
+ ctx = append(ctx, []interface{}{"at", stat.curAccount}...)
+ }
+ // Add the usual measurements
+ ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
+ if stat.slots != 0 {
+ ctx = append(ctx, []interface{}{"slots", stat.slots}...)
+ }
+ ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
+ log.Info("Generating trie hash from snapshot", ctx...)
+}
+
+// reportDone prints the last log when the whole generation is finished.
+func (stat *generateStats) reportDone() {
+ stat.lock.RLock()
+ defer stat.lock.RUnlock()
+
+ var ctx []interface{}
+ ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
+ if stat.slots != 0 {
+ ctx = append(ctx, []interface{}{"slots", stat.slots}...)
+ }
+ ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
+ log.Info("Generated trie hash from snapshot", ctx...)
}
-func generateTrieRoot(it AccountIterator, generatorFn trieGeneratorFn) common.Hash {
+// generateTrieRoot generates the trie hash based on the snapshot iterator.
+// It can be used for generating account trie, storage trie or even the
+// whole state which connects the accounts and the corresponding storages.
+func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
var (
- in = make(chan trieKV) // chan to pass leaves
- out = make(chan common.Hash) // chan to collect result
- wg sync.WaitGroup
+ in = make(chan trieKV) // chan to pass leaves
+ out = make(chan common.Hash, 1) // chan to collect result
+ stoplog = make(chan bool, 1) // 1-size buffer, works when logging is not enabled
+ wg sync.WaitGroup
)
+ // Spin up a go-routine for trie hash re-generation
wg.Add(1)
go func() {
+ defer wg.Done()
generatorFn(in, out)
- wg.Done()
}()
- // Feed leaves
- start := time.Now()
- logged := time.Now()
- accounts := 0
+
+ // Spin up a go-routine for progress logging
+ if report && stats != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ timer := time.NewTimer(0)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-timer.C:
+ stats.report()
+ timer.Reset(time.Second * 8)
+ case success := <-stoplog:
+ if success {
+ stats.reportDone()
+ }
+ return
+ }
+ }
+ }()
+ }
+ // stop is a helper function to shutdown the background threads
+ // and return the re-generated trie hash.
+ stop := func(success bool) common.Hash {
+ close(in)
+ result := <-out
+ stoplog <- success
+ wg.Wait()
+ return result
+ }
+ var (
+ logged = time.Now()
+ processed = uint64(0)
+ leaf trieKV
+ last common.Hash
+ )
+ // Start to feed leaves
for it.Next() {
- slimData := it.Account()
- fullData, _ := SlimToFull(slimData)
- l := trieKV{it.Hash(), fullData}
- in <- l
- if time.Since(logged) > 8*time.Second {
- log.Info("Generating trie hash from snapshot",
- "at", l.key, "accounts", accounts, "elapsed", time.Since(start))
- logged = time.Now()
+ if account == (common.Hash{}) {
+ var (
+ err error
+ fullData []byte
+ )
+ if leafCallback == nil {
+ fullData, err = FullAccountRLP(it.(AccountIterator).Account())
+ if err != nil {
+ stop(false)
+ return common.Hash{}, err
+ }
+ } else {
+ account, err := FullAccount(it.(AccountIterator).Account())
+ if err != nil {
+ stop(false)
+ return common.Hash{}, err
+ }
+ // Apply the leaf callback. Normally the callback is used to traverse
+ // the storage trie and re-generate the subtrie root.
+ subroot := leafCallback(it.Hash(), stats)
+ if !bytes.Equal(account.Root, subroot.Bytes()) {
+ stop(false)
+ return common.Hash{}, fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot)
+ }
+ fullData, err = rlp.EncodeToBytes(account)
+ if err != nil {
+ stop(false)
+ return common.Hash{}, err
+ }
+ }
+ leaf = trieKV{it.Hash(), fullData}
+ } else {
+ leaf = trieKV{it.Hash(), common.CopyBytes(it.(StorageIterator).Slot())}
+ }
+ in <- leaf
+
+ // Accumulate the generation statistic if it's required.
+ processed++
+ if time.Since(logged) > 3*time.Second && stats != nil {
+ if account == (common.Hash{}) {
+ stats.progress(processed, 0, it.Hash(), common.Hash{})
+ } else {
+ stats.progress(0, processed, account, it.Hash())
+ }
+ logged, processed = time.Now(), 0
+ }
+ last = it.Hash()
+ }
+ // Commit the last part statistic.
+ if processed > 0 && stats != nil {
+ if account == (common.Hash{}) {
+ stats.progress(processed, 0, last, common.Hash{})
+ } else {
+ stats.progress(0, processed, account, last)
}
- accounts++
}
- close(in)
- result := <-out
- log.Info("Generated trie hash from snapshot", "accounts", accounts, "elapsed", time.Since(start))
- wg.Wait()
- return result
+ result := stop(true)
+ return result, nil
}
// stdGenerate is a very basic hexary trie builder which uses the same Trie
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index 86ca5c8ba0..fc8f534b4b 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -105,6 +105,13 @@ type diffLayer struct {
root common.Hash // Root hash to which this snapshot diff belongs to
stale uint32 // Signals that the layer became stale (state progressed)
+ // destructSet is a very special helper marker. If an account is marked as
+ // deleted, then it's recorded in this set. However it's allowed that an account
+ // is included here but still available in other sets(e.g. storageData). The
+ // reason is the diff layer includes all the changes in a *block*. It can
+ // happen that in the tx_1, account A is self-destructed while in the tx_2
+ // it's recreated. But we still need this marker to indicate the "old" A is
+ // deleted, all data in other set belongs to the "new" A.
destructSet map[common.Hash]struct{} // Keyed markers for deleted (and potentially) recreated accounts
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
accountData map[common.Hash][]byte // Keyed accounts for direct retrival (nil means deleted)
@@ -169,6 +176,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
destructSet: destructs,
accountData: accounts,
storageData: storage,
+ storageList: make(map[common.Hash][]common.Hash),
}
switch parent := parent.(type) {
case *diskLayer:
@@ -194,11 +202,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
dl.memory += uint64(common.HashLength + len(data))
snapshotDirtyAccountWriteMeter.Mark(int64(len(data)))
}
- // Fill the storage hashes and sort them for the iterator
- dl.storageList = make(map[common.Hash][]common.Hash)
- for accountHash := range destructs {
- dl.storageList[accountHash] = nil
- }
// Determine memory size and track the dirty writes
for _, slots := range storage {
for _, data := range slots {
@@ -206,7 +209,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
}
}
- dl.memory += uint64(len(dl.storageList) * common.HashLength)
+ dl.memory += uint64(len(destructs) * common.HashLength)
return dl
}
@@ -287,6 +290,8 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
// Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below
@@ -347,6 +352,8 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account. If the slot is unknown to this diff, it's parent
// is consulted.
+//
+// Note the returned slot is not a copy, please don't modify it.
func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
// Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below
@@ -502,22 +509,34 @@ func (dl *diffLayer) AccountList() []common.Hash {
}
}
sort.Sort(hashes(dl.accountList))
+ dl.memory += uint64(len(dl.accountList) * common.HashLength)
return dl.accountList
}
// StorageList returns a sorted list of all storage slot hashes in this difflayer
-// for the given account.
+// for the given account. If the whole storage is destructed in this layer, then
+// an additional flag *destructed = true* will be returned, otherwise the flag is
+// false. Besides, the returned list will include the hash of deleted storage slot.
+// Note a special case is an account is deleted in a prior tx but is recreated in
+// the following tx with some storage slots set. In this case the returned list is
+// not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
-func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
- // If an old list already exists, return it
+func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
dl.lock.RLock()
- list := dl.storageList[accountHash]
+ _, destructed := dl.destructSet[accountHash]
+ if _, ok := dl.storageData[accountHash]; !ok {
+ // Account not tracked by this layer
+ dl.lock.RUnlock()
+ return nil, destructed
+ }
+ // If an old list already exists, return it
+ if list, exist := dl.storageList[accountHash]; exist {
+ dl.lock.RUnlock()
+ return list, destructed // the cached list can't be nil
+ }
dl.lock.RUnlock()
- if list != nil {
- return list
- }
// No old sorted account list exists, generate a new one
dl.lock.Lock()
defer dl.lock.Unlock()
@@ -529,5 +548,6 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
}
sort.Sort(hashes(storageList))
dl.storageList[accountHash] = storageList
- return storageList
+ dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
+ return storageList, destructed
}
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
index 329e0eb8e2..31636ee133 100644
--- a/core/state/snapshot/difflayer_test.go
+++ b/core/state/snapshot/difflayer_test.go
@@ -109,7 +109,8 @@ func TestMergeBasics(t *testing.T) {
if have, want := len(merged.storageList), i; have != want {
t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
}
- if have, want := len(merged.StorageList(aHash)), len(sMap); have != want {
+ list, _ := merged.StorageList(aHash)
+ if have, want := len(list), len(sMap); have != want {
t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
}
if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
@@ -131,7 +132,7 @@ func TestMergeDelete(t *testing.T) {
flipDrops := func() map[common.Hash]struct{} {
return map[common.Hash]struct{}{
- h2: struct{}{},
+ h2: {},
}
}
flipAccs := func() map[common.Hash][]byte {
@@ -141,7 +142,7 @@ func TestMergeDelete(t *testing.T) {
}
flopDrops := func() map[common.Hash]struct{} {
return map[common.Hash]struct{}{
- h1: struct{}{},
+ h1: {},
}
}
flopAccs := func() map[common.Hash][]byte {
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 15f3a6b1fa..8460cd332f 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -121,10 +121,10 @@ func TestDiskMerge(t *testing.T) {
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
- accDelNoCache: struct{}{},
- accDelCache: struct{}{},
- conNukeNoCache: struct{}{},
- conNukeCache: struct{}{},
+ accDelNoCache: {},
+ accDelCache: {},
+ conNukeNoCache: {},
+ conNukeCache: {},
}, map[common.Hash][]byte{
accModNoCache: reverse(accModNoCache[:]),
accModCache: reverse(accModCache[:]),
@@ -344,10 +344,10 @@ func TestDiskPartialMerge(t *testing.T) {
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
- accDelNoCache: struct{}{},
- accDelCache: struct{}{},
- conNukeNoCache: struct{}{},
- conNukeCache: struct{}{},
+ accDelNoCache: {},
+ accDelCache: {},
+ conNukeNoCache: {},
+ conNukeCache: {},
}, map[common.Hash][]byte{
accModNoCache: reverse(accModNoCache[:]),
accModCache: reverse(accModCache[:]),
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 4b017fe69b..c3a4a552ff 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -42,7 +42,7 @@ var (
)
// generatorStats is a collection of statistics gathered by the snapshot generator
-// for logging purposes.
+// for logging purposes.
type generatorStats struct {
wiping chan struct{} // Notification channel if wiping is in progress
origin uint64 // Origin prefix where generation started
@@ -167,7 +167,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
- data := AccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
// If the account is not yet in-progress, write it out
if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
index 84cc5c3bca..adef367a3a 100644
--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -26,9 +26,9 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
)
-// AccountIterator is an iterator to step over all the accounts in a snapshot,
-// which may or may npt be composed of multiple layers.
-type AccountIterator interface {
+// Iterator is an iterator to step over all the accounts or the specific
+// storage in a snapshot which may or may not be composed of multiple layers.
+type Iterator interface {
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
@@ -38,18 +38,35 @@ type AccountIterator interface {
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
Error() error
- // Hash returns the hash of the account the iterator is currently at.
+ // Hash returns the hash of the account or storage slot the iterator is
+ // currently at.
Hash() common.Hash
- // Account returns the RLP encoded slim account the iterator is currently at.
- // An error will be returned if the iterator becomes invalid (e.g. snaph
- Account() []byte
-
// Release releases associated resources. Release should always succeed and
// can be called multiple times without causing error.
Release()
}
+// AccountIterator is an iterator to step over all the accounts in a snapshot,
+// which may or may not be composed of multiple layers.
+type AccountIterator interface {
+ Iterator
+
+ // Account returns the RLP encoded slim account the iterator is currently at.
+ // An error will be returned if the iterator becomes invalid
+ Account() []byte
+}
+
+// StorageIterator is an iterator to step over the specific storage in a snapshot,
+// which may or may not be composed of multiple layers.
+type StorageIterator interface {
+ Iterator
+
+ // Slot returns the storage slot the iterator is currently at. An error will
+ // be returned if the iterator becomes invalid
+ Slot() []byte
+}
+
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a single diff layer. Higher order iterators
// will use the deleted accounts to skip deeper iterators.
@@ -70,7 +87,7 @@ func (dl *diffLayer) AccountIterator(seek common.Hash) AccountIterator {
// Seek out the requested starting account
hashes := dl.AccountList()
index := sort.Search(len(hashes), func(i int) bool {
- return bytes.Compare(seek[:], hashes[i][:]) < 0
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
})
// Assemble and returned the already seeked iterator
return &diffAccountIterator{
@@ -120,11 +137,14 @@ func (it *diffAccountIterator) Hash() common.Hash {
// This method assumes that flattening does not delete elements from
// the accountdata mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
+//
+// Note the returned account is not a copy, please don't modify it.
func (it *diffAccountIterator) Account() []byte {
it.layer.lock.RLock()
blob, ok := it.layer.accountData[it.curHash]
if !ok {
if _, ok := it.layer.destructSet[it.curHash]; ok {
+ it.layer.lock.RUnlock()
return nil
}
panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
@@ -163,7 +183,7 @@ func (it *diskAccountIterator) Next() bool {
}
// Try to advance the iterator and release it if we reached the end
for {
- if !it.it.Next() || !bytes.HasPrefix(it.it.Key(), rawdb.SnapshotAccountPrefix) {
+ if !it.it.Next() {
it.it.Release()
it.it = nil
return false
@@ -181,12 +201,15 @@ func (it *diskAccountIterator) Next() bool {
// A diff layer is immutable after creation content wise and can always be fully
// iterated without error, so this method always returns nil.
func (it *diskAccountIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
return it.it.Error()
}
// Hash returns the hash of the account the iterator is currently at.
func (it *diskAccountIterator) Hash() common.Hash {
- return common.BytesToHash(it.it.Key())
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
// Account returns the RLP encoded slim account the iterator is currently at.
@@ -202,3 +225,176 @@ func (it *diskAccountIterator) Release() {
it.it = nil
}
}
+
+// diffStorageIterator is a storage iterator that steps over the specific storage
+// (both live and deleted) contained within a single diff layer. Higher order
+// iterators will use the deleted slot to skip deeper iterators.
+type diffStorageIterator struct {
+ // curHash is the current hash the iterator is positioned on. The field is
+ // explicitly tracked since the referenced diff layer might go stale after
+ // the iterator was positioned and we don't want to fail accessing the old
+ // hash as long as the iterator is not touched any more.
+ curHash common.Hash
+ account common.Hash
+
+ layer *diffLayer // Live layer to retrieve values from
+ keys []common.Hash // Keys left in the layer to iterate
+ fail error // Any failures encountered (stale)
+}
+
+// StorageIterator creates a storage iterator over a single diff layer.
+// Except the storage iterator is returned, there is an additional flag
+// "destructed" returned. If it's true then it means the whole storage is
+// destructed in this layer (maybe recreated too), don't bother deeper layers
+// for storage retrieval.
+func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
+ // Create the storage for this account even if it's marked
+ // as destructed. The iterator is for the new one which
+ // just has the same address as the deleted one.
+ hashes, destructed := dl.StorageList(account)
+ index := sort.Search(len(hashes), func(i int) bool {
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
+ })
+ // Assemble and returned the already seeked iterator
+ return &diffStorageIterator{
+ layer: dl,
+ account: account,
+ keys: hashes[index:],
+ }, destructed
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diffStorageIterator) Next() bool {
+ // If the iterator was already stale, consider it a programmer error. Although
+ // we could just return false here, triggering this path would probably mean
+ // somebody forgot to check for Error, so lets blow up instead of undefined
+ // behavior that's hard to debug.
+ if it.fail != nil {
+ panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
+ }
+ // Stop iterating if all keys were exhausted
+ if len(it.keys) == 0 {
+ return false
+ }
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ return false
+ }
+ // Iterator seems to be still alive, retrieve and cache the live hash
+ it.curHash = it.keys[0]
+ // key cached, shift the iterator and notify the user of success
+ it.keys = it.keys[1:]
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *diffStorageIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diffStorageIterator) Hash() common.Hash {
+ return it.curHash
+}
+
+// Slot returns the raw storage slot value the iterator is currently at.
+// This method may _fail_, if the underlying layer has been flattened between
+// the call to Next and Value. That type of error will set it.Err.
+// This method assumes that flattening does not delete elements from
+// the storage mapping (writing nil into it is fine though), and will panic
+// if elements have been deleted.
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *diffStorageIterator) Slot() []byte {
+ it.layer.lock.RLock()
+ storage, ok := it.layer.storageData[it.account]
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent account storage: %x", it.account))
+ }
+ // Storage slot might be nil(deleted), but it must exist
+ blob, ok := storage[it.curHash]
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent storage slot: %x", it.curHash))
+ }
+ it.layer.lock.RUnlock()
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ }
+ return blob
+}
+
+// Release is a noop for diff storage iterators as there are no held resources.
+func (it *diffStorageIterator) Release() {}
+
+// diskStorageIterator is a storage iterator that steps over the live storage
+// contained within a disk layer.
+type diskStorageIterator struct {
+ layer *diskLayer
+ account common.Hash
+ it ethdb.Iterator
+}
+
+// StorageIterator creates a storage iterator over a disk layer.
+// If the whole storage is destructed, then all entries in the disk
+// layer are deleted already. So the "destructed" flag returned here
+// is always false.
+func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
+ pos := common.TrimRightZeroes(seek[:])
+ return &diskStorageIterator{
+ layer: dl,
+ account: account,
+ it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
+ }, false
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diskStorageIterator) Next() bool {
+ // If the iterator was already exhausted, don't bother
+ if it.it == nil {
+ return false
+ }
+ // Try to advance the iterator and release it if we reached the end
+ for {
+ if !it.it.Next() {
+ it.it.Release()
+ it.it = nil
+ return false
+ }
+ if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
+ break
+ }
+ }
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+//
+// A diff layer is immutable after creation content wise and can always be fully
+// iterated without error, so this method always returns nil.
+func (it *diskStorageIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
+ return it.it.Error()
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diskStorageIterator) Hash() common.Hash {
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
+}
+
+// Slot returns the raw storage slot content the iterator is currently at.
+func (it *diskStorageIterator) Slot() []byte {
+ return it.it.Value()
+}
+
+// Release releases the database snapshot held during iteration.
+func (it *diskStorageIterator) Release() {
+ // The iterator is auto-released on exhaustion, so make sure it's still alive
+ if it.it != nil {
+ it.it.Release()
+ it.it = nil
+ }
+}
diff --git a/core/state/snapshot/iterator_binary.go b/core/state/snapshot/iterator_binary.go
index 7d647ee7ba..f82f750029 100644
--- a/core/state/snapshot/iterator_binary.go
+++ b/core/state/snapshot/iterator_binary.go
@@ -22,29 +22,91 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// binaryAccountIterator is a simplistic iterator to step over the accounts in
-// a snapshot, which may or may npt be composed of multiple layers. Performance
+// binaryIterator is a simplistic iterator to step over the accounts or storage
+// in a snapshot, which may or may not be composed of multiple layers. Performance
// wise this iterator is slow, it's meant for cross validating the fast one,
-type binaryAccountIterator struct {
- a *diffAccountIterator
- b AccountIterator
- aDone bool
- bDone bool
- k common.Hash
- fail error
+type binaryIterator struct {
+ a Iterator
+ b Iterator
+ aDone bool
+ bDone bool
+ accountIterator bool
+ k common.Hash
+ account common.Hash
+ fail error
}
-// newBinaryAccountIterator creates a simplistic account iterator to step over
-// all the accounts in a slow, but eaily verifiable way.
-func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
+// initBinaryAccountIterator creates a simplistic iterator to step over all the
+// accounts in a slow, but easily verifiable way. Note this function is used for
+// initialization, use `newBinaryAccountIterator` as the API.
+func (dl *diffLayer) initBinaryAccountIterator() Iterator {
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ l := &binaryIterator{
+ a: dl.AccountIterator(common.Hash{}),
+ b: dl.Parent().AccountIterator(common.Hash{}),
+ accountIterator: true,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+ }
+ l := &binaryIterator{
+ a: dl.AccountIterator(common.Hash{}),
+ b: parent.initBinaryAccountIterator(),
+ accountIterator: true,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// initBinaryStorageIterator creates a simplistic iterator to step over all the
+// storage slots in a slow, but easily verifiable way. Note this function is used
+// for initialization, use `newBinaryStorageIterator` as the API.
+func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
- // parent is the disk layer
- return dl.AccountIterator(common.Hash{})
+ // If the storage in this layer is already destructed, discard all
+ // deeper layers but still return a valid single-branch iterator.
+ a, destructed := dl.StorageIterator(account, common.Hash{})
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ // The parent is disk layer, don't need to take care "destructed"
+ // anymore.
+ b, _ := dl.Parent().StorageIterator(account, common.Hash{})
+ l := &binaryIterator{
+ a: a,
+ b: b,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
}
- l := &binaryAccountIterator{
- a: dl.AccountIterator(common.Hash{}).(*diffAccountIterator),
- b: parent.newBinaryAccountIterator(),
+ // If the storage in this layer is already destructed, discard all
+ // deeper layers but still return a valid single-branch iterator.
+ a, destructed := dl.StorageIterator(account, common.Hash{})
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ l := &binaryIterator{
+ a: a,
+ b: parent.initBinaryStorageIterator(account),
+ account: account,
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
@@ -54,23 +116,22 @@ func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
-func (it *binaryAccountIterator) Next() bool {
+func (it *binaryIterator) Next() bool {
if it.aDone && it.bDone {
return false
}
- nextB := it.b.Hash()
first:
- nextA := it.a.Hash()
if it.aDone {
+ it.k = it.b.Hash()
it.bDone = !it.b.Next()
- it.k = nextB
return true
}
if it.bDone {
+ it.k = it.a.Hash()
it.aDone = !it.a.Next()
- it.k = nextA
return true
}
+ nextA, nextB := it.a.Hash(), it.b.Hash()
if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
it.aDone = !it.a.Next()
it.k = nextA
@@ -87,20 +148,43 @@ first:
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
-func (it *binaryAccountIterator) Error() error {
+func (it *binaryIterator) Error() error {
return it.fail
}
// Hash returns the hash of the account the iterator is currently at.
-func (it *binaryAccountIterator) Hash() common.Hash {
+func (it *binaryIterator) Hash() common.Hash {
return it.k
}
// Account returns the RLP encoded slim account the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
-func (it *binaryAccountIterator) Account() []byte {
- blob, err := it.a.layer.AccountRLP(it.k)
+//
+// Note the returned account is not a copy, please don't modify it.
+func (it *binaryIterator) Account() []byte {
+ if !it.accountIterator {
+ return nil
+ }
+ // The topmost iterator must be `diffAccountIterator`
+ blob, err := it.a.(*diffAccountIterator).layer.AccountRLP(it.k)
+ if err != nil {
+ it.fail = err
+ return nil
+ }
+ return blob
+}
+
+// Slot returns the raw storage slot data the iterator is currently at, or
+// nil if the iterated snapshot stack became stale (you can check Error after
+// to see if it failed or not).
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *binaryIterator) Slot() []byte {
+ if it.accountIterator {
+ return nil
+ }
+ blob, err := it.a.(*diffStorageIterator).layer.Storage(it.account, it.k)
if err != nil {
it.fail = err
return nil
@@ -109,7 +193,21 @@ func (it *binaryAccountIterator) Account() []byte {
}
// Release recursively releases all the iterators in the stack.
-func (it *binaryAccountIterator) Release() {
+func (it *binaryIterator) Release() {
it.a.Release()
it.b.Release()
}
+
+// newBinaryAccountIterator creates a simplistic account iterator to step over
+// all the accounts in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
+ iter := dl.initBinaryAccountIterator()
+ return iter.(AccountIterator)
+}
+
+// newBinaryStorageIterator creates a simplistic storage iterator to step over
+// all the storage slots in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
+ iter := dl.initBinaryStorageIterator(account)
+ return iter.(StorageIterator)
+}
diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go
index 99734ec912..82c46f1fb6 100644
--- a/core/state/snapshot/iterator_fast.go
+++ b/core/state/snapshot/iterator_fast.go
@@ -24,23 +24,23 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// weightedAccountIterator is an account iterator with an assigned weight. It is
-// used to prioritise which account is the correct one if multiple iterators find
-// the same one (modified in multiple consecutive blocks).
-type weightedAccountIterator struct {
- it AccountIterator
+// weightedIterator is an iterator with an assigned weight. It is used to prioritise
+// which account or storage slot is the correct one if multiple iterators find the
+// same one (modified in multiple consecutive blocks).
+type weightedIterator struct {
+ it Iterator
priority int
}
-// weightedAccountIterators is a set of iterators implementing the sort.Interface.
-type weightedAccountIterators []*weightedAccountIterator
+// weightedIterators is a set of iterators implementing the sort.Interface.
+type weightedIterators []*weightedIterator
// Len implements sort.Interface, returning the number of active iterators.
-func (its weightedAccountIterators) Len() int { return len(its) }
+func (its weightedIterators) Len() int { return len(its) }
// Less implements sort.Interface, returning which of two iterators in the stack
// is before the other.
-func (its weightedAccountIterators) Less(i, j int) bool {
+func (its weightedIterators) Less(i, j int) bool {
// Order the iterators primarily by the account hashes
hashI := its[i].it.Hash()
hashJ := its[j].it.Hash()
@@ -51,45 +51,64 @@ func (its weightedAccountIterators) Less(i, j int) bool {
case 1:
return false
}
- // Same account in multiple layers, split by priority
+ // Same account/storage-slot in multiple layers, split by priority
return its[i].priority < its[j].priority
}
// Swap implements sort.Interface, swapping two entries in the iterator stack.
-func (its weightedAccountIterators) Swap(i, j int) {
+func (its weightedIterators) Swap(i, j int) {
its[i], its[j] = its[j], its[i]
}
-// fastAccountIterator is a more optimized multi-layer iterator which maintains a
+// fastIterator is a more optimized multi-layer iterator which maintains a
// direct mapping of all iterators leading down to the bottom layer.
-type fastAccountIterator struct {
- tree *Tree // Snapshot tree to reinitialize stale sub-iterators with
- root common.Hash // Root hash to reinitialize stale sub-iterators through
+type fastIterator struct {
+ tree *Tree // Snapshot tree to reinitialize stale sub-iterators with
+ root common.Hash // Root hash to reinitialize stale sub-iterators through
+
curAccount []byte
+ curSlot []byte
- iterators weightedAccountIterators
+ iterators weightedIterators
initiated bool
+ account bool
fail error
}
-// newFastAccountIterator creates a new hierarhical account iterator with one
+// newFastIterator creates a new hierarchical account or storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
-func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
+func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
snap := tree.Snapshot(root)
if snap == nil {
return nil, fmt.Errorf("unknown snapshot: %x", root)
}
- fi := &fastAccountIterator{
- tree: tree,
- root: root,
+ fi := &fastIterator{
+ tree: tree,
+ root: root,
+ account: accountIterator,
}
current := snap.(snapshot)
for depth := 0; current != nil; depth++ {
- fi.iterators = append(fi.iterators, &weightedAccountIterator{
- it: current.AccountIterator(seek),
- priority: depth,
- })
+ if accountIterator {
+ fi.iterators = append(fi.iterators, &weightedIterator{
+ it: current.AccountIterator(seek),
+ priority: depth,
+ })
+ } else {
+ // If the whole storage is destructed in this layer, don't
+ // bother deeper layer anymore. But we should still keep
+ // the iterator for this layer, since the iterator can contain
+ // some valid slots which belong to the re-created account.
+ it, destructed := current.StorageIterator(account, seek)
+ fi.iterators = append(fi.iterators, &weightedIterator{
+ it: it,
+ priority: depth,
+ })
+ if destructed {
+ break
+ }
+ }
current = current.Parent()
}
fi.init()
@@ -98,7 +117,7 @@ func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (Acc
// init walks over all the iterators and resolves any clashes between them, after
// which it prepares the stack for step-by-step iteration.
-func (fi *fastAccountIterator) init() {
+func (fi *fastIterator) init() {
// Track which account hashes are iterators positioned on
var positioned = make(map[common.Hash]int)
@@ -153,7 +172,7 @@ func (fi *fastAccountIterator) init() {
}
// Next steps the iterator forward one element, returning false if exhausted.
-func (fi *fastAccountIterator) Next() bool {
+func (fi *fastIterator) Next() bool {
if len(fi.iterators) == 0 {
return false
}
@@ -161,21 +180,25 @@ func (fi *fastAccountIterator) Next() bool {
// Don't forward first time -- we had to 'Next' once in order to
// do the sorting already
fi.initiated = true
- fi.curAccount = fi.iterators[0].it.Account()
+ if fi.account {
+ fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+ } else {
+ fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+ }
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false
}
- if fi.curAccount != nil {
+ if fi.curAccount != nil || fi.curSlot != nil {
return true
}
- // Implicit else: we've hit a nil-account, and need to fall through to the
- // loop below to land on something non-nil
+ // Implicit else: we've hit a nil-account or nil-slot, and need to
+ // fall through to the loop below to land on something non-nil
}
- // If an account is deleted in one of the layers, the key will still be there,
- // but the actual value will be nil. However, the iterator should not
- // export nil-values (but instead simply omit the key), so we need to loop
- // here until we either
+ // If an account or a slot is deleted in one of the layers, the key will
+ // still be there, but the actual value will be nil. However, the iterator
+ // should not export nil-values (but instead simply omit the key), so we
+ // need to loop here until we either
// - get a non-nil value,
// - hit an error,
// - or exhaust the iterator
@@ -183,12 +206,16 @@ func (fi *fastAccountIterator) Next() bool {
if !fi.next(0) {
return false // exhausted
}
- fi.curAccount = fi.iterators[0].it.Account()
+ if fi.account {
+ fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+ } else {
+ fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+ }
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false // error
}
- if fi.curAccount != nil {
+ if fi.curAccount != nil || fi.curSlot != nil {
break // non-nil value found
}
}
@@ -201,7 +228,7 @@ func (fi *fastAccountIterator) Next() bool {
// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
// invoke next(3), which will call Next on elem 3 (the second '5') and will
// cascade along the list, applying the same operation if needed.
-func (fi *fastAccountIterator) next(idx int) bool {
+func (fi *fastIterator) next(idx int) bool {
// If this particular iterator got exhausted, remove it and return true (the
// next one is surely not exhausted yet, otherwise it would have been removed
// already).
@@ -262,7 +289,7 @@ func (fi *fastAccountIterator) next(idx int) bool {
}
// move advances an iterator to another position in the list.
-func (fi *fastAccountIterator) move(index, newpos int) {
+func (fi *fastIterator) move(index, newpos int) {
elem := fi.iterators[index]
copy(fi.iterators[index:], fi.iterators[index+1:newpos+1])
fi.iterators[newpos] = elem
@@ -270,23 +297,30 @@ func (fi *fastAccountIterator) move(index, newpos int) {
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
-func (fi *fastAccountIterator) Error() error {
+func (fi *fastIterator) Error() error {
return fi.fail
}
// Hash returns the current key
-func (fi *fastAccountIterator) Hash() common.Hash {
+func (fi *fastIterator) Hash() common.Hash {
return fi.iterators[0].it.Hash()
}
-// Account returns the current key
-func (fi *fastAccountIterator) Account() []byte {
+// Account returns the current account blob.
+// Note the returned account is not a copy, please don't modify it.
+func (fi *fastIterator) Account() []byte {
return fi.curAccount
}
+// Slot returns the current storage slot.
+// Note the returned slot is not a copy, please don't modify it.
+func (fi *fastIterator) Slot() []byte {
+ return fi.curSlot
+}
+
// Release iterates over all the remaining live layer iterators and releases each
// of thme individually.
-func (fi *fastAccountIterator) Release() {
+func (fi *fastIterator) Release() {
for _, it := range fi.iterators {
it.it.Release()
}
@@ -294,9 +328,23 @@ func (fi *fastAccountIterator) Release() {
}
// Debug is a convencience helper during testing
-func (fi *fastAccountIterator) Debug() {
+func (fi *fastIterator) Debug() {
for _, it := range fi.iterators {
fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])
}
fmt.Println()
}
+
+// newFastAccountIterator creates a new hierarchical account iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
+ return newFastIterator(tree, root, common.Hash{}, seek, true)
+}
+
+// newFastStorageIterator creates a new hierarchical storage iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastStorageIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ return newFastIterator(tree, root, account, seek, false)
+}
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
index 5468a9a589..2c7e876e08 100644
--- a/core/state/snapshot/iterator_test.go
+++ b/core/state/snapshot/iterator_test.go
@@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
)
-// TestAccountIteratorBasics tests some simple single-layer iteration
+// TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration
func TestAccountIteratorBasics(t *testing.T) {
var (
destructs = make(map[common.Hash]struct{})
@@ -53,9 +53,55 @@ func TestAccountIteratorBasics(t *testing.T) {
}
}
// Add some (identical) layers on top
- parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- it := parent.AccountIterator(common.Hash{})
- verifyIterator(t, 100, it)
+ diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ it := diffLayer.AccountIterator(common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+
+ diskLayer := diffToDisk(diffLayer)
+ it = diskLayer.AccountIterator(common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+}
+
+// TestStorageIteratorBasics tests some simple single-layer(diff and disk) iteration for storage
+func TestStorageIteratorBasics(t *testing.T) {
+ var (
+ nilStorage = make(map[common.Hash]int)
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Fill some random data
+ for i := 0; i < 10; i++ {
+ h := randomHash()
+ accounts[h] = randomAccount()
+
+ accStorage := make(map[common.Hash][]byte)
+ value := make([]byte, 32)
+
+ var nilstorage int
+ for i := 0; i < 100; i++ {
+ rand.Read(value)
+ if rand.Intn(2) == 0 {
+ accStorage[randomHash()] = common.CopyBytes(value)
+ } else {
+ accStorage[randomHash()] = nil // delete slot
+ nilstorage += 1
+ }
+ }
+ storage[h] = accStorage
+ nilStorage[h] = nilstorage
+ }
+ // Add some (identical) layers on top
+ diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
+ for account := range accounts {
+ it, _ := diffLayer.StorageIterator(account, common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+ }
+
+ diskLayer := diffToDisk(diffLayer)
+ for account := range accounts {
+ it, _ := diskLayer.StorageIterator(account, common.Hash{})
+ verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
+ }
}
type testIterator struct {
@@ -87,6 +133,10 @@ func (ti *testIterator) Account() []byte {
return nil
}
+func (ti *testIterator) Slot() []byte {
+ return nil
+}
+
func (ti *testIterator) Release() {}
func TestFastIteratorBasics(t *testing.T) {
@@ -102,13 +152,12 @@ func TestFastIteratorBasics(t *testing.T) {
{9, 10}, {10, 13, 15, 16}},
expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}},
} {
- var iterators []*weightedAccountIterator
+ var iterators []*weightedIterator
for i, data := range tc.lists {
it := newTestIterator(data...)
- iterators = append(iterators, &weightedAccountIterator{it, i})
-
+ iterators = append(iterators, &weightedIterator{it, i})
}
- fi := &fastAccountIterator{
+ fi := &fastIterator{
iterators: iterators,
initiated: false,
}
@@ -122,7 +171,15 @@ func TestFastIteratorBasics(t *testing.T) {
}
}
-func verifyIterator(t *testing.T, expCount int, it AccountIterator) {
+type verifyContent int
+
+const (
+ verifyNothing verifyContent = iota
+ verifyAccount
+ verifyStorage
+)
+
+func verifyIterator(t *testing.T, expCount int, it Iterator, verify verifyContent) {
t.Helper()
var (
@@ -134,10 +191,13 @@ func verifyIterator(t *testing.T, expCount int, it AccountIterator) {
if bytes.Compare(last[:], hash[:]) >= 0 {
t.Errorf("wrong order: %x >= %x", last, hash)
}
- if it.Account() == nil {
+ count++
+ if verify == verifyAccount && len(it.(AccountIterator).Account()) == 0 {
+ t.Errorf("iterator returned nil-value for hash %x", hash)
+ } else if verify == verifyStorage && len(it.(StorageIterator).Slot()) == 0 {
t.Errorf("iterator returned nil-value for hash %x", hash)
}
- count++
+ last = hash
}
if count != expCount {
t.Errorf("iterator count mismatch: have %d, want %d", count, expCount)
@@ -173,13 +233,74 @@ func TestAccountIteratorTraversal(t *testing.T) {
// Verify the single and multi-layer iterators
head := snaps.Snapshot(common.HexToHash("0x04"))
- verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}))
- verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator())
+ verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
+ verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- defer it.Release()
+ verifyIterator(t, 7, it, verifyAccount)
+ it.Release()
+
+ // Test after persist some bottom-most layers into the disk,
+ // the functionalities still work.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x04"), 2)
+ verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ verifyIterator(t, 7, it, verifyAccount)
+ it.Release()
+}
+
+func TestStorageIteratorTraversal(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+
+ // Verify the single and multi-layer iterators
+ head := snaps.Snapshot(common.HexToHash("0x04"))
- verifyIterator(t, 7, it)
+ diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 3, diffIter, verifyNothing)
+ verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 6, it, verifyStorage)
+ it.Release()
+
+ // Test after persist some bottom-most layers into the disk,
+ // the functionalities still work.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x04"), 2)
+ verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 6, it, verifyStorage)
+ it.Release()
}
// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
@@ -242,8 +363,6 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
- defer it.Release()
-
head := snaps.Snapshot(common.HexToHash("0x09"))
for it.Next() {
hash := it.Hash()
@@ -255,6 +374,128 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
}
}
+ it.Release()
+
+ // Test that after persisting some bottom-most layers into the disk,
+ // the functionality still works.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x09"), 2)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.AccountRLP(hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected account: %v", err)
+ }
+ if have := it.Account(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+}
+
+func TestStorageIteratorTraversalValues(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ wrapStorage := func(storage map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ return map[common.Hash]map[common.Hash][]byte{
+ common.HexToHash("0xaa"): storage,
+ }
+ }
+ // Create a batch of storage sets to seed subsequent layers with
+ var (
+ a = make(map[common.Hash][]byte)
+ b = make(map[common.Hash][]byte)
+ c = make(map[common.Hash][]byte)
+ d = make(map[common.Hash][]byte)
+ e = make(map[common.Hash][]byte)
+ f = make(map[common.Hash][]byte)
+ g = make(map[common.Hash][]byte)
+ h = make(map[common.Hash][]byte)
+ )
+ for i := byte(2); i < 0xff; i++ {
+ a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
+ if i > 20 && i%2 == 0 {
+ b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
+ }
+ if i%4 == 0 {
+ c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
+ }
+ if i%7 == 0 {
+ d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
+ }
+ if i%8 == 0 {
+ e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
+ }
+ if i > 50 || i < 85 {
+ f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
+ }
+ if i%64 == 0 {
+ g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
+ }
+ if i%128 == 0 {
+ h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
+ }
+ }
+ // Assemble a stack of snapshots from the account layers
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+ snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+ head := snaps.Snapshot(common.HexToHash("0x09"))
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.Storage(common.HexToHash("0xaa"), hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected storage slot: %v", err)
+ }
+ if have := it.Slot(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+
+ // Test that after persisting some bottom-most layers into the disk,
+ // the functionality still works.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x09"), 2)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.Storage(common.HexToHash("0xaa"), hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected slot: %v", err)
+ }
+ if have := it.Slot(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
}
// This testcase is notorious, all layers contain the exact same 200 accounts.
@@ -285,13 +526,27 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
}
// Iterate the entire stack and ensure everything is hit only once
head := snaps.Snapshot(common.HexToHash("0x80"))
- verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}))
- verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator())
+ verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
+ verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
- defer it.Release()
+ verifyIterator(t, 200, it, verifyAccount)
+ it.Release()
+
+ // Test that after persisting some bottom-most layers into the disk,
+ // the functionality still works.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x80"), 2)
- verifyIterator(t, 200, it)
+ verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
+ verifyIterator(t, 200, it, verifyAccount)
+ it.Release()
}
// TestAccountIteratorFlattening tests what happens when we
@@ -351,40 +606,112 @@ func TestAccountIteratorSeek(t *testing.T) {
snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
randomAccountSet("0xcc", "0xf0", "0xff"), nil)
- // Construct various iterators and ensure their tranversal is correct
+ // Account set is now
+ // 02: aa, ee, f0, ff
+ // 03: aa, bb, dd, ee, f0 (, f0), ff
+ // 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff)
+ // Construct various iterators and ensure their traversal is correct
it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd"))
defer it.Release()
- verifyIterator(t, 3, it) // expected: ee, f0, ff
+ verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"))
defer it.Release()
- verifyIterator(t, 3, it) // expected: ee, f0, ff
+ verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff"))
defer it.Release()
- verifyIterator(t, 0, it) // expected: nothing
+ verifyIterator(t, 1, it, verifyAccount) // expected: ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyAccount) // expected: nothing
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb"))
defer it.Release()
- verifyIterator(t, 5, it) // expected: cc, dd, ee, f0, ff
+ verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef"))
defer it.Release()
- verifyIterator(t, 2, it) // expected: f0, ff
+ verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0"))
defer it.Release()
- verifyIterator(t, 1, it) // expected: ff
+ verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff"))
defer it.Release()
- verifyIterator(t, 0, it) // expected: nothing
+ verifyIterator(t, 1, it, verifyAccount) // expected: ff
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyAccount) // expected: nothing
+}
+
+func TestStorageIteratorSeek(t *testing.T) {
+ // Create a snapshot stack with some initial data
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
+
+ // Account set is now
+ // 02: 01, 03, 05
+ // 03: 01, 02, 03, 05 (, 05), 06
+ // 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
+ // Construct various iterators and ensure their traversal is correct
+ it, _ := snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
+ defer it.Release()
+ verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyStorage) // expected: 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyStorage) // expected: nothing
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+ defer it.Release()
+ verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyStorage) // expected: 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyStorage) // expected: nothing
}
-// TestIteratorDeletions tests that the iterator behaves correct when there are
+// TestAccountIteratorDeletions tests that the iterator behaves correctly when there are
// deleted accounts (where the Account() value is nil). The iterator
// should not output any accounts or nil-values for those cases.
-func TestIteratorDeletions(t *testing.T) {
+func TestAccountIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(),
@@ -402,7 +729,7 @@ func TestIteratorDeletions(t *testing.T) {
deleted := common.HexToHash("0x22")
destructed := map[common.Hash]struct{}{
- deleted: struct{}{},
+ deleted: {},
}
snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"),
destructed, randomAccountSet("0x11", "0x33"), nil)
@@ -413,7 +740,7 @@ func TestIteratorDeletions(t *testing.T) {
// The output should be 11,33,44,55
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
// Do a quick check
- verifyIterator(t, 4, it)
+ verifyIterator(t, 4, it, verifyAccount)
it.Release()
// And a more detailed verification that we indeed do not see '0x22'
@@ -430,6 +757,63 @@ func TestIteratorDeletions(t *testing.T) {
}
}
+func TestStorageIteratorDeletions(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
+
+ // The output should be 02,04,05,06
+ it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 4, it, verifyStorage)
+ it.Release()
+
+ // The output should be 04,05,06
+ it, _ = snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
+ verifyIterator(t, 3, it, verifyStorage)
+ it.Release()
+
+ // Destruct the whole storage
+ destructed := map[common.Hash]struct{}{
+ common.HexToHash("0xaa"): {},
+ }
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 0, it, verifyStorage)
+ it.Release()
+
+ // Re-insert the slots of the same account
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
+
+ // The output should be 07,08,09
+ it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 3, it, verifyStorage)
+ it.Release()
+
+ // Destruct the whole storage but re-create the account in the same layer
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
+ it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
+ it.Release()
+
+ verifyIterator(t, 2, snaps.Snapshot(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+}
+
// BenchmarkAccountIteratorTraversal is a bit a bit notorious -- all layers contain the
// exact same 200 accounts. That means that we need to process 2000 items, but
// only spit out 200 values eventually.
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 66c7aee0ac..0e73454168 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -158,7 +158,11 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
}
accountData := make(map[common.Hash][]byte)
for _, entry := range accounts {
- accountData[entry.Hash] = entry.Blob
+ if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
+ accountData[entry.Hash] = entry.Blob
+ } else {
+ accountData[entry.Hash] = nil
+ }
}
var storage []journalStorage
if err := r.Decode(&storage); err != nil {
@@ -168,7 +172,11 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
for _, entry := range storage {
slots := make(map[common.Hash][]byte)
for i, key := range entry.Keys {
- slots[key] = entry.Vals[i]
+ if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
+ slots[key] = entry.Vals[i]
+ } else {
+ slots[key] = nil
+ }
}
storageData[entry.Hash] = slots
}
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 27a8c7f0bb..a3d4d21ee8 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -138,6 +138,9 @@ type snapshot interface {
// AccountIterator creates an account iterator over an arbitrary layer.
AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}
// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
@@ -601,3 +604,9 @@ func (t *Tree) Rebuild(root common.Hash) {
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
return newFastAccountIterator(t, root, seek)
}
+
+// StorageIterator creates a new storage iterator for the specified root hash and
+// account. The iterator will be moved to the specific start position.
+func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ return newFastStorageIterator(t, root, account, seek)
+}
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 9109238412..ca4fa0a055 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -60,6 +60,29 @@ func randomAccountSet(hashes ...string) map[common.Hash][]byte {
return accounts
}
+// randomStorageSet generates a set of random slots with the given strings as
+// the slot addresses.
+func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte {
+ storages := make(map[common.Hash]map[common.Hash][]byte)
+ for index, account := range accounts {
+ storages[common.HexToHash(account)] = make(map[common.Hash][]byte)
+
+ if index < len(hashes) {
+ hashes := hashes[index]
+ for _, hash := range hashes {
+ storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes()
+ }
+ }
+ if index < len(nilStorage) {
+ nils := nilStorage[index]
+ for _, hash := range nils {
+ storages[common.HexToHash(account)][common.HexToHash(hash)] = nil
+ }
+ }
+ }
+ return storages
+}
+
// Tests that if a disk layer becomes stale, no active external references will
// be returned with junk data. This version of the test flattens every diff layer
// to check internal corner case around the bottom-most memory accumulator.
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 6797cabb77..cca0c467df 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -474,6 +474,23 @@ func (s *stateObject) Code(db Database) []byte {
return code
}
+// CodeSize returns the size of the contract code associated with this object,
+// or zero if none. This method is almost a mirror of Code, but uses a cache
+// inside the database to avoid loading codes seen recently.
+func (s *stateObject) CodeSize(db Database) int {
+ if s.code != nil {
+ return len(s.code)
+ }
+ if bytes.Equal(s.CodeHash(), emptyCodeHash) {
+ return 0
+ }
+ size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash()))
+ if err != nil {
+ s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err))
+ }
+ return size
+}
+
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
prevcode := s.Code(s.db.db)
s.db.journal.append(codeChange{
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 54298530bf..c25e4023b5 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -359,17 +359,10 @@ func (s *StateDB) GetCode(addr common.Address) []byte {
func (s *StateDB) GetCodeSize(addr common.Address) int {
stateObject := s.getStateObject(addr)
- if stateObject == nil {
- return 0
- }
- if stateObject.code != nil {
- return len(stateObject.code)
- }
- size, err := s.db.ContractCodeSize(stateObject.addrHash, common.BytesToHash(stateObject.CodeHash()))
- if err != nil {
- s.setError(err)
+ if stateObject != nil {
+ return stateObject.CodeSize(s.db)
}
- return size
+ return 0
}
func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
@@ -562,14 +555,16 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
if err != nil {
panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
}
- err = s.trie.TryUpdate(addr[:], data)
- s.setError(err)
+ if err = s.trie.TryUpdate(addr[:], data); err != nil {
+ s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
+ }
+
// If state snapshotting is active, cache the data til commit. Note, this
// update mechanism is not symmetric to the deletion, because whereas it is
// enough to track account updates at commit time, deletions need tracking
// at transaction boundary level to ensure we capture state clearing.
if s.snap != nil {
- s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
}
// Quorum - Privacy Enhancements - update the privacy metadata trie in case the privacy metadata is dirty
@@ -601,9 +596,8 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
}
// Delete the account from the trie
addr := obj.Address()
- err := s.trie.TryDelete(addr[:])
- if err != nil {
- s.setError(err)
+ if err := s.trie.TryDelete(addr[:]); err != nil {
+ s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
return
}
s.setError(s.accountExtraDataTrie.TryDelete(addr[:]))
@@ -658,8 +652,11 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
}
enc, err := s.trie.TryGet(addr[:])
+ if err != nil {
+ s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr[:], err))
+ return nil
+ }
if len(enc) == 0 {
- s.setError(err)
return nil
}
if err := rlp.DecodeBytes(enc, &data); err != nil {
@@ -930,6 +927,9 @@ func (s *StateDB) clearJournalAndRefund() {
// Quorum:
// - linking state root and the AccountExtraData root
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
+ if s.dbErr != nil {
+ return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
+ }
// Finalize any pending changes and merge everything into the tries
s.IntermediateRoot(deleteEmptyObjects)
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 6b7f0428d2..be2304a5f7 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -733,6 +733,53 @@ func TestDeleteCreateRevert(t *testing.T) {
}
}
+// TestMissingTrieNodes tests that if the statedb fails to load parts of the trie,
+// the Commit operation fails with an error.
+// If we are missing trie nodes, we should not continue writing to the trie.
+func TestMissingTrieNodes(t *testing.T) {
+
+ // Create an initial state with a few accounts
+ memDb := rawdb.NewMemoryDatabase()
+ db := NewDatabase(memDb)
+ var root common.Hash
+ state, _ := New(common.Hash{}, db, nil)
+ addr := toAddr([]byte("so"))
+ {
+ state.SetBalance(addr, big.NewInt(1))
+ state.SetCode(addr, []byte{1, 2, 3})
+ a2 := toAddr([]byte("another"))
+ state.SetBalance(a2, big.NewInt(100))
+ state.SetCode(a2, []byte{1, 2, 4})
+ root, _ = state.Commit(false)
+ t.Logf("root: %x", root)
+ // force-flush
+ state.Database().TrieDB().Cap(0)
+ }
+ // Create a new state on the old root
+ state, _ = New(root, db, nil)
+ // Now we clear out the memdb
+ it := memDb.NewIterator(nil, nil)
+ for it.Next() {
+ k := it.Key()
+ // Leave the root intact
+ if !bytes.Equal(k, root[:]) {
+ t.Logf("key: %x", k)
+ memDb.Delete(k)
+ }
+ }
+ balance := state.GetBalance(addr)
+ // The removed elem should lead to it returning zero balance
+ if exp, got := uint64(0), balance.Uint64(); got != exp {
+ t.Errorf("expected %d, got %d", exp, got)
+ }
+ // Modify the state
+ state.SetBalance(addr, big.NewInt(2))
+ root, err := state.Commit(false)
+ if err == nil {
+ t.Fatalf("expected error, got root :%x", root)
+ }
+}
+
// Quorum - NewDual
func TestStorageRootNewDual(t *testing.T) {
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index 31626ce257..8605b38f5e 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -94,6 +94,6 @@ func precacheTransaction(config *params.ChainConfig, bc ChainContext, author *co
vm.SetCurrentTX(tx)
// /Quorum
- _, _, _, err = ApplyMessage(vm, msg, gaspool)
+ _, err = ApplyMessage(vm, msg, gaspool)
return err
}
diff --git a/core/state_processor.go b/core/state_processor.go
index e984a28838..a631b5621b 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -138,7 +138,7 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
vmenv.SetCurrentTX(tx)
// Apply the transaction to the current state (included in the env)
- _, gas, failed, err := ApplyMessage(vmenv, msg, gp)
+ result, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
return nil, nil, err
}
@@ -149,17 +149,17 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
} else {
root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
}
- *usedGas += gas
+ *usedGas += result.UsedGas
// If this is a private transaction, the public receipt should always
// indicate success.
- publicFailed := !(config.IsQuorum && tx.IsPrivate()) && failed
+ publicFailed := !(config.IsQuorum && tx.IsPrivate()) && result.Failed()
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
// based on the eip phase, we're passing wether the root touch-delete accounts.
receipt := types.NewReceipt(root, publicFailed, *usedGas)
receipt.TxHash = tx.Hash()
- receipt.GasUsed = gas
+ receipt.GasUsed = result.UsedGas
// if the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(vmenv.Context.Origin, tx.Nonce())
@@ -179,9 +179,9 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
} else {
privateRoot = privateState.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
}
- privateReceipt = types.NewReceipt(privateRoot, failed, *usedGas)
+ privateReceipt = types.NewReceipt(privateRoot, result.Failed(), *usedGas)
privateReceipt.TxHash = tx.Hash()
- privateReceipt.GasUsed = gas
+ privateReceipt.GasUsed = result.UsedGas
if msg.To() == nil {
privateReceipt.ContractAddress = crypto.CreateAddress(vmenv.Context.Origin, tx.Nonce())
}
diff --git a/core/state_transition.go b/core/state_transition.go
index 6f3f7be329..eebedc2cc7 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -32,10 +32,6 @@ import (
"github.com/ethereum/go-ethereum/private"
)
-var (
- errInsufficientBalanceForGas = errors.New("insufficient balance to pay for gas")
-)
-
/*
The State Transitioning Model
@@ -68,7 +64,6 @@ type StateTransition struct {
// Message represents a message sent to a contract.
type Message interface {
From() common.Address
- //FromFrontier() (common.Address, error)
To() *common.Address
GasPrice() *big.Int
@@ -80,6 +75,41 @@ type Message interface {
Data() []byte
}
+// ExecutionResult includes all output after executing given evm
+// message no matter the execution itself is successful or not.
+type ExecutionResult struct {
+ UsedGas uint64 // Total used gas but include the refunded gas
+ Err error // Any error encountered during the execution(listed in core/vm/errors.go)
+ ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode)
+}
+
+// Unwrap returns the internal evm error which allows us for further
+// analysis outside.
+func (result *ExecutionResult) Unwrap() error {
+ return result.Err
+}
+
+// Failed returns the indicator whether the execution is successful or not
+func (result *ExecutionResult) Failed() bool { return result.Err != nil }
+
+// Return is a helper function to help caller distinguish between revert reason
+// and function return. Return returns the data after execution if no error occurs.
+func (result *ExecutionResult) Return() []byte {
+ if result.Err != nil {
+ return nil
+ }
+ return common.CopyBytes(result.ReturnData)
+}
+
+// Revert returns the concrete revert reason if the execution is aborted by `REVERT`
+// opcode. Note the reason can be nil if no data supplied with revert opcode.
+func (result *ExecutionResult) Revert() []byte {
+ if result.Err != vm.ErrExecutionReverted {
+ return nil
+ }
+ return common.CopyBytes(result.ReturnData)
+}
+
// PrivateMessage implements a private message
type PrivateMessage interface {
Message
@@ -110,13 +140,13 @@ func IntrinsicGas(data []byte, contractCreation, isHomestead bool, isEIP2028 boo
nonZeroGas = params.TxDataNonZeroGasEIP2028
}
if (math.MaxUint64-gas)/nonZeroGas < nz {
- return 0, vm.ErrOutOfGas
+ return 0, ErrGasUintOverflow
}
gas += nz * nonZeroGas
z := uint64(len(data)) - nz
if (math.MaxUint64-gas)/params.TxDataZeroGas < z {
- return 0, vm.ErrOutOfGas
+ return 0, ErrGasUintOverflow
}
gas += z * params.TxDataZeroGas
}
@@ -144,7 +174,7 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition
// indicates a core error meaning that the message would always fail for that particular
// state and would never be accepted within a block.
-func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, uint64, bool, error) {
+func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) (*ExecutionResult, error) {
return NewStateTransition(evm, msg, gp).TransitionDb()
}
@@ -156,19 +186,10 @@ func (st *StateTransition) to() common.Address {
return *st.msg.To()
}
-func (st *StateTransition) useGas(amount uint64) error {
- if st.gas < amount {
- return vm.ErrOutOfGas
- }
- st.gas -= amount
-
- return nil
-}
-
func (st *StateTransition) buyGas() error {
mgval := new(big.Int).Mul(new(big.Int).SetUint64(st.msg.Gas()), st.gasPrice)
if st.state.GetBalance(st.msg.From()).Cmp(mgval) < 0 {
- return errInsufficientBalanceForGas
+ return ErrInsufficientFunds
}
if err := st.gp.SubGas(st.msg.Gas()); err != nil {
return err
@@ -194,8 +215,18 @@ func (st *StateTransition) preCheck() error {
}
// TransitionDb will transition the state by applying the current message and
-// returning the result including the used gas. It returns an error if failed.
-// An error indicates a consensus issue.
+// returning the evm execution result with following fields.
+//
+// - used gas:
+// total gas used (including gas being refunded)
+// - returndata:
+// the returned data from evm
+// - concrete execution error:
+// various **EVM** error which aborts the execution,
+// e.g. ErrOutOfGas, ErrExecutionReverted
+//
+// However if any consensus issue encountered, return the error directly with
+// nil evm execution result.
//
// Quorum:
// 1. Intrinsic gas is calculated based on the encrypted payload hash
@@ -205,9 +236,21 @@ func (st *StateTransition) preCheck() error {
// 3. With multitenancy support, we enforce the party set in the contract index must contain all
// parties from the transaction. This is to detect unauthorized access from a legit proxy contract
// to an unauthorized contract.
-func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bool, err error) {
+func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
+ // First check this message satisfies all consensus rules before
+ // applying the message. The rules include these clauses
+ //
+ // 1. the nonce of the message caller is correct
+ // 2. caller has enough balance to cover transaction fee(gaslimit * gasprice)
+ // 3. the amount of gas required is available in the block
+ // 4. the purchased gas is enough to cover intrinsic usage
+ // 5. there is no overflow when calculating intrinsic gas
+ // 6. caller has enough balance to cover asset transfer for **topmost** call
+
+ // Check clauses 1-3, buy gas if everything is correct
+ var err error
if err = st.preCheck(); err != nil {
- return
+ return nil, err
}
msg := st.msg
sender := vm.AccountRef(msg.From())
@@ -233,15 +276,23 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
if err != nil || !contractCreation {
publicState.SetNonce(sender.Address(), publicState.GetNonce(sender.Address())+1)
}
-
if err != nil {
- return nil, 0, false, nil
+ return &ExecutionResult{
+ UsedGas: 0,
+ Err: nil,
+ ReturnData: nil,
+ }, nil
}
pmh.hasPrivatePayload = data != nil
- if ok, err := pmh.prepare(); !ok {
- return nil, 0, true, err
+ vmErr, consensusErr := pmh.prepare()
+ if consensusErr != nil || vmErr != nil {
+ return &ExecutionResult{
+ UsedGas: 0,
+ Err: vmErr,
+ ReturnData: nil,
+ }, consensusErr
}
} else {
data = st.data
@@ -250,17 +301,24 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
// Pay intrinsic gas. For a private contract this is done using the public hash passed in,
// not the private data retrieved above. This is because we need any (participant) validator
// node to get the same result as a (non-participant) minter node, to avoid out-of-gas issues.
+ // Check clauses 4-5, subtract intrinsic gas if everything is correct
gas, err := IntrinsicGas(st.data, contractCreation, homestead, istanbul)
if err != nil {
- return nil, 0, false, err
+ return nil, err
}
- if err = st.useGas(gas); err != nil {
- return nil, 0, false, err
+ if st.gas < gas {
+ return nil, ErrIntrinsicGas
}
+ st.gas -= gas
+ // Check clause 6
+ if msg.Value().Sign() > 0 && !st.evm.CanTransfer(st.state, msg.From(), msg.Value()) {
+ return nil, ErrInsufficientFundsForTransfer
+ }
var (
leftoverGas uint64
evm = st.evm
+ ret []byte
// vm errors do not effect consensus and are therefor
// not assigned to err, except for insufficient balance
// error.
@@ -285,7 +343,11 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
if len(data) == 0 && isPrivate {
st.refundGas()
st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice))
- return nil, 0, false, nil
+ return &ExecutionResult{
+ UsedGas: 0,
+ Err: nil,
+ ReturnData: nil,
+ }, nil
}
ret, leftoverGas, vmerr = evm.Call(sender, to, data, st.gas, st.value)
@@ -296,10 +358,10 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
// sufficient balance to make the transfer happen. The first
// balance transfer may never fail.
if vmerr == vm.ErrInsufficientBalance {
- return nil, 0, false, vmerr
+ return nil, vmerr
}
if errors.Is(vmerr, multitenancy.ErrNotAuthorized) {
- return nil, 0, false, vmerr
+ return nil, vmerr
}
}
@@ -309,18 +371,23 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
var exitEarly bool
exitEarly, err = pmh.verify(vmerr)
if exitEarly {
- return nil, 0, true, err
+ return &ExecutionResult{
+ UsedGas: 0,
+ Err: ErrPrivateContractInteractionVerificationFailed,
+ ReturnData: nil,
+ }, err
}
}
// End Quorum - Privacy Enhancements
+ // Quorum
// do the affected contract managed party checks
if msg, ok := msg.(PrivateMessage); ok && isQuorum && st.evm.SupportsMultitenancy && msg.IsPrivate() {
if len(managedPartiesInTx) > 0 {
for _, address := range evm.AffectedContracts() {
managedPartiesInContract, err := st.evm.StateDB.GetManagedParties(address)
if err != nil {
- return nil, 0, true, err
+ return nil, err
}
// managed parties for public transactions is empty so nothing to check there
if len(managedPartiesInContract) > 0 {
@@ -329,7 +396,11 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
pmh.eph.TerminalString(), "contractMP", managedPartiesInContract, "txMP", managedPartiesInTx)
st.evm.RevertToSnapshot(snapshot)
// TODO - see whether we can find a way to store this error and make it available via customizations to getTransactionReceipt
- return nil, 0, true, nil
+ return &ExecutionResult{
+ UsedGas: 0,
+ Err: ErrContractManagedPartiesCheckFailed,
+ ReturnData: nil,
+ }, nil
}
}
}
@@ -343,10 +414,12 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
if !isPrivate {
st.gas = leftoverGas
}
+ // End Quorum
st.refundGas()
st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice))
+ // Quorum
// for all contracts being created as the result of the transaction execution
// we build the index for them if multitenancy is enabled
if st.evm.SupportsMultitenancy {
@@ -361,9 +434,19 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
}
if isPrivate {
- return ret, 0, vmerr != nil, err
+ return &ExecutionResult{
+ UsedGas: 0,
+ Err: vmerr,
+ ReturnData: ret,
+ }, err
}
- return ret, st.gasUsed(), vmerr != nil, err
+ // End Quorum
+
+ return &ExecutionResult{
+ UsedGas: st.gasUsed(),
+ Err: vmerr,
+ ReturnData: ret,
+ }, nil
}
func (st *StateTransition) refundGas() {
diff --git a/core/state_transition_pmh.go b/core/state_transition_pmh.go
index f97d4d7a3e..d7a0079ba4 100644
--- a/core/state_transition_pmh.go
+++ b/core/state_transition_pmh.go
@@ -38,9 +38,9 @@ func (pmh *privateMessageHandler) mustVerify() bool {
}
// checks the privacy metadata in the state transition context
-// returns false if TransitionDb needs to exit early
-// true otherwise
-func (pmh *privateMessageHandler) prepare() (bool, error) {
+// returns vmError if there is an error in the EVM execution
+// returns consensusErr if there is an error in the consensus execution
+func (pmh *privateMessageHandler) prepare() (vmError, consensusErr error) {
if pmh.receivedPrivacyMetadata != nil {
if !pmh.stAPI.IsPrivacyEnhancementsEnabled() && pmh.receivedPrivacyMetadata.PrivacyFlag.IsNotStandardPrivate() {
// This situation is only possible if the current node has been upgraded (both quorum and tessera) yet the
@@ -50,18 +50,18 @@ func (pmh *privateMessageHandler) prepare() (bool, error) {
// continue to apply new blocks). The resolution should then be to revert to an appropriate block height and
// run geth init with the network agreed privacyEnhancementsBlock.
// The prepare method signature has been changed to allow returning the relevant error.
- return false, fmt.Errorf("Privacy enhanced transaction received while privacy enhancements are disabled."+
+ return ErrPrivacyEnhancedReceivedWhenDisabled, fmt.Errorf("Privacy enhanced transaction received while privacy enhancements are disabled."+
" Please check your node configuration. EPH=%s", pmh.eph.ToBase64())
}
if pmh.receivedPrivacyMetadata.PrivacyFlag == engine.PrivacyFlagStateValidation && common.EmptyHash(pmh.receivedPrivacyMetadata.ACMerkleRoot) {
- log.Error("Privacy metadata has empty MR for stateValidation flag")
- return false, nil
+ log.Error(ErrPrivacyMetadataInvalidMerkleRoot.Error())
+ return ErrPrivacyMetadataInvalidMerkleRoot, nil
}
privMetadata := types.NewTxPrivacyMetadata(pmh.receivedPrivacyMetadata.PrivacyFlag)
pmh.stAPI.SetTxPrivacyMetadata(privMetadata)
}
- return true, nil
+ return nil, nil
}
//If the list of affected CA Transactions by the time evm executes is different from the list of affected contract transactions returned from Tessera
diff --git a/core/state_transition_test.go b/core/state_transition_test.go
index c13c877106..8c8e9a89f2 100644
--- a/core/state_transition_test.go
+++ b/core/state_transition_test.go
@@ -147,10 +147,10 @@ func TestApplyMessage_Private_whenTypicalCreate_Success(t *testing.T) {
PrivacyFlag: engine.PrivacyFlagStandardPrivate,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -173,10 +173,10 @@ func TestApplyMessage_Private_whenCreatePartyProtectionC1_Success(t *testing.T)
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -202,12 +202,12 @@ func TestApplyMessage_Private_whenCreatePartyProtectionC1WithPrivacyEnhancements
evm := newEVM(cfg)
evm.ChainConfig().PrivacyEnhancementsBlock = nil
- _, _, fail, err := ApplyMessage(evm, privateMsg, gp)
+ result, err := ApplyMessage(evm, privateMsg, gp)
assert.Error(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
// check that there is no privacy metadata for the newly created contract
- assert.Len(evm.CreatedContracts(), 0, "no contracts createad")
+ assert.Len(evm.CreatedContracts(), 0, "no contracts created")
mockPM.Verify(assert)
}
@@ -239,10 +239,10 @@ func TestApplyMessage_Private_whenInteractWithPartyProtectionC1_Success(t *testi
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -277,10 +277,10 @@ func TestApplyMessage_Private_whenInteractWithStateValidationC1_Success(t *testi
ACMerkleRoot: mr,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -314,10 +314,10 @@ func TestApplyMessage_Private_whenInteractWithStateValidationC1WithEmptyMRFromTe
ACMerkleRoot: common.Hash{},
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -351,10 +351,10 @@ func TestApplyMessage_Private_whenInteractWithStateValidationC1WithWrongMRFromTe
ACMerkleRoot: common.Hash{123},
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -387,10 +387,10 @@ func TestApplyMessage_Private_whenNonPartyTriesInteractingWithPartyProtectionC1_
PrivacyFlag: engine.PrivacyFlagStandardPrivate,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -421,10 +421,10 @@ func TestApplyMessage_Private_whenNonPartyTriesInteractingWithPartyProtectionC1_
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -464,10 +464,10 @@ func TestApplyMessage_Private_whenPartyProtectionC2InteractsExistingStandardPriv
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -506,10 +506,10 @@ func TestApplyMessage_Private_whenPartyProtectionC2InteractsNewStandardPrivateC1
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -549,10 +549,10 @@ func TestApplyMessage_Private_whenPartyProtectionC2InteractsWithPartyProtectionC
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -593,10 +593,10 @@ func TestApplyMessage_Private_whenPartyProtectionC2AndC1ButMissingC1CreationInTe
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -643,11 +643,11 @@ func TestApplyMessage_Private_whenPartyProtectionC2AndC1AndC0ButMissingC0InState
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
// after ACOTH check updates this is a successful scenario
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -695,10 +695,10 @@ func TestApplyMessage_Private_whenStateValidationC2InteractsWithStateValidationC
ACMerkleRoot: mr,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -742,10 +742,10 @@ func TestApplyMessage_Private_whenStateValidationC2InteractsWithPartyProtectionC
ACMerkleRoot: mr,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -780,10 +780,10 @@ func TestApplyMessage_Private_whenStandardPrivateC2InteractsWithPublicC1_Fail(t
PrivacyFlag: engine.PrivacyFlagStandardPrivate,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -819,10 +819,10 @@ func TestApplyMessage_Private_whenPartyProtectionC2InteractsWithPublicC1_Fail(t
PrivacyFlag: engine.PrivacyFlagPartyProtection,
}, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "EVM execution")
- assert.True(fail, "Transaction receipt status")
+ assert.True(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -843,10 +843,10 @@ func TestApplyMessage_Private_whenTxManagerReturnsError_Success(t *testing.T) {
//since standard private create only get back PrivacyFlag
mockPM.When("Receive").Return(nil, nil, fmt.Errorf("Error during receive"))
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -867,10 +867,10 @@ func TestApplyMessage_Private_whenTxManagerReturnsEmptyResult_Success(t *testing
//since standard private create only get back PrivacyFlag
mockPM.When("Receive").Return(nil, nil, nil)
- _, _, fail, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
+ result, err := ApplyMessage(newEVM(cfg), privateMsg, gp)
assert.NoError(err, "EVM execution")
- assert.False(fail, "Transaction receipt status")
+ assert.False(result.Failed(), "Transaction receipt status")
mockPM.Verify(assert)
}
@@ -888,10 +888,10 @@ func createContract(cfg *config, mockPM *mockPrivateTransactionManager, assert *
mockPM.When("Receive").Return(c.create(args...), metadata, nil)
evm := newEVM(cfg)
- _, _, fail, err := ApplyMessage(evm, privateMsg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(evm, privateMsg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "%s: EVM execution", c.name)
- assert.False(fail, "%s: Transaction receipt status", c.name)
+ assert.False(result.Failed(), "%s: Transaction receipt status", c.name)
mockPM.Verify(assert)
createdContracts := evm.CreatedContracts()
log.Trace("priv statedb", "evmstatedb", evm.StateDB)
@@ -906,9 +906,9 @@ func createPublicContract(cfg *config, assert *testifyassert.Assertions, c *cont
msg := newTypicalPublicMessage(pubcfg)
evm := newEVM(pubcfg)
- _, _, fail, err := ApplyMessage(evm, msg, new(GasPool).AddGas(math.MaxUint64))
+ result, err := ApplyMessage(evm, msg, new(GasPool).AddGas(math.MaxUint64))
assert.NoError(err, "%s: EVM execution", c.name)
- assert.False(fail, "%s: Transaction receipt status", c.name)
+ assert.False(result.Failed(), "%s: Transaction receipt status", c.name)
createdContracts := evm.CreatedContracts()
log.Trace("pub statedb", "evmstatedb", evm.StateDB)
assert.Len(createdContracts, 1, "%s: Number of created contracts", c.name)
@@ -1295,10 +1295,10 @@ func verifyGasPoolCalculation(t *testing.T, pm private.PrivateTransactionManager
testObject := NewStateTransition(evm, msg, gasPool)
- _, _, failed, err := testObject.TransitionDb()
+ result, err := testObject.TransitionDb()
assert.NoError(err)
- assert.False(failed)
+ assert.False(result.Failed())
assert.Equal(new(big.Int).SetUint64(expectedGasPool.Gas()), new(big.Int).SetUint64(gasPool.Gas()), "gas pool must be calculated correctly")
assert.Equal(arbitraryBalance, publicState.GetBalance(evm.Coinbase), "balance must not be changed")
diff --git a/core/tx_pool.go b/core/tx_pool.go
index b8a8d1391a..d49a892f39 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -61,10 +61,6 @@ var (
// ErrInvalidSender is returned if the transaction contains an invalid signature.
ErrInvalidSender = errors.New("invalid sender")
- // ErrNonceTooLow is returned if the nonce of a transaction is lower than the
- // one present in the local chain.
- ErrNonceTooLow = errors.New("nonce too low")
-
// ErrUnderpriced is returned if a transaction's gas price is below the minimum
// configured for the transaction pool.
ErrUnderpriced = errors.New("transaction underpriced")
@@ -73,14 +69,6 @@ var (
// with a different one without the required price bump.
ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
- // ErrInsufficientFunds is returned if the total cost of executing a transaction
- // is higher than the balance of the user's account.
- ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
-
- // ErrIntrinsicGas is returned if the transaction is specified to use less gas
- // than required to start the invocation.
- ErrIntrinsicGas = errors.New("intrinsic gas too low")
-
// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
// maximum allowance of the current block.
ErrGasLimit = errors.New("exceeds block gas limit")
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 5284c1b15d..3c65208ca0 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -128,7 +128,7 @@ func validateTxPoolInternals(pool *TxPool) error {
last = nonce
}
}
- if nonce := pool.Nonce(addr); nonce != last+1 {
+ if nonce := pool.pendingNonces.get(addr); nonce != last+1 {
return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1)
}
}
diff --git a/core/vm/errors.go b/core/vm/errors.go
index 771fde0224..7111603a24 100644
--- a/core/vm/errors.go
+++ b/core/vm/errors.go
@@ -16,16 +16,54 @@
package vm
-import "errors"
+import (
+ "errors"
+ "fmt"
+)
+// List evm execution errors
var (
ErrOutOfGas = errors.New("out of gas")
ErrCodeStoreOutOfGas = errors.New("contract creation code storage out of gas")
ErrDepth = errors.New("max call depth exceeded")
- ErrTraceLimitReached = errors.New("the number of logs reached the specified limit")
ErrInsufficientBalance = errors.New("insufficient balance for transfer")
ErrContractAddressCollision = errors.New("contract address collision")
+ ErrExecutionReverted = errors.New("execution reverted")
+ ErrMaxCodeSizeExceeded = errors.New("max code size exceeded")
+ ErrInvalidJump = errors.New("invalid jump destination")
+ ErrWriteProtection = errors.New("write protection")
+ ErrReturnDataOutOfBounds = errors.New("return data out of bounds")
+ ErrGasUintOverflow = errors.New("gas uint64 overflow")
ErrReadOnlyValueTransfer = errors.New("VM in read-only mode. Value transfer prohibited.")
ErrNoCompatibleInterpreter = errors.New("no compatible interpreter")
)
+
+// ErrStackUnderflow wraps an evm error when the items on the stack are less
+// than the minimal requirement.
+type ErrStackUnderflow struct {
+ stackLen int
+ required int
+}
+
+func (e *ErrStackUnderflow) Error() string {
+ return fmt.Sprintf("stack underflow (%d <=> %d)", e.stackLen, e.required)
+}
+
+// ErrStackOverflow wraps an evm error when the items on the stack exceed
+// the maximum allowance.
+type ErrStackOverflow struct {
+ stackLen int
+ limit int
+}
+
+func (e *ErrStackOverflow) Error() string {
+ return fmt.Sprintf("stack limit reached %d (%d)", e.stackLen, e.limit)
+}
+
+// ErrInvalidOpCode wraps an evm error when an invalid opcode is encountered.
+type ErrInvalidOpCode struct {
+ opcode OpCode
+}
+
+func (e *ErrInvalidOpCode) Error() string { return fmt.Sprintf("invalid opcode: %s", e.opcode) }
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 4c225e6bc8..35f5680a49 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -17,6 +17,7 @@
package vm
import (
+ "errors"
"fmt"
"math/big"
"sync/atomic"
@@ -104,7 +105,7 @@ func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, err
return interpreter.Run(contract, input, readOnly)
}
}
- return nil, ErrNoCompatibleInterpreter
+ return nil, errors.New("no compatible interpreter")
}
// Context provides the EVM with auxiliary information. Once provided
@@ -351,7 +352,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
-
var (
to = AccountRef(addr)
snapshot = evm.StateDB.Snapshot()
@@ -409,7 +409,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
// when we're in homestead this also counts for code storage gas errors.
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
+ if err != ErrExecutionReverted {
contract.UseGas(contract.Gas)
}
}
@@ -436,10 +436,12 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
return nil, gas, ErrDepth
}
// Fail if we're trying to transfer more than the available balance
- if !evm.CanTransfer(evm.StateDB, caller.Address(), value) {
+ // Note: although it's a noop to transfer X ether to the caller itself, if
+ // the caller doesn't have enough balance, it would be an error to allow
+ // over-charging itself. So the check here is necessary.
+ if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
-
var (
snapshot = evm.StateDB.Snapshot()
to = AccountRef(caller.Address())
@@ -452,7 +454,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
ret, err = run(evm, contract, input, false)
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
+ if err != ErrExecutionReverted {
contract.UseGas(contract.Gas)
}
}
@@ -476,12 +478,10 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
-
var (
snapshot = evm.StateDB.Snapshot()
to = AccountRef(caller.Address())
)
-
// Initialise a new contract and make initialise the delegate values
contract := NewContract(caller, to, nil, gas).AsDelegate()
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
@@ -489,7 +489,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
ret, err = run(evm, contract, input, false)
if err != nil {
evm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
+ if err != ErrExecutionReverted {
contract.UseGas(contract.Gas)
}
}
@@ -508,7 +508,6 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
-
var (
to = AccountRef(addr)
stateDb = getDualState(evm, addr)
@@ -531,7 +530,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
ret, err = run(evm, contract, input, true)
if err != nil {
stateDb.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
+ if err != ErrExecutionReverted {
contract.UseGas(contract.Gas)
}
}
@@ -654,13 +653,13 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
// when we're in homestead this also counts for code storage gas errors.
if maxCodeSizeExceeded || (err != nil && (evm.chainRules.IsHomestead || err != ErrCodeStoreOutOfGas)) {
evm.StateDB.RevertToSnapshot(snapshot)
- if err != errExecutionReverted {
+ if err != ErrExecutionReverted {
contract.UseGas(contract.Gas)
}
}
// Assign err if contract code size exceeds the max while the err is still empty.
if maxCodeSizeExceeded && err == nil {
- err = errMaxCodeSizeExceeded
+ err = ErrMaxCodeSizeExceeded
}
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
diff --git a/core/vm/gas.go b/core/vm/gas.go
index 3e3052faa8..59ce3b39f4 100644
--- a/core/vm/gas.go
+++ b/core/vm/gas.go
@@ -46,7 +46,7 @@ func callGas(isEip150 bool, availableGas, base uint64, callCost *big.Int) (uint6
}
}
if !callCost.IsUint64() {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return callCost.Uint64(), nil
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index c5b41df137..4b876a3a0b 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -36,7 +36,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) {
// overflow. The constant 0x1FFFFFFFE0 is the highest number that can be used
// without overflowing the gas calculation.
if newMemSize > 0x1FFFFFFFE0 {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
newMemSizeWords := toWordSize(newMemSize)
newMemSize = newMemSizeWords * 32
@@ -72,15 +72,15 @@ func memoryCopierGas(stackpos int) gasFunc {
// And gas for copying data, charged per word at param.CopyGas
words, overflow := bigUint64(stack.Back(stackpos))
if overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if words, overflow = math.SafeMul(toWordSize(words), params.CopyGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, words); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -222,7 +222,7 @@ func makeGasLog(n uint64) gasFunc {
return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
requestedSize, overflow := bigUint64(stack.Back(1))
if overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
gas, err := memoryGasCost(mem, memorySize)
@@ -231,18 +231,18 @@ func makeGasLog(n uint64) gasFunc {
}
if gas, overflow = math.SafeAdd(gas, params.LogGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, n*params.LogTopicGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
var memorySizeGas uint64
if memorySizeGas, overflow = math.SafeMul(requestedSize, params.LogDataGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, memorySizeGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -255,13 +255,13 @@ func gasSha3(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
}
wordGas, overflow := bigUint64(stack.Back(1))
if overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -289,13 +289,13 @@ func gasCreate2(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memoryS
}
wordGas, overflow := bigUint64(stack.Back(2))
if overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -308,7 +308,7 @@ func gasExpFrontier(evm *EVM, contract *Contract, stack *Stack, mem *Memory, mem
overflow bool
)
if gas, overflow = math.SafeAdd(gas, params.ExpGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -321,7 +321,7 @@ func gasExpEIP158(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memor
overflow bool
)
if gas, overflow = math.SafeAdd(gas, params.ExpGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -348,7 +348,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
@@ -356,7 +356,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
return 0, err
}
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -374,14 +374,14 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory
gas += params.CallValueTransferGas
}
if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0))
if err != nil {
return 0, err
}
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -397,7 +397,7 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
@@ -413,7 +413,7 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo
}
var overflow bool
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
- return 0, errGasUintOverflow
+ return 0, ErrGasUintOverflow
}
return gas, nil
}
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index 7690c62a79..6a273c9584 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -39,8 +39,8 @@ func TestMemoryGasCost(t *testing.T) {
}
for i, tt := range tests {
v, err := memoryGasCost(&Memory{}, tt.size)
- if (err == errGasUintOverflow) != tt.overflow {
- t.Errorf("test %d: overflow mismatch: have %v, want %v", i, err == errGasUintOverflow, tt.overflow)
+ if (err == ErrGasUintOverflow) != tt.overflow {
+ t.Errorf("test %d: overflow mismatch: have %v, want %v", i, err == ErrGasUintOverflow, tt.overflow)
}
if v != tt.cost {
t.Errorf("test %d: gas cost mismatch: have %v, want %v", i, v, tt.cost)
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index d9b01f21a2..76b05e0b1c 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -17,7 +17,6 @@
package vm
import (
- "errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -28,13 +27,8 @@ import (
)
var (
- bigZero = new(big.Int)
- tt255 = math.BigPow(2, 255)
- errWriteProtection = errors.New("evm: write protection")
- errReturnDataOutOfBounds = errors.New("evm: return data out of bounds")
- errExecutionReverted = errors.New("evm: execution reverted")
- errMaxCodeSizeExceeded = errors.New("evm: max code size exceeded")
- errInvalidJump = errors.New("evm: invalid jump destination")
+ bigZero = new(big.Int)
+ tt255 = math.BigPow(2, 255)
)
func opAdd(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) {
@@ -471,7 +465,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *call
defer interpreter.intPool.put(memOffset, dataOffset, length, end)
if !end.IsUint64() || uint64(len(interpreter.returnData)) < end.Uint64() {
- return nil, errReturnDataOutOfBounds
+ return nil, ErrReturnDataOutOfBounds
}
callContext.memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[dataOffset.Uint64():end.Uint64()])
@@ -652,7 +646,7 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]
func opJump(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) {
pos := callContext.stack.pop()
if !callContext.contract.validJumpdest(pos) {
- return nil, errInvalidJump
+ return nil, ErrInvalidJump
}
*pc = pos.Uint64()
@@ -664,7 +658,7 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]b
pos, cond := callContext.stack.pop(), callContext.stack.pop()
if cond.Sign() != 0 {
if !callContext.contract.validJumpdest(pos) {
- return nil, errInvalidJump
+ return nil, ErrInvalidJump
}
*pc = pos.Uint64()
} else {
@@ -721,7 +715,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]
callContext.contract.Gas += returnGas
interpreter.intPool.put(value, offset, size)
- if suberr == errExecutionReverted {
+ if suberr == ErrExecutionReverted {
return res, nil
}
return nil, nil
@@ -749,7 +743,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([
callContext.contract.Gas += returnGas
interpreter.intPool.put(endowment, offset, size, salt)
- if suberr == errExecutionReverted {
+ if suberr == ErrExecutionReverted {
return res, nil
}
return nil, nil
@@ -775,7 +769,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by
} else {
callContext.stack.push(interpreter.intPool.get().SetUint64(1))
}
- if err == nil || err == errExecutionReverted {
+ if err == nil || err == ErrExecutionReverted {
callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
callContext.contract.Gas += returnGas
@@ -804,7 +798,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) (
} else {
callContext.stack.push(interpreter.intPool.get().SetUint64(1))
}
- if err == nil || err == errExecutionReverted {
+ if err == nil || err == ErrExecutionReverted {
callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
callContext.contract.Gas += returnGas
@@ -829,7 +823,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCt
} else {
callContext.stack.push(interpreter.intPool.get().SetUint64(1))
}
- if err == nil || err == errExecutionReverted {
+ if err == nil || err == ErrExecutionReverted {
callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
callContext.contract.Gas += returnGas
@@ -854,7 +848,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx)
} else {
callContext.stack.push(interpreter.intPool.get().SetUint64(1))
}
- if err == nil || err == errExecutionReverted {
+ if err == nil || err == ErrExecutionReverted {
callContext.memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
callContext.contract.Gas += returnGas
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 11cf3c075a..e186cbcd95 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -137,7 +137,7 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation except for
-// errExecutionReverted which means revert-and-keep-gas-left.
+// ErrExecutionReverted which means revert-and-keep-gas-left.
func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
if in.intPool == nil {
in.intPool = poolOfIntPools.get()
@@ -223,13 +223,13 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
op = contract.GetOp(pc)
operation := in.cfg.JumpTable[op]
if !operation.valid {
- return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
+ return nil, &ErrInvalidOpCode{opcode: op}
}
// Validate stack
if sLen := stack.len(); sLen < operation.minStack {
- return nil, fmt.Errorf("stack underflow (%d <=> %d)", sLen, operation.minStack)
+ return nil, &ErrStackUnderflow{stackLen: sLen, required: operation.minStack}
} else if sLen > operation.maxStack {
- return nil, fmt.Errorf("stack limit reached %d (%d)", sLen, operation.maxStack)
+ return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
}
if in.evm.quorumReadOnly && operation.writes {
@@ -246,7 +246,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// account to the others means the state is modified and should also
// return with an error.
if operation.writes || (op == CALL && stack.Back(2).Sign() != 0) {
- return nil, errWriteProtection
+ return nil, ErrWriteProtection
}
}
// Static portion of gas
@@ -263,12 +263,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if operation.memorySize != nil {
memSize, overflow := operation.memorySize(stack)
if overflow {
- return nil, errGasUintOverflow
+ return nil, ErrGasUintOverflow
}
// memory is expanded in words of 32 bytes. Gas
// is also calculated in words.
if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
- return nil, errGasUintOverflow
+ return nil, ErrGasUintOverflow
}
}
// Dynamic portion of gas
@@ -308,7 +308,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
case err != nil:
return nil, err
case operation.reverts:
- return res, errExecutionReverted
+ return res, ErrExecutionReverted
case operation.halts:
return res, nil
case !operation.jumps:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index c1e7c88dbf..23decaa7b9 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -17,8 +17,6 @@
package vm
import (
- "errors"
-
"github.com/ethereum/go-ethereum/params"
)
@@ -29,8 +27,6 @@ type (
memorySizeFunc func(*Stack) (size uint64, overflow bool)
)
-var errGasUintOverflow = errors.New("gas uint64 overflow")
-
type operation struct {
// execute is the operation function
execute executionFunc
diff --git a/core/vm/logger.go b/core/vm/logger.go
index 33c58ac9da..0462d8b4ac 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -18,6 +18,7 @@ package vm
import (
"encoding/hex"
+ "errors"
"fmt"
"io"
"math/big"
@@ -29,6 +30,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
+var errTraceLimitReached = errors.New("the number of logs reached the specified limit")
+
// Storage represents a contract's storage.
type Storage map[common.Hash]common.Hash
@@ -140,7 +143,7 @@ func (l *StructLogger) CaptureStart(from common.Address, to common.Address, crea
func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
// check if already accumulated the specified number of logs
if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) {
- return ErrTraceLimitReached
+ return errTraceLimitReached
}
// initialise new changed values storage container for this contract
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index 71ef0724a4..322e01d17c 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -389,7 +389,7 @@ var opCodeToString = map[OpCode]string{
func (op OpCode) String() string {
str := opCodeToString[op]
if len(str) == 0 {
- return fmt.Sprintf("Missing opcode 0x%x", int(op))
+ return fmt.Sprintf("opcode 0x%x not defined", int(op))
}
return str
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 6323edc497..32a47296ad 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -530,7 +530,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
vmenv.SetCurrentTX(tx)
// /Quorum
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
failed = err
break
}
@@ -631,7 +631,7 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
vmenv.SetCurrentTX(tx)
// /Quorum
- _, _, _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+ _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
if writer != nil {
writer.Flush()
}
@@ -816,7 +816,7 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, t
vmenv.SetCurrentTX(tx)
// /Quorum
- ret, gas, failed, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
+ result, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
if err != nil {
return nil, fmt.Errorf("tracing failed: %v", err)
}
@@ -824,9 +824,9 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, t
switch tracer := tracer.(type) {
case *vm.StructLogger:
return ðapi.ExecutionResult{
- Gas: gas,
- Failed: failed,
- ReturnValue: fmt.Sprintf("%x", ret),
+ Gas: result.UsedGas,
+ Failed: result.Failed(),
+ ReturnValue: fmt.Sprintf("%x", result.Return()),
StructLogs: ethapi.FormatLogs(tracer.StructLogs()),
}, nil
@@ -874,7 +874,7 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, statedb, privateStateDbToUse, api.eth.blockchain.Config(), vm.Config{})
vmenv.SetCurrentTX(tx)
- if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
return nil, vm.Context{}, nil, nil, fmt.Errorf("tx %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
diff --git a/eth/backend.go b/eth/backend.go
index 30df33167a..aefe3ccf1e 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -140,8 +140,12 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
config.Miner.GasPrice = new(big.Int).Set(DefaultConfig.Miner.GasPrice)
}
if config.NoPruning && config.TrieDirtyCache > 0 {
- config.TrieCleanCache += config.TrieDirtyCache * 3 / 5
- config.SnapshotCache += config.TrieDirtyCache * 3 / 5
+ if config.SnapshotCache > 0 {
+ config.TrieCleanCache += config.TrieDirtyCache * 3 / 5
+ config.SnapshotCache += config.TrieDirtyCache * 2 / 5
+ } else {
+ config.TrieCleanCache += config.TrieDirtyCache
+ }
config.TrieDirtyCache = 0
}
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
@@ -151,7 +155,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul, config.OverrideMuirGlacier)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
@@ -241,7 +245,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if config.EnableMultitenancy {
newBlockChainFunc = core.NewMultitenantBlockChain
}
- eth.blockchain, err = newBlockChainFunc(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve)
+ eth.blockchain, err = newBlockChainFunc(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
if err != nil {
return nil, err
}
diff --git a/eth/config.go b/eth/config.go
index 488e552267..696716a01e 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -108,6 +108,8 @@ type Config struct {
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
+ TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
+
// Whitelist of required block number -> hash values to accept
Whitelist map[uint64]common.Hash `toml:"-"`
@@ -171,14 +173,9 @@ type Config struct {
// CheckpointOracle is the configuration for checkpoint oracle.
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
- // Istanbul block override (TODO: remove after the fork)
- OverrideIstanbul *big.Int
-
// timeout value for call
EVMCallTimeOut time.Duration
- // MuirGlacier block override (TODO: remove after the fork)
- OverrideMuirGlacier *big.Int `toml:",omitempty"`
-
+ // Quorum
EnableMultitenancy bool
}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index a78d132870..09cbbaf86c 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -372,7 +372,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
log.Info("Block synchronisation started")
}
// If we are already full syncing, but have a fast-sync bloom filter laying
- // around, make sure it does't use memory any more. This is a special case
+ // around, make sure it doesn't use memory any more. This is a special case
// when the user attempts to fast sync a new empty network.
if mode == FullSync && d.stateBloom != nil {
d.stateBloom.Close()
@@ -571,6 +571,8 @@ func (d *Downloader) spawnSync(fetchers []func() error) error {
func (d *Downloader) cancel() {
// Close the current cancel channel
d.cancelLock.Lock()
+ defer d.cancelLock.Unlock()
+
if d.cancelCh != nil {
select {
case <-d.cancelCh:
@@ -579,7 +581,6 @@ func (d *Downloader) cancel() {
close(d.cancelCh)
}
}
- d.cancelLock.Unlock()
}
// Cancel aborts all of the operations and waits for all download goroutines to
@@ -678,7 +679,7 @@ func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, ui
requestHead = 0
}
// requestBottom is the lowest block we want included in the query
- // Ideally, we want to include just below own head
+ // Ideally, we want to include the one just below our own head
requestBottom := int(localHeight - 1)
if requestBottom < 0 {
requestBottom = 0
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 38ffd42e78..f8e28b21a9 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -1130,7 +1130,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
chain := testChainBase.shorten(1)
for i, tt := range tests {
- // Register a new peer and ensure it's presence
+ // Register a new peer and ensure its presence
id := fmt.Sprintf("test %d", i)
if err := tester.newPeer(id, protocol, chain); err != nil {
t.Fatalf("test %d: failed to register new peer: %v", i, err)
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 1ec432faff..6ce0cbd515 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -234,8 +234,7 @@ func (q *queue) ShouldThrottleReceipts() bool {
}
// resultSlots calculates the number of results slots available for requests
-// whilst adhering to both the item and the memory limit too of the results
-// cache.
+// whilst adhering to both the item and the memory limits of the result cache.
func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int {
// Calculate the maximum length capped by the memory limit
limit := len(q.resultCache)
@@ -348,7 +347,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
}
// Results retrieves and permanently removes a batch of fetch results from
-// the cache. the result slice will be empty if the queue has been closed.
+// the cache. The result slice will be empty if the queue has been closed.
func (q *queue) Results(block bool) []*fetchResult {
q.lock.Lock()
defer q.lock.Unlock()
diff --git a/eth/gen_config.go b/eth/gen_config.go
index 4d6ebeae37..afcf6c19e1 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -25,6 +25,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
DiscoveryURLs []string
NoPruning bool
NoPrefetch bool
+ TxLookupLimit uint64 `toml:",omitempty"`
Whitelist map[uint64]common.Hash `toml:"-"`
LightServ int `toml:",omitempty"`
LightIngress int `toml:",omitempty"`
@@ -62,6 +63,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.DiscoveryURLs = c.DiscoveryURLs
enc.NoPruning = c.NoPruning
enc.NoPrefetch = c.NoPrefetch
+ enc.TxLookupLimit = c.TxLookupLimit
enc.Whitelist = c.Whitelist
enc.LightServ = c.LightServ
enc.LightIngress = c.LightIngress
@@ -89,8 +91,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.RPCGasCap = c.RPCGasCap
enc.Checkpoint = c.Checkpoint
enc.CheckpointOracle = c.CheckpointOracle
- enc.OverrideIstanbul = c.OverrideIstanbul
- enc.OverrideMuirGlacier = c.OverrideMuirGlacier
return &enc, nil
}
@@ -103,6 +103,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
DiscoveryURLs []string
NoPruning *bool
NoPrefetch *bool
+ TxLookupLimit *uint64 `toml:",omitempty"`
Whitelist map[uint64]common.Hash `toml:"-"`
LightServ *int `toml:",omitempty"`
LightIngress *int `toml:",omitempty"`
@@ -155,6 +156,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.NoPrefetch != nil {
c.NoPrefetch = *dec.NoPrefetch
}
+ if dec.TxLookupLimit != nil {
+ c.TxLookupLimit = *dec.TxLookupLimit
+ }
if dec.Whitelist != nil {
c.Whitelist = dec.Whitelist
}
@@ -236,11 +240,5 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.CheckpointOracle != nil {
c.CheckpointOracle = dec.CheckpointOracle
}
- if dec.OverrideIstanbul != nil {
- c.OverrideIstanbul = dec.OverrideIstanbul
- }
- if dec.OverrideMuirGlacier != nil {
- c.OverrideMuirGlacier = dec.OverrideMuirGlacier
- }
return nil
}
diff --git a/eth/handler.go b/eth/handler.go
index 942dcb5417..ba4f754de6 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -75,6 +75,7 @@ type ProtocolManager struct {
txpool txPool
blockchain *core.BlockChain
+ chaindb ethdb.Database
maxPeers int
downloader *downloader.Downloader
@@ -115,6 +116,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
+ chaindb: chaindb,
peers: newPeerSet(),
whitelist: whitelist,
txsyncCh: make(chan *txsync),
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 56c0062f7a..f73acb1980 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -531,7 +531,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
}
}
// Create a checkpoint aware protocol manager
- blockchain, err := core.NewBlockChain(db, nil, config, ethash.NewFaker(), vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(db, nil, config, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
@@ -618,7 +618,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
gspec = &core.Genesis{Config: config}
genesis = gspec.MustCommit(db)
)
- blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
@@ -679,7 +679,7 @@ func TestBroadcastMalformedBlock(t *testing.T) {
gspec = &core.Genesis{Config: config}
genesis = gspec.MustCommit(db)
)
- blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
+ blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index 335cdbfb15..5c0c206134 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -67,7 +67,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
+ blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil)
)
chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil {
@@ -99,7 +99,7 @@ func newTestProtocolManagerConsensus(consensusAlgo string, cliqueConfig *params.
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
+ blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil)
)
chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, nil)
if _, err := blockchain.InsertChain(chain); err != nil {
diff --git a/eth/peer.go b/eth/peer.go
index f434f497bf..6f621a356d 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -741,7 +741,6 @@ func (ps *peerSet) Register(p *peer, protoName string) error {
if p.version >= eth65 && protoName == protocolName {
go p.announceTransactions()
}
-
return nil
}
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 0c11f17d6d..31d8a28763 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -175,8 +175,8 @@ func TestForkIDSplit(t *testing.T) {
genesisNoFork = gspecNoFork.MustCommit(dbNoFork)
genesisProFork = gspecProFork.MustCommit(dbProFork)
- chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil)
- chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil)
+ chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)
+ chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil)
blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)
diff --git a/eth/sync.go b/eth/sync.go
index a29af2a789..a947051345 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -23,6 +23,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log"
@@ -225,7 +226,7 @@ func (cs *chainSyncer) loop() {
case <-cs.pm.quitSync:
if cs.doneCh != nil {
- cs.pm.downloader.Cancel()
+ cs.pm.downloader.Terminate() // Double term is fine, Cancel would block until queue is emptied
<-cs.doneCh
}
return
@@ -299,6 +300,24 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) {
// doSync synchronizes the local blockchain with a remote peer.
func (pm *ProtocolManager) doSync(op *chainSyncOp) error {
+ if op.mode == downloader.FastSync {
+ // Before launching the fast sync, we have to ensure that the user uses
+ // the same txlookup limit.
+ // The main concern here is: during the fast sync Geth won't index the
+ // block(generate tx indices) before the HEAD-limit. But if user changes
+ // the limit in the next fast sync (e.g. the user kills Geth manually and
+ // restarts), then it will be hard for Geth to figure out which is the
+ // oldest block that has been indexed. So, user-experience wise, it is
+ // non-optimal that the user can't change the limit during the fast sync.
+ // If changed, Geth will just blindly use the original one.
+ limit := pm.blockchain.TxLookupLimit()
+ if stored := rawdb.ReadFastTxLookupLimit(pm.chaindb); stored == nil {
+ rawdb.WriteFastTxLookupLimit(pm.chaindb, limit)
+ } else if *stored != limit {
+ pm.blockchain.SetTxLookupLimit(*stored)
+ log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
+ }
+ }
// Run the sync cycle, and disable fast sync if we're past the pivot block
err := pm.downloader.Synchronise(op.peer.id, op.head, op.td, op.mode)
if err != nil {
diff --git a/eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json b/eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
index d1c8ac913c..72152e27e7 100644
--- a/eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
+++ b/eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json
@@ -65,7 +65,7 @@
"value": "0x0"
}
],
- "error": "evm: invalid jump destination",
+ "error": "invalid jump destination",
"from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8",
"gas": "0x435c8",
"gasUsed": "0x435c8",
diff --git a/eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json b/eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json
index edd80e5b84..7627c8c23d 100644
--- a/eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json
+++ b/eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json
@@ -59,7 +59,7 @@
"result": {
"calls": [
{
- "error": "invalid opcode 0xfe",
+ "error": "invalid opcode: opcode 0xfe not defined",
"from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76",
"gas": "0x75fe3",
"gasUsed": "0x75fe3",
diff --git a/eth/tracers/testdata/call_tracer_throw.json b/eth/tracers/testdata/call_tracer_throw.json
index d66f1ffa61..09cf449776 100644
--- a/eth/tracers/testdata/call_tracer_throw.json
+++ b/eth/tracers/testdata/call_tracer_throw.json
@@ -50,7 +50,7 @@
},
"input": "0xf88b8206668504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb8000000000000000000000000000000000000000000000027fad02094277c000029a0692a3b4e7b2842f8dd7832e712c21e09f451f416c8976d5b8d02e8c0c2b4bea9a07645e90fc421b63dd755767fd93d3c03b4ec0c4d8fafa059558d08cf11d59750",
"result": {
- "error": "evm: invalid jump destination",
+ "error": "invalid jump destination",
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
"gas": "0x37b38",
"gasUsed": "0x37b38",
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 13409adf4a..b6664280b4 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -168,7 +168,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
Code: []byte{},
Balance: big.NewInt(500000000000000),
}
- statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New("prestateTracer")
@@ -182,7 +182,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- if _, _, _, err = st.TransitionDb(); err != nil {
+ if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
@@ -242,7 +242,7 @@ func TestCallTracer(t *testing.T) {
GasLimit: uint64(test.Context.GasLimit),
GasPrice: tx.GasPrice(),
}
- statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New("callTracer")
@@ -256,7 +256,7 @@ func TestCallTracer(t *testing.T) {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- if _, _, _, err = st.TransitionDb(); err != nil {
+ if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 0675b7b18c..02301b3c6f 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -154,7 +154,7 @@ func (s *Service) loop() {
txSub := txpool.SubscribeNewTxsEvent(txEventCh)
defer txSub.Unsubscribe()
- // Start a goroutine that exhausts the subsciptions to avoid events piling up
+ // Start a goroutine that exhausts the subscriptions to avoid events piling up
var (
quitCh = make(chan struct{})
headCh = make(chan *types.Block, 1)
@@ -194,16 +194,17 @@ func (s *Service) loop() {
}
close(quitCh)
}()
+
+ // Resolve the URL, defaulting to TLS, but falling back to none too
+ path := fmt.Sprintf("%s/api", s.host)
+ urls := []string{path}
+
+ // url.Parse and url.IsAbs is unsuitable (https://github.com/golang/go/issues/19779)
+ if !strings.Contains(path, "://") {
+ urls = []string{"wss://" + path, "ws://" + path}
+ }
// Loop reporting until termination
for {
- // Resolve the URL, defaulting to TLS, but falling back to none too
- path := fmt.Sprintf("%s/api", s.host)
- urls := []string{path}
-
- // url.Parse and url.IsAbs is unsuitable (https://github.com/golang/go/issues/19779)
- if !strings.Contains(path, "://") {
- urls = []string{"wss://" + path, "ws://" + path}
- }
// Establish a websocket connection to the server on any supported URL
var (
conn *websocket.Conn
@@ -240,11 +241,12 @@ func (s *Service) loop() {
}
// Keep sending status updates until the connection breaks
fullReport := time.NewTicker(15 * time.Second)
- defer fullReport.Stop()
for err == nil {
select {
case <-quitCh:
+ fullReport.Stop()
+ // Make sure the connection is closed
conn.Close()
return
@@ -269,6 +271,7 @@ func (s *Service) loop() {
}
}
}
+ fullReport.Stop()
// Make sure the connection is closed
conn.Close()
}
diff --git a/event/event.go b/event/event.go
index 4232787314..ce1b03d523 100644
--- a/event/event.go
+++ b/event/event.go
@@ -104,6 +104,7 @@ func (mux *TypeMux) Post(ev interface{}) error {
// Stop blocks until all current deliveries have finished.
func (mux *TypeMux) Stop() {
mux.mutex.Lock()
+ defer mux.mutex.Unlock()
for _, subs := range mux.subm {
for _, sub := range subs {
sub.closewait()
@@ -111,11 +112,11 @@ func (mux *TypeMux) Stop() {
}
mux.subm = nil
mux.stopped = true
- mux.mutex.Unlock()
}
func (mux *TypeMux) del(s *TypeMuxSubscription) {
mux.mutex.Lock()
+ defer mux.mutex.Unlock()
for typ, subs := range mux.subm {
if pos := find(subs, s); pos >= 0 {
if len(subs) == 1 {
@@ -125,7 +126,6 @@ func (mux *TypeMux) del(s *TypeMuxSubscription) {
}
}
}
- s.mux.mutex.Unlock()
}
func find(slice []*TypeMuxSubscription, item *TypeMuxSubscription) int {
@@ -196,9 +196,9 @@ func (s *TypeMuxSubscription) closewait() {
s.closed = true
s.postMu.Lock()
+ defer s.postMu.Unlock()
close(s.postC)
s.postC = nil
- s.postMu.Unlock()
}
func (s *TypeMuxSubscription) deliver(event *TypeMuxEvent) {
diff --git a/extension/extensionContracts/contract_extender.go b/extension/extensionContracts/contract_extender.go
index 9404dccd67..56e800b09c 100644
--- a/extension/extensionContracts/contract_extender.go
+++ b/extension/extensionContracts/contract_extender.go
@@ -4,6 +4,7 @@
package extensionContracts
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/go.mod b/go.mod
index fc370f70e7..4b4f7b4da4 100644
--- a/go.mod
+++ b/go.mod
@@ -16,12 +16,11 @@ require (
github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
- github.com/VictoriaMetrics/fastcache v1.5.3
+ github.com/VictoriaMetrics/fastcache v1.5.7
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847
github.com/aws/aws-sdk-go v1.25.48
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6
github.com/cespare/cp v0.1.0
- github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9
github.com/coreos/etcd v3.3.20+incompatible
github.com/coreos/go-semver v0.3.0 // indirect
@@ -51,8 +50,8 @@ require (
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277
github.com/hashicorp/go-hclog v0.13.0
github.com/hashicorp/go-plugin v1.2.2
- github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad
- github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3
+ github.com/hashicorp/golang-lru v0.5.4
+ github.com/huin/goupnp v1.0.0
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
github.com/jpmorganchase/quorum-account-plugin-sdk-go v0.0.0-20200714175524-662195b38a5e
@@ -84,9 +83,9 @@ require (
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4
- golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect
+ golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 // indirect
golang.org/x/sync v0.0.0-20190423024810-112230192c58
- golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
+ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
golang.org/x/text v0.3.2
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
google.golang.org/grpc v1.29.1
diff --git a/go.sum b/go.sum
index 9e15615692..2da5e299a6 100644
--- a/go.sum
+++ b/go.sum
@@ -25,12 +25,10 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/Consensys/etcd v3.3.13-quorum197+incompatible h1:ZBM9sH4QEufgaShSyNNhffuZv6Zhl5kyD2b/NHViByM=
github.com/Consensys/etcd v3.3.13-quorum197+incompatible/go.mod h1:wz4o/jwsTgMkSZUY9DmwVEIL3b2JX3t+tCDdy/J5ilY=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
-github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VictoriaMetrics/fastcache v1.5.3 h1:2odJnXLbFZcoV9KYtQ+7TH1UOq3dn3AssMgieaezkR4=
-github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
+github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw=
+github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -48,8 +46,6 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/Kg3zkn7NxevNzILnZYWDCG7qbA1CJik0=
-github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -134,14 +130,15 @@ github.com/hashicorp/go-hclog v0.13.0 h1:Do32YnDMnq7v7FU50AgH+1ExKCOkl9HBxvSI1JW
github.com/hashicorp/go-hclog v0.13.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-plugin v1.2.2 h1:mgDpq0PkoK5gck2w4ivaMpWRHv/matdOR4xmeScmf/w=
github.com/hashicorp/go-plugin v1.2.2/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0=
-github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad h1:eMxs9EL0PvIGS9TTtxg4R+JxuPGav82J8rA+GFnY7po=
-github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3 h1:DqD8eigqlUm0+znmx7zhL0xvTW3+e1jCekJMfBUADWI=
-github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
+github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
+github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
+github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 h1:FSeK4fZCo8u40n2JMnyAsd6x7+SbvoOMHvQOU/n10P4=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
@@ -235,8 +232,6 @@ github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubr
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9 h1:5Cp3cVwpQP4aCQ6jx6dNLP3IarbYiuStmIzYu+BjQwY=
-github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
@@ -259,7 +254,6 @@ github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -271,14 +265,14 @@ golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U=
+golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -289,8 +283,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index 659006d443..e7b18aafa6 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -833,16 +833,19 @@ func (b *Block) Call(ctx context.Context, args struct {
}
// Quorum - replaced the default 5s time out with the value passed in vm.calltimeout
- result, gas, failed, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, b.backend.CallTimeOut(), b.backend.RPCGasCap())
+ result, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, b.backend.CallTimeOut(), b.backend.RPCGasCap())
+ if err != nil {
+ return nil, err
+ }
status := hexutil.Uint64(1)
- if failed {
+ if result.Failed() {
status = 0
}
return &CallResult{
- data: hexutil.Bytes(result),
- gasUsed: hexutil.Uint64(gas),
+ data: result.Return(),
+ gasUsed: hexutil.Uint64(result.UsedGas),
status: status,
- }, err
+ }, nil
}
func (b *Block) EstimateGas(ctx context.Context, args struct {
@@ -901,16 +904,19 @@ func (p *Pending) Call(ctx context.Context, args struct {
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
// Quorum - replaced the default 5s time out with the value passed in vm.calltimeout
- result, gas, failed, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, p.backend.CallTimeOut(), p.backend.RPCGasCap())
+ result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, p.backend.CallTimeOut(), p.backend.RPCGasCap())
+ if err != nil {
+ return nil, err
+ }
status := hexutil.Uint64(1)
- if failed {
+ if result.Failed() {
status = 0
}
return &CallResult{
- data: hexutil.Bytes(result),
- gasUsed: hexutil.Uint64(gas),
+ data: result.Return(),
+ gasUsed: hexutil.Uint64(result.UsedGas),
status: status,
- }, err
+ }, nil
}
func (p *Pending) EstimateGas(ctx context.Context, args struct {
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 3c85749402..70c31cf0ef 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -60,39 +60,68 @@ var (
Usage: "Enable the pprof HTTP server",
}
pprofPortFlag = cli.IntFlag{
- Name: "pprofport",
+ Name: "pprof.port",
Usage: "pprof HTTP server listening port",
Value: 6060,
}
pprofAddrFlag = cli.StringFlag{
- Name: "pprofaddr",
+ Name: "pprof.addr",
Usage: "pprof HTTP server listening interface",
Value: "127.0.0.1",
}
memprofilerateFlag = cli.IntFlag{
- Name: "memprofilerate",
+ Name: "pprof.memprofilerate",
Usage: "Turn on memory profiling with the given rate",
Value: runtime.MemProfileRate,
}
blockprofilerateFlag = cli.IntFlag{
- Name: "blockprofilerate",
+ Name: "pprof.blockprofilerate",
Usage: "Turn on block profiling with the given rate",
}
cpuprofileFlag = cli.StringFlag{
- Name: "cpuprofile",
+ Name: "pprof.cpuprofile",
Usage: "Write CPU profile to the given file",
}
traceFlag = cli.StringFlag{
Name: "trace",
Usage: "Write execution trace to the given file",
}
+ // (Deprecated April 2020)
+ legacyPprofPortFlag = cli.IntFlag{
+ Name: "pprofport",
+ Usage: "pprof HTTP server listening port (deprecated, use --pprof.port)",
+ Value: 6060,
+ }
+ legacyPprofAddrFlag = cli.StringFlag{
+ Name: "pprofaddr",
+ Usage: "pprof HTTP server listening interface (deprecated, use --pprof.addr)",
+ Value: "127.0.0.1",
+ }
+ legacyMemprofilerateFlag = cli.IntFlag{
+ Name: "memprofilerate",
+ Usage: "Turn on memory profiling with the given rate (deprecated, use --pprof.memprofilerate)",
+ Value: runtime.MemProfileRate,
+ }
+ legacyBlockprofilerateFlag = cli.IntFlag{
+ Name: "blockprofilerate",
+ Usage: "Turn on block profiling with the given rate (deprecated, use --pprof.blockprofilerate)",
+ }
+ legacyCpuprofileFlag = cli.StringFlag{
+ Name: "cpuprofile",
+ Usage: "Write CPU profile to the given file (deprecated, use --pprof.cpuprofile)",
+ }
)
// Flags holds all command-line flags required for debugging.
var Flags = []cli.Flag{
verbosityFlag, vmoduleFlag, backtraceAtFlag, debugFlag,
- pprofFlag, pprofAddrFlag, pprofPortFlag,
- memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
+ pprofFlag, pprofAddrFlag, pprofPortFlag, memprofilerateFlag,
+ blockprofilerateFlag, cpuprofileFlag, traceFlag,
+}
+
+var DeprecatedFlags = []cli.Flag{
+ legacyPprofPortFlag, legacyPprofAddrFlag, legacyMemprofilerateFlag,
+ legacyBlockprofilerateFlag, legacyCpuprofileFlag,
}
var (
@@ -121,22 +150,51 @@ func Setup(ctx *cli.Context) error {
log.Root().SetHandler(glogger)
// profiling, tracing
+ if ctx.GlobalIsSet(legacyMemprofilerateFlag.Name) {
+ runtime.MemProfileRate = ctx.GlobalInt(legacyMemprofilerateFlag.Name)
+ log.Warn("The flag --memprofilerate is deprecated and will be removed in the future, please use --pprof.memprofilerate")
+ }
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
+
+ if ctx.GlobalIsSet(legacyBlockprofilerateFlag.Name) {
+ Handler.SetBlockProfileRate(ctx.GlobalInt(legacyBlockprofilerateFlag.Name))
+ log.Warn("The flag --blockprofilerate is deprecated and will be removed in the future, please use --pprof.blockprofilerate")
+ }
Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name))
+
if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
if err := Handler.StartGoTrace(traceFile); err != nil {
return err
}
}
+
if cpuFile := ctx.GlobalString(cpuprofileFlag.Name); cpuFile != "" {
if err := Handler.StartCPUProfile(cpuFile); err != nil {
return err
}
}
+ if cpuFile := ctx.GlobalString(legacyCpuprofileFlag.Name); cpuFile != "" {
+ log.Warn("The flag --cpuprofile is deprecated and will be removed in the future, please use --pprof.cpuprofile")
+ if err := Handler.StartCPUProfile(cpuFile); err != nil {
+ return err
+ }
+ }
// pprof server
if ctx.GlobalBool(pprofFlag.Name) {
- address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
+ listenHost := ctx.GlobalString(pprofAddrFlag.Name)
+ if ctx.GlobalIsSet(legacyPprofAddrFlag.Name) && !ctx.GlobalIsSet(pprofAddrFlag.Name) {
+ listenHost = ctx.GlobalString(legacyPprofAddrFlag.Name)
+ log.Warn("The flag --pprofaddr is deprecated and will be removed in the future, please use --pprof.addr")
+ }
+
+ port := ctx.GlobalInt(pprofPortFlag.Name)
+ if ctx.GlobalIsSet(legacyPprofPortFlag.Name) && !ctx.GlobalIsSet(pprofPortFlag.Name) {
+ port = ctx.GlobalInt(legacyPprofPortFlag.Name)
+ log.Warn("The flag --pprofport is deprecated and will be removed in the future, please use --pprof.port")
+ }
+
+ address := fmt.Sprintf("%s:%d", listenHost, port)
StartPProf(address)
}
return nil
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 24204c0ea2..356442aea7 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -31,6 +31,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/accounts"
+ "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/pluggable"
"github.com/ethereum/go-ethereum/accounts/scwallet"
@@ -894,14 +895,13 @@ type account struct {
// Quorum - Multitenancy
// Before returning the result, we need to inspect the EVM and
// perform verification check
-func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
+func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) (*core.ExecutionResult, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
- return nil, 0, false, err
+ return nil, err
}
-
// Override the fields of specified contracts before execution.
for addr, account := range overrides {
// Override account nonce.
@@ -917,7 +917,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
state.SetBalance(addr, (*big.Int)(*account.Balance))
}
if account.State != nil && account.StateDiff != nil {
- return nil, 0, false, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
+ return nil, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
}
// Replace entire state if caller requires.
if account.State != nil {
@@ -930,7 +930,6 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
}
}
}
-
// Setup context so it may be cancelled the call has completed
// or, in case of unmetered gas, setup a context with a timeout.
var cancel context.CancelFunc
@@ -977,7 +976,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
// Get a new instance of the EVM.
evm, vmError, err := b.GetEVM(enrichedCtx, msg, state, header)
if err != nil {
- return nil, 0, false, err
+ return nil, err
}
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
@@ -989,15 +988,15 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
- res, gas, failed, applyErr := core.ApplyMessage(evm, msg, gp)
+ result, applyErr := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
- return nil, 0, false, err
+ return nil, err
}
// If the timer caused an abort, return an appropriate error message
if evm.Cancelled() {
- return nil, 0, false, fmt.Errorf("execution aborted (timeout = %v)", timeout)
+ return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout)
}
- return res, gas, failed, applyErr
+ return result, applyErr
}
// Call executes the given transaction on the state for the given block number.
@@ -1015,9 +1014,28 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOr
accounts = *overrides
}
- result, _, _, err := DoCall(ctx, s.b, args, blockNrOrHash, accounts, vm.Config{}, s.b.CallTimeOut(), s.b.RPCGasCap())
+ result, err := DoCall(ctx, s.b, args, blockNrOrHash, accounts, vm.Config{}, s.b.CallTimeOut(), s.b.RPCGasCap())
+ if err != nil {
+ return nil, err
+ }
+ return result.Return(), nil
+}
- return (hexutil.Bytes)(result), err
+type estimateGasError struct {
+ error string // Concrete error type if it's failed to estimate gas usage
+ vmerr error // Additional field, it's non-nil if the given transaction is invalid
+ revert string // Additional field, it's non-empty if the transaction is reverted and reason is provided
+}
+
+func (e estimateGasError) Error() string {
+ errMsg := e.error
+ if e.vmerr != nil {
+ errMsg += fmt.Sprintf(" (%v)", e.vmerr)
+ }
+ if e.revert != "" {
+ errMsg += fmt.Sprintf(" (%s)", e.revert)
+ }
+ return errMsg
}
func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap *big.Int) (hexutil.Uint64, error) {
@@ -1027,6 +1045,11 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash
hi uint64
cap uint64
)
+ // Use zero address if sender unspecified.
+ if args.From == nil {
+ args.From = new(common.Address)
+ }
+ // Determine the highest gas limit can be used during the estimation.
if args.Gas != nil && uint64(*args.Gas) >= params.TxGas {
hi = uint64(*args.Gas)
} else {
@@ -1037,30 +1060,63 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash
}
hi = block.GasLimit()
}
+ // Recap the highest gas limit with account's available balance.
+ if args.GasPrice != nil && args.GasPrice.ToInt().Uint64() != 0 {
+ state, _, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if err != nil {
+ return 0, err
+ }
+ balance := state.GetBalance(*args.From) // from can't be nil
+ available := new(big.Int).Set(balance)
+ if args.Value != nil {
+ if args.Value.ToInt().Cmp(available) >= 0 {
+ return 0, errors.New("insufficient funds for transfer")
+ }
+ available.Sub(available, args.Value.ToInt())
+ }
+ allowance := new(big.Int).Div(available, args.GasPrice.ToInt())
+ if hi > allowance.Uint64() {
+ transfer := args.Value
+ if transfer == nil {
+ transfer = new(hexutil.Big)
+ }
+ log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
+ "sent", transfer.ToInt(), "gasprice", args.GasPrice.ToInt(), "fundable", allowance)
+ hi = allowance.Uint64()
+ }
+ }
+ // Recap the highest gas allowance with specified gascap.
if gasCap != nil && hi > gasCap.Uint64() {
log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
hi = gasCap.Uint64()
}
cap = hi
- // Use zero address if sender unspecified.
- if args.From == nil {
- args.From = new(common.Address)
- }
// Create a helper to check if a gas allowance results in an executable transaction
- executable := func(gas uint64) bool {
+ executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
args.Gas = (*hexutil.Uint64)(&gas)
- _, _, failed, err := DoCall(ctx, b, args, blockNrOrHash, nil, vm.Config{}, 0, gasCap)
- if err != nil || failed {
- return false
+ result, err := DoCall(ctx, b, args, blockNrOrHash, nil, vm.Config{}, 0, gasCap)
+ if err != nil {
+ if err == core.ErrIntrinsicGas {
+ return true, nil, nil // Special case, raise gas limit
+ }
+ return true, nil, err // Bail out
}
- return true
+ return result.Failed(), result, nil
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
- if !executable(mid) {
+ failed, _, err := executable(mid)
+
+ // If the error is not nil (consensus error), it means the provided message
+ // call or transaction will never be accepted no matter how much gas it is
+ // assigned. Return the error directly, don't struggle any more.
+ if err != nil {
+ return 0, err
+ }
+ if failed {
lo = mid
} else {
hi = mid
@@ -1068,8 +1124,29 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
- if !executable(hi) {
- return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap)
+ failed, result, err := executable(hi)
+ if err != nil {
+ return 0, err
+ }
+ if failed {
+ if result != nil && result.Err != vm.ErrOutOfGas {
+ var revert string
+ if len(result.Revert()) > 0 {
+ ret, err := abi.UnpackRevert(result.Revert())
+ if err != nil {
+ revert = hexutil.Encode(result.Revert())
+ } else {
+ revert = ret
+ }
+ }
+ return 0, estimateGasError{
+ error: "always failing transaction",
+ vmerr: result.Err,
+ revert: revert,
+ }
+ }
+ // Otherwise, the specified gas cap is too low
+ return 0, estimateGasError{error: fmt.Sprintf("gas required exceeds allowance (%d)", cap)}
}
}
diff --git a/les/client.go b/les/client.go
index 34d69942be..25910b3455 100644
--- a/les/client.go
+++ b/les/client.go
@@ -93,8 +93,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis,
- config.OverrideIstanbul, config.OverrideMuirGlacier)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 146da2213c..c4b38060cc 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -224,7 +224,7 @@ func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
// Verify the proof and store if checks out
nodeSet := proofs.NodeSet()
reads := &readTraceDB{db: nodeSet}
- if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
+ if _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
// check if all nodes have been read by VerifyProof
@@ -378,7 +378,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
reads := &readTraceDB{db: nodeSet}
- value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
+ value, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
if err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
@@ -470,7 +470,7 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
for i, idx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], idx)
- value, _, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
+ value, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
if err != nil {
return err
}
diff --git a/les/odr_test.go b/les/odr_test.go
index a6f02eae8a..16a728f900 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -135,8 +135,8 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai
//vmenv := core.NewEnv(statedb, config, bc, msg, header, vm.Config{})
gp := new(core.GasPool).AddGas(math.MaxUint64)
- ret, _, _, _ := core.ApplyMessage(vmenv, msg, gp)
- res = append(res, ret...)
+ result, _ := core.ApplyMessage(vmenv, msg, gp)
+ res = append(res, result.Return()...)
}
} else {
header := lc.GetHeaderByHash(bhash)
@@ -146,9 +146,9 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai
context := core.NewEVMContext(msg, header, lc, nil)
vmenv := vm.NewEVM(context, state, state, config, vm.Config{})
gp := new(core.GasPool).AddGas(math.MaxUint64)
- ret, _, _, _ := core.ApplyMessage(vmenv, msg, gp)
+ result, _ := core.ApplyMessage(vmenv, msg, gp)
if state.Error() == nil {
- res = append(res, ret...)
+ res = append(res, result.Return()...)
}
}
}
diff --git a/les/peer.go b/les/peer.go
index d14a5e0d1e..e92b4580d6 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -65,9 +65,6 @@ const (
// handshakeTimeout is the timeout LES handshake will be treated as failed.
handshakeTimeout = 5 * time.Second
-
- // retrySendCachePeriod is the time interval a caching retry is performed.
- retrySendCachePeriod = time.Millisecond * 100
)
const (
@@ -164,24 +161,6 @@ func (p *peerCommons) queueSend(f func()) bool {
return p.sendQueue.Queue(f)
}
-// mustQueueSend starts a for loop and retry the caching if failed.
-// If the stopCh is closed, then it returns.
-func (p *peerCommons) mustQueueSend(f func()) {
- for {
- // Check whether the stopCh is closed.
- select {
- case <-p.closeCh:
- return
- default:
- }
- // If the function is successfully cached, return.
- if p.canQueue() && p.queueSend(f) {
- return
- }
- time.Sleep(retrySendCachePeriod)
- }
-}
-
// String implements fmt.Stringer.
func (p *peerCommons) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id, fmt.Sprintf("les/%d", p.version))
@@ -899,7 +878,7 @@ func (p *clientPeer) updateCapacity(cap uint64) {
var kvList keyValueList
kvList = kvList.add("flowControl/MRR", cap)
kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
- p.mustQueueSend(func() { p.sendAnnounce(announceData{Update: kvList}) })
+ p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) })
}
// freezeClient temporarily puts the client in a frozen state which means all
diff --git a/les/peer_test.go b/les/peer_test.go
index 8309e3557e..6d3c7f9755 100644
--- a/les/peer_test.go
+++ b/les/peer_test.go
@@ -18,6 +18,7 @@ package les
import (
"crypto/rand"
+ "errors"
"math/big"
"reflect"
"sort"
@@ -121,10 +122,10 @@ func TestHandshake(t *testing.T) {
var reqType uint64
err := recv.get("announceType", &reqType)
if err != nil {
- t.Fatal(err)
+ return err
}
if reqType != announceTypeSigned {
- t.Fatal("Expected announceTypeSigned")
+ return errors.New("Expected announceTypeSigned")
}
return nil
})
diff --git a/les/server_handler.go b/les/server_handler.go
index 64098db21f..5e97e4f3d9 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -262,7 +262,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
h.server.clientPool.requestCost(p, realCost)
}
if reply != nil {
- p.mustQueueSend(func() {
+ p.queueSend(func() {
if err := reply.send(bv); err != nil {
select {
case p.errCh <- err:
diff --git a/light/odr_test.go b/light/odr_test.go
index 1d67e5208b..9c34d7fd00 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -198,8 +198,8 @@ func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain
context := core.NewEVMContext(msg, header, chain, nil)
vmenv := vm.NewEVM(context, st, st, config, vm.Config{})
gp := new(core.GasPool).AddGas(math.MaxUint64)
- ret, _, _, _ := core.ApplyMessage(vmenv, msg, gp)
- res = append(res, ret...)
+ result, _ := core.ApplyMessage(vmenv, msg, gp)
+ res = append(res, result.Return()...)
if st.Error() != nil {
return res, st.Error()
}
@@ -257,7 +257,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
t.Fatal(err)
diff --git a/light/trie_test.go b/light/trie_test.go
index 4919f89641..052194b4d8 100644
--- a/light/trie_test.go
+++ b/light/trie_test.go
@@ -40,7 +40,7 @@ func TestNodeIterator(t *testing.T) {
genesis = gspec.MustCommit(fulldb)
)
gspec.MustCommit(lightdb)
- blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), fulldb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/light/txpool_test.go b/light/txpool_test.go
index 0996bd7c9c..39d5afe52f 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -88,7 +88,7 @@ func TestTxPool(t *testing.T) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
+ blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, poolTestBlocks, txPoolTestChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/log/format.go b/log/format.go
index a1b5dac629..b63e60a014 100644
--- a/log/format.go
+++ b/log/format.go
@@ -358,49 +358,19 @@ func formatLogfmtValue(value interface{}, term bool) string {
}
}
-var stringBufPool = sync.Pool{
- New: func() interface{} { return new(bytes.Buffer) },
-}
-
+// escapeString checks if the provided string needs escaping/quoting, and
+// calls strconv.Quote if needed
func escapeString(s string) string {
- needsQuotes := false
- needsEscape := false
+ needsQuoting := false
for _, r := range s {
- if r <= ' ' || r == '=' || r == '"' {
- needsQuotes = true
- }
- if r == '\\' || r == '"' || r == '\n' || r == '\r' || r == '\t' {
- needsEscape = true
+ // We quote everything below " (0x22) and above ~ (0x7E), plus equal-sign
+ if r <= '"' || r > '~' || r == '=' {
+ needsQuoting = true
+ break
}
}
- if !needsEscape && !needsQuotes {
+ if !needsQuoting {
return s
}
- e := stringBufPool.Get().(*bytes.Buffer)
- e.WriteByte('"')
- for _, r := range s {
- switch r {
- case '\\', '"':
- e.WriteByte('\\')
- e.WriteByte(byte(r))
- case '\n':
- e.WriteString("\\n")
- case '\r':
- e.WriteString("\\r")
- case '\t':
- e.WriteString("\\t")
- default:
- e.WriteRune(r)
- }
- }
- e.WriteByte('"')
- var ret string
- if needsQuotes {
- ret = e.String()
- } else {
- ret = string(e.Bytes()[1 : e.Len()-1])
- }
- e.Reset()
- stringBufPool.Put(e)
- return ret
+ return strconv.Quote(s)
}
diff --git a/miner/miner.go b/miner/miner.go
index 1f005562f2..e91aadea40 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -186,6 +186,23 @@ func (miner *Miner) SetEtherbase(addr common.Address) {
miner.worker.setEtherbase(addr)
}
+// EnablePreseal turns on the preseal mining feature. It's enabled by default.
+// Note this function shouldn't be exposed to the API; it's unnecessary for users
+// (miners) to actually know the underlying detail. It's only for outside projects
+// which use this library.
+func (miner *Miner) EnablePreseal() {
+ miner.worker.enablePreseal()
+}
+
+// DisablePreseal turns off the preseal mining feature. It's necessary for some
+// fake consensus engines which can seal blocks instantaneously.
+// Note this function shouldn't be exposed to the API; it's unnecessary for users
+// (miners) to actually know the underlying detail. It's only for outside projects
+// which use this library.
+func (miner *Miner) DisablePreseal() {
+ miner.worker.disablePreseal()
+}
+
// SubscribePendingLogs starts delivering logs from pending transactions
// to the given channel.
func (self *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
diff --git a/miner/worker.go b/miner/worker.go
index ba0d6d1b7c..9470526756 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -178,6 +178,13 @@ type worker struct {
running int32 // The indicator whether the consensus engine is running or not.
newTxs int32 // New arrival transaction count since last sealing work submitting.
+ // noempty is the flag used to control whether the feature of pre-sealing empty
+ // blocks is enabled. The default value is 0 (pre-sealing is enabled by default).
+ // But in some special scenarios the consensus engine will seal blocks instantaneously;
+ // in that case this feature would add empty blocks into the canonical chain
+ // non-stop and no real transaction would ever be included.
+ noempty uint32
+
// External functions
isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner.
@@ -257,6 +264,16 @@ func (w *worker) setRecommitInterval(interval time.Duration) {
w.resubmitIntervalCh <- interval
}
+// disablePreseal disables pre-sealing mining feature
+func (w *worker) disablePreseal() {
+ atomic.StoreUint32(&w.noempty, 1)
+}
+
+// enablePreseal enables pre-sealing mining feature
+func (w *worker) enablePreseal() {
+ atomic.StoreUint32(&w.noempty, 0)
+}
+
// pending returns the pending state and corresponding block.
func (w *worker) pending() (*types.Block, *state.StateDB, *state.StateDB) {
// return a snapshot to avoid contention on currentMu mutex
@@ -499,8 +516,9 @@ func (w *worker) mainLoop() {
w.updateSnapshot()
}
} else {
- // If clique is running in dev mode(period is 0), disable
- // advance sealing here.
+ // Special case: if the consensus engine is 0-period clique (dev mode),
+ // submit mining work here since all empty submissions will be rejected
+ // by clique. Of course the advance sealing (empty submission) is disabled.
if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
w.commitNewWork(nil, true, time.Now().Unix())
}
@@ -990,9 +1008,9 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
commitUncles(w.localUncles)
commitUncles(w.remoteUncles)
- if !noempty {
- // Create an empty block based on temporary copied state for sealing in advance without waiting block
- // execution finished.
+ // Create an empty block based on temporary copied state for
+ // sealing in advance without waiting block execution finished.
+ if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
w.commit(uncles, nil, false, tstart)
}
@@ -1002,8 +1020,10 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
log.Error("Failed to fetch pending transactions", "err", err)
return
}
- // Short circuit if there is no available pending transactions
- if len(pending) == 0 {
+ // Short circuit if there are no available pending transactions.
+ // But if we have already disabled empty precommit, skip this check,
+ // since an empty block is necessary to keep the liveness of the network.
+ if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
w.updateSnapshot()
return
}
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 47a6d1d85b..586d594a00 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -117,7 +117,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
}
genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil, nil)
txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
// Generate a small n-block chain and an uncle block for it
@@ -213,7 +213,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
// This test chain imports the mined blocks.
db2 := rawdb.NewMemoryDatabase()
b.genesis.MustCommit(db2)
- chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil)
+ chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil, nil)
defer chain.Stop()
// Ignore empty commit here for less noise.
diff --git a/mobile/params.go b/mobile/params.go
index 25d3b8565b..43ac004740 100644
--- a/mobile/params.go
+++ b/mobile/params.go
@@ -62,8 +62,8 @@ func GoerliGenesis() string {
// FoundationBootnodes returns the enode URLs of the P2P bootstrap nodes operated
// by the foundation running the V5 discovery protocol.
func FoundationBootnodes() *Enodes {
- nodes := &Enodes{nodes: make([]*discv5.Node, len(params.DiscoveryV5Bootnodes))}
- for i, url := range params.DiscoveryV5Bootnodes {
+ nodes := &Enodes{nodes: make([]*discv5.Node, len(params.MainnetBootnodes))}
+ for i, url := range params.MainnetBootnodes {
nodes.nodes[i] = discv5.MustParseNode(url)
}
return nodes
diff --git a/node/endpoints.go b/node/endpoints.go
index 48a7b6ad60..50364fb5f3 100644
--- a/node/endpoints.go
+++ b/node/endpoints.go
@@ -30,7 +30,7 @@ import (
)
// StartHTTPEndpoint starts the HTTP RPC endpoint.
-func StartHTTPEndpoint(endpoint string, timeouts rpc.HTTPTimeouts, handler http.Handler, tlsConfigSource security.TLSConfigurationSource) (net.Listener, bool, error) {
+func StartHTTPEndpoint(endpoint string, timeouts rpc.HTTPTimeouts, handler http.Handler, tlsConfigSource security.TLSConfigurationSource) (*http.Server, net.Addr, bool, error) {
// start the HTTP listener
var (
listener net.Listener
@@ -38,7 +38,7 @@ func StartHTTPEndpoint(endpoint string, timeouts rpc.HTTPTimeouts, handler http.
isTlsEnabled bool
)
if isTlsEnabled, listener, err = startListener(endpoint, tlsConfigSource); err != nil {
- return nil, isTlsEnabled, err
+ return nil, nil, isTlsEnabled, err
}
// make sure timeout values are meaningful
CheckTimeouts(&timeouts)
@@ -54,11 +54,11 @@ func StartHTTPEndpoint(endpoint string, timeouts rpc.HTTPTimeouts, handler http.
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
}
go httpSrv.Serve(listener)
- return listener, isTlsEnabled, err
+ return httpSrv, listener.Addr(), isTlsEnabled, err
}
// startWSEndpoint starts a websocket endpoint.
-func startWSEndpoint(endpoint string, handler http.Handler, tlsConfigSource security.TLSConfigurationSource) (net.Listener, bool, error) {
+func startWSEndpoint(endpoint string, handler http.Handler, tlsConfigSource security.TLSConfigurationSource) (*http.Server, net.Addr, bool, error) {
// start the HTTP listener
var (
listener net.Listener
@@ -66,11 +66,11 @@ func startWSEndpoint(endpoint string, handler http.Handler, tlsConfigSource secu
isTlsEnabled bool
)
if isTlsEnabled, listener, err = startListener(endpoint, tlsConfigSource); err != nil {
- return nil, isTlsEnabled, err
+ return nil, nil, isTlsEnabled, err
}
wsSrv := &http.Server{Handler: handler}
go wsSrv.Serve(listener)
- return listener, isTlsEnabled, err
+ return wsSrv, listener.Addr(), isTlsEnabled, err
}
// checkModuleAvailability checks that all names given in modules are actually
diff --git a/node/node.go b/node/node.go
index 97f4acabaf..dd1a7d1e87 100644
--- a/node/node.go
+++ b/node/node.go
@@ -17,10 +17,12 @@
package node
import (
+ "context"
"crypto/ecdsa"
"errors"
"fmt"
"net"
+ "net/http"
"os"
"path/filepath"
"reflect"
@@ -62,16 +64,18 @@ type Node struct {
ipcListener net.Listener // IPC RPC listener socket to serve API requests
ipcHandler *rpc.Server // IPC RPC request handler to process the API requests
- isHttps bool
- httpEndpoint string // HTTP endpoint (interface + port) to listen at (empty = HTTP disabled)
- httpWhitelist []string // HTTP RPC modules to allow through this endpoint
- httpListener net.Listener // HTTP RPC listener socket to server API requests
- httpHandler *rpc.Server // HTTP RPC request handler to process the API requests
+ isHttps bool
+ httpEndpoint string // HTTP endpoint (interface + port) to listen at (empty = HTTP disabled)
+ httpWhitelist []string // HTTP RPC modules to allow through this endpoint
+ httpListenerAddr net.Addr // Address of HTTP RPC listener socket serving API requests
+ httpServer *http.Server // HTTP RPC HTTP server
+ httpHandler *rpc.Server // HTTP RPC request handler to process the API requests
- isWss bool
- wsEndpoint string // Websocket endpoint (interface + port) to listen at (empty = websocket disabled)
- wsListener net.Listener // Websocket RPC listener socket to server API requests
- wsHandler *rpc.Server // Websocket RPC request handler to process the API requests
+ isWss bool
+ wsEndpoint string // WebSocket endpoint (interface + port) to listen at (empty = WebSocket disabled)
+ wsListenerAddr net.Addr // Address of WebSocket RPC listener socket serving API requests
+ wsHTTPServer *http.Server // WebSocket RPC HTTP server
+ wsHandler *rpc.Server // WebSocket RPC request handler to process the API requests
pluginManager *plugin.PluginManager // Manage all plugins for this node. If plugin is not enabled, an EmptyPluginManager is set.
@@ -431,24 +435,25 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
return err
}
handler := NewHTTPHandlerStack(srv, cors, vhosts)
- // wrap handler in websocket handler only if websocket port is the same as http rpc
+ // wrap handler in WebSocket handler only if WebSocket port is the same as http rpc
if n.httpEndpoint == n.wsEndpoint {
handler = NewWebsocketUpgradeHandler(handler, srv.WebsocketHandler(wsOrigins))
}
- listener, isTlsEnabled, err := StartHTTPEndpoint(endpoint, timeouts, handler, tlsConfigSource)
+ httpServer, addr, isTlsEnabled, err := StartHTTPEndpoint(endpoint, timeouts, handler, tlsConfigSource)
if err != nil {
return err
}
n.isHttps = isTlsEnabled
- n.log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", listener.Addr()),
+ n.log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr),
"cors", strings.Join(cors, ","),
"vhosts", strings.Join(vhosts, ","))
if n.httpEndpoint == n.wsEndpoint {
- n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%v", listener.Addr()))
+ n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%v", addr))
}
// All listeners booted successfully
n.httpEndpoint = endpoint
- n.httpListener = listener
+ n.httpListenerAddr = addr
+ n.httpServer = httpServer
n.httpHandler = srv
return nil
@@ -456,11 +461,10 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
// stopHTTP terminates the HTTP RPC endpoint.
func (n *Node) stopHTTP() {
- if n.httpListener != nil {
- url := fmt.Sprintf("http://%v/", n.httpListener.Addr())
- n.httpListener.Close()
- n.httpListener = nil
- n.log.Info("HTTP endpoint closed", "url", url)
+ if n.httpServer != nil {
+ // Don't bother imposing a timeout here.
+ n.httpServer.Shutdown(context.Background())
+ n.log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", n.httpListenerAddr))
}
if n.httpHandler != nil {
n.httpHandler.Stop()
@@ -468,7 +472,7 @@ func (n *Node) stopHTTP() {
}
}
-// startWS initializes and starts the websocket RPC endpoint.
+// startWS initializes and starts the WebSocket RPC endpoint.
func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrigins []string, exposeAll bool) error {
// Short circuit if the WS endpoint isn't being exposed
if endpoint == "" {
@@ -484,27 +488,27 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
if err != nil {
return err
}
- listener, isTlsEnabled, err := startWSEndpoint(endpoint, handler, tlsConfigSource)
+ httpServer, addr, isTlsEnabled, err := startWSEndpoint(endpoint, handler, tlsConfigSource)
if err != nil {
return err
}
n.isWss = isTlsEnabled
- n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%s", listener.Addr()))
+ n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%v", addr))
// All listeners booted successfully
n.wsEndpoint = endpoint
- n.wsListener = listener
+ n.wsListenerAddr = addr
+ n.wsHTTPServer = httpServer
n.wsHandler = srv
return nil
}
-// stopWS terminates the websocket RPC endpoint.
+// stopWS terminates the WebSocket RPC endpoint.
func (n *Node) stopWS() {
- if n.wsListener != nil {
- n.wsListener.Close()
- n.wsListener = nil
-
- n.log.Info("WebSocket endpoint closed", "url", fmt.Sprintf("%s://%s", n.wsScheme(), n.wsEndpoint))
+ if n.wsHTTPServer != nil {
+ // Don't bother imposing a timeout here.
+ n.wsHTTPServer.Shutdown(context.Background())
+ n.log.Info("WebSocket endpoint closed", "url", fmt.Sprintf("ws://%v", n.wsListenerAddr))
}
if n.wsHandler != nil {
n.wsHandler.Stop()
@@ -682,8 +686,8 @@ func (n *Node) HTTPEndpoint() string {
n.lock.Lock()
defer n.lock.Unlock()
- if n.httpListener != nil {
- return n.httpListener.Addr().String()
+ if n.httpListenerAddr != nil {
+ return n.httpListenerAddr.String()
}
return n.httpEndpoint
}
@@ -693,8 +697,8 @@ func (n *Node) WSEndpoint() string {
n.lock.Lock()
defer n.lock.Unlock()
- if n.wsListener != nil {
- return n.wsListener.Addr().String()
+ if n.wsListenerAddr != nil {
+ return n.wsListenerAddr.String()
}
return n.wsEndpoint
}
diff --git a/p2p/dial.go b/p2p/dial.go
index d190e866af..d36d665501 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -78,6 +78,7 @@ var (
errAlreadyConnected = errors.New("already connected")
errRecentlyDialed = errors.New("recently dialed")
errNotWhitelisted = errors.New("not contained in netrestrict whitelist")
+ errNoPort = errors.New("node does not provide TCP port")
)
// dialer creates outbound connections and submits them into Server.
@@ -388,6 +389,12 @@ func (d *dialScheduler) checkDial(n *enode.Node) error {
if n.ID() == d.self {
return errSelf
}
+ if n.IP() != nil && n.TCP() == 0 {
+ // This check can trigger if a non-TCP node is found
+ // by discovery. If there is no IP, the node is a static
+ // node and the actual endpoint will be resolved later in dialTask.
+ return errNoPort
+ }
if _, ok := d.dialing[n.ID()]; ok {
return errAlreadyDialing
}
@@ -474,15 +481,13 @@ type dialError struct {
}
func (t *dialTask) run(d *dialScheduler) {
- if t.dest.Incomplete() {
- if !t.resolve(d) {
- return
- }
+ if t.needResolve() && !t.resolve(d) {
+ return
}
err := t.dial(d, t.dest)
if err != nil {
- // Try resolving the ID of static nodes if dialing failed.
+ // For static nodes, resolve one more time if dialing fails.
if _, ok := err.(*dialError); ok && t.flags&staticDialedConn != 0 {
if t.resolve(d) {
t.dial(d, t.dest)
@@ -491,6 +496,10 @@ func (t *dialTask) run(d *dialScheduler) {
}
}
+func (t *dialTask) needResolve() bool {
+ return t.flags&staticDialedConn != 0 && t.dest.IP() == nil
+}
+
// resolve attempts to find the current endpoint for the destination
// using discovery.
//
diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go
index 6af05f93dd..cc395d8f0d 100644
--- a/p2p/discover/v4_udp.go
+++ b/p2p/discover/v4_udp.go
@@ -169,7 +169,7 @@ func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
func (t *UDPv4) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*node, error) {
if rn.UDP <= 1024 {
- return nil, errors.New("low port")
+ return nil, errLowPort
}
if err := netutil.CheckRelayIP(sender.IP, rn.IP); err != nil {
return nil, err
diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go
index e667be1690..d53375b48b 100644
--- a/p2p/discover/v5_udp.go
+++ b/p2p/discover/v5_udp.go
@@ -216,6 +216,26 @@ func (t *UDPv5) Resolve(n *enode.Node) *enode.Node {
return n
}
+// AllNodes returns all the nodes stored in the local table.
+func (t *UDPv5) AllNodes() []*enode.Node {
+ t.tab.mutex.Lock()
+ defer t.tab.mutex.Unlock()
+ nodes := make([]*enode.Node, 0)
+
+ for _, b := range &t.tab.buckets {
+ for _, n := range b.entries {
+ nodes = append(nodes, unwrapNode(n))
+ }
+ }
+ return nodes
+}
+
+// LocalNode returns the current local node running the
+// protocol.
+func (t *UDPv5) LocalNode() *enode.LocalNode {
+ return t.localNode
+}
+
func (t *UDPv5) RandomNodes() enode.Iterator {
if t.tab.len() == 0 {
// All nodes were dropped, refresh. The very first query will hit this
diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go
index 15ea0402c2..7d3915e2dc 100644
--- a/p2p/discover/v5_udp_test.go
+++ b/p2p/discover/v5_udp_test.go
@@ -445,6 +445,28 @@ func TestUDPv5_lookup(t *testing.T) {
checkLookupResults(t, lookupTestnet, <-resultC)
}
+// This test checks the local node can be utilised to set key-values.
+func TestUDPv5_LocalNode(t *testing.T) {
+ t.Parallel()
+ var cfg Config
+ node := startLocalhostV5(t, cfg)
+ defer node.Close()
+ localNd := node.LocalNode()
+
+ // set value in node's local record
+ testVal := [4]byte{'A', 'B', 'C', 'D'}
+ localNd.Set(enr.WithEntry("testing", &testVal))
+
+ // retrieve the value from self to make sure it matches.
+ outputVal := [4]byte{}
+ if err := node.Self().Load(enr.WithEntry("testing", &outputVal)); err != nil {
+ t.Errorf("Could not load value from record: %v", err)
+ }
+ if testVal != outputVal {
+ t.Errorf("Wanted %#x to be retrieved from the record but instead got %#x", testVal, outputVal)
+ }
+}
+
// udpV5Test is the framework for all tests above.
// It runs the UDPv5 transport on a virtual socket and allows testing outgoing packets.
type udpV5Test struct {
diff --git a/p2p/enode/urlv4.go b/p2p/enode/urlv4.go
index 4145076a15..8ed500c566 100644
--- a/p2p/enode/urlv4.go
+++ b/p2p/enode/urlv4.go
@@ -59,7 +59,7 @@ func MustParseV4(rawurl string) *Node {
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can
-// only be given as an IP address, DNS domain names are not allowed.
+// only be given as an IP address or using a DNS domain name.
// The port in the host name section is the TCP listening port. If the
// TCP and UDP (discovery) ports differ, the UDP port is specified as
// query parameter "discport".
diff --git a/p2p/peer.go b/p2p/peer.go
index 3cc8f1eee6..647c729e45 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -380,6 +380,7 @@ func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error)
}
p.log.Trace(fmt.Sprintf("Starting protocol %s/%d", proto.Name, proto.Version))
go func() {
+ defer p.wg.Done()
err := proto.Run(p, rw)
if err == nil {
p.log.Trace(fmt.Sprintf("Protocol %s/%d returned", proto.Name, proto.Version))
@@ -388,7 +389,6 @@ func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error)
p.log.Trace(fmt.Sprintf("Protocol %s/%d failed", proto.Name, proto.Version), "err", err)
}
p.protoErr <- err
- p.wg.Done()
}()
}
}
diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go
index ec22b712c3..498723d1ac 100644
--- a/p2p/simulations/adapters/types.go
+++ b/p2p/simulations/adapters/types.go
@@ -300,5 +300,5 @@ func (n *NodeConfig) initEnode(ip net.IP, tcpport int, udpport int) error {
}
func (n *NodeConfig) initDummyEnode() error {
- return n.initEnode(net.IPv4(127, 0, 0, 1), 0, 0)
+ return n.initEnode(net.IPv4(127, 0, 0, 1), int(n.Port), 0)
}
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 5b4c962530..0d72321b03 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -63,15 +63,6 @@ var GoerliBootnodes = []string{
"enode://a61215641fb8714a373c80edbfa0ea8878243193f57c96eeb44d0bc019ef295abd4e044fd619bfc4c59731a73fb79afe84e9ab6da0c743ceb479cbb6d263fa91@3.11.147.67:30303",
}
-// DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the
-// experimental RLPx v5 topic-discovery network.
-var DiscoveryV5Bootnodes = []string{
- "enode://06051a5573c81934c9554ef2898eb13b33a34b94cf36b202b69fde139ca17a85051979867720d4bdae4323d4943ddf9aeeb6643633aa656e0be843659795007a@35.177.226.168:30303",
- "enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30304",
- "enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30306",
- "enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30307",
-}
-
const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@"
// These DNS names provide bootstrap connectivity for public testnets and the mainnet.
diff --git a/params/config.go b/params/config.go
index 6b5cfbad1c..70424bd3c6 100644
--- a/params/config.go
+++ b/params/config.go
@@ -73,10 +73,10 @@ var (
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 300,
- SectionHead: common.HexToHash("0x022d252ffcd289444eed5a4b8c58018aecb2afc9ab0da5fe059a69a7fb618702"),
- CHTRoot: common.HexToHash("0xe7044c70ae068969573c7f5abe58ef23d9d82d4ee9152ec88b7c6d0cc8ee2714"),
- BloomRoot: common.HexToHash("0xe22600caa25653abaef00d0c112b07b90f4e3395ce0c1f5f7f791cdd6d30a408"),
+ SectionIndex: 305,
+ SectionHead: common.HexToHash("0xebff6145928005e6e737ef680bc3c9a78a73cf4b31a4f3a9747a42ad3a08400a"),
+ CHTRoot: common.HexToHash("0x7ef6290e3c8ee479a9261707fe0a9dbac6e576e933b756d0430195015d1e09a3"),
+ BloomRoot: common.HexToHash("0xb603b9f2bc175c3ca59d27d2ebf69082c9a7c480e299a5aa1a6ea4eda96f7f0e"),
}
// MainnetCheckpointOracle contains a set of configs for the main network oracle.
@@ -112,10 +112,10 @@ var (
// RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
RopstenTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 234,
- SectionHead: common.HexToHash("0x34659b817e99e6de868b0d4c5321bcff7e36c2cf79307386a2f5053361794d95"),
- CHTRoot: common.HexToHash("0x249401cd2b07e3f64892729d3f6198514cd11001231a1c001c2e7245659b26e0"),
- BloomRoot: common.HexToHash("0x37657aa58a07ac3fa13f421c3e5500a944a76def5a11c6d57f17a85f5b33c129"),
+ SectionIndex: 239,
+ SectionHead: common.HexToHash("0xede7e25698bf5321b921ba42889d125c107158af81c203dc9a1b1184b15b7a92"),
+ CHTRoot: common.HexToHash("0x5d64eeccf5e8ae284f0764105e3ba93f19a439f8a3b4a438e00d1a23920f7be0"),
+ BloomRoot: common.HexToHash("0x29ae48513dd5679abfc8252988e3f9a3d0b868a2dd629435fdb5aa6b2df32353"),
}
// RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle.
@@ -153,10 +153,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 191,
- SectionHead: common.HexToHash("0xfdf3085848b4126048caf176634fd96a208d8a3b055c643e9e32690420df36d5"),
- CHTRoot: common.HexToHash("0x48059ceb7e0bd25708cc736e5603d28a6f173a3bb904e6e1b3511a97fa30ca97"),
- BloomRoot: common.HexToHash("0x3566c2b173c0591d5bb4f3ef7e341d82da7577c125fca94e9b51fb7134a676d7"),
+ SectionIndex: 196,
+ SectionHead: common.HexToHash("0x66faad1de5cd0c9da5c4c0b0d4e2e86c2ed6a9cde7441a9211deb3b6d049a01e"),
+ CHTRoot: common.HexToHash("0x5752c6633b5d052298316a4d7dd9d2e931b83e3387584f82998a1f6f05b5e4c1"),
+ BloomRoot: common.HexToHash("0x6a2e14dc35d2b6e0361af41a0e28143b59a578a4458e58ca2fb2172b6688b963"),
}
// RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle.
@@ -192,10 +192,10 @@ var (
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 76,
- SectionHead: common.HexToHash("0xf56ca390d1131767b924d85ee8e039c8a4c4a498cfaf017c1a9abf63ef01ff17"),
- CHTRoot: common.HexToHash("0x78ffc5eecf514eed42f61e6f6df1bdcd79f9296c462faf6f33bd600f70a2e8b9"),
- BloomRoot: common.HexToHash("0x5186111a2d6c459cc341319398f7d14fa2c973b1ba846b7f2ec678129c7115fd"),
+ SectionIndex: 80,
+ SectionHead: common.HexToHash("0xb33267f604cd52c17deb879a8affbd111383422212e6d4a67e50d38c614b0938"),
+ CHTRoot: common.HexToHash("0xeb03081cf34cefe8dd1a44865c236b689883518d27af953123b0c922f93f5e13"),
+ BloomRoot: common.HexToHash("0x0463836aebf8b7b2d736b082d4e1ee91698750a3f7c1558a9b6fb9acc035ba15"),
}
// GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle.
diff --git a/params/version.go b/params/version.go
index a9d7a235f3..e0ea3bc1a8 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 9 // Minor version component of the current release
- VersionPatch = 13 // Patch version component of the current release
+ VersionPatch = 14 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
QuorumVersionMajor = 21
diff --git a/permission/v1/bind/accounts.go b/permission/v1/bind/accounts.go
index 1aa9f1e68d..02940eae44 100644
--- a/permission/v1/bind/accounts.go
+++ b/permission/v1/bind/accounts.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/nodes.go b/permission/v1/bind/nodes.go
index 4f5da92faf..11e607eab1 100644
--- a/permission/v1/bind/nodes.go
+++ b/permission/v1/bind/nodes.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/org.go b/permission/v1/bind/org.go
index 6829403dfb..3e84b06990 100644
--- a/permission/v1/bind/org.go
+++ b/permission/v1/bind/org.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/permission_impl.go b/permission/v1/bind/permission_impl.go
index e9f5105052..2f488be8f8 100644
--- a/permission/v1/bind/permission_impl.go
+++ b/permission/v1/bind/permission_impl.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/permission_interface.go b/permission/v1/bind/permission_interface.go
index 643af9ab90..39468af8ef 100644
--- a/permission/v1/bind/permission_interface.go
+++ b/permission/v1/bind/permission_interface.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/permission_upgr.go b/permission/v1/bind/permission_upgr.go
index ddb22870ea..6f8de04912 100644
--- a/permission/v1/bind/permission_upgr.go
+++ b/permission/v1/bind/permission_upgr.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/roles.go b/permission/v1/bind/roles.go
index 0aa8694b77..1e05bbe3e7 100644
--- a/permission/v1/bind/roles.go
+++ b/permission/v1/bind/roles.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v1/bind/voter.go b/permission/v1/bind/voter.go
index 108f3d78cd..8c5835206d 100644
--- a/permission/v1/bind/voter.go
+++ b/permission/v1/bind/voter.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/accounts.go b/permission/v2/bind/accounts.go
index 13fdb5980c..8af01882c2 100644
--- a/permission/v2/bind/accounts.go
+++ b/permission/v2/bind/accounts.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/nodes.go b/permission/v2/bind/nodes.go
index 0c63216533..c9184b295b 100644
--- a/permission/v2/bind/nodes.go
+++ b/permission/v2/bind/nodes.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/org.go b/permission/v2/bind/org.go
index 8959d78060..7c245ec6a1 100644
--- a/permission/v2/bind/org.go
+++ b/permission/v2/bind/org.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/permission_impl.go b/permission/v2/bind/permission_impl.go
index 98c9f5a5a7..663308606a 100644
--- a/permission/v2/bind/permission_impl.go
+++ b/permission/v2/bind/permission_impl.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/permission_interface.go b/permission/v2/bind/permission_interface.go
index fbfc32731b..3f0e032bcf 100644
--- a/permission/v2/bind/permission_interface.go
+++ b/permission/v2/bind/permission_interface.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/permission_upgr.go b/permission/v2/bind/permission_upgr.go
index 0bc67e6f5c..133e54ed7e 100644
--- a/permission/v2/bind/permission_upgr.go
+++ b/permission/v2/bind/permission_upgr.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/roles.go b/permission/v2/bind/roles.go
index dcccc4cb55..f0d6a37451 100644
--- a/permission/v2/bind/roles.go
+++ b/permission/v2/bind/roles.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/permission/v2/bind/voter.go b/permission/v2/bind/voter.go
index 5f2a00279f..af2ba5230e 100644
--- a/permission/v2/bind/voter.go
+++ b/permission/v2/bind/voter.go
@@ -4,6 +4,7 @@
package permission
import (
+ "github.com/ethereum/go-ethereum/common/math"
"math/big"
"strings"
@@ -20,7 +21,7 @@ var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
- _ = abi.U256
+ _ = math.U256Bytes
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
diff --git a/rlp/iterator.go b/rlp/iterator.go
new file mode 100644
index 0000000000..c28866dbc1
--- /dev/null
+++ b/rlp/iterator.go
@@ -0,0 +1,60 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+type listIterator struct {
+ data []byte
+ next []byte
+ err error
+}
+
+// NewListIterator creates an iterator for the (list) represented by data
+func NewListIterator(data RawValue) (*listIterator, error) {
+ k, t, c, err := readKind(data)
+ if err != nil {
+ return nil, err
+ }
+ if k != List {
+ return nil, ErrExpectedList
+ }
+ it := &listIterator{
+ data: data[t : t+c],
+ }
+ return it, nil
+
+}
+
+// Next forwards the iterator one step, returns true if it was not at end yet
+func (it *listIterator) Next() bool {
+ if len(it.data) == 0 {
+ return false
+ }
+ _, t, c, err := readKind(it.data)
+ it.next = it.data[:t+c]
+ it.data = it.data[t+c:]
+ it.err = err
+ return true
+}
+
+// Value returns the current value
+func (it *listIterator) Value() []byte {
+ return it.next
+}
+
+func (it *listIterator) Err() error {
+ return it.err
+}
diff --git a/rlp/iterator_test.go b/rlp/iterator_test.go
new file mode 100644
index 0000000000..53c3819186
--- /dev/null
+++ b/rlp/iterator_test.go
@@ -0,0 +1,59 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+// TestIterator tests some basic things about the ListIterator. A more
+// comprehensive test can be found in core/rlp_test.go, where we can
+// use both types and rlp without dependency cycles
+func TestIterator(t *testing.T) {
+ bodyRlpHex := "0xf902cbf8d6f869800182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ba01025c66fad28b4ce3370222624d952c35529e602af7cbe04f667371f61b0e3b3a00ab8813514d1217059748fd903288ace1b4001a4bc5fbde2790debdc8167de2ff869010182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ca05ac4cf1d19be06f3742c21df6c49a7e929ceb3dbaf6a09f3cfb56ff6828bd9a7a06875970133a35e63ac06d360aa166d228cc013e9b96e0a2cae7f55b22e1ee2e8f901f0f901eda0c75448377c0e426b8017b23c5f77379ecf69abc1d5c224284ad3ba1c46c59adaa00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
+ bodyRlp := hexutil.MustDecode(bodyRlpHex)
+
+ it, err := NewListIterator(bodyRlp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Check that txs exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got zero")
+ }
+ txs := it.Value()
+ // Check that uncles exist
+ if !it.Next() {
+ t.Fatal("expected two elems, got one")
+ }
+ txit, err := NewListIterator(txs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var i = 0
+ for txit.Next() {
+ if txit.err != nil {
+ t.Fatal(txit.err)
+ }
+ i++
+ }
+ if exp := 2; i != exp {
+ t.Errorf("count wrong, expected %d got %d", exp, i)
+ }
+}
diff --git a/rpc/client_example_test.go b/rpc/client_example_test.go
index 3bb8717b80..044b57a9c4 100644
--- a/rpc/client_example_test.go
+++ b/rpc/client_example_test.go
@@ -19,9 +19,9 @@ package rpc_test
import (
"context"
"fmt"
- "math/big"
"time"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
)
@@ -31,16 +31,16 @@ import (
// eth_getBlockByNumber("latest", {})
// returns the latest block object.
//
-// eth_subscribe("newBlocks")
+// eth_subscribe("newHeads")
// creates a subscription which fires block objects when new blocks arrive.
type Block struct {
- Number *big.Int
+ Number *hexutil.Big
}
func ExampleClientSubscription() {
// Connect the client.
- client, _ := rpc.Dial("ws://127.0.0.1:8485")
+ client, _ := rpc.Dial("ws://127.0.0.1:8545")
subch := make(chan Block)
// Ensure that subch receives the latest block.
@@ -75,7 +75,8 @@ func subscribeBlocks(client *rpc.Client, subch chan Block) {
// The connection is established now.
// Update the channel with the current block.
var lastBlock Block
- if err := client.CallContext(ctx, &lastBlock, "eth_getBlockByNumber", "latest"); err != nil {
+ err = client.CallContext(ctx, &lastBlock, "eth_getBlockByNumber", "latest", false)
+ if err != nil {
fmt.Println("can't get latest block:", err)
return
}
diff --git a/rpc/http.go b/rpc/http.go
index c0a564357c..9133f1fc74 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -233,6 +233,7 @@ func (t *httpServerConn) SetWriteDeadline(time.Time) error { return nil }
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Permit dumb empty requests for remote health-checks (AWS)
if r.Method == http.MethodGet && r.ContentLength == 0 && r.URL.RawQuery == "" {
+ w.WriteHeader(http.StatusOK)
return
}
if code, err := validateRequest(r); err != nil {
diff --git a/rpc/http_test.go b/rpc/http_test.go
index b3f694d8af..fc939ae48f 100644
--- a/rpc/http_test.go
+++ b/rpc/http_test.go
@@ -23,32 +23,78 @@ import (
"testing"
)
+func confirmStatusCode(t *testing.T, got, want int) {
+ t.Helper()
+ if got == want {
+ return
+ }
+ if gotName := http.StatusText(got); len(gotName) > 0 {
+ if wantName := http.StatusText(want); len(wantName) > 0 {
+ t.Fatalf("response status code: got %d (%s), want %d (%s)", got, gotName, want, wantName)
+ }
+ }
+ t.Fatalf("response status code: got %d, want %d", got, want)
+}
+
+func confirmRequestValidationCode(t *testing.T, method, contentType, body string, expectedStatusCode int) {
+ t.Helper()
+ request := httptest.NewRequest(method, "http://url.com", strings.NewReader(body))
+ if len(contentType) > 0 {
+ request.Header.Set("Content-Type", contentType)
+ }
+ code, err := validateRequest(request)
+ if code == 0 {
+ if err != nil {
+ t.Errorf("validation: got error %v, expected nil", err)
+ }
+ } else if err == nil {
+ t.Errorf("validation: code %d: got nil, expected error", code)
+ }
+ confirmStatusCode(t, code, expectedStatusCode)
+}
+
func TestHTTPErrorResponseWithDelete(t *testing.T) {
- testHTTPErrorResponse(t, http.MethodDelete, contentType, "", http.StatusMethodNotAllowed)
+ confirmRequestValidationCode(t, http.MethodDelete, contentType, "", http.StatusMethodNotAllowed)
}
func TestHTTPErrorResponseWithPut(t *testing.T) {
- testHTTPErrorResponse(t, http.MethodPut, contentType, "", http.StatusMethodNotAllowed)
+ confirmRequestValidationCode(t, http.MethodPut, contentType, "", http.StatusMethodNotAllowed)
}
func TestHTTPErrorResponseWithMaxContentLength(t *testing.T) {
body := make([]rune, maxRequestContentLength+1)
- testHTTPErrorResponse(t,
+ confirmRequestValidationCode(t,
http.MethodPost, contentType, string(body), http.StatusRequestEntityTooLarge)
}
func TestHTTPErrorResponseWithEmptyContentType(t *testing.T) {
- testHTTPErrorResponse(t, http.MethodPost, "", "", http.StatusUnsupportedMediaType)
+ confirmRequestValidationCode(t, http.MethodPost, "", "", http.StatusUnsupportedMediaType)
}
func TestHTTPErrorResponseWithValidRequest(t *testing.T) {
- testHTTPErrorResponse(t, http.MethodPost, contentType, "", 0)
+ confirmRequestValidationCode(t, http.MethodPost, contentType, "", 0)
}
-func testHTTPErrorResponse(t *testing.T, method, contentType, body string, expected int) {
- request := httptest.NewRequest(method, "http://url.com", strings.NewReader(body))
- request.Header.Set("content-type", contentType)
- if code, _ := validateRequest(request); code != expected {
- t.Fatalf("response code should be %d not %d", expected, code)
+func confirmHTTPRequestYieldsStatusCode(t *testing.T, method, contentType, body string, expectedStatusCode int) {
+ t.Helper()
+ s := Server{}
+ ts := httptest.NewServer(&s)
+ defer ts.Close()
+
+ request, err := http.NewRequest(method, ts.URL, strings.NewReader(body))
+ if err != nil {
+ t.Fatalf("failed to create a valid HTTP request: %v", err)
+ }
+ if len(contentType) > 0 {
+ request.Header.Set("Content-Type", contentType)
}
+ resp, err := http.DefaultClient.Do(request)
+ if err != nil {
+ t.Fatalf("request failed: %v", err)
+ }
+ confirmStatusCode(t, resp.StatusCode, expectedStatusCode)
+}
+
+func TestHTTPResponseWithEmptyGet(t *testing.T) {
+ confirmHTTPRequestYieldsStatusCode(t, http.MethodGet, "", "", http.StatusOK)
}
diff --git a/signer/core/cliui.go b/signer/core/cliui.go
index 1e033299af..65114ac4b6 100644
--- a/signer/core/cliui.go
+++ b/signer/core/cliui.go
@@ -85,10 +85,19 @@ func (ui *CommandlineUI) confirm() bool {
return false
}
+// sanitize quotes and truncates 'txt' if longer than 'limit'. If truncated,
+// an ellipsis is added after the quoted string
+func sanitize(txt string, limit int) string {
+ if len(txt) > limit {
+ return fmt.Sprintf("%q...", txt[:limit])
+ }
+ return fmt.Sprintf("%q", txt)
+}
+
func showMetadata(metadata Metadata) {
fmt.Printf("Request context:\n\t%v -> %v -> %v\n", metadata.Remote, metadata.Scheme, metadata.Local)
fmt.Printf("\nAdditional HTTP header data, provided by the external caller:\n")
- fmt.Printf("\tUser-Agent: %v\n\tOrigin: %v\n", metadata.UserAgent, metadata.Origin)
+ fmt.Printf("\tUser-Agent: %v\n\tOrigin: %v\n", sanitize(metadata.UserAgent, 200), sanitize(metadata.Origin, 100))
}
// ApproveTx prompt the user for confirmation to request to sign Transaction
@@ -113,7 +122,6 @@ func (ui *CommandlineUI) ApproveTx(request *SignTxRequest) (SignTxResponse, erro
if request.Transaction.Data != nil {
d := *request.Transaction.Data
if len(d) > 0 {
-
fmt.Printf("data: %v\n", hexutil.Encode(d))
}
}
@@ -145,7 +153,7 @@ func (ui *CommandlineUI) ApproveSignData(request *SignDataRequest) (SignDataResp
for _, nvt := range request.Messages {
fmt.Printf("\u00a0\u00a0%v\n", strings.TrimSpace(nvt.Pprint(1)))
}
- fmt.Printf("raw data: \n%q\n", request.Rawdata)
+ fmt.Printf("raw data: \n\t%q\n", request.Rawdata)
fmt.Printf("data hash: %v\n", request.Hash)
fmt.Printf("-------------------------------------------\n")
showMetadata(request.Meta)
diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go
index 3a827afa2d..de36aa8896 100644
--- a/signer/core/signed_data.go
+++ b/signer/core/signed_data.go
@@ -31,7 +31,6 @@ import (
"unicode"
"github.com/ethereum/go-ethereum/accounts"
- "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
@@ -587,7 +586,7 @@ func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interf
if err != nil {
return nil, err
}
- return abi.U256(b), nil
+ return math.U256Bytes(b), nil
}
return nil, fmt.Errorf("unrecognized type '%s'", encType)
@@ -827,23 +826,23 @@ func (t Types) validate() error {
}
for i, typeObj := range typeArr {
if len(typeObj.Type) == 0 {
- return fmt.Errorf("type %v:%d: empty Type", typeKey, i)
+ return fmt.Errorf("type %q:%d: empty Type", typeKey, i)
}
if len(typeObj.Name) == 0 {
- return fmt.Errorf("type %v:%d: empty Name", typeKey, i)
+ return fmt.Errorf("type %q:%d: empty Name", typeKey, i)
}
if typeKey == typeObj.Type {
- return fmt.Errorf("type '%s' cannot reference itself", typeObj.Type)
+ return fmt.Errorf("type %q cannot reference itself", typeObj.Type)
}
if typeObj.isReferenceType() {
if _, exist := t[typeObj.typeName()]; !exist {
- return fmt.Errorf("reference type '%s' is undefined", typeObj.Type)
+ return fmt.Errorf("reference type %q is undefined", typeObj.Type)
}
if !typedDataReferenceTypeRegexp.MatchString(typeObj.Type) {
- return fmt.Errorf("unknown reference type '%s", typeObj.Type)
+ return fmt.Errorf("unknown reference type %q", typeObj.Type)
}
} else if !isPrimitiveTypeValid(typeObj.Type) {
- return fmt.Errorf("unknown type '%s'", typeObj.Type)
+ return fmt.Errorf("unknown type %q", typeObj.Type)
}
}
}
diff --git a/signer/fourbyte/abi.go b/signer/fourbyte/abi.go
index 796086d415..d8fbabd3b1 100644
--- a/signer/fourbyte/abi.go
+++ b/signer/fourbyte/abi.go
@@ -85,7 +85,7 @@ var selectorRegexp = regexp.MustCompile(`^([^\)]+)\(([A-Za-z0-9,\[\]]*)\)`)
// parseSelector converts a method selector into an ABI JSON spec. The returned
// data is a valid JSON string which can be consumed by the standard abi package.
-func parseSelector(selector string) ([]byte, error) {
+func parseSelector(unescapedSelector string) ([]byte, error) {
// Define a tiny fake ABI struct for JSON marshalling
type fakeArg struct {
Type string `json:"type"`
@@ -95,10 +95,10 @@ func parseSelector(selector string) ([]byte, error) {
Type string `json:"type"`
Inputs []fakeArg `json:"inputs"`
}
- // Validate the selector and extract it's components
- groups := selectorRegexp.FindStringSubmatch(selector)
+ // Validate the unescapedSelector and extract its components
+ groups := selectorRegexp.FindStringSubmatch(unescapedSelector)
if len(groups) != 3 {
- return nil, fmt.Errorf("invalid selector %s (%v matches)", selector, len(groups))
+ return nil, fmt.Errorf("invalid selector %q (%v matches)", unescapedSelector, len(groups))
}
name := groups[1]
args := groups[2]
@@ -115,7 +115,7 @@ func parseSelector(selector string) ([]byte, error) {
// parseCallData matches the provided call data against the ABI definition and
// returns a struct containing the actual go-typed values.
-func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
+func parseCallData(calldata []byte, unescapedAbidata string) (*decodedCallData, error) {
// Validate the call data that it has the 4byte prefix and the rest divisible by 32 bytes
if len(calldata) < 4 {
return nil, fmt.Errorf("invalid call data, incomplete method signature (%d bytes < 4)", len(calldata))
@@ -127,9 +127,9 @@ func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
return nil, fmt.Errorf("invalid call data; length should be a multiple of 32 bytes (was %d)", len(argdata))
}
// Validate the called method and upack the call data accordingly
- abispec, err := abi.JSON(strings.NewReader(abidata))
+ abispec, err := abi.JSON(strings.NewReader(unescapedAbidata))
if err != nil {
- return nil, fmt.Errorf("invalid method signature (%s): %v", abidata, err)
+ return nil, fmt.Errorf("invalid method signature (%q): %v", unescapedAbidata, err)
}
method, err := abispec.MethodById(sigdata)
if err != nil {
@@ -140,7 +140,7 @@ func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
return nil, fmt.Errorf("signature %q matches, but arguments mismatch: %v", method.String(), err)
}
// Everything valid, assemble the call infos for the signer
- decoded := decodedCallData{signature: method.Sig(), name: method.RawName}
+ decoded := decodedCallData{signature: method.Sig, name: method.RawName}
for i := 0; i < len(method.Inputs); i++ {
decoded.inputs = append(decoded.inputs, decodedArgument{
soltype: method.Inputs[i],
@@ -158,7 +158,7 @@ func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
if !bytes.Equal(encoded, argdata) {
was := common.Bytes2Hex(encoded)
exp := common.Bytes2Hex(argdata)
- return nil, fmt.Errorf("WARNING: Supplied data is stuffed with extra data. \nWant %s\nHave %s\nfor method %v", exp, was, method.Sig())
+ return nil, fmt.Errorf("WARNING: Supplied data is stuffed with extra data. \nWant %s\nHave %s\nfor method %v", exp, was, method.Sig)
}
return &decoded, nil
}
diff --git a/signer/fourbyte/fourbyte_test.go b/signer/fourbyte/fourbyte_test.go
index cdbd7ef73d..cf54c9b9c2 100644
--- a/signer/fourbyte/fourbyte_test.go
+++ b/signer/fourbyte/fourbyte_test.go
@@ -48,8 +48,8 @@ func TestEmbeddedDatabase(t *testing.T) {
t.Errorf("Failed to get method by id (%s): %v", id, err)
continue
}
- if m.Sig() != selector {
- t.Errorf("Selector mismatch: have %v, want %v", m.Sig(), selector)
+ if m.Sig != selector {
+ t.Errorf("Selector mismatch: have %v, want %v", m.Sig, selector)
}
}
}
diff --git a/signer/fourbyte/validation.go b/signer/fourbyte/validation.go
index fd13e0a630..baec32f72c 100644
--- a/signer/fourbyte/validation.go
+++ b/signer/fourbyte/validation.go
@@ -98,7 +98,7 @@ func (db *Database) ValidateCallData(selector *string, data []byte, messages *co
if info, err := verifySelector(*selector, data); err != nil {
messages.Warn(fmt.Sprintf("Transaction contains data, but provided ABI signature could not be matched: %v", err))
} else {
- messages.Info(info.String())
+ messages.Info(fmt.Sprintf("Transaction invokes the following method: %q", info.String()))
db.AddSelector(*selector, data[:4])
}
return
@@ -112,6 +112,6 @@ func (db *Database) ValidateCallData(selector *string, data []byte, messages *co
if info, err := verifySelector(embedded, data); err != nil {
messages.Warn(fmt.Sprintf("Transaction contains data, but provided ABI signature could not be verified: %v", err))
} else {
- messages.Info(info.String())
+ messages.Info(fmt.Sprintf("Transaction invokes the following method: %q", info.String()))
}
}
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index f008afe382..124f6d69d2 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -124,7 +124,7 @@ func (t *BlockTest) Run(snapshotter bool) error {
cache.SnapshotLimit = 1
cache.SnapshotWait = true
}
- chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil)
+ chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil, nil)
if err != nil {
return err
}
@@ -147,15 +147,8 @@ func (t *BlockTest) Run(snapshotter bool) error {
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
- snapTree := chain.Snapshot()
- root := chain.CurrentBlock().Root()
- it, err := snapTree.AccountIterator(root, common.Hash{})
- if err != nil {
- return fmt.Errorf("Could not create iterator for root %x: %v", root, err)
- }
- generatedRoot := snapshot.GenerateTrieRoot(it)
- if generatedRoot != root {
- return fmt.Errorf("Snapshot corruption, got %d exp %d", generatedRoot, root)
+ if err := snapshot.VerifyState(chain.Snapshot(), chain.CurrentBlock().Root()); err != nil {
+ return err
}
}
return t.validateImportedHeaders(chain, validBlocks)
diff --git a/tests/state_test.go b/tests/state_test.go
index c0a90b3a42..b77a898c21 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -37,13 +37,15 @@ func TestState(t *testing.T) {
st.slow(`^stQuadraticComplexityTest/`)
st.slow(`^stStaticCall/static_Call50000`)
st.slow(`^stStaticCall/static_Return50000`)
- st.slow(`^stStaticCall/static_Call1MB`)
st.slow(`^stSystemOperationsTest/CallRecursiveBomb`)
st.slow(`^stTransactionTest/Opcodes_TransactionInit`)
// Very time consuming
st.skipLoad(`^stTimeConsuming/`)
+ // Uses 1GB RAM per tested fork
+ st.skipLoad(`^stStaticCall/static_Call1MB`)
+
// Broken tests:
// Expected failures:
//st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/Byzantium/0`, "bug in test")
@@ -66,13 +68,16 @@ func TestState(t *testing.T) {
t.Run(key+"/trie", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- _, err := test.Run(subtest, vmconfig, false)
+ _, _, err := test.Run(subtest, vmconfig, false)
return st.checkFailure(t, name+"/trie", err)
})
})
t.Run(key+"/snap", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- _, err := test.Run(subtest, vmconfig, true)
+ snaps, statedb, err := test.Run(subtest, vmconfig, true)
+ if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil {
+ return err
+ }
return st.checkFailure(t, name+"/snap", err)
})
})
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 2d71db0fde..41da94ef98 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -147,37 +147,37 @@ func (t *StateTest) Subtests() []StateSubtest {
}
// Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, error) {
- statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) {
+ snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
if err != nil {
- return statedb, err
+ return snaps, statedb, err
}
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
// of suicides, and we need to touch the coinbase _after_ it has potentially suicided.
if root != common.Hash(post.Root) {
- return statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
+ return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
- return statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
+ return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
- return statedb, nil
+ return snaps, statedb, nil
}
// RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, common.Hash, error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) {
config, eips, err := getVMConfig(subtest.Fork)
if err != nil {
- return nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
+ return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock(nil)
- statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post)
if err != nil {
- return nil, common.Hash{}, err
+ return nil, nil, common.Hash{}, err
}
context := core.NewEVMContext(msg, block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash
@@ -186,7 +186,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
gaspool := new(core.GasPool)
gaspool.AddGas(block.GasLimit())
snapshot := statedb.Snapshot()
- if _, _, _, err := core.ApplyMessage(evm, msg, gaspool); err != nil {
+ if _, err := core.ApplyMessage(evm, msg, gaspool); err != nil {
statedb.RevertToSnapshot(snapshot)
}
// Commit block
@@ -199,14 +199,14 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
statedb.AddBalance(block.Coinbase(), new(big.Int))
// And _now_ get the state root
root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
- return statedb, root, nil
+ return snaps, statedb, root, nil
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
}
-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) *state.StateDB {
+func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {
sdb := state.NewDatabase(db)
statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts {
@@ -225,7 +225,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false)
}
statedb, _ = state.New(root, sdb, snaps)
- return statedb
+ return snaps, statedb
}
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go
index edb397c8bf..fa54ef66c9 100644
--- a/tests/vm_test_util.go
+++ b/tests/vm_test_util.go
@@ -79,7 +79,15 @@ type vmExecMarshaling struct {
}
func (t *VMTest) Run(vmconfig vm.Config, snapshotter bool) error {
- statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ if snapshotter {
+ preRoot := statedb.IntermediateRoot(false)
+ defer func() {
+ if _, err := snaps.Journal(preRoot); err != nil {
+ panic(err)
+ }
+ }()
+ }
ret, gasRemaining, err := t.exec(statedb, vmconfig)
if t.json.GasRemaining == nil {
diff --git a/trie/database.go b/trie/database.go
index 522a5d1a7a..bd3808ac1c 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -59,8 +59,11 @@ var (
// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")
+// secureKeyPrefixLength is the length of the above prefix
+const secureKeyPrefixLength = 11
+
// secureKeyLength is the length of the above prefix + 32byte hash.
-const secureKeyLength = 11 + 32
+const secureKeyLength = secureKeyPrefixLength + 32
// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
@@ -79,7 +82,6 @@ type Database struct {
newest common.Hash // Newest tracked node, flush-list tail
preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
- seckeybuf [secureKeyLength]byte // Ephemeral buffer for calculating preimage keys
gctime time.Duration // Time spent on garbage collection since last commit
gcnodes uint64 // Nodes garbage collected since last commit
@@ -448,15 +450,15 @@ func (db *Database) preimage(hash common.Hash) ([]byte, error) {
return preimage, nil
}
// Content unavailable in memory, attempt to retrieve from disk
- return db.diskdb.Get(db.secureKey(hash[:]))
+ return db.diskdb.Get(secureKey(hash))
}
-// secureKey returns the database key for the preimage of key, as an ephemeral
-// buffer. The caller must not hold onto the return value because it will become
-// invalid on the next call.
-func (db *Database) secureKey(key []byte) []byte {
- buf := append(db.seckeybuf[:0], secureKeyPrefix...)
- buf = append(buf, key...)
+// secureKey returns the database key for the preimage of key (as a newly
+// allocated byte-slice)
+func secureKey(hash common.Hash) []byte {
+ buf := make([]byte, secureKeyLength)
+ copy(buf, secureKeyPrefix)
+ copy(buf[secureKeyPrefixLength:], hash[:])
return buf
}
@@ -599,12 +601,18 @@ func (db *Database) Cap(limit common.StorageSize) error {
size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
+ // We reuse an ephemeral buffer for the keys. The batch Put operation
+ // copies it internally, so we can reuse it.
+ var keyBuf [secureKeyLength]byte
+ copy(keyBuf[:], secureKeyPrefix)
+
// If the preimage cache got large enough, push to disk. If it's still small
// leave for later to deduplicate writes.
flushPreimages := db.preimagesSize > 4*1024*1024
if flushPreimages {
for hash, preimage := range db.preimages {
- if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
+ copy(keyBuf[secureKeyPrefixLength:], hash[:])
+ if err := batch.Put(keyBuf[:], preimage); err != nil {
log.Error("Failed to commit preimage from trie database", "err", err)
return err
}
@@ -695,9 +703,15 @@ func (db *Database) Commit(node common.Hash, report bool) error {
start := time.Now()
batch := db.diskdb.NewBatch()
+ // We reuse an ephemeral buffer for the keys. The batch Put operation
+ // copies it internally, so we can reuse it.
+ var keyBuf [secureKeyLength]byte
+ copy(keyBuf[:], secureKeyPrefix)
+
// Move all of the accumulated preimages into a write batch
for hash, preimage := range db.preimages {
- if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
+ copy(keyBuf[secureKeyPrefixLength:], hash[:])
+ if err := batch.Put(keyBuf[:], preimage); err != nil {
log.Error("Failed to commit preimage from trie database", "err", err)
return err
}
diff --git a/trie/proof.go b/trie/proof.go
index 58ca69c680..07ce8e6d84 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -18,10 +18,12 @@ package trie
import (
"bytes"
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -101,33 +103,232 @@ func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWri
// VerifyProof checks merkle proofs. The given proof must contain the value for
// key in a trie with the given root hash. VerifyProof returns an error if the
// proof contains invalid trie nodes or the wrong value.
-func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, nodes int, err error) {
+func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, err error) {
key = keybytesToHex(key)
wantHash := rootHash
for i := 0; ; i++ {
buf, _ := proofDb.Get(wantHash[:])
if buf == nil {
- return nil, i, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
+ return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
}
n, err := decodeNode(wantHash[:], buf)
if err != nil {
- return nil, i, fmt.Errorf("bad proof node %d: %v", i, err)
+ return nil, fmt.Errorf("bad proof node %d: %v", i, err)
}
- keyrest, cld := get(n, key)
+ keyrest, cld := get(n, key, true)
switch cld := cld.(type) {
case nil:
// The trie doesn't contain the key.
- return nil, i, nil
+ return nil, nil
case hashNode:
key = keyrest
copy(wantHash[:], cld)
case valueNode:
- return cld, i + 1, nil
+ return cld, nil
}
}
}
-func get(tn node, key []byte) ([]byte, node) {
+// proofToPath converts a merkle proof to a trie node path.
+// The main purpose of this function is recovering a node
+// path from the merkle proof stream. All necessary nodes
+// will be resolved and the remaining parts left as hashnodes.
+func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyValueReader) (node, error) {
+ // resolveNode retrieves and resolves trie node from merkle proof stream
+ resolveNode := func(hash common.Hash) (node, error) {
+ buf, _ := proofDb.Get(hash[:])
+ if buf == nil {
+ return nil, fmt.Errorf("proof node (hash %064x) missing", hash)
+ }
+ n, err := decodeNode(hash[:], buf)
+ if err != nil {
+ return nil, fmt.Errorf("bad proof node %v", err)
+ }
+ return n, err
+ }
+ // If the root node is empty, resolve it first
+ if root == nil {
+ n, err := resolveNode(rootHash)
+ if err != nil {
+ return nil, err
+ }
+ root = n
+ }
+ var (
+ err error
+ child, parent node
+ keyrest []byte
+ terminate bool
+ )
+ key, parent = keybytesToHex(key), root
+ for {
+ keyrest, child = get(parent, key, false)
+ switch cld := child.(type) {
+ case nil:
+ // The trie doesn't contain the key.
+ return nil, errors.New("the node is not contained in trie")
+ case *shortNode:
+ key, parent = keyrest, child // Already resolved
+ continue
+ case *fullNode:
+ key, parent = keyrest, child // Already resolved
+ continue
+ case hashNode:
+ child, err = resolveNode(common.BytesToHash(cld))
+ if err != nil {
+ return nil, err
+ }
+ case valueNode:
+ terminate = true
+ }
+ // Link the parent and child.
+ switch pnode := parent.(type) {
+ case *shortNode:
+ pnode.Val = child
+ case *fullNode:
+ pnode.Children[key[0]] = child
+ default:
+ panic(fmt.Sprintf("%T: invalid node: %v", pnode, pnode))
+ }
+ if terminate {
+ return root, nil // The whole path is resolved
+ }
+ key, parent = keyrest, child
+ }
+}
+
+// unsetInternal removes all internal node references (hashnode, embedded node).
+// It should be called after a trie is constructed with two edge proofs. Also,
+// the given boundary keys must be the ones used to construct the edge proofs.
+//
+// It's the key step for range proof. All visited nodes should be marked dirty
+// since the node content might be modified. Besides, it can happen that some
+// fullnodes only have one child, which is disallowed. But if the proof is valid,
+// the missing children will be filled, otherwise it will be thrown anyway.
+func unsetInternal(node node, left []byte, right []byte) error {
+ left, right = keybytesToHex(left), keybytesToHex(right)
+
+ // todo(rjl493456442) different length edge keys should be supported
+ if len(left) != len(right) {
+ return errors.New("inconsistent edge path")
+ }
+ // Step down to the fork point
+ prefix, pos := prefixLen(left, right), 0
+ for {
+ if pos >= prefix {
+ break
+ }
+ switch n := (node).(type) {
+ case *shortNode:
+ if len(left)-pos < len(n.Key) || !bytes.Equal(n.Key, left[pos:pos+len(n.Key)]) {
+ return errors.New("invalid edge path")
+ }
+ n.flags = nodeFlag{dirty: true}
+ node, pos = n.Val, pos+len(n.Key)
+ case *fullNode:
+ n.flags = nodeFlag{dirty: true}
+ node, pos = n.Children[left[pos]], pos+1
+ default:
+ panic(fmt.Sprintf("%T: invalid node: %v", node, node))
+ }
+ }
+ fn, ok := node.(*fullNode)
+ if !ok {
+ return errors.New("the fork point must be a fullnode")
+ }
+ // Find the fork point! Unset all intermediate references
+ for i := left[prefix] + 1; i < right[prefix]; i++ {
+ fn.Children[i] = nil
+ }
+ fn.flags = nodeFlag{dirty: true}
+ unset(fn.Children[left[prefix]], left[prefix+1:], false)
+ unset(fn.Children[right[prefix]], right[prefix+1:], true)
+ return nil
+}
+
+// unset removes all internal node references on either the leftmost or rightmost path.
+func unset(root node, rest []byte, removeLeft bool) {
+ switch rn := root.(type) {
+ case *fullNode:
+ if removeLeft {
+ for i := 0; i < int(rest[0]); i++ {
+ rn.Children[i] = nil
+ }
+ rn.flags = nodeFlag{dirty: true}
+ } else {
+ for i := rest[0] + 1; i < 16; i++ {
+ rn.Children[i] = nil
+ }
+ rn.flags = nodeFlag{dirty: true}
+ }
+ unset(rn.Children[rest[0]], rest[1:], removeLeft)
+ case *shortNode:
+ rn.flags = nodeFlag{dirty: true}
+ if _, ok := rn.Val.(valueNode); ok {
+ rn.Val = nilValueNode
+ return
+ }
+ unset(rn.Val, rest[len(rn.Key):], removeLeft)
+ case hashNode, nil, valueNode:
+ panic("it shouldn't happen")
+ }
+}
+
+// VerifyRangeProof checks whether the given leaf nodes and edge proofs
+// can prove that the given trie leaves range matches the given root hash
+// and that the range is consecutive (no gap inside).
+func VerifyRangeProof(rootHash common.Hash, keys [][]byte, values [][]byte, firstProof ethdb.KeyValueReader, lastProof ethdb.KeyValueReader) error {
+ if len(keys) != len(values) {
+ return fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
+ }
+ if len(keys) == 0 {
+ return fmt.Errorf("nothing to verify")
+ }
+ if len(keys) == 1 {
+ value, err := VerifyProof(rootHash, keys[0], firstProof)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(value, values[0]) {
+ return fmt.Errorf("correct proof but invalid data")
+ }
+ return nil
+ }
+ // Convert the edge proofs to edge trie paths. Then we can
+ // have the same tree architecture with the original one.
+ root, err := proofToPath(rootHash, nil, keys[0], firstProof)
+ if err != nil {
+ return err
+ }
+ // Pass the root node here, the second path will be merged
+ // with the first one.
+ root, err = proofToPath(rootHash, root, keys[len(keys)-1], lastProof)
+ if err != nil {
+ return err
+ }
+ // Remove all internal references. All the removed parts should
+ // be re-filled(or re-constructed) by the given leaves range.
+ if err := unsetInternal(root, keys[0], keys[len(keys)-1]); err != nil {
+ return err
+ }
+ // Rebuild the trie with the leaf stream; the shape of the trie
+ // should be the same as the original one.
+ newtrie := &Trie{root: root, db: NewDatabase(memorydb.New())}
+ for index, key := range keys {
+ newtrie.TryUpdate(key, values[index])
+ }
+ if newtrie.Hash() != rootHash {
+ return fmt.Errorf("invalid proof, wanthash %x, got %x", rootHash, newtrie.Hash())
+ }
+ return nil
+}
+
+// get returns the child of the given node. It returns nil if the
+// node with the specified key doesn't exist at all.
+//
+// There is an additional flag `skipResolved`. If it's set, then
+// all resolved nodes won't be returned.
+func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
for {
switch n := tn.(type) {
case *shortNode:
@@ -136,9 +337,15 @@ func get(tn node, key []byte) ([]byte, node) {
}
tn = n.Val
key = key[len(n.Key):]
+ if !skipResolved {
+ return key, tn
+ }
case *fullNode:
tn = n.Children[key[0]]
key = key[1:]
+ if !skipResolved {
+ return key, tn
+ }
case hashNode:
return key, n
case nil:
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 4caae73381..ea02c289ea 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
crand "crypto/rand"
mrand "math/rand"
+ "sort"
"testing"
"time"
@@ -65,7 +66,7 @@ func TestProof(t *testing.T) {
if proof == nil {
t.Fatalf("prover %d: missing key %x while constructing proof", i, kv.k)
}
- val, _, err := VerifyProof(root, kv.k, proof)
+ val, err := VerifyProof(root, kv.k, proof)
if err != nil {
t.Fatalf("prover %d: failed to verify proof for key %x: %v\nraw proof: %x", i, kv.k, err, proof)
}
@@ -87,7 +88,7 @@ func TestOneElementProof(t *testing.T) {
if proof.Len() != 1 {
t.Errorf("prover %d: proof should have one element", i)
}
- val, _, err := VerifyProof(trie.Hash(), []byte("k"), proof)
+ val, err := VerifyProof(trie.Hash(), []byte("k"), proof)
if err != nil {
t.Fatalf("prover %d: failed to verify proof: %v\nraw proof: %x", i, err, proof)
}
@@ -97,6 +98,156 @@ func TestOneElementProof(t *testing.T) {
}
}
+type entrySlice []*kv
+
+func (p entrySlice) Len() int { return len(p) }
+func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
+func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func TestRangeProof(t *testing.T) {
+ trie, vals := randomTrie(4096)
+ var entries entrySlice
+ for _, kv := range vals {
+ entries = append(entries, kv)
+ }
+ sort.Sort(entries)
+ for i := 0; i < 500; i++ {
+ start := mrand.Intn(len(entries))
+ end := mrand.Intn(len(entries)-start) + start
+ if start == end {
+ continue
+ }
+ firstProof, lastProof := memorydb.New(), memorydb.New()
+ if err := trie.Prove(entries[start].k, 0, firstProof); err != nil {
+ t.Fatalf("Failed to prove the first node %v", err)
+ }
+ if err := trie.Prove(entries[end-1].k, 0, lastProof); err != nil {
+ t.Fatalf("Failed to prove the last node %v", err)
+ }
+ var keys [][]byte
+ var vals [][]byte
+ for i := start; i < end; i++ {
+ keys = append(keys, entries[i].k)
+ vals = append(vals, entries[i].v)
+ }
+ err := VerifyRangeProof(trie.Hash(), keys, vals, firstProof, lastProof)
+ if err != nil {
+ t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
+ }
+ }
+}
+
+func TestBadRangeProof(t *testing.T) {
+ trie, vals := randomTrie(4096)
+ var entries entrySlice
+ for _, kv := range vals {
+ entries = append(entries, kv)
+ }
+ sort.Sort(entries)
+
+ for i := 0; i < 500; i++ {
+ start := mrand.Intn(len(entries))
+ end := mrand.Intn(len(entries)-start) + start
+ if start == end {
+ continue
+ }
+ firstProof, lastProof := memorydb.New(), memorydb.New()
+ if err := trie.Prove(entries[start].k, 0, firstProof); err != nil {
+ t.Fatalf("Failed to prove the first node %v", err)
+ }
+ if err := trie.Prove(entries[end-1].k, 0, lastProof); err != nil {
+ t.Fatalf("Failed to prove the last node %v", err)
+ }
+ var keys [][]byte
+ var vals [][]byte
+ for i := start; i < end; i++ {
+ keys = append(keys, entries[i].k)
+ vals = append(vals, entries[i].v)
+ }
+ testcase := mrand.Intn(6)
+ var index int
+ switch testcase {
+ case 0:
+ // Modified key
+ index = mrand.Intn(end - start)
+ keys[index] = randBytes(32) // In theory it can't be same
+ case 1:
+ // Modified val
+ index = mrand.Intn(end - start)
+ vals[index] = randBytes(20) // In theory it can't be same
+ case 2:
+ // Gapped entry slice
+
+ // There are only two elements, skip it. Dropped any element
+ // will lead to single edge proof which is always correct.
+ if end-start <= 2 {
+ continue
+ }
+ // If the dropped element is the first or last one and it's a
+ // batch of small size elements. In this special case, it can
+ // happen that the proof for the edge element is exactly same
+ // with the first/last second element(since small values are
+ // embedded in the parent). Avoid this case.
+ index = mrand.Intn(end - start)
+ if (index == end-start-1 || index == 0) && end <= 100 {
+ continue
+ }
+ keys = append(keys[:index], keys[index+1:]...)
+ vals = append(vals[:index], vals[index+1:]...)
+ case 3:
+ // Switched entry slice, same effect with gapped
+ index = mrand.Intn(end - start)
+ keys[index] = entries[len(entries)-1].k
+ vals[index] = entries[len(entries)-1].v
+ case 4:
+ // Set random key to nil
+ index = mrand.Intn(end - start)
+ keys[index] = nil
+ case 5:
+ // Set random value to nil
+ index = mrand.Intn(end - start)
+ vals[index] = nil
+ }
+ err := VerifyRangeProof(trie.Hash(), keys, vals, firstProof, lastProof)
+ if err == nil {
+ t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1)
+ }
+ }
+}
+
+// TestGappedRangeProof focuses on the small trie with embedded nodes.
+// If the gapped node is embedded in the trie, it should be detected too.
+func TestGappedRangeProof(t *testing.T) {
+ trie := new(Trie)
+ var entries []*kv // Sorted entries
+ for i := byte(0); i < 10; i++ {
+ value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
+ trie.Update(value.k, value.v)
+ entries = append(entries, value)
+ }
+ first, last := 2, 8
+ firstProof, lastProof := memorydb.New(), memorydb.New()
+ if err := trie.Prove(entries[first].k, 0, firstProof); err != nil {
+ t.Fatalf("Failed to prove the first node %v", err)
+ }
+ if err := trie.Prove(entries[last-1].k, 0, lastProof); err != nil {
+ t.Fatalf("Failed to prove the last node %v", err)
+ }
+ var keys [][]byte
+ var vals [][]byte
+ for i := first; i < last; i++ {
+ if i == (first+last)/2 {
+ continue
+ }
+ keys = append(keys, entries[i].k)
+ vals = append(vals, entries[i].v)
+ }
+ err := VerifyRangeProof(trie.Hash(), keys, vals, firstProof, lastProof)
+ if err == nil {
+ t.Fatal("expect error, got nil")
+ }
+}
+
func TestBadProof(t *testing.T) {
trie, vals := randomTrie(800)
root := trie.Hash()
@@ -118,7 +269,7 @@ func TestBadProof(t *testing.T) {
mutateByte(val)
proof.Put(crypto.Keccak256(val), val)
- if _, _, err := VerifyProof(root, kv.k, proof); err == nil {
+ if _, err := VerifyProof(root, kv.k, proof); err == nil {
t.Fatalf("prover %d: expected proof to fail for key %x", i, kv.k)
}
}
@@ -138,7 +289,7 @@ func TestMissingKeyProof(t *testing.T) {
if proof.Len() != 1 {
t.Errorf("test %d: proof should have one element", i)
}
- val, _, err := VerifyProof(trie.Hash(), []byte(key), proof)
+ val, err := VerifyProof(trie.Hash(), []byte(key), proof)
if err != nil {
t.Fatalf("test %d: failed to verify proof: %v\nraw proof: %x", i, err, proof)
}
@@ -191,12 +342,50 @@ func BenchmarkVerifyProof(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
im := i % len(keys)
- if _, _, err := VerifyProof(root, []byte(keys[im]), proofs[im]); err != nil {
+ if _, err := VerifyProof(root, []byte(keys[im]), proofs[im]); err != nil {
b.Fatalf("key %x: %v", keys[im], err)
}
}
}
+func BenchmarkVerifyRangeProof10(b *testing.B) { benchmarkVerifyRangeProof(b, 10) }
+func BenchmarkVerifyRangeProof100(b *testing.B) { benchmarkVerifyRangeProof(b, 100) }
+func BenchmarkVerifyRangeProof1000(b *testing.B) { benchmarkVerifyRangeProof(b, 1000) }
+func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b, 5000) }
+
+func benchmarkVerifyRangeProof(b *testing.B, size int) {
+ trie, vals := randomTrie(8192)
+ var entries entrySlice
+ for _, kv := range vals {
+ entries = append(entries, kv)
+ }
+ sort.Sort(entries)
+
+ start := 2
+ end := start + size
+ firstProof, lastProof := memorydb.New(), memorydb.New()
+ if err := trie.Prove(entries[start].k, 0, firstProof); err != nil {
+ b.Fatalf("Failed to prove the first node %v", err)
+ }
+ if err := trie.Prove(entries[end-1].k, 0, lastProof); err != nil {
+ b.Fatalf("Failed to prove the last node %v", err)
+ }
+ var keys [][]byte
+ var values [][]byte
+ for i := start; i < end; i++ {
+ keys = append(keys, entries[i].k)
+ values = append(values, entries[i].v)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := VerifyRangeProof(trie.Hash(), keys, values, firstProof, lastProof)
+ if err != nil {
+ b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
+ }
+ }
+}
+
func randomTrie(n int) (*Trie, map[string]*kv) {
trie := new(Trie)
vals := make(map[string]*kv)
diff --git a/trie/sync.go b/trie/sync.go
index e5a0c17493..978e76799a 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -174,8 +174,8 @@ func (s *Sync) Missing(max int) []common.Hash {
}
// Process injects a batch of retrieved trie nodes data, returning if something
-// was committed to the database and also the index of an entry if processing of
-// it failed.
+// was committed to the database and also the index of an entry if its processing
+// failed.
func (s *Sync) Process(results []SyncResult) (bool, int, error) {
committed := false
diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go
index 377406b360..af513888d0 100644
--- a/whisper/whisperv6/whisper.go
+++ b/whisper/whisperv6/whisper.go
@@ -340,11 +340,11 @@ func (whisper *Whisper) getPeers() []*Peer {
arr := make([]*Peer, len(whisper.peers))
i := 0
whisper.peerMu.Lock()
+ defer whisper.peerMu.Unlock()
for p := range whisper.peers {
arr[i] = p
i++
}
- whisper.peerMu.Unlock()
return arr
}