diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e7f7a1fe351e..2c41c2b2a65f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -118,20 +118,6 @@ jobs: cd cicd/devnet/terraform terraform init ${{ env.tf_init_cli_options }} terraform apply -var "docker_tag=dev-upgrade-${git_hash}" ${{ env.tf_apply_cli_options }} - sleep 5 - source .env - for ((i=$us_east_2_start;i<$us_east_2_end;i++)); do - echo "Force deploy xdc-$i" - aws ecs update-service --region us-east-2 --cluster devnet-xdcnode-cluster --service ecs-service-xdc$i --force-new-deployment --no-cli-pager | head -n 10; - done - for ((i=$eu_west_1_start;i<$eu_west_1_end;i++)); do - echo "Force deploy xdc-$i" - aws ecs update-service --region eu-west-1 --cluster devnet-xdcnode-cluster --service ecs-service-xdc$i --force-new-deployment --no-cli-pager | head -n 10; - done - for ((i=$ap_southeast_2_start;i<$ap_southeast_2_end;i++)); do - echo "Force deploy xdc-$i" - aws ecs update-service --region ap-southeast-2 --cluster devnet-xdcnode-cluster --service ecs-service-xdc$i --force-new-deployment --no-cli-pager | head -n 10; - done rpcnode_terraform_apply: runs-on: ubuntu-latest diff --git a/XDCx/XDCx.go b/XDCx/XDCx.go index 6506929984d7..3dc3386d92bf 100644 --- a/XDCx/XDCx.go +++ b/XDCx/XDCx.go @@ -292,10 +292,10 @@ func (XDCx *XDCX) GetAveragePriceLastEpoch(chain consensus.ChainContext, statedb // return tokenQuantity (after convert from XDC to token), tokenPriceInXDC, error func (XDCx *XDCX) ConvertXDCToToken(chain consensus.ChainContext, statedb *state.StateDB, tradingStateDb *tradingstate.TradingStateDB, token common.Address, quantity *big.Int) (*big.Int, *big.Int, error) { - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { return quantity, common.BasePrice, nil } - tokenPriceInXDC, err := XDCx.GetAveragePriceLastEpoch(chain, statedb, tradingStateDb, token, common.HexToAddress(common.XDCNativeAddress)) + tokenPriceInXDC, err := 
XDCx.GetAveragePriceLastEpoch(chain, statedb, tradingStateDb, token, common.XDCNativeAddressBinary) if err != nil || tokenPriceInXDC == nil || tokenPriceInXDC.Sign() <= 0 { return common.Big0, common.Big0, err } @@ -595,10 +595,11 @@ func (XDCx *XDCX) GetTriegc() *prque.Prque { func (XDCx *XDCX) GetTradingStateRoot(block *types.Block, author common.Address) (common.Hash, error) { for _, tx := range block.Transactions() { - from := *(tx.From()) - if tx.To() != nil && tx.To().Hex() == common.TradingStateAddr && from.String() == author.String() { - if len(tx.Data()) >= 32 { - return common.BytesToHash(tx.Data()[:32]), nil + to := tx.To() + if to != nil && *to == common.TradingStateAddrBinary && *tx.From() == author { + data := tx.Data() + if len(data) >= 32 { + return common.BytesToHash(data[:32]), nil } } } diff --git a/XDCx/order_processor.go b/XDCx/order_processor.go index ca9ca496e352..86d156fec881 100644 --- a/XDCx/order_processor.go +++ b/XDCx/order_processor.go @@ -236,11 +236,11 @@ func (XDCx *XDCX) processOrderList(coinbase common.Address, chain consensus.Chai maxTradedQuantity = tradingstate.CloneBigInt(amount) } var quotePrice *big.Int - if oldestOrder.QuoteToken.String() != common.XDCNativeAddress { - quotePrice = tradingStateDB.GetLastPrice(tradingstate.GetTradingOrderBookHash(oldestOrder.QuoteToken, common.HexToAddress(common.XDCNativeAddress))) + if oldestOrder.QuoteToken != common.XDCNativeAddressBinary { + quotePrice = tradingStateDB.GetLastPrice(tradingstate.GetTradingOrderBookHash(oldestOrder.QuoteToken, common.XDCNativeAddressBinary)) log.Debug("TryGet quotePrice QuoteToken/XDC", "quotePrice", quotePrice) if quotePrice == nil || quotePrice.Sign() == 0 { - inversePrice := tradingStateDB.GetLastPrice(tradingstate.GetTradingOrderBookHash(common.HexToAddress(common.XDCNativeAddress), oldestOrder.QuoteToken)) + inversePrice := tradingStateDB.GetLastPrice(tradingstate.GetTradingOrderBookHash(common.XDCNativeAddressBinary, oldestOrder.QuoteToken)) 
quoteTokenDecimal, err := XDCx.GetTokenDecimal(chain, statedb, oldestOrder.QuoteToken) if err != nil || quoteTokenDecimal.Sign() == 0 { return nil, nil, nil, fmt.Errorf("Fail to get tokenDecimal. Token: %v . Err: %v", oldestOrder.QuoteToken.String(), err) @@ -374,10 +374,10 @@ func (XDCx *XDCX) getTradeQuantity(quotePrice *big.Int, coinbase common.Address, if err != nil || quoteTokenDecimal.Sign() == 0 { return tradingstate.Zero, false, nil, fmt.Errorf("Fail to get tokenDecimal. Token: %v . Err: %v", makerOrder.QuoteToken.String(), err) } - if makerOrder.QuoteToken.String() == common.XDCNativeAddress { + if makerOrder.QuoteToken == common.XDCNativeAddressBinary { quotePrice = quoteTokenDecimal } - if takerOrder.ExchangeAddress.String() == makerOrder.ExchangeAddress.String() { + if takerOrder.ExchangeAddress == makerOrder.ExchangeAddress { if err := tradingstate.CheckRelayerFee(takerOrder.ExchangeAddress, new(big.Int).Mul(common.RelayerFee, big.NewInt(2)), statedb); err != nil { log.Debug("Reject order Taker Exchnage = Maker Exchange , relayer not enough fee ", "err", err) return tradingstate.Zero, false, nil, nil diff --git a/XDCx/order_processor_test.go b/XDCx/order_processor_test.go index 7ab396f83fe6..b3a651ae98c7 100644 --- a/XDCx/order_processor_test.go +++ b/XDCx/order_processor_test.go @@ -1,12 +1,13 @@ package XDCx import ( - "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" "math/big" "reflect" "testing" + + "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" ) func Test_getCancelFeeV1(t *testing.T) { @@ -103,9 +104,9 @@ func Test_getCancelFee(t *testing.T) { XDCx.SetTokenDecimal(testTokenB, tokenBDecimal) // set tokenAPrice = 1 XDC - tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(testTokenA, common.HexToAddress(common.XDCNativeAddress)), 
common.BasePrice) + tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(testTokenA, common.XDCNativeAddressBinary), common.BasePrice) // set tokenBPrice = 1 XDC - tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(common.HexToAddress(common.XDCNativeAddress), testTokenB), tokenBDecimal) + tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(common.XDCNativeAddressBinary, testTokenB), tokenBDecimal) type CancelFeeArg struct { feeRate *big.Int @@ -127,7 +128,7 @@ func Test_getCancelFee(t *testing.T) { feeRate: common.Big0, order: &tradingstate.OrderItem{ BaseToken: testTokenA, - QuoteToken: common.HexToAddress(common.XDCNativeAddress), + QuoteToken: common.XDCNativeAddressBinary, Quantity: new(big.Int).SetUint64(10000), Side: tradingstate.Ask, }, @@ -142,7 +143,7 @@ func Test_getCancelFee(t *testing.T) { feeRate: common.Big0, order: &tradingstate.OrderItem{ BaseToken: testTokenA, - QuoteToken: common.HexToAddress(common.XDCNativeAddress), + QuoteToken: common.XDCNativeAddressBinary, Quantity: new(big.Int).SetUint64(10000), Side: tradingstate.Bid, }, @@ -156,7 +157,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ feeRate: new(big.Int).SetUint64(10), // 10/10000= 0.1% order: &tradingstate.OrderItem{ - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: tradingstate.Ask, @@ -172,7 +173,7 @@ func Test_getCancelFee(t *testing.T) { feeRate: new(big.Int).SetUint64(10), // 10/10000= 0.1% order: &tradingstate.OrderItem{ Quantity: new(big.Int).SetUint64(10000), - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: testTokenA, Side: tradingstate.Bid, }, @@ -188,7 +189,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ feeRate: common.Big0, order: &tradingstate.OrderItem{ - BaseToken: 
common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: tradingstate.Ask, @@ -203,7 +204,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ feeRate: common.Big0, order: &tradingstate.OrderItem{ - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: tradingstate.Bid, @@ -218,7 +219,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ feeRate: new(big.Int).SetUint64(10), // 10/10000= 0.1% order: &tradingstate.OrderItem{ - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: tradingstate.Ask, @@ -234,7 +235,7 @@ func Test_getCancelFee(t *testing.T) { feeRate: new(big.Int).SetUint64(10), // 10/10000= 0.1% order: &tradingstate.OrderItem{ Quantity: new(big.Int).SetUint64(10000), - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: testTokenA, Side: tradingstate.Bid, }, diff --git a/XDCx/token.go b/XDCx/token.go index 2bf37e3f2f1f..5aa8554cf9f5 100644 --- a/XDCx/token.go +++ b/XDCx/token.go @@ -48,7 +48,7 @@ func (XDCx *XDCX) GetTokenDecimal(chain consensus.ChainContext, statedb *state.S if tokenDecimal, ok := XDCx.tokenDecimalCache.Get(tokenAddr); ok { return tokenDecimal.(*big.Int), nil } - if tokenAddr.String() == common.XDCNativeAddress { + if tokenAddr == common.XDCNativeAddressBinary { XDCx.tokenDecimalCache.Add(tokenAddr, common.BasePrice) return common.BasePrice, nil } diff --git a/XDCx/tradingstate/orderitem.go b/XDCx/tradingstate/orderitem.go index f28e978435f5..8c39375b5fdf 100644 --- a/XDCx/tradingstate/orderitem.go +++ b/XDCx/tradingstate/orderitem.go @@ -239,7 +239,7 @@ func (o *OrderItem) verifyRelayer(state *state.StateDB) error { return nil } 
-//verify signatures +// verify signatures func (o *OrderItem) verifySignature() error { bigstr := o.Nonce.String() n, err := strconv.ParseInt(bigstr, 10, 64) @@ -269,7 +269,7 @@ func (o *OrderItem) verifyOrderType() error { return nil } -//verify order side +// verify order side func (o *OrderItem) verifyOrderSide() error { if o.Side != Bid && o.Side != Ask { @@ -356,11 +356,11 @@ func VerifyPair(statedb *state.StateDB, exchangeAddress, baseToken, quoteToken c func VerifyBalance(statedb *state.StateDB, XDCxStateDb *TradingStateDB, order *types.OrderTransaction, baseDecimal, quoteDecimal *big.Int) error { var quotePrice *big.Int - if order.QuoteToken().String() != common.XDCNativeAddress { - quotePrice = XDCxStateDb.GetLastPrice(GetTradingOrderBookHash(order.QuoteToken(), common.HexToAddress(common.XDCNativeAddress))) + if order.QuoteToken() != common.XDCNativeAddressBinary { + quotePrice = XDCxStateDb.GetLastPrice(GetTradingOrderBookHash(order.QuoteToken(), common.XDCNativeAddressBinary)) log.Debug("TryGet quotePrice QuoteToken/XDC", "quotePrice", quotePrice) if quotePrice == nil || quotePrice.Sign() == 0 { - inversePrice := XDCxStateDb.GetLastPrice(GetTradingOrderBookHash(common.HexToAddress(common.XDCNativeAddress), order.QuoteToken())) + inversePrice := XDCxStateDb.GetLastPrice(GetTradingOrderBookHash(common.XDCNativeAddressBinary, order.QuoteToken())) log.Debug("TryGet inversePrice XDC/QuoteToken", "inversePrice", inversePrice) if inversePrice != nil && inversePrice.Sign() > 0 { quotePrice = new(big.Int).Mul(common.BasePrice, quoteDecimal) diff --git a/XDCx/tradingstate/relayer_state.go b/XDCx/tradingstate/relayer_state.go index 2a1892492d97..348f2130418e 100644 --- a/XDCx/tradingstate/relayer_state.go +++ b/XDCx/tradingstate/relayer_state.go @@ -159,9 +159,9 @@ func CheckRelayerFee(relayer common.Address, fee *big.Int, statedb *state.StateD } func AddTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB) error { // XDC 
native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { balance := statedb.GetBalance(addr) - log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD TOKEN XDC NATIVE BEFORE", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) + log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD TOKEN XDC NATIVE BEFORE", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "orderValue", value) statedb.AddBalance(addr, value) balance = statedb.GetBalance(addr) log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD XDC NATIVE BALANCE AFTER", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) @@ -186,10 +186,9 @@ func AddTokenBalance(addr common.Address, value *big.Int, token common.Address, func SubTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB) error { // XDC native - if token.String() == common.XDCNativeAddress { - + if token == common.XDCNativeAddressBinary { balance := statedb.GetBalance(addr) - log.Debug("ApplyXDCXMatchedTransaction settle balance: SUB XDC NATIVE BALANCE BEFORE", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) + log.Debug("ApplyXDCXMatchedTransaction settle balance: SUB XDC NATIVE BALANCE BEFORE", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "orderValue", value) if balance.Cmp(value) < 0 { return errors.Errorf("value %s in token %s not enough , have : %s , want : %s ", addr.String(), token.String(), balance, value) } @@ -219,7 +218,7 @@ func SubTokenBalance(addr common.Address, value *big.Int, token common.Address, func CheckSubTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB, mapBalances map[common.Address]map[common.Address]*big.Int) (*big.Int, error) { // XDC native - if token.String() == common.XDCNativeAddress { + if 
token == common.XDCNativeAddressBinary { var balance *big.Int if value := mapBalances[token][addr]; value != nil { balance = value @@ -256,7 +255,7 @@ func CheckSubTokenBalance(addr common.Address, value *big.Int, token common.Addr func CheckAddTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB, mapBalances map[common.Address]map[common.Address]*big.Int) (*big.Int, error) { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { var balance *big.Int if value := mapBalances[token][addr]; value != nil { balance = value @@ -308,7 +307,7 @@ func CheckSubRelayerFee(relayer common.Address, fee *big.Int, statedb *state.Sta func GetTokenBalance(addr common.Address, token common.Address, statedb *state.StateDB) *big.Int { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { return statedb.GetBalance(addr) } // TRC tokens @@ -323,7 +322,7 @@ func GetTokenBalance(addr common.Address, token common.Address, statedb *state.S func SetTokenBalance(addr common.Address, balance *big.Int, token common.Address, statedb *state.StateDB) error { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { statedb.SetBalance(addr, balance) return nil } diff --git a/XDCx/tradingstate/settle_balance.go b/XDCx/tradingstate/settle_balance.go index 66996ae5b243..771da0c29b26 100644 --- a/XDCx/tradingstate/settle_balance.go +++ b/XDCx/tradingstate/settle_balance.go @@ -52,7 +52,7 @@ func GetSettleBalance(quotePrice *big.Int, takerSide string, takerFeeRate *big.I log.Debug("quantity trade too small", "quoteTokenQuantity", quoteTokenQuantity, "makerFee", makerFee, "defaultFee", defaultFee) return result, ErrQuantityTradeTooSmall } - if quoteToken.String() != common.XDCNativeAddress && quotePrice != nil && quotePrice.Cmp(common.Big0) > 0 { + if quoteToken != common.XDCNativeAddressBinary && 
quotePrice != nil && quotePrice.Cmp(common.Big0) > 0 { // defaultFeeInXDC defaultFeeInXDC := new(big.Int).Mul(defaultFee, quotePrice) defaultFeeInXDC = new(big.Int).Div(defaultFeeInXDC, quoteTokenDecimal) @@ -69,7 +69,7 @@ func GetSettleBalance(quotePrice *big.Int, takerSide string, takerFeeRate *big.I log.Debug("takerFee too small", "quoteTokenQuantity", quoteTokenQuantity, "takerFee", takerFee, "exTakerReceivedFee", exTakerReceivedFee, "quotePrice", quotePrice, "defaultFeeInXDC", defaultFeeInXDC) return result, ErrQuantityTradeTooSmall } - } else if quoteToken.String() == common.XDCNativeAddress { + } else if quoteToken == common.XDCNativeAddressBinary { exMakerReceivedFee := makerFee if (exMakerReceivedFee.Cmp(common.RelayerFee) <= 0 && exMakerReceivedFee.Sign() > 0) || defaultFee.Cmp(common.RelayerFee) <= 0 { log.Debug("makerFee too small", "quantityToTrade", quantityToTrade, "makerFee", makerFee, "exMakerReceivedFee", exMakerReceivedFee, "makerFeeRate", makerFeeRate, "defaultFee", defaultFee) @@ -108,7 +108,7 @@ func GetSettleBalance(quotePrice *big.Int, takerSide string, takerFeeRate *big.I log.Debug("quantity trade too small", "quoteTokenQuantity", quoteTokenQuantity, "takerFee", takerFee) return result, ErrQuantityTradeTooSmall } - if quoteToken.String() != common.XDCNativeAddress && quotePrice != nil && quotePrice.Cmp(common.Big0) > 0 { + if quoteToken != common.XDCNativeAddressBinary && quotePrice != nil && quotePrice.Cmp(common.Big0) > 0 { // defaultFeeInXDC defaultFeeInXDC := new(big.Int).Mul(defaultFee, quotePrice) defaultFeeInXDC = new(big.Int).Div(defaultFeeInXDC, quoteTokenDecimal) @@ -126,7 +126,7 @@ func GetSettleBalance(quotePrice *big.Int, takerSide string, takerFeeRate *big.I log.Debug("takerFee too small", "quoteTokenQuantity", quoteTokenQuantity, "takerFee", takerFee, "exTakerReceivedFee", exTakerReceivedFee, "quotePrice", quotePrice, "defaultFeeInXDC", defaultFeeInXDC) return result, ErrQuantityTradeTooSmall } - } else if quoteToken.String() 
== common.XDCNativeAddress { + } else if quoteToken == common.XDCNativeAddressBinary { exMakerReceivedFee := makerFee if (exMakerReceivedFee.Cmp(common.RelayerFee) <= 0 && exMakerReceivedFee.Sign() > 0) || defaultFee.Cmp(common.RelayerFee) <= 0 { log.Debug("makerFee too small", "quantityToTrade", quantityToTrade, "makerFee", makerFee, "exMakerReceivedFee", exMakerReceivedFee, "makerFeeRate", makerFeeRate, "defaultFee", defaultFee) diff --git a/XDCx/tradingstate/settle_balance_test.go b/XDCx/tradingstate/settle_balance_test.go index 674e381fd8d9..d340226fc756 100644 --- a/XDCx/tradingstate/settle_balance_test.go +++ b/XDCx/tradingstate/settle_balance_test.go @@ -1,10 +1,11 @@ package tradingstate import ( - "github.com/XinFinOrg/XDPoSChain/common" "math/big" "reflect" "testing" + + "github.com/XinFinOrg/XDPoSChain/common" ) func TestGetSettleBalance(t *testing.T) { @@ -89,7 +90,7 @@ func TestGetSettleBalance(t *testing.T) { takerSide: Bid, takerFeeRate: big.NewInt(10), // feeRate 0.1% baseToken: testToken, - quoteToken: common.HexToAddress(common.XDCNativeAddress), + quoteToken: common.XDCNativeAddressBinary, makerPrice: common.BasePrice, makerFeeRate: big.NewInt(10), // feeRate 0.1% baseTokenDecimal: common.BasePrice, @@ -106,7 +107,7 @@ func TestGetSettleBalance(t *testing.T) { takerSide: Bid, takerFeeRate: big.NewInt(5), // feeRate 0.05% baseToken: testToken, - quoteToken: common.HexToAddress(common.XDCNativeAddress), + quoteToken: common.XDCNativeAddressBinary, makerPrice: common.BasePrice, makerFeeRate: big.NewInt(10), // feeRate 0.1% baseTokenDecimal: common.BasePrice, @@ -124,7 +125,7 @@ func TestGetSettleBalance(t *testing.T) { takerSide: Bid, takerFeeRate: big.NewInt(10), // feeRate 0.1% baseToken: testToken, - quoteToken: common.HexToAddress(common.XDCNativeAddress), + quoteToken: common.XDCNativeAddressBinary, makerPrice: common.BasePrice, makerFeeRate: big.NewInt(10), // feeRate 0.1% baseTokenDecimal: common.BasePrice, @@ -132,8 +133,8 @@ func 
TestGetSettleBalance(t *testing.T) { quantityToTrade: new(big.Int).Mul(big.NewInt(1000), common.BasePrice), }, &SettleBalance{ - Taker: TradeResult{Fee: testFee, InToken: testToken, InTotal: tradeQuantity, OutToken: common.HexToAddress(common.XDCNativeAddress), OutTotal: tradeQuantityIncludedFee}, - Maker: TradeResult{Fee: testFee, InToken: common.HexToAddress(common.XDCNativeAddress), InTotal: tradeQuantityExcludedFee, OutToken: testToken, OutTotal: tradeQuantity}, + Taker: TradeResult{Fee: testFee, InToken: testToken, InTotal: tradeQuantity, OutToken: common.XDCNativeAddressBinary, OutTotal: tradeQuantityIncludedFee}, + Maker: TradeResult{Fee: testFee, InToken: common.XDCNativeAddressBinary, InTotal: tradeQuantityExcludedFee, OutToken: testToken, OutTotal: tradeQuantity}, }, false, }, @@ -196,7 +197,7 @@ func TestGetSettleBalance(t *testing.T) { takerSide: Ask, takerFeeRate: big.NewInt(10), // feeRate 0.1% baseToken: testToken, - quoteToken: common.HexToAddress(common.XDCNativeAddress), + quoteToken: common.XDCNativeAddressBinary, makerPrice: common.BasePrice, makerFeeRate: big.NewInt(10), // feeRate 0.1% baseTokenDecimal: common.BasePrice, @@ -213,7 +214,7 @@ func TestGetSettleBalance(t *testing.T) { takerSide: Ask, takerFeeRate: big.NewInt(5), // feeRate 0.05% baseToken: testToken, - quoteToken: common.HexToAddress(common.XDCNativeAddress), + quoteToken: common.XDCNativeAddressBinary, makerPrice: common.BasePrice, makerFeeRate: big.NewInt(10), // feeRate 0.1% baseTokenDecimal: common.BasePrice, @@ -231,7 +232,7 @@ func TestGetSettleBalance(t *testing.T) { takerSide: Ask, takerFeeRate: big.NewInt(10), // feeRate 15% baseToken: testToken, - quoteToken: common.HexToAddress(common.XDCNativeAddress), + quoteToken: common.XDCNativeAddressBinary, makerPrice: common.BasePrice, makerFeeRate: big.NewInt(10), // feeRate 0.1% baseTokenDecimal: common.BasePrice, @@ -239,8 +240,8 @@ func TestGetSettleBalance(t *testing.T) { quantityToTrade: new(big.Int).Mul(big.NewInt(1000), 
common.BasePrice), }, &SettleBalance{ - Maker: TradeResult{Fee: testFee, InToken: testToken, InTotal: tradeQuantity, OutToken: common.HexToAddress(common.XDCNativeAddress), OutTotal: tradeQuantityIncludedFee}, - Taker: TradeResult{Fee: testFee, InToken: common.HexToAddress(common.XDCNativeAddress), InTotal: tradeQuantityExcludedFee, OutToken: testToken, OutTotal: tradeQuantity}, + Maker: TradeResult{Fee: testFee, InToken: testToken, InTotal: tradeQuantity, OutToken: common.XDCNativeAddressBinary, OutTotal: tradeQuantityIncludedFee}, + Taker: TradeResult{Fee: testFee, InToken: common.XDCNativeAddressBinary, InTotal: tradeQuantityExcludedFee, OutToken: testToken, OutTotal: tradeQuantity}, }, false, }, diff --git a/XDCxlending/XDCxlending.go b/XDCxlending/XDCxlending.go index 352c224d0a20..48ff54077844 100644 --- a/XDCxlending/XDCxlending.go +++ b/XDCxlending/XDCxlending.go @@ -696,10 +696,11 @@ func (l *Lending) GetTriegc() *prque.Prque { func (l *Lending) GetLendingStateRoot(block *types.Block, author common.Address) (common.Hash, error) { for _, tx := range block.Transactions() { - from := *(tx.From()) - if tx.To() != nil && tx.To().Hex() == common.TradingStateAddr && from.String() == author.String() { - if len(tx.Data()) >= 64 { - return common.BytesToHash(tx.Data()[32:]), nil + to := tx.To() + if to != nil && *to == common.TradingStateAddrBinary && *tx.From() == author { + data := tx.Data() + if len(data) >= 64 { + return common.BytesToHash(data[32:]), nil } } } diff --git a/XDCxlending/lendingstate/lendingcontract.go b/XDCxlending/lendingstate/lendingcontract.go index fa2df7dc8491..eba5b1a06587 100644 --- a/XDCxlending/lendingstate/lendingcontract.go +++ b/XDCxlending/lendingstate/lendingcontract.go @@ -1,7 +1,7 @@ package lendingstate import ( - "fmt" + "errors" "math/big" "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" @@ -273,10 +273,10 @@ func GetAllLendingBooks(statedb *state.StateDB) (mapLendingBook map[common.Hash] baseTokens := 
GetSupportedBaseToken(statedb) terms := GetSupportedTerms(statedb) if len(baseTokens) == 0 { - return nil, fmt.Errorf("GetAllLendingBooks: empty baseToken list") + return nil, errors.New("GetAllLendingBooks: empty baseToken list") } if len(terms) == 0 { - return nil, fmt.Errorf("GetAllLendingPairs: empty term list") + return nil, errors.New("GetAllLendingPairs: empty term list") } for _, baseToken := range baseTokens { for _, term := range terms { @@ -295,10 +295,10 @@ func GetAllLendingPairs(statedb *state.StateDB) (allPairs []LendingPair, err err baseTokens := GetSupportedBaseToken(statedb) collaterals := GetAllCollateral(statedb) if len(baseTokens) == 0 { - return allPairs, fmt.Errorf("GetAllLendingPairs: empty baseToken list") + return allPairs, errors.New("GetAllLendingPairs: empty baseToken list") } if len(collaterals) == 0 { - return allPairs, fmt.Errorf("GetAllLendingPairs: empty collateral list") + return allPairs, errors.New("GetAllLendingPairs: empty collateral list") } for _, baseToken := range baseTokens { for _, collateral := range collaterals { diff --git a/XDCxlending/lendingstate/lendingitem.go b/XDCxlending/lendingstate/lendingitem.go index 235a77fa6021..dd4553ab8793 100644 --- a/XDCxlending/lendingstate/lendingitem.go +++ b/XDCxlending/lendingstate/lendingitem.go @@ -1,6 +1,7 @@ package lendingstate import ( + "errors" "fmt" "math/big" "strconv" @@ -260,7 +261,7 @@ func (l *LendingItem) VerifyCollateral(state *state.StateDB) error { validCollateral := false collateralList := GetCollaterals(state, l.Relayer, l.LendingToken, l.Term) for _, collateral := range collateralList { - if l.CollateralToken.String() == collateral.String() { + if l.CollateralToken == collateral { validCollateral = true break } @@ -359,7 +360,7 @@ func (l *LendingItem) VerifyLendingSignature() error { tx.ImportSignature(V, R, S) from, _ := types.LendingSender(types.LendingTxSigner{}, tx) if from != tx.UserAddress() { - return fmt.Errorf("verify lending item: invalid 
signature") + return errors.New("verify lending item: invalid signature") } return nil } @@ -411,7 +412,7 @@ func VerifyBalance(isXDCXLendingFork bool, statedb *state.StateDB, lendingStateD defaultFee := new(big.Int).Mul(quantity, new(big.Int).SetUint64(DefaultFeeRate)) defaultFee = new(big.Int).Div(defaultFee, common.XDCXBaseFee) defaultFeeInXDC := common.Big0 - if lendingToken.String() != common.XDCNativeAddress { + if lendingToken != common.XDCNativeAddressBinary { defaultFeeInXDC = new(big.Int).Mul(defaultFee, lendTokenXDCPrice) defaultFeeInXDC = new(big.Int).Div(defaultFeeInXDC, lendingTokenDecimal) } else { @@ -473,10 +474,10 @@ func VerifyBalance(isXDCXLendingFork bool, statedb *state.StateDB, lendingStateD } return nil default: - return fmt.Errorf("VerifyBalance: unknown lending side") + return errors.New("VerifyBalance: unknown lending side") } default: - return fmt.Errorf("VerifyBalance: unknown lending type") + return errors.New("VerifyBalance: unknown lending type") } return nil } diff --git a/XDCxlending/lendingstate/relayer.go b/XDCxlending/lendingstate/relayer.go index 571ecdcd1593..536784741bd7 100644 --- a/XDCxlending/lendingstate/relayer.go +++ b/XDCxlending/lendingstate/relayer.go @@ -114,12 +114,12 @@ func CheckRelayerFee(relayer common.Address, fee *big.Int, statedb *state.StateD } func AddTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB) error { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { balance := statedb.GetBalance(addr) - log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD TOKEN XDC NATIVE BEFORE", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) + log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD TOKEN XDC NATIVE BEFORE", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "orderValue", value) statedb.AddBalance(addr, value) balance = 
statedb.GetBalance(addr) - log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD XDC NATIVE BALANCE AFTER", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) + log.Debug("ApplyXDCXMatchedTransaction settle balance: ADD XDC NATIVE BALANCE AFTER", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "orderValue", value) return nil } @@ -141,15 +141,15 @@ func AddTokenBalance(addr common.Address, value *big.Int, token common.Address, func SubTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB) error { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { balance := statedb.GetBalance(addr) - log.Debug("ApplyXDCXMatchedTransaction settle balance: SUB XDC NATIVE BALANCE BEFORE", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) + log.Debug("ApplyXDCXMatchedTransaction settle balance: SUB XDC NATIVE BALANCE BEFORE", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "orderValue", value) if balance.Cmp(value) < 0 { - return errors.Errorf("value %s in token %s not enough , have : %s , want : %s ", addr.String(), token.String(), balance, value) + return errors.Errorf("value %s in token %s not enough , have : %s , want : %s ", addr.String(), common.XDCNativeAddress, balance, value) } statedb.SubBalance(addr, value) balance = statedb.GetBalance(addr) - log.Debug("ApplyXDCXMatchedTransaction settle balance: SUB XDC NATIVE BALANCE AFTER", "token", token.String(), "address", addr.String(), "balance", balance, "orderValue", value) + log.Debug("ApplyXDCXMatchedTransaction settle balance: SUB XDC NATIVE BALANCE AFTER", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "orderValue", value) return nil } @@ -174,7 +174,7 @@ func SubTokenBalance(addr common.Address, value *big.Int, token common.Address, func 
CheckSubTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB, mapBalances map[common.Address]map[common.Address]*big.Int) (*big.Int, error) { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { var balance *big.Int if value := mapBalances[token][addr]; value != nil { balance = value @@ -182,10 +182,10 @@ func CheckSubTokenBalance(addr common.Address, value *big.Int, token common.Addr balance = statedb.GetBalance(addr) } if balance.Cmp(value) < 0 { - return nil, errors.Errorf("value %s in token %s not enough , have : %s , want : %s ", addr.String(), token.String(), balance, value) + return nil, errors.Errorf("value %s in token %s not enough , have : %s , want : %s ", addr.String(), common.XDCNativeAddress, balance, value) } newBalance := new(big.Int).Sub(balance, value) - log.Debug("CheckSubTokenBalance settle balance: SUB XDC NATIVE BALANCE ", "token", token.String(), "address", addr.String(), "balance", balance, "value", value, "newBalance", newBalance) + log.Debug("CheckSubTokenBalance settle balance: SUB XDC NATIVE BALANCE ", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "value", value, "newBalance", newBalance) return newBalance, nil } // TRC tokens @@ -211,7 +211,7 @@ func CheckSubTokenBalance(addr common.Address, value *big.Int, token common.Addr func CheckAddTokenBalance(addr common.Address, value *big.Int, token common.Address, statedb *state.StateDB, mapBalances map[common.Address]map[common.Address]*big.Int) (*big.Int, error) { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { var balance *big.Int if value := mapBalances[token][addr]; value != nil { balance = value @@ -219,7 +219,7 @@ func CheckAddTokenBalance(addr common.Address, value *big.Int, token common.Addr balance = statedb.GetBalance(addr) } newBalance := new(big.Int).Add(balance, value) - 
log.Debug("CheckAddTokenBalance settle balance: ADD XDC NATIVE BALANCE ", "token", token.String(), "address", addr.String(), "balance", balance, "value", value, "newBalance", newBalance) + log.Debug("CheckAddTokenBalance settle balance: ADD XDC NATIVE BALANCE ", "token", common.XDCNativeAddress, "address", addr.String(), "balance", balance, "value", value, "newBalance", newBalance) return newBalance, nil } // TRC tokens @@ -263,7 +263,7 @@ func CheckSubRelayerFee(relayer common.Address, fee *big.Int, statedb *state.Sta func GetTokenBalance(addr common.Address, token common.Address, statedb *state.StateDB) *big.Int { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { return statedb.GetBalance(addr) } // TRC tokens @@ -278,7 +278,7 @@ func GetTokenBalance(addr common.Address, token common.Address, statedb *state.S func SetTokenBalance(addr common.Address, balance *big.Int, token common.Address, statedb *state.StateDB) error { // XDC native - if token.String() == common.XDCNativeAddress { + if token == common.XDCNativeAddressBinary { statedb.SetBalance(addr, balance) return nil } diff --git a/XDCxlending/lendingstate/settle_balance.go b/XDCxlending/lendingstate/settle_balance.go index 108a459afe70..93ee96d9366b 100644 --- a/XDCxlending/lendingstate/settle_balance.go +++ b/XDCxlending/lendingstate/settle_balance.go @@ -3,9 +3,10 @@ package lendingstate import ( "encoding/json" "errors" + "math/big" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/log" - "math/big" ) const DefaultFeeRate = 100 // 100 / XDCXBaseFee = 100 / 10000 = 1% @@ -71,7 +72,7 @@ func GetSettleBalance(isXDCXLendingFork bool, log.Debug("quantity lending too small", "quantityToLend", quantityToLend, "takerFee", takerFee) return result, ErrQuantityTradeTooSmall } - if lendingToken.String() != common.XDCNativeAddress && lendTokenXDCPrice != nil && lendTokenXDCPrice.Cmp(common.Big0) > 0 { + if lendingToken != 
common.XDCNativeAddressBinary && lendTokenXDCPrice != nil && lendTokenXDCPrice.Cmp(common.Big0) > 0 { exTakerReceivedFee := new(big.Int).Mul(takerFee, lendTokenXDCPrice) exTakerReceivedFee = new(big.Int).Div(exTakerReceivedFee, lendTokenDecimal) @@ -82,7 +83,7 @@ func GetSettleBalance(isXDCXLendingFork bool, log.Debug("takerFee too small", "quantityToLend", quantityToLend, "takerFee", takerFee, "exTakerReceivedFee", exTakerReceivedFee, "borrowFeeRate", borrowFeeRate, "defaultFeeInXDC", defaultFeeInXDC) return result, ErrQuantityTradeTooSmall } - } else if lendingToken.String() == common.XDCNativeAddress { + } else if lendingToken == common.XDCNativeAddressBinary { exTakerReceivedFee := takerFee if (exTakerReceivedFee.Cmp(common.RelayerLendingFee) <= 0 && exTakerReceivedFee.Sign() > 0) || defaultFee.Cmp(common.RelayerLendingFee) <= 0 { log.Debug("takerFee too small", "quantityToLend", quantityToLend, "takerFee", takerFee, "exTakerReceivedFee", exTakerReceivedFee, "borrowFeeRate", borrowFeeRate, "defaultFee", defaultFee) @@ -121,7 +122,7 @@ func GetSettleBalance(isXDCXLendingFork bool, log.Debug("quantity lending too small", "quantityToLend", quantityToLend, "makerFee", makerFee) return result, ErrQuantityTradeTooSmall } - if lendingToken.String() != common.XDCNativeAddress && lendTokenXDCPrice != nil && lendTokenXDCPrice.Cmp(common.Big0) > 0 { + if lendingToken != common.XDCNativeAddressBinary && lendTokenXDCPrice != nil && lendTokenXDCPrice.Cmp(common.Big0) > 0 { exMakerReceivedFee := new(big.Int).Mul(makerFee, lendTokenXDCPrice) exMakerReceivedFee = new(big.Int).Div(exMakerReceivedFee, lendTokenDecimal) @@ -132,7 +133,7 @@ func GetSettleBalance(isXDCXLendingFork bool, log.Debug("makerFee too small", "quantityToLend", quantityToLend, "makerFee", makerFee, "exMakerReceivedFee", exMakerReceivedFee, "borrowFeeRate", borrowFeeRate, "defaultFeeInXDC", defaultFeeInXDC) return result, ErrQuantityTradeTooSmall } - } else if lendingToken.String() == common.XDCNativeAddress 
{ + } else if lendingToken == common.XDCNativeAddressBinary { exMakerReceivedFee := makerFee if (exMakerReceivedFee.Cmp(common.RelayerLendingFee) <= 0 && exMakerReceivedFee.Sign() > 0) || defaultFee.Cmp(common.RelayerLendingFee) <= 0 { log.Debug("makerFee too small", "quantityToLend", quantityToLend, "makerFee", makerFee, "exMakerReceivedFee", exMakerReceivedFee, "borrowFeeRate", borrowFeeRate, "defaultFee", defaultFee) @@ -171,7 +172,7 @@ func GetSettleBalance(isXDCXLendingFork bool, log.Debug("quantity lending too small", "quantityToLend", quantityToLend, "borrowFee", borrowFee) return result, ErrQuantityTradeTooSmall } - if lendingToken.String() != common.XDCNativeAddress && lendTokenXDCPrice != nil && lendTokenXDCPrice.Cmp(common.Big0) > 0 { + if lendingToken != common.XDCNativeAddressBinary && lendTokenXDCPrice != nil && lendTokenXDCPrice.Cmp(common.Big0) > 0 { // exReceivedFee: the fee amount which borrowingRelayer will receive exReceivedFee := new(big.Int).Mul(borrowFee, lendTokenXDCPrice) exReceivedFee = new(big.Int).Div(exReceivedFee, lendTokenDecimal) @@ -183,7 +184,7 @@ func GetSettleBalance(isXDCXLendingFork bool, log.Debug("takerFee too small", "quantityToLend", quantityToLend, "borrowFee", borrowFee, "exReceivedFee", exReceivedFee, "borrowFeeRate", borrowFeeRate, "defaultFeeInXDC", defaultFeeInXDC) return result, ErrQuantityTradeTooSmall } - } else if lendingToken.String() == common.XDCNativeAddress { + } else if lendingToken == common.XDCNativeAddressBinary { exReceivedFee := borrowFee if (exReceivedFee.Cmp(common.RelayerLendingFee) <= 0 && exReceivedFee.Sign() > 0) || defaultFee.Cmp(common.RelayerLendingFee) <= 0 { log.Debug("takerFee too small", "quantityToLend", quantityToLend, "borrowFee", borrowFee, "exReceivedFee", exReceivedFee, "borrowFeeRate", borrowFeeRate, "defaultFee", defaultFee) diff --git a/XDCxlending/lendingstate/settle_balance_test.go b/XDCxlending/lendingstate/settle_balance_test.go index e4b318b6cf13..711777503cc8 100644 --- 
a/XDCxlending/lendingstate/settle_balance_test.go +++ b/XDCxlending/lendingstate/settle_balance_test.go @@ -1,10 +1,11 @@ package lendingstate import ( - "github.com/XinFinOrg/XDPoSChain/common" "math/big" "reflect" "testing" + + "github.com/XinFinOrg/XDPoSChain/common" ) func TestCalculateInterestRate(t *testing.T) { @@ -171,7 +172,7 @@ func TestGetSettleBalance(t *testing.T) { common.BasePrice, big.NewInt(150), big.NewInt(100), // 1% - common.HexToAddress(common.XDCNativeAddress), + common.XDCNativeAddressBinary, common.Address{}, common.BasePrice, common.BasePrice, @@ -277,7 +278,7 @@ func TestGetSettleBalance(t *testing.T) { common.BasePrice, big.NewInt(150), big.NewInt(100), // 1% - common.HexToAddress(common.XDCNativeAddress), + common.XDCNativeAddressBinary, collateral, common.BasePrice, common.BasePrice, @@ -288,12 +289,12 @@ func TestGetSettleBalance(t *testing.T) { Fee: common.Big0, InToken: common.Address{}, InTotal: common.Big0, - OutToken: common.HexToAddress(common.XDCNativeAddress), + OutToken: common.XDCNativeAddressBinary, OutTotal: lendQuantity, }, Maker: TradeResult{ Fee: fee, - InToken: common.HexToAddress(common.XDCNativeAddress), + InToken: common.XDCNativeAddressBinary, InTotal: lendQuantityExcluded, OutToken: collateral, OutTotal: collateralLocked, @@ -312,7 +313,7 @@ func TestGetSettleBalance(t *testing.T) { common.BasePrice, big.NewInt(150), big.NewInt(100), // 1% - common.HexToAddress(common.XDCNativeAddress), + common.XDCNativeAddressBinary, collateral, common.BasePrice, common.BasePrice, @@ -323,12 +324,12 @@ func TestGetSettleBalance(t *testing.T) { Fee: common.Big0, InToken: common.Address{}, InTotal: common.Big0, - OutToken: common.HexToAddress(common.XDCNativeAddress), + OutToken: common.XDCNativeAddressBinary, OutTotal: lendQuantity, }, Taker: TradeResult{ Fee: fee, - InToken: common.HexToAddress(common.XDCNativeAddress), + InToken: common.XDCNativeAddressBinary, InTotal: lendQuantityExcluded, OutToken: collateral, OutTotal: 
collateralLocked, diff --git a/XDCxlending/order_processor.go b/XDCxlending/order_processor.go index 5725c2003b08..5af2d2771314 100644 --- a/XDCxlending/order_processor.go +++ b/XDCxlending/order_processor.go @@ -2,6 +2,7 @@ package XDCxlending import ( "encoding/json" + "errors" "fmt" "math/big" @@ -264,7 +265,7 @@ func (l *Lending) processOrderList(header *types.Header, coinbase common.Address borrowFee = lendingstate.GetFee(statedb, oldestOrder.Relayer) } if collateralToken.IsZero() { - return nil, nil, nil, fmt.Errorf("empty collateral") + return nil, nil, nil, errors.New("empty collateral") } depositRate, liquidationRate, recallRate := lendingstate.GetCollateralDetail(statedb, collateralToken) if depositRate == nil || depositRate.Sign() <= 0 { @@ -282,10 +283,10 @@ func (l *Lending) processOrderList(header *types.Header, coinbase common.Address return nil, nil, nil, err } if lendTokenXDCPrice == nil || lendTokenXDCPrice.Sign() <= 0 { - return nil, nil, nil, fmt.Errorf("invalid lendToken price") + return nil, nil, nil, errors.New("invalid lendToken price") } if collateralPrice == nil || collateralPrice.Sign() <= 0 { - return nil, nil, nil, fmt.Errorf("invalid collateral price") + return nil, nil, nil, errors.New("invalid collateral price") } tradedQuantity, collateralLockedAmount, rejectMaker, settleBalanceResult, err := l.getLendQuantity(lendTokenXDCPrice, collateralPrice, depositRate, borrowFee, coinbase, chain, header, statedb, order, &oldestOrder, maxTradedQuantity) if err != nil && err == lendingstate.ErrQuantityTradeTooSmall && tradedQuantity != nil && tradedQuantity.Sign() >= 0 { @@ -447,7 +448,7 @@ func (l *Lending) getLendQuantity( if err != nil || collateralTokenDecimal.Sign() == 0 { return lendingstate.Zero, lendingstate.Zero, false, nil, fmt.Errorf("fail to get tokenDecimal. Token: %v . 
Err: %v", collateralToken.String(), err) } - if takerOrder.Relayer.String() == makerOrder.Relayer.String() { + if takerOrder.Relayer == makerOrder.Relayer { if err := lendingstate.CheckRelayerFee(takerOrder.Relayer, new(big.Int).Mul(common.RelayerLendingFee, big.NewInt(2)), statedb); err != nil { log.Debug("Reject order Taker Exchnage = Maker Exchange , relayer not enough fee ", "err", err) return lendingstate.Zero, lendingstate.Zero, false, nil, nil @@ -621,11 +622,11 @@ func DoSettleBalance(coinbase common.Address, takerOrder, makerOrder *lendingsta } mapBalances[settleBalance.Taker.InToken][takerExOwner] = newTakerFee - newCollateralTokenLock, err := lendingstate.CheckAddTokenBalance(common.HexToAddress(common.LendingLockAddress), settleBalance.Taker.OutTotal, settleBalance.Taker.OutToken, statedb, mapBalances) + newCollateralTokenLock, err := lendingstate.CheckAddTokenBalance(common.LendingLockAddressBinary, settleBalance.Taker.OutTotal, settleBalance.Taker.OutToken, statedb, mapBalances) if err != nil { return err } - mapBalances[settleBalance.Taker.OutToken][common.HexToAddress(common.LendingLockAddress)] = newCollateralTokenLock + mapBalances[settleBalance.Taker.OutToken][common.LendingLockAddressBinary] = newCollateralTokenLock } else { relayerFee, err := lendingstate.CheckSubRelayerFee(makerOrder.Relayer, common.RelayerLendingFee, statedb, map[common.Address]*big.Int{}) if err != nil { @@ -662,11 +663,11 @@ func DoSettleBalance(coinbase common.Address, takerOrder, makerOrder *lendingsta } mapBalances[settleBalance.Maker.InToken][makerExOwner] = newMakerFee - newCollateralTokenLock, err := lendingstate.CheckAddTokenBalance(common.HexToAddress(common.LendingLockAddress), settleBalance.Maker.OutTotal, settleBalance.Maker.OutToken, statedb, mapBalances) + newCollateralTokenLock, err := lendingstate.CheckAddTokenBalance(common.LendingLockAddressBinary, settleBalance.Maker.OutTotal, settleBalance.Maker.OutToken, statedb, mapBalances) if err != nil { return err } 
- mapBalances[settleBalance.Maker.OutToken][common.HexToAddress(common.LendingLockAddress)] = newCollateralTokenLock + mapBalances[settleBalance.Maker.OutToken][common.LendingLockAddressBinary] = newCollateralTokenLock } masternodeOwner := statedb.GetOwner(coinbase) statedb.AddBalance(masternodeOwner, matchingFee) @@ -789,10 +790,10 @@ func (l *Lending) ProcessTopUp(lendingStateDB *lendingstate.LendingStateDB, stat if lendingTrade == lendingstate.EmptyLendingTrade { return fmt.Errorf("process deposit for emptyLendingTrade is not allowed. lendingTradeId: %v", lendingTradeId.Hex()), true, nil } - if order.UserAddress.String() != lendingTrade.Borrower.String() { + if order.UserAddress != lendingTrade.Borrower { return fmt.Errorf("ProcessTopUp: invalid userAddress . UserAddress: %s . Borrower: %s", order.UserAddress.Hex(), lendingTrade.Borrower.Hex()), true, nil } - if order.Relayer.String() != lendingTrade.BorrowingRelayer.String() { + if order.Relayer != lendingTrade.BorrowingRelayer { return fmt.Errorf("ProcessTopUp: invalid relayerAddress . Got: %s . Expect: %s", order.Relayer.Hex(), lendingTrade.BorrowingRelayer.Hex()), true, nil } if order.Quantity.Sign() <= 0 || lendingTrade.TradeId != lendingTradeId.Big().Uint64() { @@ -810,10 +811,10 @@ func (l *Lending) ProcessRepay(header *types.Header, chain consensus.ChainContex if lendingTrade == lendingstate.EmptyLendingTrade || lendingTrade.TradeId != lendingTradeIdHash.Big().Uint64() { return nil, fmt.Errorf("ProcessRepay for emptyLendingTrade is not allowed. lendingTradeId: %v", lendingTradeId) } - if order.UserAddress.String() != lendingTrade.Borrower.String() { + if order.UserAddress != lendingTrade.Borrower { return nil, fmt.Errorf("ProcessRepay: invalid userAddress . UserAddress: %s . 
Borrower: %s", order.UserAddress.Hex(), lendingTrade.Borrower.Hex()) } - if order.Relayer.String() != lendingTrade.BorrowingRelayer.String() { + if order.Relayer != lendingTrade.BorrowingRelayer { return nil, fmt.Errorf("ProcessRepay: invalid relayerAddress . Got: %s . Expect: %s", order.Relayer.Hex(), lendingTrade.BorrowingRelayer.Hex()) } return l.ProcessRepayLendingTrade(header, chain, lendingStateDB, statedb, tradingstateDB, lendingBook, lendingTradeId) @@ -854,9 +855,9 @@ func (l *Lending) LiquidationExpiredTrade(header *types.Header, chain consensus. } else { repayAmount = lendingTrade.CollateralLockedAmount } - err = lendingstate.SubTokenBalance(common.HexToAddress(common.LendingLockAddress), lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) + err = lendingstate.SubTokenBalance(common.LendingLockAddressBinary, lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) if err != nil { - log.Warn("LiquidationExpiredTrade SubTokenBalance", "err", err, "LendingLockAddress", common.HexToAddress(common.LendingLockAddress), "lendingTrade.CollateralLockedAmount", *lendingTrade.CollateralLockedAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) + log.Warn("LiquidationExpiredTrade SubTokenBalance", "err", err, "LendingLockAddress", common.LendingLockAddress, "lendingTrade.CollateralLockedAmount", *lendingTrade.CollateralLockedAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } err = lendingstate.AddTokenBalance(lendingTrade.Investor, repayAmount, lendingTrade.CollateralToken, statedb) if err != nil { @@ -897,9 +898,9 @@ func (l *Lending) LiquidationTrade(lendingStateDB *lendingstate.LendingStateDB, if lendingTrade.TradeId != lendingTradeId { return nil, fmt.Errorf("Lending Trade Id not found : %d ", lendingTradeId) } - err := lendingstate.SubTokenBalance(common.HexToAddress(common.LendingLockAddress), lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) + err := 
lendingstate.SubTokenBalance(common.LendingLockAddressBinary, lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) if err != nil { - log.Warn("LiquidationTrade SubTokenBalance", "err", err, "LendingLockAddress", common.HexToAddress(common.LendingLockAddress), "lendingTrade.CollateralLockedAmount", *lendingTrade.CollateralLockedAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) + log.Warn("LiquidationTrade SubTokenBalance", "err", err, "LendingLockAddress", common.LendingLockAddress, "lendingTrade.CollateralLockedAmount", *lendingTrade.CollateralLockedAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } err = lendingstate.AddTokenBalance(lendingTrade.Investor, lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) if err != nil { @@ -1052,10 +1053,10 @@ func (l *Lending) GetCollateralPrices(header *types.Header, chain consensus.Chai func (l *Lending) GetXDCBasePrices(header *types.Header, chain consensus.ChainContext, statedb *state.StateDB, tradingStateDb *tradingstate.TradingStateDB, token common.Address) (*big.Int, error) { - tokenXDCPriceFromContract, updatedBlock := lendingstate.GetCollateralPrice(statedb, token, common.HexToAddress(common.XDCNativeAddress)) + tokenXDCPriceFromContract, updatedBlock := lendingstate.GetCollateralPrice(statedb, token, common.XDCNativeAddressBinary) tokenXDCPriceUpdatedFromContract := updatedBlock.Uint64()/chain.Config().XDPoS.Epoch == header.Number.Uint64()/chain.Config().XDPoS.Epoch - if token == common.HexToAddress(common.XDCNativeAddress) { + if token == common.XDCNativeAddressBinary { return common.BasePrice, nil } else if tokenXDCPriceUpdatedFromContract { // getting lendToken price from contract first @@ -1063,7 +1064,7 @@ func (l *Lending) GetXDCBasePrices(header *types.Header, chain consensus.ChainCo log.Debug("Getting token/XDC price from contract", "price", tokenXDCPriceFromContract) return tokenXDCPriceFromContract, nil } else { - 
XDCTokenPriceFromContract, updatedBlock := lendingstate.GetCollateralPrice(statedb, common.HexToAddress(common.XDCNativeAddress), token) + XDCTokenPriceFromContract, updatedBlock := lendingstate.GetCollateralPrice(statedb, common.XDCNativeAddressBinary, token) XDCTokenPriceUpdatedFromContract := updatedBlock.Uint64()/chain.Config().XDPoS.Epoch == header.Number.Uint64()/chain.Config().XDPoS.Epoch if XDCTokenPriceUpdatedFromContract && XDCTokenPriceFromContract != nil && XDCTokenPriceFromContract.Sign() > 0 { // getting lendToken price from contract first @@ -1078,7 +1079,7 @@ func (l *Lending) GetXDCBasePrices(header *types.Header, chain consensus.ChainCo tokenXDCPrice = new(big.Int).Div(tokenXDCPrice, XDCTokenPriceFromContract) return tokenXDCPrice, nil } - tokenXDCPrice, err := l.GetMediumTradePriceBeforeEpoch(chain, statedb, tradingStateDb, token, common.HexToAddress(common.XDCNativeAddress)) + tokenXDCPrice, err := l.GetMediumTradePriceBeforeEpoch(chain, statedb, tradingStateDb, token, common.XDCNativeAddressBinary) if err != nil { return nil, err } @@ -1133,9 +1134,9 @@ func (l *Lending) ProcessTopUpLendingTrade(lendingStateDB *lendingstate.LendingS if err != nil { log.Warn("ProcessTopUpLendingTrade SubTokenBalance", "err", err, "lendingTrade.Borrower", lendingTrade.Borrower, "quantity", *quantity, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } - err = lendingstate.AddTokenBalance(common.HexToAddress(common.LendingLockAddress), quantity, lendingTrade.CollateralToken, statedb) + err = lendingstate.AddTokenBalance(common.LendingLockAddressBinary, quantity, lendingTrade.CollateralToken, statedb) if err != nil { - log.Warn("ProcessTopUpLendingTrade AddTokenBalance", "err", err, "LendingLockAddress", common.HexToAddress(common.LendingLockAddress), "quantity", *quantity, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) + log.Warn("ProcessTopUpLendingTrade AddTokenBalance", "err", err, "LendingLockAddress", common.LendingLockAddress, 
"quantity", *quantity, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } oldLockedAmount := lendingTrade.CollateralLockedAmount newLockedAmount := new(big.Int).Add(quantity, oldLockedAmount) @@ -1199,9 +1200,9 @@ func (l *Lending) ProcessRepayLendingTrade(header *types.Header, chain consensus if err != nil { log.Warn("ProcessRepayLendingTrade AddTokenBalance", "err", err, "lendingTrade.Investor", lendingTrade.Investor, "paymentBalance", *paymentBalance, "lendingTrade.LendingToken", lendingTrade.LendingToken) } - err = lendingstate.SubTokenBalance(common.HexToAddress(common.LendingLockAddress), lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) + err = lendingstate.SubTokenBalance(common.LendingLockAddressBinary, lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) if err != nil { - log.Warn("ProcessRepayLendingTrade SubTokenBalance", "err", err, "LendingLockAddress", common.HexToAddress(common.LendingLockAddress), "lendingTrade.CollateralLockedAmount", *lendingTrade.CollateralLockedAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) + log.Warn("ProcessRepayLendingTrade SubTokenBalance", "err", err, "LendingLockAddress", common.LendingLockAddress, "lendingTrade.CollateralLockedAmount", *lendingTrade.CollateralLockedAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } err = lendingstate.AddTokenBalance(lendingTrade.Borrower, lendingTrade.CollateralLockedAmount, lendingTrade.CollateralToken, statedb) if err != nil { @@ -1255,9 +1256,9 @@ func (l *Lending) ProcessRecallLendingTrade(lendingStateDB *lendingstate.Lending if err != nil { log.Warn("ProcessRecallLendingTrade AddTokenBalance", "err", err, "lendingTrade.Borrower", lendingTrade.Borrower, "recallAmount", *recallAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } - err = lendingstate.SubTokenBalance(common.HexToAddress(common.LendingLockAddress), recallAmount, lendingTrade.CollateralToken, 
statedb) + err = lendingstate.SubTokenBalance(common.LendingLockAddressBinary, recallAmount, lendingTrade.CollateralToken, statedb) if err != nil { - log.Warn("ProcessRecallLendingTrade SubTokenBalance", "err", err, "LendingLockAddress", common.HexToAddress(common.LendingLockAddress), "recallAmount", *recallAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) + log.Warn("ProcessRecallLendingTrade SubTokenBalance", "err", err, "LendingLockAddress", common.LendingLockAddress, "recallAmount", *recallAmount, "lendingTrade.CollateralToken", lendingTrade.CollateralToken) } lendingStateDB.UpdateLiquidationPrice(lendingBook, lendingTrade.TradeId, newLiquidationPrice) diff --git a/XDCxlending/order_processor_test.go b/XDCxlending/order_processor_test.go index 028ed09aed83..172caa90a290 100644 --- a/XDCxlending/order_processor_test.go +++ b/XDCxlending/order_processor_test.go @@ -1,14 +1,15 @@ package XDCxlending import ( + "math/big" + "reflect" + "testing" + "github.com/XinFinOrg/XDPoSChain/XDCx" "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/rawdb" - "math/big" - "reflect" - "testing" ) func Test_getCancelFeeV1(t *testing.T) { @@ -107,9 +108,9 @@ func Test_getCancelFee(t *testing.T) { XDCx.SetTokenDecimal(testTokenB, new(big.Int).Exp(big.NewInt(10), big.NewInt(8), nil)) // set tokenAPrice = 1 XDC - tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(testTokenA, common.HexToAddress(common.XDCNativeAddress)), common.BasePrice) + tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(testTokenA, common.XDCNativeAddressBinary), common.BasePrice) // set tokenBPrice = 1 XDC - tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(testTokenB, common.HexToAddress(common.XDCNativeAddress)), common.BasePrice) + 
tradingStateDb.SetMediumPriceBeforeEpoch(tradingstate.GetTradingOrderBookHash(testTokenB, common.XDCNativeAddressBinary), common.BasePrice) l := New(XDCx) @@ -132,7 +133,7 @@ func Test_getCancelFee(t *testing.T) { borrowFeeRate: common.Big0, order: &lendingstate.LendingItem{ LendingToken: testTokenA, - CollateralToken: common.HexToAddress(common.XDCNativeAddress), + CollateralToken: common.XDCNativeAddressBinary, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Investing, }, @@ -147,7 +148,7 @@ func Test_getCancelFee(t *testing.T) { borrowFeeRate: common.Big0, order: &lendingstate.LendingItem{ LendingToken: testTokenA, - CollateralToken: common.HexToAddress(common.XDCNativeAddress), + CollateralToken: common.XDCNativeAddressBinary, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Borrowing, }, @@ -162,7 +163,7 @@ func Test_getCancelFee(t *testing.T) { borrowFeeRate: new(big.Int).SetUint64(30), // 30/10000= 0.3% order: &lendingstate.LendingItem{ LendingToken: testTokenA, - CollateralToken: common.HexToAddress(common.XDCNativeAddress), + CollateralToken: common.XDCNativeAddressBinary, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Investing, }, @@ -177,7 +178,7 @@ func Test_getCancelFee(t *testing.T) { borrowFeeRate: new(big.Int).SetUint64(30), // 30/10000= 0.3% order: &lendingstate.LendingItem{ LendingToken: testTokenA, - CollateralToken: common.HexToAddress(common.XDCNativeAddress), + CollateralToken: common.XDCNativeAddressBinary, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Borrowing, }, @@ -194,7 +195,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ borrowFeeRate: common.Big0, order: &lendingstate.LendingItem{ - LendingToken: common.HexToAddress(common.XDCNativeAddress), + LendingToken: common.XDCNativeAddressBinary, CollateralToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Investing, @@ -209,7 +210,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ borrowFeeRate: 
common.Big0, order: &lendingstate.LendingItem{ - LendingToken: common.HexToAddress(common.XDCNativeAddress), + LendingToken: common.XDCNativeAddressBinary, CollateralToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Borrowing, @@ -224,7 +225,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ borrowFeeRate: new(big.Int).SetUint64(30), // 30/10000= 0.3% order: &lendingstate.LendingItem{ - LendingToken: common.HexToAddress(common.XDCNativeAddress), + LendingToken: common.XDCNativeAddressBinary, CollateralToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Investing, @@ -239,7 +240,7 @@ func Test_getCancelFee(t *testing.T) { CancelFeeArg{ borrowFeeRate: new(big.Int).SetUint64(30), // 30/10000= 0.3% order: &lendingstate.LendingItem{ - LendingToken: common.HexToAddress(common.XDCNativeAddress), + LendingToken: common.XDCNativeAddressBinary, CollateralToken: testTokenA, Quantity: new(big.Int).SetUint64(10000), Side: lendingstate.Borrowing, diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index e3687968a5bd..0ca97525cd2b 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -79,19 +79,19 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) { // Unpack output in v according to the abi specification func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) { if len(output) == 0 { - return fmt.Errorf("abi: unmarshalling empty output") + return errors.New("abi: unmarshalling empty output") } // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event if method, ok := abi.Methods[name]; ok { if len(output)%32 != 0 { - return fmt.Errorf("abi: improperly formatted output") + return errors.New("abi: improperly formatted output") } return method.Outputs.Unpack(v, output) } else if event, ok := abi.Events[name]; ok { return event.Inputs.Unpack(v, output) } - return fmt.Errorf("abi: could not 
locate named method or event") + return errors.New("abi: could not locate named method or event") } // UnmarshalJSON implements json.Unmarshaler interface diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index 415c9015e0f6..1db4a496af49 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -20,16 +20,29 @@ import ( "crypto/ecdsa" "errors" "io" + "io/ioutil" + "math/big" + "github.com/XinFinOrg/XDPoSChain/accounts" "github.com/XinFinOrg/XDPoSChain/accounts/keystore" "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/crypto" + "github.com/XinFinOrg/XDPoSChain/log" ) +// ErrNoChainID is returned whenever the user failed to specify a chain id. +var ErrNoChainID = errors.New("no chain id specified") + +// ErrNotAuthorized is returned when an account is not properly unlocked. +var ErrNotAuthorized = errors.New("not authorized to sign this account") + // NewTransactor is a utility method to easily create a transaction signer from // an encrypted json key stream and the associated passphrase. +// +// Deprecated: Use NewTransactorWithChainID instead. func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { + log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID") json, err := io.ReadAll(keyin) if err != nil { return nil, err @@ -43,13 +56,17 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { // NewKeyedTransactor is a utility method to easily create a transaction signer // from a single private key. +// +// Deprecated: Use NewKeyedTransactorWithChainID instead. 
func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { + log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID") keyAddr := crypto.PubkeyToAddress(key.PublicKey) + signer := types.HomesteadSigner{} return &TransactOpts{ From: keyAddr, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != keyAddr { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) if err != nil { @@ -59,3 +76,62 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { }, } } + +// NewTransactorWithChainID is a utility method to easily create a transaction signer from +// an encrypted json key stream and the associated passphrase. +func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) { + json, err := ioutil.ReadAll(keyin) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(json, passphrase) + if err != nil { + return nil, err + } + return NewKeyedTransactorWithChainID(key.PrivateKey, chainID) +} + +// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from +// an decrypted key from a keystore. 
+func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) { + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.NewEIP155Signer(chainID) + return &TransactOpts{ + From: account.Address, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != account.Address { + return nil, ErrNotAuthorized + } + signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + }, nil +} + +// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer +// from a single private key. +func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { + keyAddr := crypto.PubkeyToAddress(key.PublicKey) + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.NewEIP155Signer(chainID) + return &TransactOpts{ + From: keyAddr, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != keyAddr { + return nil, ErrNotAuthorized + } + signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + }, nil +} diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 0df4bd7450c1..08a7260d264f 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -61,11 +61,13 @@ type SimulatedBackend struct { database ethdb.Database // In memory database to store our testing data blockchain *core.BlockChain // Ethereum blockchain to handle the consensus - mu sync.Mutex - pendingBlock *types.Block // Currently pending block that will be imported on request - pendingState *state.StateDB // Currently pending state that will be the active on on request + mu sync.Mutex + 
pendingBlock *types.Block // Currently pending block that will be imported on request + pendingState *state.StateDB // Currently pending state that will be the active on request + pendingReceipts types.Receipts // Currently receipts for the pending block - events *filters.EventSystem // Event system for filtering log events live + events *filters.EventSystem // for filtering log events live + filterSystem *filters.FilterSystem // for filtering database logs config *params.ChainConfig } @@ -94,9 +96,7 @@ func SimulateWalletAddressAndSignFn() (common.Address, func(account accounts.Acc // XDC simulated backend for testing purpose. func NewXDCSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64, chainConfig *params.ChainConfig) *SimulatedBackend { - // database := ethdb.NewMemDatabase() database := rawdb.NewMemoryDatabase() - genesis := core.Genesis{ GasLimit: gasLimit, // need this big, support initial smart contract Config: chainConfig, @@ -126,8 +126,12 @@ func NewXDCSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64, chainConfi database: database, blockchain: blockchain, config: genesis.Config, - events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false), } + + filterBackend := &filterBackend{database, blockchain, backend} + backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) + backend.events = filters.NewEventSystem(backend.filterSystem, false) + blockchain.Client = backend backend.rollback() return backend @@ -135,6 +139,7 @@ func NewXDCSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64, chainConfi // NewSimulatedBackend creates a new binding backend using a simulated blockchain // for testing purposes. +// A simulated backend always uses chainID 1337. 
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend { database := rawdb.NewMemoryDatabase() genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc, GasLimit: 42000000} @@ -145,8 +150,12 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend { database: database, blockchain: blockchain, config: genesis.Config, - events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false), } + + filterBackend := &filterBackend{database, blockchain, backend} + backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) + backend.events = filters.NewEventSystem(backend.filterSystem, false) + backend.rollback() return backend } @@ -399,7 +408,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa } // Include tx in chain. - blocks, _ := core.GenerateChain(b.config, block, b.blockchain.Engine(), b.database, 1, func(number int, block *core.BlockGen) { + blocks, receipts := core.GenerateChain(b.config, block, b.blockchain.Engine(), b.database, 1, func(number int, block *core.BlockGen) { for _, tx := range b.pendingBlock.Transactions() { block.AddTxWithChain(b.blockchain, tx) } @@ -409,6 +418,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa b.pendingBlock = blocks[0] b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database()) + b.pendingReceipts = receipts[0] return nil } @@ -420,7 +430,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query XDPoSChain.Filt var filter *filters.Filter if query.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics) + filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics) } else { // Initialize unset filter boundaried to run from genesis to chain head from := int64(0) @@ -432,7 
+442,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query XDPoSChain.Filt to = query.ToBlock.Int64() } // Construct the range filter - filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics) + filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics) } // Run the filter and return all the logs logs, err := filter.Logs(ctx) @@ -522,8 +532,9 @@ func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList } // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. type filterBackend struct { - db ethdb.Database - bc *core.BlockChain + db ethdb.Database + bc *core.BlockChain + backend *SimulatedBackend } func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } @@ -544,35 +555,51 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil } -func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { - receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)) - if receipts == nil { - return nil, nil - } - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - logs[i] = receipt.Logs +func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if body := fb.bc.GetBody(hash); body != nil { + return body, nil } + return nil, errors.New("block body not found") +} + +func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + return fb.backend.pendingBlock, fb.backend.pendingReceipts +} + +func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + logs := rawdb.ReadLogs(fb.db, hash, number) return logs, nil } func (fb *filterBackend) 
SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return event.NewSubscription(func(quit <-chan struct{}) error { - <-quit - return nil - }) + return nullSubscription() } + func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return fb.bc.SubscribeChainEvent(ch) } + func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { return fb.bc.SubscribeRemovedLogsEvent(ch) } + func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { return fb.bc.SubscribeLogsEvent(ch) } +func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + return nullSubscription() +} + func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 } + func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) { panic("not supported") } + +func nullSubscription() event.Subscription { + return event.NewSubscription(func(quit <-chan struct{}) error { + <-quit + return nil + }) +} diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 1b5c71701cef..4d5390d15bc2 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -36,7 +36,7 @@ var ( // SignerFn is a signer function callback when a contract requires a method to // sign the transaction before submission. -type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error) +type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) // CallOpts is the collection of options to fine tune a contract call request. 
type CallOpts struct { @@ -238,7 +238,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i if opts.Signer == nil { return nil, errors.New("no signer to authorize the transaction with") } - signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx) + signedTx, err := opts.Signer(opts.From, rawTx) if err != nil { return nil, err } @@ -335,7 +335,7 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) return errNoEventSignature } if log.Topics[0] != c.abi.Events[event].Id() { - return fmt.Errorf("event signature mismatch") + return errors.New("event signature mismatch") } if len(log.Data) > 0 { if err := c.abi.Unpack(out, event, log.Data); err != nil { diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 8bdd5b85c0db..ce6e36bffd34 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -228,7 +228,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy an interaction tester contract and call a transaction on it @@ -269,7 +269,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a tuple tester contract and execute a structured call on it @@ -301,7 +301,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := 
bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a tuple tester contract and execute a structured call on it @@ -343,7 +343,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a slice tester contract and execute a n array call on it @@ -377,7 +377,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a default method invoker contract and execute its default method @@ -446,7 +446,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a funky gas pattern contract @@ -481,7 +481,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: 
{Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a sender tester contract and execute a structured call on it @@ -541,7 +541,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy a underscorer tester contract and execute a structured call on it @@ -611,7 +611,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) // Deploy an eventer contract @@ -760,7 +760,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewXDCSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000, params.TestXDPoSMockChainConfig) //deploy the test contract diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go index 7943884f075a..dec604a801fd 100644 --- a/accounts/abi/bind/util.go +++ b/accounts/abi/bind/util.go @@ -18,7 +18,7 @@ package bind import ( "context" - "fmt" + "errors" "time" "github.com/XinFinOrg/XDPoSChain/common" @@ -56,14 +56,14 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty // contract address when it is mined. It stops waiting when ctx is canceled. 
func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (common.Address, error) { if tx.To() != nil { - return common.Address{}, fmt.Errorf("tx is not contract creation") + return common.Address{}, errors.New("tx is not contract creation") } receipt, err := WaitMined(ctx, b, tx) if err != nil { return common.Address{}, err } if receipt.ContractAddress == (common.Address{}) { - return common.Address{}, fmt.Errorf("zero address") + return common.Address{}, errors.New("zero address") } // Check that code has indeed been deployed at the address. // This matters on pre-Homestead chains: OOG in the constructor diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 2e6bf7098f2c..c1d411ac9560 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -17,6 +17,7 @@ package abi import ( + "errors" "fmt" "reflect" ) @@ -117,7 +118,7 @@ func requireUniqueStructFieldNames(args Arguments) error { for _, arg := range args { field := capitalise(arg.Name) if field == "" { - return fmt.Errorf("abi: purely underscored output cannot unpack to struct") + return errors.New("abi: purely underscored output cannot unpack to struct") } if exists[field] { return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field) diff --git a/accounts/abi/type.go b/accounts/abi/type.go index cec1ce8f5653..355ac3ac77a8 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -17,6 +17,7 @@ package abi import ( + "errors" "fmt" "reflect" "regexp" @@ -61,7 +62,7 @@ var ( func NewType(t string) (typ Type, err error) { // check that array brackets are equal if they exist if strings.Count(t, "[") != strings.Count(t, "]") { - return Type{}, fmt.Errorf("invalid arg type in abi") + return Type{}, errors.New("invalid arg type in abi") } typ.stringKind = t @@ -98,7 +99,7 @@ func NewType(t string) (typ Type, err error) { } typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type) } else { - return Type{}, fmt.Errorf("invalid formatting of 
array type") + return Type{}, errors.New("invalid formatting of array type") } return typ, err } diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index 9f0bccf75b13..ff2bacebbcf8 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -18,6 +18,7 @@ package abi import ( "encoding/binary" + "errors" "fmt" "math/big" "reflect" @@ -70,7 +71,7 @@ func readBool(word []byte) (bool, error) { // This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { if t.T != FunctionTy { - return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array") + return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array") } if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 { err = fmt.Errorf("abi: got improperly encoded function type, got %v", word) @@ -83,7 +84,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { // through reflection, creates a fixed array to be read from func readFixedBytes(t Type, word []byte) (interface{}, error) { if t.T != FixedBytesTy { - return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array") + return nil, errors.New("abi: invalid type in call to make fixed byte array") } // convert array := reflect.New(t.Type).Elem() @@ -123,7 +124,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) // declare our array refSlice = reflect.New(t.Type).Elem() } else { - return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage") + return nil, errors.New("abi: invalid type in array/slice unpacking stage") } // Arrays have packed elements, resulting in longer unpack steps. 
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index e2436f416095..aa1636efc8a8 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -17,6 +17,7 @@ package keystore import ( + "errors" "fmt" "math/rand" "os" @@ -305,7 +306,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { select { case <-ks.changes: default: - return fmt.Errorf("wasn't notified of new accounts") + return errors.New("wasn't notified of new accounts") } return nil } diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 45a3be3036d7..80ac1102d3f0 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -24,7 +24,6 @@ import ( "crypto/ecdsa" crand "crypto/rand" "errors" - "fmt" "math/big" "os" "path/filepath" @@ -455,7 +454,7 @@ func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (ac func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) { key := newKeyFromECDSA(priv) if ks.cache.hasAddress(key.Address) { - return accounts.Account{}, fmt.Errorf("account already exists") + return accounts.Account{}, errors.New("account already exists") } return ks.importKey(key, passphrase) } diff --git a/bmt/bmt_test.go b/bmt/bmt_test.go index 7a479637006f..ac762993c5e1 100644 --- a/bmt/bmt_test.go +++ b/bmt/bmt_test.go @@ -19,6 +19,7 @@ package bmt import ( "bytes" crand "crypto/rand" + "errors" "fmt" "hash" "io" @@ -288,7 +289,7 @@ func TestHasherConcurrency(t *testing.T) { var err error select { case <-time.NewTimer(5 * time.Second).C: - err = fmt.Errorf("timed out") + err = errors.New("timed out") case err = <-errc: } if err != nil { @@ -321,7 +322,7 @@ func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count }() select { case <-timeout.C: - err = fmt.Errorf("BMT hash calculation timed out") + err = errors.New("BMT hash calculation timed out") 
case err = <-c: } return err diff --git a/cicd/ansible/playbooks/update-image.yaml b/cicd/ansible/playbooks/update-image.yaml index 96baac1fdc47..a55ac6a7ece9 100644 --- a/cicd/ansible/playbooks/update-image.yaml +++ b/cicd/ansible/playbooks/update-image.yaml @@ -8,6 +8,7 @@ shell: | export RPC_IMAGE={{ rpc_image }} cd {{ deploy_path }} + git pull ./docker-down.sh ./docker-up-hash.sh docker ps diff --git a/cicd/devnet/start-local-devnet.sh b/cicd/devnet/start-local-devnet.sh index 4bbd5786c9bd..c0a9448a8657 100755 --- a/cicd/devnet/start-local-devnet.sh +++ b/cicd/devnet/start-local-devnet.sh @@ -55,7 +55,7 @@ echo "Running a node with wallet: ${wallet} at local" --datadir ./tmp/xdcchain --networkid 551 \ -port 30303 --rpc --rpccorsdomain "*" --rpcaddr 0.0.0.0 \ --rpcport 8545 \ ---rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,XDPoS \ +--rpcapi db,eth,debug,miner,net,shh,txpool,personal,web3,XDPoS \ --rpcvhosts "*" --unlock "${wallet}" --password ./tmp/.pwd --mine \ --gasprice "1" --targetgaslimit "420000000" --verbosity ${log_level} \ --ws --wsaddr=0.0.0.0 --wsport 8555 \ diff --git a/cicd/devnet/start.sh b/cicd/devnet/start.sh index 4c3a81ee5b53..707b8b32a5ad 100755 --- a/cicd/devnet/start.sh +++ b/cicd/devnet/start.sh @@ -77,7 +77,7 @@ XDC --ethstats ${netstats} --gcmode archive \ --datadir /work/xdcchain --networkid 551 \ -port $port --rpc --rpccorsdomain "*" --rpcaddr 0.0.0.0 \ --rpcport $rpc_port \ ---rpcapi admin,db,eth,debug,net,shh,txpool,personal,web3,XDPoS \ +--rpcapi db,eth,debug,net,shh,txpool,personal,web3,XDPoS \ --rpcvhosts "*" --unlock "${wallet}" --password /work/.pwd --mine \ --gasprice "1" --targetgaslimit "420000000" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ diff --git a/cicd/devnet/terraform/.env b/cicd/devnet/terraform/.env index 682e47178e0d..626762d2e6e3 100644 --- a/cicd/devnet/terraform/.env +++ b/cicd/devnet/terraform/.env @@ -1,13 +1,13 @@ log_level=2 # Ohio -us_east_2_start=0 +us_east_2_start=11 
us_east_2_end=36 # Ireland eu_west_1_start=37 -eu_west_1_end=72 +eu_west_1_end=62 # Sydney ap_southeast_2_start=73 -ap_southeast_2_end=108 \ No newline at end of file +ap_southeast_2_end=73 \ No newline at end of file diff --git a/cicd/mainnet/start.sh b/cicd/mainnet/start.sh index 35f11a5d3406..0cfbe26e272b 100755 --- a/cicd/mainnet/start.sh +++ b/cicd/mainnet/start.sh @@ -76,7 +76,7 @@ XDC --ethstats ${netstats} --gcmode archive \ --datadir /work/xdcchain --networkid 50 \ -port $port --rpc --rpccorsdomain "*" --rpcaddr 0.0.0.0 \ --rpcport $rpc_port \ ---rpcapi admin,db,eth,debug,net,shh,txpool,personal,web3,XDPoS \ +--rpcapi db,eth,debug,net,shh,txpool,personal,web3,XDPoS \ --rpcvhosts "*" --unlock "${wallet}" --password /work/.pwd --mine \ --gasprice "1" --targetgaslimit "420000000" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ diff --git a/cicd/terraform/.env b/cicd/terraform/.env index 8a64c1d22446..bffc1dd977b8 100644 --- a/cicd/terraform/.env +++ b/cicd/terraform/.env @@ -1,13 +1 @@ log_level=3 - -# Ohio -us_east_2_start=0 -us_east_2_end=36 - -# Ireland -eu_west_1_start=37 -eu_west_1_end=72 - -# Sydney -ap_southeast_2_start=73 -ap_southeast_2_end=108 diff --git a/cicd/terraform/main.tf b/cicd/terraform/main.tf index 5a44d2238552..e34f8c5ffecb 100644 --- a/cicd/terraform/main.tf +++ b/cicd/terraform/main.tf @@ -19,73 +19,16 @@ provider "aws" { region = "ap-southeast-1" } -module "devnet-rpc" { - source = "./module/region" - region = "ap-southeast-1" - nodeKeys = local.rpcDevnetNodeKeys - enableFixedIp = true - logLevel = local.logLevel - xdc_ecs_tasks_execution_role_arn = aws_iam_role.xdc_ecs_tasks_execution_role.arn - - cpu = 1024 - memory = 4096 - - network = "devnet" - vpc_cidr = "10.0.0.0/16" - subnet_cidr = "10.0.0.0/20" - providers = { - aws = aws.ap-southeast-1 - } -} - -module "testnet-rpc" { - source = "./module/region" - region = "ap-southeast-1" - nodeKeys = local.rpcTestnetNodeKeys - enableFixedIp = true - logLevel = local.logLevel - 
xdc_ecs_tasks_execution_role_arn = aws_iam_role.xdc_ecs_tasks_execution_role.arn - - cpu = 1024 - memory = 4096 - - network = "testnet" - vpc_cidr = "10.1.0.0/16" - subnet_cidr = "10.1.0.0/20" - providers = { - aws = aws.ap-southeast-1 - } -} - -module "mainnet-rpc" { - source = "./module/region" - region = "ap-southeast-1" - nodeKeys = local.rpcMainnetNodeKeys - enableFixedIp = true - logLevel = local.logLevel - xdc_ecs_tasks_execution_role_arn = aws_iam_role.xdc_ecs_tasks_execution_role.arn - - cpu = 1024 - memory = 4096 - - network = "mainnet" - vpc_cidr = "10.2.0.0/16" - subnet_cidr = "10.2.0.0/20" - providers = { - aws = aws.ap-southeast-1 - } -} - - module "devnet_rpc" { source = "./module/ec2_rpc" network = "devnet" vpc_id = local.vpc_id aws_subnet_id = local.aws_subnet_id ami_id = local.ami_id - instance_type = "t3.large" + instance_type = "t3.xlarge" ssh_key_name = local.ssh_key_name rpc_image = local.rpc_image + volume_size = 1500 providers = { aws = aws.ap-southeast-1 @@ -101,6 +44,7 @@ module "testnet_rpc" { instance_type = "t3.large" ssh_key_name = local.ssh_key_name rpc_image = local.rpc_image + volume_size = 1500 providers = { aws = aws.ap-southeast-1 @@ -116,6 +60,7 @@ module "mainnet_rpc" { instance_type = "t3.large" ssh_key_name = local.ssh_key_name rpc_image = local.rpc_image + volume_size = 3000 providers = { aws = aws.ap-southeast-1 diff --git a/cicd/terraform/module/ec2_rpc/main.tf b/cicd/terraform/module/ec2_rpc/main.tf index 75535dd0ac81..00594517acea 100644 --- a/cicd/terraform/module/ec2_rpc/main.tf +++ b/cicd/terraform/module/ec2_rpc/main.tf @@ -27,6 +27,9 @@ variable ssh_key_name { variable rpc_image { type = string } +variable volume_size{ + type = number +} resource "aws_security_group" "rpc_sg" { name_prefix = "${var.network}_rpc_sg" @@ -75,9 +78,9 @@ resource "aws_instance" "rpc_instance" { } key_name = var.ssh_key_name vpc_security_group_ids = [aws_security_group.rpc_sg.id] - ebs_block_device { - device_name = "/dev/xvda" - 
volume_size = 500 + + root_block_device { + volume_size = var.volume_size } diff --git a/cicd/testnet/bootnodes.list b/cicd/testnet/bootnodes.list index db7e2c81ffe7..9fca6cadab7c 100644 --- a/cicd/testnet/bootnodes.list +++ b/cicd/testnet/bootnodes.list @@ -1,9 +1,39 @@ enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@188.227.164.51:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@95.179.217.201:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@149.28.167.190:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@194.233.77.19:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@144.91.108.231:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@207.244.240.232:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@66.94.121.62:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@144.126.150.69:30301 -enode://1c20e6b46ce608c1fe739e78611225b94e663535b74a1545b1667eac8ff75ed43216306d123306c10e043f228e42cc53cb2728655019292380313393eaaf6e23@161.97.93.168:30301 +enode://278a1ac8aab1381b460788dcf4dcaffd58252f0f4c57d95e4c68b4b2bbf0b0fee83b5c140b3e6eb3af8b8369e76fa6996ccc667f65f9ce35c1d14fbffbfdfd52@95.179.217.201:30301 
+enode://518a2b963f0e41ef6520973766a8caad9b13c4261c50a73eedfeaa3ebf3cce623dadc132d8d42ec41a50195c925ca55abd4e2a96258f9d335e3f625e6c2df789@149.28.167.190:30301 +enode://63f5a65ffce84b7123562b8bc871ae63db6dadded14df1dae6ad09b4a30e722a72e522d99aaf0bbad19380a0dd3abe94768ed03a8f68204b5dee6ae16ddb4d83@194.233.77.19:30301 +enode://87497c34ce0102e888040d6ec530e73c33bb0330770105797cf5667f465115d49d53cb583a8b4e34ff14048448c63f03fbd1c269897d7b99716dcdb942b6d707@144.91.108.231:30301 +enode://8c372fc5859e69a49037768f2ece54f14d1d159ec3181d06d7b58f1a54dac4f2f13623f1377a7e506e48e5f33b1752a9726a44b33fdb158ce7d6d5a57c055f65@207.244.240.232:30301 +enode://bed408a65f2894b09ac11ec03eee6b4b1af279e27c81fbe942aa6300ebe5d734913c0c7b30cdbcf98dfebd7a46bf9c79cfc1a8878e20240b5da872dc358be571@66.94.121.62:30301 +enode://dd8973429effba6d6f8edd27efad8544553bb39c82c6350c09e82cd3cfdcdecaf6d0ce9820ed97482aedd0b9236e88d362ca031a65dcbdac52aa292e693723e3@144.126.150.69:30301 +enode://fb5616c009265a162c08fb3984192ffc4df18946dc4d591ba5480011169e6f3f324685b5e102e8123d32e27cd968220d6400b9f1f0a6d1611c130079cd1e71bb@161.97.93.168:30301 +enode://75e95709fd89a6314b0d5363226e3e46c56d98c6ddd3ec3cbdffcb65fdfedc8cbad870e2af1382b97e5986c1465cdb00e621cc01e1910f622ba20ed2359a02a0@213.136.89.186:30304 +enode://34ba74c6a0ef379040243e19c5f673b29155ec5928a5300f1cdfd2215983b7720d52585acb16f7199cc5abdd9ff3657305e901e097f951e35740b0a80fce3e18@185.217.126.17:30304 +enode://d7c070939155be296a8b254d43a0927e6c6777f1352239499fab867af455b9c671bddd5f9ae359ddcd6af9a368746e2f993416bafff2d03a4d974d888daaf020@38.242.244.116:30304 +enode://0ad61fd32bcc99b32f6ead959f2f3472ecd11cd5b555767983b1827240cbac25fd978b94cf79ca8423088e5dd519479a6f7f26fe43a98e0da0b4b1caa995923d@207.180.210.192:30304 +enode://0985cfee342bf68bc21fea7ce728018d3a5f27a43a2c79b78e9103d07ac4893960fc399603d372744dfe2020a970a1daba979210e07c1d27bf6cfc317036ae13@173.249.33.28:30304 
+enode://4c750ba2c069e00a8bbe37e45053e04a975a4cc635f1c53506da555bc9cc137da2680b76f48a232b88f762153af83aced601ad45475576100f175c0085750822@209.145.57.76:30304 +enode://d31b551d02ead096d5cbb3adda68fbcfbc76e4f939a6a6fe41e1a4e1be19c6f4b1c45a7545977764267d8bc6b7d1835c0e2060045a223af8ebb81d5cd30d01de@5.189.132.151:30304 +enode://686d8c5b886bf29633b73e5b4f7eb73cb1afa8c11cd5d3cc79027b742eeaff62fe44a42417d18c2d96ff8de2bf2c0c73ce51a07f3d5635b232e2eece090234b6@164.68.125.57:30304 +enode://7325b2eca70dfb9bde340ccfb6a5076f146f2af2401fae7996044207c95c797c2be4c0d90d76183abfdf33395553d6eb05fff6259c8fbe1df884df85d59f40b0@31.220.84.216:30304 +enode://f8e45296452c4e3988f9398bee1e1be029993a5332bd293629397dd71cca281fd7aaa3ade4d92c63a984218a6dd7ffddcd70135cfd5b3a66e0dd124dc9c35a37@31.220.84.220:30304 +enode://d8d8dff11b36dd683daccbb0306be706d97838dc0239aca077ab5475cbd488f9ebafa9a4a242d87f6def31028969756060ea412621d83a4715bd9a47e0787d3d@31.220.84.222:30304 +enode://0d3a38063c594523dbd619033ecc6b87545b204e91aa883b789389483baca964fcc4221832ea470308e1faa56b90705be234a113d63a3f0f7347d1237b58fec2@31.220.84.224:30304 +enode://1f479698e303b5ee9b8d35cdc6c660486b39a3f3f4581a1ecae5464142bef3103abe8665f1e25c864decaf80ee1261c2dc6f4f0caf2a2a66708fe934a63a564c@158.220.87.132:30304 +enode://6dd64c63402ebe46c1043c97ce66b7de88fb220b45f7435964bbfe6af2f3d0bf4d6b4702c274231afdab7d246054f85414befbe6ed0086aa5e9d98272e931278@158.220.87.144:30304 +enode://587d3c6962fedf45f07c758b5d7e07fbd3570e599a69f82d3f4676012327a92644785599ceaac9a35ed4f35478bd3dfe0dd4788b4529fa05cef15ba2e611f045@84.46.248.126:30304 +enode://e9684ecbf96348727f21d1d2253893aa2c5815dd8d9e0e2f8e842dfd92347d1daab1c2cea3a379a66fd6bf5321945eb65cc794a10345a4e1ec0a0121b77481d3@185.209.230.34:30304 +enode://52744d57d65b91fa2ccc43cb5758fca34f97509b068fc1fe1daee4d905cfbae7172a1647c28583b97fc51aaf0bf23b4e47340e2e658fcd5683daaa91c8d41c5d@167.86.125.253:30304 
+enode://09aa42d01437643d3f2a02e12e0d6e41a6951281ef80d2a332dd025d5da0d4990b9932695f91ffb0a0998cec2531dc9c0e8e0fd4a3bb0b69409ad76c02da9f4e@167.86.125.15:30304 +enode://017ef59b3f3734aad9277170cd08c2a3231c75a63465085584ca00ffb32daccd2dfe657b2a3539af1d45e6b24251a18f2bc1b0e31b61f57c2039af95ebda1e2c@95.111.237.15:30304 +enode://e3eb10d6616dc9dbe6006bafbe02fceaef60bcce66666ef357b458e91df64b33572014ea2a162873ec6a68af80a405cc0a60e8125f8ec155be7e34caa4e8aeb6@173.212.253.234:30304 +enode://cbba3cf7f151bf79319107d0f24ca0fed9d6bc32bd922b173e5b87174fedd1d25378cf827236f05c00ce403a68d2ba75dda28e16ca43cbe87b0231a9ad16426e@178.18.249.111:30304 +enode://8b9a5f0433ed2dcb7fe0e4424ae6f83158cc73a562c8c5c1733a01eb2c92c1b77ffb46dc1f4b54d8b2392d9fd985661b968d9d062ffc73ece02b58f41589527a@173.249.54.137:30304 +enode://38d42a90e8c00beff03dc2f759a00e2f910b5bfd7b9eaafab3f24682c5e0bd2ae7094823a1af006295922848e3e417188f90766840220f377a780c8c5afa4c47@38.242.129.33:30304 +enode://e9efc55e68dbc38842c5ed43c597d7550d31d2c9d7e074e14a43a9304fa2eb8a2ad0955eef1961e6c5b850ce2061479623ba2ad8ae22db944f17fd4a19a8b902@167.86.106.195:30304 +enode://0c5dc604acbf5f04bd69e015e89c1c015f2641f5dbe8f56c0b2dc1d03e7b7d79e049ed3cca6b0f23cfecfd32c394a53e3d3810b0c4784b92646e658e1d879bf6@84.21.171.77:30304 +enode://a1dd03c142c2db1a786e51250e929f2bf5edf7575f23ecfedec6da4a51e89e7ed36479fb4f40677da496364894b97ab189475fbb6c8b7ca2ecfe4b3ff3d24ca9@85.208.51.215:30304 +enode://94389b49ca856a91b9962ead5a562a64383b7f8fdb819b6fa22d29db984a78b89245e894b8c29facc765348399e1dc4d4a16a787f94f21df8813bc7e703db4fc@167.86.118.99:30304 +enode://b786080fabb1b07046359c1820db88c589c6c4a964ec8b1f1a287c12c4178dec2fbbf406187e16dc29fd69cc5e4960741471bebbfe2ae111036f66b61f16cf41@38.242.129.40:30304 +enode://326d2d562754cdd72e1a4b7d42de44713d97f00c4b6e7250e6896b99d331a395012fe0cc6a2d133bdd8784e02649eb490197b65244f72003fff0c829d8695cb4@167.86.83.167:30304 
+enode://c85d71dfb2dfc5abd832ed60bc0197ecb788d0f6bd44655d870dec6002a23345790344d65a2cfc0d2c0e023a7a6f630f99b6575cf5ca57203870f1971d1fc370@167.86.83.166:30304 \ No newline at end of file diff --git a/cicd/testnet/start.sh b/cicd/testnet/start.sh index d5f9a0f443fc..abfd91cadfba 100755 --- a/cicd/testnet/start.sh +++ b/cicd/testnet/start.sh @@ -78,7 +78,7 @@ XDC --ethstats ${netstats} --gcmode archive \ --datadir /work/xdcchain --networkid 51 \ -port $port --rpc --rpccorsdomain "*" --rpcaddr 0.0.0.0 \ --rpcport $rpc_port \ ---rpcapi admin,db,eth,debug,net,shh,txpool,personal,web3,XDPoS \ +--rpcapi db,eth,debug,net,shh,txpool,personal,web3,XDPoS \ --rpcvhosts "*" --unlock "${wallet}" --password /work/.pwd --mine \ --gasprice "1" --targetgaslimit "420000000" --verbosity ${log_level} \ --debugdatadir /work/xdcchain \ diff --git a/cmd/XDC/config.go b/cmd/XDC/config.go index 76e03981efa5..a47f19c7bf20 100644 --- a/cmd/XDC/config.go +++ b/cmd/XDC/config.go @@ -32,7 +32,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/XDCx" "github.com/XinFinOrg/XDPoSChain/cmd/utils" "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/eth" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/internal/debug" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/node" @@ -90,7 +90,7 @@ type Bootnodes struct { } type XDCConfig struct { - Eth eth.Config + Eth ethconfig.Config Shh whisper.Config Node node.Config Ethstats ethstatsConfig @@ -129,7 +129,7 @@ func defaultNodeConfig() node.Config { func makeConfigNode(ctx *cli.Context) (*node.Node, XDCConfig) { // Load defaults. 
cfg := XDCConfig{ - Eth: eth.DefaultConfig, + Eth: ethconfig.Defaults, Shh: whisper.DefaultConfig, XDCX: XDCx.DefaultConfig, Node: defaultNodeConfig(), diff --git a/cmd/XDC/main.go b/cmd/XDC/main.go index 419978ba6e09..525c7bea9343 100644 --- a/cmd/XDC/main.go +++ b/cmd/XDC/main.go @@ -27,7 +27,6 @@ import ( "github.com/XinFinOrg/XDPoSChain/accounts" "github.com/XinFinOrg/XDPoSChain/accounts/keystore" "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" "github.com/XinFinOrg/XDPoSChain/console" "github.com/XinFinOrg/XDPoSChain/core" @@ -37,6 +36,10 @@ import ( "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/metrics" "github.com/XinFinOrg/XDPoSChain/node" + + // Force-load the native, to trigger registration + _ "github.com/XinFinOrg/XDPoSChain/eth/tracers/native" + "gopkg.in/urfave/cli.v1" ) @@ -89,10 +92,12 @@ var ( //utils.LightServFlag, //utils.LightPeersFlag, //utils.LightKDFFlag, - //utils.CacheFlag, - //utils.CacheDatabaseFlag, + utils.CacheFlag, + utils.CacheDatabaseFlag, //utils.CacheGCFlag, //utils.TrieCacheGenFlag, + utils.CacheLogSizeFlag, + utils.FDLimitFlag, utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, @@ -127,6 +132,8 @@ var ( //utils.NoCompactionFlag, //utils.GpoBlocksFlag, //utils.GpoPercentileFlag, + utils.GpoMaxGasPriceFlag, + utils.GpoIgnoreGasPriceFlag, //utils.ExtraDataFlag, configFileFlag, utils.AnnounceTxsFlag, @@ -148,6 +155,7 @@ var ( utils.WSAllowedOriginsFlag, utils.IPCDisabledFlag, utils.IPCPathFlag, + utils.RPCGlobalTxFeeCap, } whisperFlags = []cli.Flag{ @@ -310,16 +318,9 @@ func startNode(ctx *cli.Context, stack *node.Node, cfg XDCConfig) { ok := false slaveMode := ctx.GlobalIsSet(utils.XDCSlaveModeFlag.Name) var err error - if common.IsTestnet { - ok, err = ethereum.ValidateMasternodeTestnet() - if err != nil { - utils.Fatalf("Can't verify masternode permission: %v", err) - } - } else { - ok, err = 
ethereum.ValidateMasternode() - if err != nil { - utils.Fatalf("Can't verify masternode permission: %v", err) - } + ok, err = ethereum.ValidateMasternode() + if err != nil { + utils.Fatalf("Can't verify masternode permission: %v", err) } if ok { if slaveMode { @@ -351,16 +352,10 @@ func startNode(ctx *cli.Context, stack *node.Node, cfg XDCConfig) { log.Info("Update consensus parameters") chain := ethereum.BlockChain() engine.UpdateParams(chain.CurrentHeader()) - if common.IsTestnet { - ok, err = ethereum.ValidateMasternodeTestnet() - if err != nil { - utils.Fatalf("Can't verify masternode permission: %v", err) - } - } else { - ok, err = ethereum.ValidateMasternode() - if err != nil { - utils.Fatalf("Can't verify masternode permission: %v", err) - } + + ok, err = ethereum.ValidateMasternode() + if err != nil { + utils.Fatalf("Can't verify masternode permission: %v", err) } if !ok { if started { diff --git a/cmd/XDC/misccmd.go b/cmd/XDC/misccmd.go index a54ad673acd2..e86f7c27cccd 100644 --- a/cmd/XDC/misccmd.go +++ b/cmd/XDC/misccmd.go @@ -26,6 +26,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/cmd/utils" "github.com/XinFinOrg/XDPoSChain/consensus/ethash" "github.com/XinFinOrg/XDPoSChain/eth" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/params" "gopkg.in/urfave/cli.v1" ) @@ -114,7 +115,7 @@ func version(ctx *cli.Context) error { } fmt.Println("Architecture:", runtime.GOARCH) fmt.Println("Protocol Versions:", eth.ProtocolVersions) - fmt.Println("Network Id:", eth.DefaultConfig.NetworkId) + fmt.Println("Network Id:", ethconfig.Defaults.NetworkId) fmt.Println("Go Version:", runtime.Version()) fmt.Println("Operating System:", runtime.GOOS) fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) diff --git a/cmd/XDC/usage.go b/cmd/XDC/usage.go index e6b2fc961f8f..a49da82bb581 100644 --- a/cmd/XDC/usage.go +++ b/cmd/XDC/usage.go @@ -123,15 +123,16 @@ var AppHelpFlagGroups = []flagGroup{ // utils.TxPoolLifetimeFlag, // }, //}, - //{ - // Name: 
"PERFORMANCE TUNING", - // Flags: []cli.Flag{ - // utils.CacheFlag, - // utils.CacheDatabaseFlag, - // utils.CacheGCFlag, - // utils.TrieCacheGenFlag, - // }, - //}, + { + Name: "PERFORMANCE TUNING", + Flags: []cli.Flag{ + utils.CacheFlag, + utils.CacheDatabaseFlag, + // utils.CacheGCFlag, + // utils.TrieCacheGenFlag, + utils.FDLimitFlag, + }, + }, { Name: "ACCOUNT", Flags: []cli.Flag{ @@ -156,6 +157,7 @@ var AppHelpFlagGroups = []flagGroup{ utils.IPCPathFlag, utils.RPCCORSDomainFlag, utils.RPCVirtualHostsFlag, + utils.RPCGlobalTxFeeCap, utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag, @@ -194,6 +196,8 @@ var AppHelpFlagGroups = []flagGroup{ // Flags: []cli.Flag{ // utils.GpoBlocksFlag, // utils.GpoPercentileFlag, + // utils.GpoMaxGasPriceFlag, + // utils.GpoIgnoreGasPriceFlag, // }, //}, //{ diff --git a/cmd/evm/json_logger.go b/cmd/evm/json_logger.go deleted file mode 100644 index a5b8c0fea21d..000000000000 --- a/cmd/evm/json_logger.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "encoding/json" - "io" - "math/big" - "time" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/common/math" - "github.com/XinFinOrg/XDPoSChain/core/vm" -) - -type JSONLogger struct { - encoder *json.Encoder - cfg *vm.LogConfig -} - -func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger { - l := &JSONLogger{json.NewEncoder(writer), cfg} - if l.cfg == nil { - l.cfg = &vm.LogConfig{} - } - return l -} - -func (l *JSONLogger) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { -} - -func (l *JSONLogger) CaptureFault(*vm.EVM, uint64, vm.OpCode, uint64, uint64, *vm.ScopeContext, int, error) { -} - -// CaptureState outputs state information on the logger. -func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - memory := scope.Memory - stack := scope.Stack - log := vm.StructLog{ - Pc: pc, - Op: op, - Gas: gas, - GasCost: cost, - MemorySize: memory.Len(), - Storage: nil, - Depth: depth, - Err: err, - } - if !l.cfg.DisableMemory { - log.Memory = memory.Data() - } - if !l.cfg.DisableStack { - //TODO(@holiman) improve this - logstack := make([]*big.Int, len(stack.Data())) - for i, item := range stack.Data() { - logstack[i] = item.ToBig() - } - log.Stack = logstack - } - l.encoder.Encode(log) -} - -// CaptureEnd is triggered at end of execution. 
-func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { - type endLog struct { - Output string `json:"output"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - Time time.Duration `json:"time"` - Err string `json:"error,omitempty"` - } - var errMsg string - if err != nil { - errMsg = err.Error() - } - l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, errMsg}) -} diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index d86144ff11c7..101069aff4f7 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -75,12 +75,12 @@ func runCmd(ctx *cli.Context) error { glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) log.Root().SetHandler(glogger) logconfig := &vm.LogConfig{ - DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), + EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name), } var ( - tracer vm.Tracer + tracer vm.EVMLogger debugLogger *vm.StructLogger statedb *state.StateDB chainConfig *params.ChainConfig @@ -88,7 +88,7 @@ func runCmd(ctx *cli.Context) error { receiver = common.StringToAddress("receiver") ) if ctx.GlobalBool(MachineFlag.Name) { - tracer = NewJSONLogger(logconfig, os.Stdout) + tracer = vm.NewJSONLogger(logconfig, os.Stdout) } else if ctx.GlobalBool(DebugFlag.Name) { debugLogger = vm.NewStructLogger(logconfig) tracer = debugLogger diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index edba08778920..1a236c0001ee 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -56,16 +56,16 @@ func stateTestCmd(ctx *cli.Context) error { // Configure the EVM logger config := &vm.LogConfig{ - DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), - DisableStack: ctx.GlobalBool(DisableStackFlag.Name), + EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), + DisableStack: ctx.GlobalBool(DisableStackFlag.Name), } var ( - tracer vm.Tracer + tracer vm.EVMLogger debugger *vm.StructLogger ) switch { case 
ctx.GlobalBool(MachineFlag.Name): - tracer = NewJSONLogger(config, os.Stderr) + tracer = vm.NewJSONLogger(config, os.Stderr) case ctx.GlobalBool(DebugFlag.Name): debugger = vm.NewStructLogger(config) diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 1a4705daf07e..f61909393d1b 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -46,8 +46,8 @@ import ( "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/eth" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/ethclient" "github.com/XinFinOrg/XDPoSChain/ethstats" "github.com/XinFinOrg/XDPoSChain/les" @@ -239,7 +239,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u } // Assemble the Ethereum light client protocol if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - cfg := eth.DefaultConfig + cfg := ethconfig.Defaults cfg.SyncMode = downloader.LightSync cfg.NetworkId = network cfg.Genesis = genesis diff --git a/cmd/gc/main.go b/cmd/gc/main.go index ce48f0aae813..d0532e3f0d1e 100644 --- a/cmd/gc/main.go +++ b/cmd/gc/main.go @@ -3,9 +3,6 @@ package main import ( "flag" "fmt" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" - "github.com/XinFinOrg/XDPoSChain/ethdb" - "github.com/XinFinOrg/XDPoSChain/ethdb/leveldb" "os" "os/signal" "runtime" @@ -16,11 +13,14 @@ import ( "github.com/XinFinOrg/XDPoSChain/cmd/utils" "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/state" - "github.com/XinFinOrg/XDPoSChain/eth" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" + "github.com/XinFinOrg/XDPoSChain/ethdb" + "github.com/XinFinOrg/XDPoSChain/ethdb/leveldb" "github.com/XinFinOrg/XDPoSChain/rlp" "github.com/XinFinOrg/XDPoSChain/trie" - 
"github.com/hashicorp/golang-lru" + "github.com/XinFinOrg/XDPoSChain/common/lru" ) var ( @@ -28,8 +28,8 @@ var ( cacheSize = flag.Int("size", 1000000, "LRU cache size") sercureKey = []byte("secure-key-") nWorker = runtime.NumCPU() / 2 - cleanAddress = []common.Address{common.HexToAddress(common.BlockSigners)} - cache *lru.Cache + cleanAddress = []common.Address{common.BlockSignersBinary} + cache *lru.Cache[common.Hash, struct{}] finish = int32(0) running = true stateRoots = make(chan TrieRoot) @@ -52,13 +52,13 @@ type ResultProcessNode struct { func main() { flag.Parse() - db, _ := leveldb.New(*dir, eth.DefaultConfig.DatabaseCache, utils.MakeDatabaseHandles(), "") + db, _ := leveldb.New(*dir, ethconfig.Defaults.DatabaseCache, utils.MakeDatabaseHandles(0), "") lddb := rawdb.NewDatabase(db) head := core.GetHeadBlockHash(lddb) currentHeader := core.GetHeader(lddb, head, core.GetBlockNumber(lddb, head)) tridb := trie.NewDatabase(lddb) catchEventInterupt(db) - cache, _ = lru.New(*cacheSize) + cache = lru.NewCache[common.Hash, struct{}](*cacheSize) go func() { for i := uint64(1); i <= currentHeader.Number.Uint64(); i++ { hash := core.GetCanonicalHash(lddb, i) @@ -222,7 +222,7 @@ func processNodes(node StateNode, db *leveldb.Database) ([17]*StateNode, [17]*[] } } } - cache.Add(commonHash, true) + cache.Add(commonHash, struct{}{}) } return newNodes, keys, number } diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go index aefac61cc69d..c1e077c59513 100644 --- a/cmd/puppeth/wizard_genesis.go +++ b/cmd/puppeth/wizard_genesis.go @@ -197,7 +197,7 @@ func (w *wizard) makeGenesis() { fmt.Println() fmt.Println("What is foundation wallet address? 
(default = xdc0000000000000000000000000000000000000068)") - genesis.Config.XDPoS.FoudationWalletAddr = w.readDefaultAddress(common.HexToAddress(common.FoudationAddr)) + genesis.Config.XDPoS.FoudationWalletAddr = w.readDefaultAddress(common.FoudationAddrBinary) // Validator Smart Contract Code pKey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -225,7 +225,7 @@ func (w *wizard) makeGenesis() { return true } contractBackend.ForEachStorageAt(ctx, validatorAddress, nil, f) - genesis.Alloc[common.HexToAddress(common.MasternodeVotingSMC)] = core.GenesisAccount{ + genesis.Alloc[common.MasternodeVotingSMCBinary] = core.GenesisAccount{ Balance: validatorCap.Mul(validatorCap, big.NewInt(int64(len(validatorCaps)))), Code: code, Storage: storage, @@ -259,7 +259,7 @@ func (w *wizard) makeGenesis() { fBalance := big.NewInt(0) // 16m fBalance.Add(fBalance, big.NewInt(16*1000*1000)) fBalance.Mul(fBalance, big.NewInt(1000000000000000000)) - genesis.Alloc[common.HexToAddress(common.FoudationAddr)] = core.GenesisAccount{ + genesis.Alloc[common.FoudationAddrBinary] = core.GenesisAccount{ Balance: fBalance, Code: code, Storage: storage, @@ -275,7 +275,7 @@ func (w *wizard) makeGenesis() { code, _ = contractBackend.CodeAt(ctx, blockSignerAddress, nil) storage = make(map[common.Hash]common.Hash) contractBackend.ForEachStorageAt(ctx, blockSignerAddress, nil, f) - genesis.Alloc[common.HexToAddress(common.BlockSigners)] = core.GenesisAccount{ + genesis.Alloc[common.BlockSignersBinary] = core.GenesisAccount{ Balance: big.NewInt(0), Code: code, Storage: storage, @@ -291,7 +291,7 @@ func (w *wizard) makeGenesis() { code, _ = contractBackend.CodeAt(ctx, randomizeAddress, nil) storage = make(map[common.Hash]common.Hash) contractBackend.ForEachStorageAt(ctx, randomizeAddress, nil, f) - genesis.Alloc[common.HexToAddress(common.RandomizeSMC)] = core.GenesisAccount{ + genesis.Alloc[common.RandomizeSMCBinary] = core.GenesisAccount{ Balance: 
big.NewInt(0), Code: code, Storage: storage, @@ -330,7 +330,7 @@ func (w *wizard) makeGenesis() { subBalance.Add(subBalance, big.NewInt(int64(len(signers))*50*1000)) subBalance.Mul(subBalance, big.NewInt(1000000000000000000)) balance.Sub(balance, subBalance) // 12m - i * 50k - genesis.Alloc[common.HexToAddress(common.TeamAddr)] = core.GenesisAccount{ + genesis.Alloc[common.TeamAddrBinary] = core.GenesisAccount{ Balance: balance, Code: code, Storage: storage, diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go deleted file mode 100644 index 598985347e3f..000000000000 --- a/cmd/swarm/config.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" - "strconv" - "strings" - "unicode" - - cli "gopkg.in/urfave/cli.v1" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/node" - "github.com/naoina/toml" - - bzzapi "github.com/XinFinOrg/XDPoSChain/swarm/api" -) - -var ( - //flag definition for the dumpconfig command - DumpConfigCommand = cli.Command{ - Action: utils.MigrateFlags(dumpConfig), - Name: "dumpconfig", - Usage: "Show configuration values", - ArgsUsage: "", - Flags: app.Flags, - Category: "MISCELLANEOUS COMMANDS", - Description: `The dumpconfig command shows configuration values.`, - } - - //flag definition for the config file command - SwarmTomlConfigPathFlag = cli.StringFlag{ - Name: "config", - Usage: "TOML configuration file", - } -) - -//constants for environment variables -const ( - SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR" - SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT" - SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR" - SWARM_ENV_PORT = "SWARM_PORT" - SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID" - SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE" - SWARM_ENV_SWAP_API = "SWARM_SWAP_API" - SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE" - SWARM_ENV_ENS_API = "SWARM_ENS_API" - SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" - SWARM_ENV_CORS = "SWARM_CORS" - SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES" - XDC_ENV_DATADIR = "XDC_DATADIR" -) - -// These settings ensure that TOML keys use the same names as Go struct fields. 
-var tomlSettings = toml.Config{ - NormFieldName: func(rt reflect.Type, key string) string { - return key - }, - FieldToKey: func(rt reflect.Type, field string) string { - return field - }, - MissingField: func(rt reflect.Type, field string) error { - link := "" - if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" { - link = fmt.Sprintf(", check github.com/XinFinOrg/XDPoSChain/swarm/api/config.go for available fields") - } - return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link) - }, -} - -//before booting the swarm node, build the configuration -func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) { - //check for deprecated flags - checkDeprecated(ctx) - //start by creating a default config - config = bzzapi.NewDefaultConfig() - //first load settings from config file (if provided) - config, err = configFileOverride(config, ctx) - if err != nil { - return nil, err - } - //override settings provided by environment variables - config = envVarsOverride(config) - //override settings provided by command line - config = cmdLineOverride(config, ctx) - //validate configuration parameters - err = validateConfig(config) - - return -} - -//finally, after the configuration build phase is finished, initialize -func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) { - //at this point, all vars should be set in the Config - //get the account for the provided swarm account - prvkey := getAccount(config.BzzAccount, ctx, stack) - //set the resolved config path (XDC --datadir) - config.Path = stack.InstanceDir() - //finally, initialize the configuration - config.Init(prvkey) - //configuration phase completed here - log.Debug("Starting Swarm with the following parameters:") - //after having created the config, print it to screen - log.Debug(printConfig(config)) -} - -//override the current config with whatever is in the config file, if a config file has been provided -func configFileOverride(config 
*bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) { - var err error - - //only do something if the -config flag has been set - if ctx.GlobalIsSet(SwarmTomlConfigPathFlag.Name) { - var filepath string - if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" { - utils.Fatalf("Config file flag provided with invalid file path") - } - f, err := os.Open(filepath) - if err != nil { - return nil, err - } - defer f.Close() - - //decode the TOML file into a Config struct - //note that we are decoding into the existing defaultConfig; - //if an entry is not present in the file, the default entry is kept - err = tomlSettings.NewDecoder(f).Decode(&config) - // Add file name to errors that have a line number. - if _, ok := err.(*toml.LineError); ok { - err = errors.New(filepath + ", " + err.Error()) - } - } - return config, err -} - -//override the current config with whatever is provided through the command line -//most values are not allowed a zero value (empty string), if not otherwise noted -func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config { - - if keyid := ctx.GlobalString(SwarmAccountFlag.Name); keyid != "" { - currentConfig.BzzAccount = keyid - } - - if chbookaddr := ctx.GlobalString(ChequebookAddrFlag.Name); chbookaddr != "" { - currentConfig.Contract = common.HexToAddress(chbookaddr) - } - - if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" { - if id, _ := strconv.Atoi(networkid); id != 0 { - currentConfig.NetworkId = uint64(id) - } - } - - if ctx.GlobalIsSet(utils.DataDirFlag.Name) { - if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" { - currentConfig.Path = datadir - } - } - - bzzport := ctx.GlobalString(SwarmPortFlag.Name) - if len(bzzport) > 0 { - currentConfig.Port = bzzport - } - - if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" { - currentConfig.ListenAddr = bzzaddr - } - - if ctx.GlobalIsSet(SwarmSwapEnabledFlag.Name) { - 
currentConfig.SwapEnabled = true - } - - if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) { - currentConfig.SyncEnabled = true - } - - currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name) - if currentConfig.SwapEnabled && currentConfig.SwapApi == "" { - utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API) - } - - if ctx.GlobalIsSet(EnsAPIFlag.Name) { - ensAPIs := ctx.GlobalStringSlice(EnsAPIFlag.Name) - // preserve backward compatibility to disable ENS with --ens-api="" - if len(ensAPIs) == 1 && ensAPIs[0] == "" { - ensAPIs = nil - } - currentConfig.EnsAPIs = ensAPIs - } - - if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" { - currentConfig.EnsRoot = common.HexToAddress(ensaddr) - } - - if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" { - currentConfig.Cors = cors - } - - if ctx.GlobalIsSet(utils.BootnodesFlag.Name) { - currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name) - } - - return currentConfig - -} - -//override the current config with whatver is provided in environment variables -//most values are not allowed a zero value (empty string), if not otherwise noted -func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { - - if keyid := os.Getenv(SWARM_ENV_ACCOUNT); keyid != "" { - currentConfig.BzzAccount = keyid - } - - if chbookaddr := os.Getenv(SWARM_ENV_CHEQUEBOOK_ADDR); chbookaddr != "" { - currentConfig.Contract = common.HexToAddress(chbookaddr) - } - - if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" { - if id, _ := strconv.Atoi(networkid); id != 0 { - currentConfig.NetworkId = uint64(id) - } - } - - if datadir := os.Getenv(XDC_ENV_DATADIR); datadir != "" { - currentConfig.Path = datadir - } - - bzzport := os.Getenv(SWARM_ENV_PORT) - if len(bzzport) > 0 { - currentConfig.Port = bzzport - } - - if bzzaddr := os.Getenv(SWARM_ENV_LISTEN_ADDR); bzzaddr != "" { - currentConfig.ListenAddr = bzzaddr - } - - if swapenable := os.Getenv(SWARM_ENV_SWAP_ENABLE); swapenable != "" { - 
if swap, err := strconv.ParseBool(swapenable); err != nil { - currentConfig.SwapEnabled = swap - } - } - - if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" { - if sync, err := strconv.ParseBool(syncenable); err != nil { - currentConfig.SyncEnabled = sync - } - } - - if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" { - currentConfig.SwapApi = swapapi - } - - if currentConfig.SwapEnabled && currentConfig.SwapApi == "" { - utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API) - } - - if ensapi := os.Getenv(SWARM_ENV_ENS_API); ensapi != "" { - currentConfig.EnsAPIs = strings.Split(ensapi, ",") - } - - if ensaddr := os.Getenv(SWARM_ENV_ENS_ADDR); ensaddr != "" { - currentConfig.EnsRoot = common.HexToAddress(ensaddr) - } - - if cors := os.Getenv(SWARM_ENV_CORS); cors != "" { - currentConfig.Cors = cors - } - - if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" { - currentConfig.BootNodes = bootnodes - } - - return currentConfig -} - -// dumpConfig is the dumpconfig command. 
-// writes a default config to STDOUT -func dumpConfig(ctx *cli.Context) error { - cfg, err := buildConfig(ctx) - if err != nil { - utils.Fatalf(fmt.Sprintf("Uh oh - dumpconfig triggered an error %v", err)) - } - comment := "" - out, err := tomlSettings.Marshal(&cfg) - if err != nil { - return err - } - io.WriteString(os.Stdout, comment) - os.Stdout.Write(out) - return nil -} - -//deprecated flags checked here -func checkDeprecated(ctx *cli.Context) { - // exit if the deprecated --ethapi flag is set - if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" { - utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.") - } - // warn if --ens-api flag is set - if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" { - log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.") - } -} - -//validate configuration parameters -func validateConfig(cfg *bzzapi.Config) (err error) { - for _, ensAPI := range cfg.EnsAPIs { - if ensAPI != "" { - if err := validateEnsAPIs(ensAPI); err != nil { - return fmt.Errorf("invalid format [tld:][contract-addr@]url for ENS API endpoint configuration %q: %v", ensAPI, err) - } - } - } - return nil -} - -//validate EnsAPIs configuration parameter -func validateEnsAPIs(s string) (err error) { - // missing contract address - if strings.HasPrefix(s, "@") { - return errors.New("missing contract address") - } - // missing url - if strings.HasSuffix(s, "@") { - return errors.New("missing url") - } - // missing tld - if strings.HasPrefix(s, ":") { - return errors.New("missing tld") - } - // missing url - if strings.HasSuffix(s, ":") { - return errors.New("missing url") - } - return nil -} - -//print a Config as string -func printConfig(config *bzzapi.Config) string { - out, err := tomlSettings.Marshal(&config) - if err != nil { - return fmt.Sprintf("Something is not right with the configuration: %v", err) - } - return string(out) -} diff --git 
a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go deleted file mode 100644 index 877859170377..000000000000 --- a/cmd/swarm/config_test.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "io" - "os" - "os/exec" - "testing" - "time" - - "github.com/XinFinOrg/XDPoSChain/rpc" - "github.com/XinFinOrg/XDPoSChain/swarm" - "github.com/XinFinOrg/XDPoSChain/swarm/api" - - "github.com/docker/docker/pkg/reexec" -) - -func TestDumpConfig(t *testing.T) { - swarm := runSwarm(t, "dumpconfig") - defaultConf := api.NewDefaultConfig() - out, err := tomlSettings.Marshal(&defaultConf) - if err != nil { - t.Fatal(err) - } - swarm.Expect(string(out)) - swarm.ExpectExit() -} - -func TestFailsSwapEnabledNoSwapApi(t *testing.T) { - flags := []string{ - fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", - fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", - fmt.Sprintf("--%s", SwarmSwapEnabledFlag.Name), - } - - swarm := runSwarm(t, flags...) - swarm.Expect("Fatal: " + SWARM_ERR_SWAP_SET_NO_API + "\n") - swarm.ExpectExit() -} - -func TestFailsNoBzzAccount(t *testing.T) { - flags := []string{ - fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", - fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", - } - - swarm := runSwarm(t, flags...) 
- swarm.Expect("Fatal: " + SWARM_ERR_NO_BZZACCOUNT + "\n") - swarm.ExpectExit() -} - -func TestCmdLineOverrides(t *testing.T) { - dir, err := os.MkdirTemp("", "bzztest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} - - // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - - flags := []string{ - fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", - fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, - fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name), - fmt.Sprintf("--%s", CorsStringFlag.Name), "*", - fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), - fmt.Sprintf("--%s", EnsAPIFlag.Name), "", - "--datadir", dir, - "--ipcpath", conf.IPCPath, - } - node.Cmd = runSwarm(t, flags...) - node.Cmd.InputLine(testPassphrase) - defer func() { - if t.Failed() { - node.Shutdown() - } - }() - // wait for the node to start - for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { - node.Client, err = rpc.Dial(conf.IPCEndpoint()) - if err == nil { - break - } - } - if node.Client == nil { - t.Fatal(err) - } - - // load info - var info swarm.Info - if err := node.Client.Call(&info, "bzz_info"); err != nil { - t.Fatal(err) - } - - if info.Port != httpPort { - t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) - } - - if info.NetworkId != 42 { - t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId) - } - - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") - } - - if info.Cors != "*" { - t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors) - } - - node.Shutdown() -} - -func TestFileOverrides(t *testing.T) { - - // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - - //create a config file - //first, create a default conf - defaultConf := api.NewDefaultConfig() - //change some values in order to 
test if they have been loaded - defaultConf.SyncEnabled = true - defaultConf.NetworkId = 54 - defaultConf.Port = httpPort - defaultConf.StoreParams.DbCapacity = 9000000 - defaultConf.ChunkerParams.Branches = 64 - defaultConf.HiveParams.CallInterval = 6000000000 - defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second - defaultConf.SyncParams.KeyBufferSize = 512 - //create a TOML string - out, err := tomlSettings.Marshal(&defaultConf) - if err != nil { - t.Fatalf("Error creating TOML file in TestFileOverride: %v", err) - } - //create file - f, err := os.CreateTemp("", "testconfig.toml") - if err != nil { - t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) - } - //write file - _, err = f.WriteString(string(out)) - if err != nil { - t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) - } - f.Sync() - - dir, err := os.MkdirTemp("", "bzztest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} - - flags := []string{ - fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(), - fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), - "--ens-api", "", - "--ipcpath", conf.IPCPath, - "--datadir", dir, - } - node.Cmd = runSwarm(t, flags...) 
- node.Cmd.InputLine(testPassphrase) - defer func() { - if t.Failed() { - node.Shutdown() - } - }() - // wait for the node to start - for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { - node.Client, err = rpc.Dial(conf.IPCEndpoint()) - if err == nil { - break - } - } - if node.Client == nil { - t.Fatal(err) - } - - // load info - var info swarm.Info - if err := node.Client.Call(&info, "bzz_info"); err != nil { - t.Fatal(err) - } - - if info.Port != httpPort { - t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) - } - - if info.NetworkId != 54 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) - } - - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") - } - - if info.StoreParams.DbCapacity != 9000000 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) - } - - if info.ChunkerParams.Branches != 64 { - t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches) - } - - if info.HiveParams.CallInterval != 6000000000 { - t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval)) - } - - if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { - t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval) - } - - if info.SyncParams.KeyBufferSize != 512 { - t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) - } - - node.Shutdown() -} - -func TestEnvVars(t *testing.T) { - // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - - envVars := os.Environ() - envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort)) - envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999")) - envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*")) - envVars = 
append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true")) - - dir, err := os.MkdirTemp("", "bzztest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} - flags := []string{ - fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), - "--ens-api", "", - "--datadir", dir, - "--ipcpath", conf.IPCPath, - } - - //node.Cmd = runSwarm(t,flags...) - //node.Cmd.cmd.Env = envVars - //the above assignment does not work, so we need a custom Cmd here in order to pass envVars: - cmd := &exec.Cmd{ - Path: reexec.Self(), - Args: append([]string{"swarm-test"}, flags...), - Stderr: os.Stderr, - Stdout: os.Stdout, - } - cmd.Env = envVars - //stdout, err := cmd.StdoutPipe() - //if err != nil { - // t.Fatal(err) - //} - //stdout = bufio.NewReader(stdout) - var stdin io.WriteCloser - if stdin, err = cmd.StdinPipe(); err != nil { - t.Fatal(err) - } - if err := cmd.Start(); err != nil { - t.Fatal(err) - } - - //cmd.InputLine(testPassphrase) - io.WriteString(stdin, testPassphrase+"\n") - defer func() { - if t.Failed() { - node.Shutdown() - cmd.Process.Kill() - } - }() - // wait for the node to start - for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { - node.Client, err = rpc.Dial(conf.IPCEndpoint()) - if err == nil { - break - } - } - - if node.Client == nil { - t.Fatal(err) - } - - // load info - var info swarm.Info - if err := node.Client.Call(&info, "bzz_info"); err != nil { - t.Fatal(err) - } - - if info.Port != httpPort { - t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) - } - - if info.NetworkId != 999 { - t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId) - } - - if info.Cors != "*" { - t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors) - } - - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") - } - - node.Shutdown() - cmd.Process.Kill() -} - 
-func TestCmdLineOverridesFile(t *testing.T) { - - // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - - //create a config file - //first, create a default conf - defaultConf := api.NewDefaultConfig() - //change some values in order to test if they have been loaded - defaultConf.SyncEnabled = false - defaultConf.NetworkId = 54 - defaultConf.Port = "8588" - defaultConf.StoreParams.DbCapacity = 9000000 - defaultConf.ChunkerParams.Branches = 64 - defaultConf.HiveParams.CallInterval = 6000000000 - defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second - defaultConf.SyncParams.KeyBufferSize = 512 - //create a TOML file - out, err := tomlSettings.Marshal(&defaultConf) - if err != nil { - t.Fatalf("Error creating TOML file in TestFileOverride: %v", err) - } - //write file - f, err := os.CreateTemp("", "testconfig.toml") - if err != nil { - t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) - } - //write file - _, err = f.WriteString(string(out)) - if err != nil { - t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) - } - f.Sync() - - dir, err := os.MkdirTemp("", "bzztest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} - - expectNetworkId := uint64(77) - - flags := []string{ - fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77", - fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, - fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name), - fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(), - fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), - "--ens-api", "", - "--datadir", dir, - "--ipcpath", conf.IPCPath, - } - node.Cmd = runSwarm(t, flags...) 
- node.Cmd.InputLine(testPassphrase) - defer func() { - if t.Failed() { - node.Shutdown() - } - }() - // wait for the node to start - for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { - node.Client, err = rpc.Dial(conf.IPCEndpoint()) - if err == nil { - break - } - } - if node.Client == nil { - t.Fatal(err) - } - - // load info - var info swarm.Info - if err := node.Client.Call(&info, "bzz_info"); err != nil { - t.Fatal(err) - } - - if info.Port != httpPort { - t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) - } - - if info.NetworkId != expectNetworkId { - t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId) - } - - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") - } - - if info.StoreParams.DbCapacity != 9000000 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) - } - - if info.ChunkerParams.Branches != 64 { - t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches) - } - - if info.HiveParams.CallInterval != 6000000000 { - t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval)) - } - - if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { - t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval) - } - - if info.SyncParams.KeyBufferSize != 512 { - t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) - } - - node.Shutdown() -} - -func TestValidateConfig(t *testing.T) { - for _, c := range []struct { - cfg *api.Config - err string - }{ - { - cfg: &api.Config{EnsAPIs: []string{ - "/data/testnet/geth.ipc", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "http://127.0.0.1:1234", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "ws://127.0.0.1:1234", - }}, - }, - { - cfg: &api.Config{EnsAPIs: 
[]string{ - "test:/data/testnet/geth.ipc", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "test:ws://127.0.0.1:1234", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "test:314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "eth:314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:12344", - }}, - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "eth:", - }}, - err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"eth:\": missing url", - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "314159265dD8dbb310642f98f50C066173C1259b@", - }}, - err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"314159265dD8dbb310642f98f50C066173C1259b@\": missing url", - }, - { - cfg: &api.Config{EnsAPIs: []string{ - ":314159265dD8dbb310642f98f50C066173C1259", - }}, - err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \":314159265dD8dbb310642f98f50C066173C1259\": missing tld", - }, - { - cfg: &api.Config{EnsAPIs: []string{ - "@/data/testnet/geth.ipc", - }}, - err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"@/data/testnet/geth.ipc\": missing contract address", - }, - } { - err := validateConfig(c.cfg) - if c.err != "" && err.Error() != c.err { - t.Errorf("expected error %q, got %q", c.err, err) - } - if c.err == "" && err != nil { - t.Errorf("unexpected error %q", err) - } - } -} diff --git 
a/cmd/swarm/db.go b/cmd/swarm/db.go deleted file mode 100644 index 6435db2f07f3..000000000000 --- a/cmd/swarm/db.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" - "gopkg.in/urfave/cli.v1" -) - -func dbExport(ctx *cli.Context) { - args := ctx.Args() - if len(args) != 2 { - utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to write the tar archive to, - for stdout)") - } - - store, err := openDbStore(args[0]) - if err != nil { - utils.Fatalf("error opening local chunk database: %s", err) - } - defer store.Close() - - var out io.Writer - if args[1] == "-" { - out = os.Stdout - } else { - f, err := os.Create(args[1]) - if err != nil { - utils.Fatalf("error opening output file: %s", err) - } - defer f.Close() - out = f - } - - count, err := store.Export(out) - if err != nil { - utils.Fatalf("error exporting local chunk database: %s", err) - } - - log.Info(fmt.Sprintf("successfully exported %d chunks", count)) -} - -func dbImport(ctx *cli.Context) { - args := ctx.Args() - if len(args) != 2 { - utils.Fatalf("invalid arguments, please specify 
both (path to a local chunk database) and (path to read the tar archive from, - for stdin)") - } - - store, err := openDbStore(args[0]) - if err != nil { - utils.Fatalf("error opening local chunk database: %s", err) - } - defer store.Close() - - var in io.Reader - if args[1] == "-" { - in = os.Stdin - } else { - f, err := os.Open(args[1]) - if err != nil { - utils.Fatalf("error opening input file: %s", err) - } - defer f.Close() - in = f - } - - count, err := store.Import(in) - if err != nil { - utils.Fatalf("error importing local chunk database: %s", err) - } - - log.Info(fmt.Sprintf("successfully imported %d chunks", count)) -} - -func dbClean(ctx *cli.Context) { - args := ctx.Args() - if len(args) != 1 { - utils.Fatalf("invalid arguments, please specify (path to a local chunk database)") - } - - store, err := openDbStore(args[0]) - if err != nil { - utils.Fatalf("error opening local chunk database: %s", err) - } - defer store.Close() - - store.Cleanup() -} - -func openDbStore(path string) (*storage.DbStore, error) { - if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { - return nil, fmt.Errorf("invalid chunkdb path: %s", err) - } - hash := storage.MakeHashFunc("SHA3") - return storage.NewDbStore(path, hash, 10000000, 0) -} diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go deleted file mode 100644 index 1a2583ac8b8c..000000000000 --- a/cmd/swarm/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -// Command bzzhash computes a swarm tree hash. -package main - -import ( - "fmt" - "os" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" - "gopkg.in/urfave/cli.v1" -) - -func hash(ctx *cli.Context) { - args := ctx.Args() - if len(args) < 1 { - utils.Fatalf("Usage: swarm hash ") - } - f, err := os.Open(args[0]) - if err != nil { - utils.Fatalf("Error opening file " + args[0]) - } - defer f.Close() - - stat, _ := f.Stat() - chunker := storage.NewTreeChunker(storage.NewChunkerParams()) - key, err := chunker.Split(f, stat.Size(), nil, nil, nil) - if err != nil { - utils.Fatalf("%v\n", err) - } else { - fmt.Printf("%v\n", key) - } -} diff --git a/cmd/swarm/list.go b/cmd/swarm/list.go deleted file mode 100644 index e8772562c4e3..000000000000 --- a/cmd/swarm/list.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "fmt" - "os" - "strings" - "text/tabwriter" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - swarm "github.com/XinFinOrg/XDPoSChain/swarm/api/client" - "gopkg.in/urfave/cli.v1" -) - -func list(ctx *cli.Context) { - args := ctx.Args() - - if len(args) < 1 { - utils.Fatalf("Please supply a manifest reference as the first argument") - } else if len(args) > 2 { - utils.Fatalf("Too many arguments - usage 'swarm ls manifest [prefix]'") - } - manifest := args[0] - - var prefix string - if len(args) == 2 { - prefix = args[1] - } - - bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client := swarm.NewClient(bzzapi) - list, err := client.List(manifest, prefix) - if err != nil { - utils.Fatalf("Failed to generate file and directory list: %s", err) - } - - w := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', 0) - defer w.Flush() - fmt.Fprintln(w, "HASH\tCONTENT TYPE\tPATH") - for _, prefix := range list.CommonPrefixes { - fmt.Fprintf(w, "%s\t%s\t%s\n", "", "DIR", prefix) - } - for _, entry := range list.Entries { - fmt.Fprintf(w, "%s\t%s\t%s\n", entry.Hash, entry.ContentType, entry.Path) - } -} diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go deleted file mode 100644 index 9ca769070a03..000000000000 --- a/cmd/swarm/main.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "crypto/ecdsa" - "fmt" - "os" - "os/signal" - "runtime" - "sort" - "strconv" - "strings" - "syscall" - - "github.com/XinFinOrg/XDPoSChain/accounts" - "github.com/XinFinOrg/XDPoSChain/accounts/keystore" - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/console" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/ethclient" - "github.com/XinFinOrg/XDPoSChain/internal/debug" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/node" - "github.com/XinFinOrg/XDPoSChain/p2p" - "github.com/XinFinOrg/XDPoSChain/p2p/discover" - "github.com/XinFinOrg/XDPoSChain/params" - "github.com/XinFinOrg/XDPoSChain/swarm" - bzzapi "github.com/XinFinOrg/XDPoSChain/swarm/api" - swarmmetrics "github.com/XinFinOrg/XDPoSChain/swarm/metrics" - - "gopkg.in/urfave/cli.v1" -) - -const clientIdentifier = "swarm" - -var ( - gitCommit string // Git SHA1 commit hash of the release (set via linker flags) - testbetBootNodes = []string{ - "enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429", - "enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430", - "enode://fe29b82319b734ce1ec68b84657d57145fee237387e63273989d354486731e59f78858e452ef800a020559da22dcca759536e6aa5517c53930d29ce0b1029286@13.74.157.139:30431", - "enode://1d7187e7bde45cf0bee489ce9852dd6d1a0d9aa67a33a6b8e6db8a4fbc6fcfa6f0f1a5419343671521b863b187d1c73bad3603bae66421d157ffef357669ddb8@13.74.157.139:30432", - "enode://0e4cba800f7b1ee73673afa6a4acead4018f0149d2e3216be3f133318fd165b324cd71b81fbe1e80deac8dbf56e57a49db7be67f8b9bc81bd2b7ee496434fb5d@13.74.157.139:30433", - } -) - 
-var ( - ChequebookAddrFlag = cli.StringFlag{ - Name: "chequebook", - Usage: "chequebook contract address", - EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR, - } - SwarmAccountFlag = cli.StringFlag{ - Name: "bzzaccount", - Usage: "Swarm account key file", - EnvVar: SWARM_ENV_ACCOUNT, - } - SwarmListenAddrFlag = cli.StringFlag{ - Name: "httpaddr", - Usage: "Swarm HTTP API listening interface", - EnvVar: SWARM_ENV_LISTEN_ADDR, - } - SwarmPortFlag = cli.StringFlag{ - Name: "bzzport", - Usage: "Swarm local http api port", - EnvVar: SWARM_ENV_PORT, - } - SwarmNetworkIdFlag = cli.IntFlag{ - Name: "bzznetworkid", - Usage: "Network identifier (integer, default 3=swarm testnet)", - EnvVar: SWARM_ENV_NETWORK_ID, - } - SwarmConfigPathFlag = cli.StringFlag{ - Name: "bzzconfig", - Usage: "DEPRECATED: please use --config path/to/TOML-file", - } - SwarmSwapEnabledFlag = cli.BoolFlag{ - Name: "swap", - Usage: "Swarm SWAP enabled (default false)", - EnvVar: SWARM_ENV_SWAP_ENABLE, - } - SwarmSwapAPIFlag = cli.StringFlag{ - Name: "swap-api", - Usage: "URL of the Ethereum API provider to use to settle SWAP payments", - EnvVar: SWARM_ENV_SWAP_API, - } - SwarmSyncEnabledFlag = cli.BoolTFlag{ - Name: "sync", - Usage: "Swarm Syncing enabled (default true)", - EnvVar: SWARM_ENV_SYNC_ENABLE, - } - EnsAPIFlag = cli.StringSliceFlag{ - Name: "ens-api", - Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url", - EnvVar: SWARM_ENV_ENS_API, - } - SwarmApiFlag = cli.StringFlag{ - Name: "bzzapi", - Usage: "Swarm HTTP endpoint", - Value: "http://127.0.0.1:8500", - } - SwarmRecursiveUploadFlag = cli.BoolFlag{ - Name: "recursive", - Usage: "Upload directories recursively", - } - SwarmWantManifestFlag = cli.BoolTFlag{ - Name: "manifest", - Usage: "Automatic manifest upload", - } - SwarmUploadDefaultPath = cli.StringFlag{ - Name: "defaultpath", - Usage: "path to file served for empty url path (none)", - } - SwarmUpFromStdinFlag = cli.BoolFlag{ - Name: 
"stdin", - Usage: "reads data to be uploaded from stdin", - } - SwarmUploadMimeType = cli.StringFlag{ - Name: "mime", - Usage: "force mime type", - } - CorsStringFlag = cli.StringFlag{ - Name: "corsdomain", - Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", - EnvVar: SWARM_ENV_CORS, - } - - // the following flags are deprecated and should be removed in the future - DeprecatedEthAPIFlag = cli.StringFlag{ - Name: "ethapi", - Usage: "DEPRECATED: please use --ens-api and --swap-api", - } - DeprecatedEnsAddrFlag = cli.StringFlag{ - Name: "ens-addr", - Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format", - } -) - -// declare a few constant error messages, useful for later error check comparisons in test -var ( - SWARM_ERR_NO_BZZACCOUNT = "bzzaccount option is required but not set; check your config file, command line or environment variables" - SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set" -) - -var defaultNodeConfig = node.DefaultConfig - -// This init function sets defaults so cmd/swarm can run alongside geth. -func init() { - defaultNodeConfig.Name = clientIdentifier - defaultNodeConfig.Version = params.VersionWithCommit(gitCommit) - defaultNodeConfig.P2P.ListenAddr = ":30399" - defaultNodeConfig.IPCPath = "bzzd.ipc" - // Set flag defaults for --help display. - utils.ListenPortFlag.Value = 30399 -} - -var app = utils.NewApp(gitCommit, "Ethereum Swarm") - -// This init function creates the cli.App. -func init() { - app.Action = bzzd - app.HideVersion = true // we have a command to print the version - app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" - app.Commands = []cli.Command{ - { - Action: version, - Name: "version", - Usage: "Print version numbers", - ArgsUsage: " ", - Description: ` -The output of this command is supposed to be machine-readable. 
-`, - }, - { - Action: upload, - Name: "up", - Usage: "upload a file or directory to swarm using the HTTP API", - ArgsUsage: " ", - Description: ` -"upload a file or directory to swarm using the HTTP API and prints the root hash", -`, - }, - { - Action: list, - Name: "ls", - Usage: "list files and directories contained in a manifest", - ArgsUsage: " []", - Description: ` -Lists files and directories contained in a manifest. -`, - }, - { - Action: hash, - Name: "hash", - Usage: "print the swarm hash of a file or directory", - ArgsUsage: " ", - Description: ` -Prints the swarm hash of file or directory. -`, - }, - { - Name: "manifest", - Usage: "update a MANIFEST", - ArgsUsage: "manifest COMMAND", - Description: ` -Updates a MANIFEST by adding/removing/updating the hash of a path. -`, - Subcommands: []cli.Command{ - { - Action: add, - Name: "add", - Usage: "add a new path to the manifest", - ArgsUsage: " []", - Description: ` -Adds a new path to the manifest -`, - }, - { - Action: update, - Name: "update", - Usage: "update the hash for an already existing path in the manifest", - ArgsUsage: " []", - Description: ` -Update the hash for an already existing path in the manifest -`, - }, - { - Action: remove, - Name: "remove", - Usage: "removes a path from the manifest", - ArgsUsage: " ", - Description: ` -Removes a path from the manifest -`, - }, - }, - }, - { - Name: "db", - Usage: "manage the local chunk database", - ArgsUsage: "db COMMAND", - Description: ` -Manage the local chunk database. -`, - Subcommands: []cli.Command{ - { - Action: dbExport, - Name: "export", - Usage: "export a local chunk database as a tar archive (use - to send to stdout)", - ArgsUsage: " ", - Description: ` -Export a local chunk database as a tar archive (use - to send to stdout). 
- - swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar - -The export may be quite large, consider piping the output through the Unix -pv(1) tool to get a progress bar: - - swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar -`, - }, - { - Action: dbImport, - Name: "import", - Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", - ArgsUsage: " ", - Description: ` -Import chunks from a tar archive into a local chunk database (use - to read from stdin). - - swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar - -The import may be quite large, consider piping the input through the Unix -pv(1) tool to get a progress bar: - - pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks - -`, - }, - { - Action: dbClean, - Name: "clean", - Usage: "remove corrupt entries from a local chunk database", - ArgsUsage: "", - Description: ` -Remove corrupt entries from a local chunk database. -`, - }, - }, - }, - { - Action: func(ctx *cli.Context) { - utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.") - }, - Name: "cleandb", - Usage: "DEPRECATED: use 'swarm db clean'", - ArgsUsage: " ", - Description: ` -DEPRECATED: use 'swarm db clean'. 
-`, - }, - // See config.go - DumpConfigCommand, - } - sort.Sort(cli.CommandsByName(app.Commands)) - - app.Flags = []cli.Flag{ - utils.IdentityFlag, - utils.DataDirFlag, - utils.BootnodesFlag, - utils.KeyStoreDirFlag, - utils.ListenPortFlag, - utils.NoDiscoverFlag, - utils.DiscoveryV5Flag, - utils.NetrestrictFlag, - utils.NodeKeyFileFlag, - utils.NodeKeyHexFlag, - utils.MaxPeersFlag, - utils.NATFlag, - utils.IPCDisabledFlag, - utils.IPCPathFlag, - utils.PasswordFileFlag, - // bzzd-specific flags - CorsStringFlag, - EnsAPIFlag, - SwarmTomlConfigPathFlag, - SwarmConfigPathFlag, - SwarmSwapEnabledFlag, - SwarmSwapAPIFlag, - SwarmSyncEnabledFlag, - SwarmListenAddrFlag, - SwarmPortFlag, - SwarmAccountFlag, - SwarmNetworkIdFlag, - ChequebookAddrFlag, - // upload flags - SwarmApiFlag, - SwarmRecursiveUploadFlag, - SwarmWantManifestFlag, - SwarmUploadDefaultPath, - SwarmUpFromStdinFlag, - SwarmUploadMimeType, - //deprecated flags - DeprecatedEthAPIFlag, - DeprecatedEnsAddrFlag, - } - app.Flags = append(app.Flags, debug.Flags...) - app.Flags = append(app.Flags, swarmmetrics.Flags...) 
- app.Before = func(ctx *cli.Context) error { - runtime.GOMAXPROCS(runtime.NumCPU()) - if err := debug.Setup(ctx); err != nil { - return err - } - swarmmetrics.Setup(ctx) - return nil - } - app.After = func(ctx *cli.Context) error { - debug.Exit() - return nil - } -} - -func main() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func version(ctx *cli.Context) error { - fmt.Println(strings.Title(clientIdentifier)) - fmt.Println("Version:", params.Version) - if gitCommit != "" { - fmt.Println("Git Commit:", gitCommit) - } - fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name)) - fmt.Println("Go Version:", runtime.Version()) - fmt.Println("OS:", runtime.GOOS) - fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) - fmt.Printf("GOROOT=%s\n", runtime.GOROOT()) - return nil -} - -func bzzd(ctx *cli.Context) error { - //build a valid bzzapi.Config from all available sources: - //default config, file config, command line and env vars - bzzconfig, err := buildConfig(ctx) - if err != nil { - utils.Fatalf("unable to configure swarm: %v", err) - } - - cfg := defaultNodeConfig - //XDC only supports --datadir via command line - //in order to be consistent within swarm, if we pass --datadir via environment variable - //or via config file, we get the same directory for XDC and swarm - if _, err := os.Stat(bzzconfig.Path); err == nil { - cfg.DataDir = bzzconfig.Path - } - //setup the ethereum node - utils.SetNodeConfig(ctx, &cfg) - stack, err := node.New(&cfg) - if err != nil { - utils.Fatalf("can't create node: %v", err) - } - //a few steps need to be done after the config phase is completed, - //due to overriding behavior - initSwarmNode(bzzconfig, stack, ctx) - //register BZZ as node.Service in the ethereum node - registerBzzService(bzzconfig, ctx, stack) - //start the node - utils.StartNode(stack) - - go func() { - sigc := make(chan os.Signal, 1) - signal.Notify(sigc, syscall.SIGTERM) - defer signal.Stop(sigc) - <-sigc 
- log.Info("Got sigterm, shutting swarm down...") - stack.Stop() - }() - - // Add bootnodes as initial peers. - if bzzconfig.BootNodes != "" { - bootnodes := strings.Split(bzzconfig.BootNodes, ",") - injectBootnodes(stack.Server(), bootnodes) - } else { - if bzzconfig.NetworkId == 3 { - injectBootnodes(stack.Server(), testbetBootNodes) - } - } - - stack.Wait() - return nil -} - -func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) { - - //define the swarm service boot function - boot := func(ctx *node.ServiceContext) (node.Service, error) { - var swapClient *ethclient.Client - var err error - if bzzconfig.SwapApi != "" { - log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi) - swapClient, err = ethclient.Dial(bzzconfig.SwapApi) - if err != nil { - return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err) - } - } - - return swarm.NewSwarm(ctx, swapClient, bzzconfig) - } - //register within the ethereum node - if err := stack.Register(boot); err != nil { - utils.Fatalf("Failed to register the Swarm service: %v", err) - } -} - -func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey { - //an account is mandatory - if bzzaccount == "" { - utils.Fatalf(SWARM_ERR_NO_BZZACCOUNT) - } - // Try to load the arg as a hex key file. - if key, err := crypto.LoadECDSA(bzzaccount); err == nil { - log.Info("Swarm account key loaded", "address", crypto.PubkeyToAddress(key.PublicKey)) - return key - } - // Otherwise try getting it from the keystore. 
- am := stack.AccountManager() - ks := am.Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) - - return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx)) -} - -func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey { - var a accounts.Account - var err error - if common.IsHexAddress(account) { - a, err = ks.Find(accounts.Account{Address: common.HexToAddress(account)}) - } else if ix, ixerr := strconv.Atoi(account); ixerr == nil && ix > 0 { - if accounts := ks.Accounts(); len(accounts) > ix { - a = accounts[ix] - } else { - err = fmt.Errorf("index %d higher than number of accounts %d", ix, len(accounts)) - } - } else { - utils.Fatalf("Can't find swarm account key %s", account) - } - if err != nil { - utils.Fatalf("Can't find swarm account key: %v - Is the provided bzzaccount(%s) from the right datadir/Path?", err, account) - } - keyjson, err := os.ReadFile(a.URL.Path) - if err != nil { - utils.Fatalf("Can't load swarm account key: %v", err) - } - for i := 0; i < 3; i++ { - password := getPassPhrase(fmt.Sprintf("Unlocking swarm account %s [%d/3]", a.Address.Hex(), i+1), i, passwords) - key, err := keystore.DecryptKey(keyjson, password) - if err == nil { - return key.PrivateKey - } - } - utils.Fatalf("Can't decrypt swarm account key") - return nil -} - -// getPassPhrase retrieves the password associated with bzz account, either by fetching -// from a list of pre-loaded passwords, or by requesting it interactively from user. 
-func getPassPhrase(prompt string, i int, passwords []string) string { - // non-interactive - if len(passwords) > 0 { - if i < len(passwords) { - return passwords[i] - } - return passwords[len(passwords)-1] - } - - // fallback to interactive mode - if prompt != "" { - fmt.Println(prompt) - } - password, err := console.Stdin.PromptPassword("Passphrase: ") - if err != nil { - utils.Fatalf("Failed to read passphrase: %v", err) - } - return password -} - -func injectBootnodes(srv *p2p.Server, nodes []string) { - for _, url := range nodes { - n, err := discover.ParseNode(url) - if err != nil { - log.Error("Invalid swarm bootnode", "err", err) - continue - } - srv.AddPeer(n) - } -} diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go deleted file mode 100644 index 8be0a086446a..000000000000 --- a/cmd/swarm/manifest.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -// Command MANIFEST update -package main - -import ( - "encoding/json" - "fmt" - "mime" - "path/filepath" - "strings" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/swarm/api" - swarm "github.com/XinFinOrg/XDPoSChain/swarm/api/client" - "gopkg.in/urfave/cli.v1" -) - -const bzzManifestJSON = "application/bzz-manifest+json" - -func add(ctx *cli.Context) { - args := ctx.Args() - if len(args) < 3 { - utils.Fatalf("Need at least three arguments []") - } - - var ( - mhash = args[0] - path = args[1] - hash = args[2] - - ctype string - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - mroot api.Manifest - ) - - if len(args) > 3 { - ctype = args[3] - } else { - ctype = mime.TypeByExtension(filepath.Ext(path)) - } - - newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype) - fmt.Println(newManifest) - - if !wantManifest { - // Print the manifest. This is the only output to stdout. - mrootJSON, _ := json.MarshalIndent(mroot, "", " ") - fmt.Println(string(mrootJSON)) - return - } -} - -func update(ctx *cli.Context) { - - args := ctx.Args() - if len(args) < 3 { - utils.Fatalf("Need at least three arguments ") - } - - var ( - mhash = args[0] - path = args[1] - hash = args[2] - - ctype string - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - mroot api.Manifest - ) - if len(args) > 3 { - ctype = args[3] - } else { - ctype = mime.TypeByExtension(filepath.Ext(path)) - } - - newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype) - fmt.Println(newManifest) - - if !wantManifest { - // Print the manifest. This is the only output to stdout. 
- mrootJSON, _ := json.MarshalIndent(mroot, "", " ") - fmt.Println(string(mrootJSON)) - return - } -} - -func remove(ctx *cli.Context) { - args := ctx.Args() - if len(args) < 2 { - utils.Fatalf("Need at least two arguments ") - } - - var ( - mhash = args[0] - path = args[1] - - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - mroot api.Manifest - ) - - newManifest := removeEntryFromManifest(ctx, mhash, path) - fmt.Println(newManifest) - - if !wantManifest { - // Print the manifest. This is the only output to stdout. - mrootJSON, _ := json.MarshalIndent(mroot, "", " ") - fmt.Println(string(mrootJSON)) - return - } -} - -func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string { - - var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) - longestPathEntry = api.ManifestEntry{} - ) - - mroot, err := client.DownloadManifest(mhash) - if err != nil { - utils.Fatalf("Manifest download failed: %v", err) - } - - //TODO: check if the "hash" to add is valid and present in swarm - _, err = client.DownloadManifest(hash) - if err != nil { - utils.Fatalf("Hash to add is not present: %v", err) - } - - // See if we path is in this Manifest or do we have to dig deeper - for _, entry := range mroot.Entries { - if path == entry.Path { - utils.Fatalf("Path %s already present, not adding anything", path) - } else { - if entry.ContentType == bzzManifestJSON { - prfxlen := strings.HasPrefix(path, entry.Path) - if prfxlen && len(path) > len(longestPathEntry.Path) { - longestPathEntry = entry - } - } - } - } - - if longestPathEntry.Path != "" { - // Load the child Manifest add the entry there - newPath := path[len(longestPathEntry.Path):] - newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype) - - // Replace the hash for parent Manifests - newMRoot := &api.Manifest{} - for _, entry := range mroot.Entries { - if longestPathEntry.Path == entry.Path { - entry.Hash = newHash - 
} - newMRoot.Entries = append(newMRoot.Entries, entry) - } - mroot = newMRoot - } else { - // Add the entry in the leaf Manifest - newEntry := api.ManifestEntry{ - Hash: hash, - Path: path, - ContentType: ctype, - } - mroot.Entries = append(mroot.Entries, newEntry) - } - - newManifestHash, err := client.UploadManifest(mroot) - if err != nil { - utils.Fatalf("Manifest upload failed: %v", err) - } - return newManifestHash - -} - -func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string { - - var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) - newEntry = api.ManifestEntry{} - longestPathEntry = api.ManifestEntry{} - ) - - mroot, err := client.DownloadManifest(mhash) - if err != nil { - utils.Fatalf("Manifest download failed: %v", err) - } - - //TODO: check if the "hash" with which to update is valid and present in swarm - - // See if we path is in this Manifest or do we have to dig deeper - for _, entry := range mroot.Entries { - if path == entry.Path { - newEntry = entry - } else { - if entry.ContentType == bzzManifestJSON { - prfxlen := strings.HasPrefix(path, entry.Path) - if prfxlen && len(path) > len(longestPathEntry.Path) { - longestPathEntry = entry - } - } - } - } - - if longestPathEntry.Path == "" && newEntry.Path == "" { - utils.Fatalf("Path %s not present in the Manifest, not setting anything", path) - } - - if longestPathEntry.Path != "" { - // Load the child Manifest add the entry there - newPath := path[len(longestPathEntry.Path):] - newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype) - - // Replace the hash for parent Manifests - newMRoot := &api.Manifest{} - for _, entry := range mroot.Entries { - if longestPathEntry.Path == entry.Path { - entry.Hash = newHash - } - newMRoot.Entries = append(newMRoot.Entries, entry) - - } - mroot = newMRoot - } - - if newEntry.Path != "" { - // Replace the hash for leaf Manifest - newMRoot := 
&api.Manifest{} - for _, entry := range mroot.Entries { - if newEntry.Path == entry.Path { - myEntry := api.ManifestEntry{ - Hash: hash, - Path: entry.Path, - ContentType: ctype, - } - newMRoot.Entries = append(newMRoot.Entries, myEntry) - } else { - newMRoot.Entries = append(newMRoot.Entries, entry) - } - } - mroot = newMRoot - } - - newManifestHash, err := client.UploadManifest(mroot) - if err != nil { - utils.Fatalf("Manifest upload failed: %v", err) - } - return newManifestHash -} - -func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { - - var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - client = swarm.NewClient(bzzapi) - entryToRemove = api.ManifestEntry{} - longestPathEntry = api.ManifestEntry{} - ) - - mroot, err := client.DownloadManifest(mhash) - if err != nil { - utils.Fatalf("Manifest download failed: %v", err) - } - - // See if we path is in this Manifest or do we have to dig deeper - for _, entry := range mroot.Entries { - if path == entry.Path { - entryToRemove = entry - } else { - if entry.ContentType == bzzManifestJSON { - prfxlen := strings.HasPrefix(path, entry.Path) - if prfxlen && len(path) > len(longestPathEntry.Path) { - longestPathEntry = entry - } - } - } - } - - if longestPathEntry.Path == "" && entryToRemove.Path == "" { - utils.Fatalf("Path %s not present in the Manifest, not removing anything", path) - } - - if longestPathEntry.Path != "" { - // Load the child Manifest remove the entry there - newPath := path[len(longestPathEntry.Path):] - newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath) - - // Replace the hash for parent Manifests - newMRoot := &api.Manifest{} - for _, entry := range mroot.Entries { - if longestPathEntry.Path == entry.Path { - entry.Hash = newHash - } - newMRoot.Entries = append(newMRoot.Entries, entry) - } - mroot = newMRoot - } - - if entryToRemove.Path != "" { - // remove the entry in this Manifest - newMRoot := &api.Manifest{} - for _, entry 
:= range mroot.Entries { - if entryToRemove.Path != entry.Path { - newMRoot.Entries = append(newMRoot.Entries, entry) - } - } - mroot = newMRoot - } - - newManifestHash, err := client.UploadManifest(mroot) - if err != nil { - utils.Fatalf("Manifest upload failed: %v", err) - } - return newManifestHash -} diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go deleted file mode 100644 index 539ef5827cf4..000000000000 --- a/cmd/swarm/run_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "net" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/XinFinOrg/XDPoSChain/accounts" - "github.com/XinFinOrg/XDPoSChain/accounts/keystore" - "github.com/XinFinOrg/XDPoSChain/internal/cmdtest" - "github.com/XinFinOrg/XDPoSChain/node" - "github.com/XinFinOrg/XDPoSChain/p2p" - "github.com/XinFinOrg/XDPoSChain/rpc" - "github.com/XinFinOrg/XDPoSChain/swarm" - "github.com/docker/docker/pkg/reexec" -) - -func init() { - // Run the app if we've been exec'd as "swarm-test" in runSwarm. 
- reexec.Register("swarm-test", func() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - os.Exit(0) - }) -} - -func TestMain(m *testing.M) { - // check if we have been reexec'd - if reexec.Init() { - return - } - os.Exit(m.Run()) -} - -func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd { - tt := cmdtest.NewTestCmd(t, nil) - - // Boot "swarm". This actually runs the test binary but the TestMain - // function will prevent any tests from running. - tt.Run("swarm-test", args...) - - return tt -} - -type testCluster struct { - Nodes []*testNode - TmpDir string -} - -// newTestCluster starts a test swarm cluster of the given size. -// -// A temporary directory is created and each node gets a data directory inside -// it. -// -// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p -// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports -// as flags). -// -// When starting more than one node, they are connected together using the -// admin SetPeer RPC method. 
-func newTestCluster(t *testing.T, size int) *testCluster { - cluster := &testCluster{} - defer func() { - if t.Failed() { - cluster.Shutdown() - } - }() - - tmpdir, err := os.MkdirTemp("", "swarm-test") - if err != nil { - t.Fatal(err) - } - cluster.TmpDir = tmpdir - - // start the nodes - cluster.Nodes = make([]*testNode, 0, size) - for i := 0; i < size; i++ { - dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i)) - if err := os.Mkdir(dir, 0700); err != nil { - t.Fatal(err) - } - - node := newTestNode(t, dir) - node.Name = fmt.Sprintf("swarm%02d", i) - - cluster.Nodes = append(cluster.Nodes, node) - } - - if size == 1 { - return cluster - } - - // connect the nodes together - for _, node := range cluster.Nodes { - if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil { - t.Fatal(err) - } - } - - // wait until all nodes have the correct number of peers -outer: - for _, node := range cluster.Nodes { - var peers []*p2p.PeerInfo - for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) { - if err := node.Client.Call(&peers, "admin_peers"); err != nil { - t.Fatal(err) - } - if len(peers) == len(cluster.Nodes)-1 { - continue outer - } - } - t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1) - } - - return cluster -} - -func (c *testCluster) Shutdown() { - for _, node := range c.Nodes { - node.Shutdown() - } - os.RemoveAll(c.TmpDir) -} - -type testNode struct { - Name string - Addr string - URL string - Enode string - Dir string - Client *rpc.Client - Cmd *cmdtest.TestCmd -} - -const testPassphrase = "swarm-test-passphrase" - -func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) { - // create key - conf = &node.Config{ - DataDir: dir, - IPCPath: "bzzd.ipc", - NoUSB: true, - } - n, err := node.New(conf) - if err != nil { - t.Fatal(err) - } - account, err = 
n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase) - if err != nil { - t.Fatal(err) - } - - // use a unique IPCPath when running tests on Windows - if runtime.GOOS == "windows" { - conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String()) - } - - return conf, account -} - -func newTestNode(t *testing.T, dir string) *testNode { - - conf, account := getTestAccount(t, dir) - node := &testNode{Dir: dir} - - // assign ports - httpPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - p2pPort, err := assignTCPPort() - if err != nil { - t.Fatal(err) - } - - // start the node - node.Cmd = runSwarm(t, - "--port", p2pPort, - "--nodiscover", - "--datadir", dir, - "--ipcpath", conf.IPCPath, - "--ens-api", "", - "--bzzaccount", account.Address.String(), - "--bzznetworkid", "321", - "--bzzport", httpPort, - "--verbosity", "6", - ) - node.Cmd.InputLine(testPassphrase) - defer func() { - if t.Failed() { - node.Shutdown() - } - }() - - // wait for the node to start - for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { - node.Client, err = rpc.Dial(conf.IPCEndpoint()) - if err == nil { - break - } - } - if node.Client == nil { - t.Fatal(err) - } - - // load info - var info swarm.Info - if err := node.Client.Call(&info, "bzz_info"); err != nil { - t.Fatal(err) - } - node.Addr = net.JoinHostPort("127.0.0.1", info.Port) - node.URL = "http://" + node.Addr - - var nodeInfo p2p.NodeInfo - if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { - t.Fatal(err) - } - node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort) - - return node -} - -func (n *testNode) Shutdown() { - if n.Cmd != nil { - n.Cmd.Kill() - } -} - -func assignTCPPort() (string, error) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return "", err - } - l.Close() - _, port, err := net.SplitHostPort(l.Addr().String()) - if err != nil { - return "", err - } 
- return port, nil -} diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go deleted file mode 100644 index 212d58f9507c..000000000000 --- a/cmd/swarm/upload.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -// Command bzzup uploads files to the swarm HTTP API. -package main - -import ( - "errors" - "fmt" - "io" - "mime" - "net/http" - "os" - "os/user" - "path" - "path/filepath" - "strings" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - swarm "github.com/XinFinOrg/XDPoSChain/swarm/api/client" - "gopkg.in/urfave/cli.v1" -) - -func upload(ctx *cli.Context) { - - args := ctx.Args() - var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name) - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) - fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) - mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) - client = swarm.NewClient(bzzapi) - file string - ) - - if len(args) != 1 { - if fromStdin { - tmp, err := os.CreateTemp("", "swarm-stdin") - if err != nil { - utils.Fatalf("error create tempfile: %s", err) - } - defer os.Remove(tmp.Name()) - n, err := io.Copy(tmp, os.Stdin) - if err != nil { - utils.Fatalf("error copying stdin to 
tempfile: %s", err) - } else if n == 0 { - utils.Fatalf("error reading from stdin: zero length") - } - file = tmp.Name() - } else { - utils.Fatalf("Need filename as the first and only argument") - } - } else { - file = expandPath(args[0]) - } - - if !wantManifest { - f, err := swarm.Open(file) - if err != nil { - utils.Fatalf("Error opening file: %s", err) - } - defer f.Close() - hash, err := client.UploadRaw(f, f.Size) - if err != nil { - utils.Fatalf("Upload failed: %s", err) - } - fmt.Println(hash) - return - } - - stat, err := os.Stat(file) - if err != nil { - utils.Fatalf("Error opening file: %s", err) - } - - // define a function which either uploads a directory or single file - // based on the type of the file being uploaded - var doUpload func() (hash string, err error) - if stat.IsDir() { - doUpload = func() (string, error) { - if !recursive { - return "", errors.New("Argument is a directory and recursive upload is disabled") - } - return client.UploadDirectory(file, defaultPath, "") - } - } else { - doUpload = func() (string, error) { - f, err := swarm.Open(file) - if err != nil { - return "", fmt.Errorf("error opening file: %s", err) - } - defer f.Close() - if mimeType == "" { - mimeType = detectMimeType(file) - } - f.ContentType = mimeType - return client.Upload(f, "") - } - } - hash, err := doUpload() - if err != nil { - utils.Fatalf("Upload failed: %s", err) - } - fmt.Println(hash) -} - -// Expands a file path -// 1. replace tilde with users home dir -// 2. expands embedded environment variables -// 3. cleans the path, e.g. /a/b/../c -> /a/c -// Note, it has limitations, e.g. 
~someuser/tmp will not be expanded -func expandPath(p string) string { - if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") { - if home := homeDir(); home != "" { - p = home + p[1:] - } - } - return path.Clean(os.ExpandEnv(p)) -} - -func homeDir() string { - if home := os.Getenv("HOME"); home != "" { - return home - } - if usr, err := user.Current(); err == nil { - return usr.HomeDir - } - return "" -} - -func detectMimeType(file string) string { - if ext := filepath.Ext(file); ext != "" { - return mime.TypeByExtension(ext) - } - f, err := os.Open(file) - if err != nil { - return "" - } - defer f.Close() - buf := make([]byte, 512) - if n, _ := f.Read(buf); n > 0 { - return http.DetectContentType(buf) - } - return "" -} diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go deleted file mode 100644 index c7d1eeb05373..000000000000 --- a/cmd/swarm/upload_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "io" - "net/http" - "os" - "testing" -) - -// TestCLISwarmUp tests that running 'swarm up' makes the resulting file -// available from all nodes via the HTTP API -func TestCLISwarmUp(t *testing.T) { - // start 3 node cluster - t.Log("starting 3 node cluster") - cluster := newTestCluster(t, 3) - defer cluster.Shutdown() - - // create a tmp file - tmp, err := os.CreateTemp("", "swarm-test") - assertNil(t, err) - defer tmp.Close() - defer os.Remove(tmp.Name()) - _, err = io.WriteString(tmp, "data") - assertNil(t, err) - - // upload the file with 'swarm up' and expect a hash - t.Log("uploading file with 'swarm up'") - up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name()) - _, matches := up.ExpectRegexp(`[a-f\d]{64}`) - up.ExpectExit() - hash := matches[0] - t.Logf("file uploaded with hash %s", hash) - - // get the file from the HTTP API of each node - for _, node := range cluster.Nodes { - t.Logf("getting file from %s", node.Name) - res, err := http.Get(node.URL + "/bzz:/" + hash) - assertNil(t, err) - assertHTTPResponse(t, res, http.StatusOK, "data") - } -} - -func assertNil(t *testing.T, err error) { - if err != nil { - t.Fatal(err) - } -} - -func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) { - defer res.Body.Close() - if res.StatusCode != expectedStatus { - t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status) - } - data, err := io.ReadAll(res.Body) - assertNil(t, err) - if string(data) != expectedBody { - t.Fatalf("expected HTTP body %q, got %q", expectedBody, data) - } -} diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 161a65a417f6..0e736b95e0b3 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -19,6 +19,7 @@ package utils import ( "compress/gzip" + "errors" "fmt" "io" "os" @@ -130,7 +131,7 @@ func ImportChain(chain *core.BlockChain, fn string) error { for batch := 0; ; batch++ { // Load a batch of RLP blocks. 
if checkInterrupt() { - return fmt.Errorf("interrupted") + return errors.New("interrupted") } i := 0 for ; i < importBatchSize; i++ { @@ -153,7 +154,7 @@ func ImportChain(chain *core.BlockChain, fn string) error { } // Import the batch. if checkInterrupt() { - return fmt.Errorf("interrupted") + return errors.New("interrupted") } missing := missingBlocks(chain, blocks[:i]) if len(missing) == 0 { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a9fd25c4922a..38ce46222e1f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -20,10 +20,12 @@ package utils import ( "crypto/ecdsa" "fmt" + "math" "math/big" "os" "path/filepath" "runtime" + godebug "runtime/debug" "strconv" "strings" @@ -38,10 +40,12 @@ import ( "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/vm" "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/eth" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" + "github.com/XinFinOrg/XDPoSChain/eth/filters" "github.com/XinFinOrg/XDPoSChain/eth/gasprice" "github.com/XinFinOrg/XDPoSChain/ethdb" + "github.com/XinFinOrg/XDPoSChain/internal/ethapi" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/metrics" "github.com/XinFinOrg/XDPoSChain/metrics/exp" @@ -52,7 +56,9 @@ import ( "github.com/XinFinOrg/XDPoSChain/p2p/nat" "github.com/XinFinOrg/XDPoSChain/p2p/netutil" "github.com/XinFinOrg/XDPoSChain/params" + "github.com/XinFinOrg/XDPoSChain/rpc" whisper "github.com/XinFinOrg/XDPoSChain/whisper/whisperv6" + gopsutil "github.com/shirou/gopsutil/mem" "gopkg.in/urfave/cli.v1" ) @@ -148,7 +154,7 @@ var ( NetworkIdFlag = cli.Uint64Flag{ Name: "networkid", Usage: "Network identifier (integer, 89=XDPoSChain)", - Value: eth.DefaultConfig.NetworkId, + Value: ethconfig.Defaults.NetworkId, } TestnetFlag = cli.BoolFlag{ Name: "testnet", @@ -187,7 +193,7 @@ var ( Name: "light", Usage: "Enable light client mode", } - defaultSyncMode = 
eth.DefaultConfig.SyncMode + defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = TextMarshalerFlag{ Name: "syncmode", Usage: `Blockchain sync mode ("fast", "full", or "light")`, @@ -206,7 +212,7 @@ var ( LightPeersFlag = cli.IntFlag{ Name: "lightpeers", Usage: "Maximum number of LES client peers", - Value: eth.DefaultConfig.LightPeers, + Value: ethconfig.Defaults.LightPeers, } LightKDFFlag = cli.BoolFlag{ Name: "lightkdf", @@ -225,27 +231,27 @@ var ( EthashCachesInMemoryFlag = cli.IntFlag{ Name: "ethash.cachesinmem", Usage: "Number of recent ethash caches to keep in memory (16MB each)", - Value: eth.DefaultConfig.Ethash.CachesInMem, + Value: ethconfig.Defaults.Ethash.CachesInMem, } EthashCachesOnDiskFlag = cli.IntFlag{ Name: "ethash.cachesondisk", Usage: "Number of recent ethash caches to keep on disk (16MB each)", - Value: eth.DefaultConfig.Ethash.CachesOnDisk, + Value: ethconfig.Defaults.Ethash.CachesOnDisk, } EthashDatasetDirFlag = DirectoryFlag{ Name: "ethash.dagdir", Usage: "Directory to store the ethash mining DAGs (default = inside home folder)", - Value: DirectoryString{eth.DefaultConfig.Ethash.DatasetDir}, + Value: DirectoryString{ethconfig.Defaults.Ethash.DatasetDir}, } EthashDatasetsInMemoryFlag = cli.IntFlag{ Name: "ethash.dagsinmem", Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)", - Value: eth.DefaultConfig.Ethash.DatasetsInMem, + Value: ethconfig.Defaults.Ethash.DatasetsInMem, } EthashDatasetsOnDiskFlag = cli.IntFlag{ Name: "ethash.dagsondisk", Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)", - Value: eth.DefaultConfig.Ethash.DatasetsOnDisk, + Value: ethconfig.Defaults.Ethash.DatasetsOnDisk, } // Transaction pool settings TxPoolNoLocalsFlag = cli.BoolFlag{ @@ -265,37 +271,37 @@ var ( TxPoolPriceLimitFlag = cli.Uint64Flag{ Name: "txpool.pricelimit", Usage: "Minimum gas price limit to enforce for acceptance into the pool", - Value: eth.DefaultConfig.TxPool.PriceLimit, + Value: 
ethconfig.Defaults.TxPool.PriceLimit, } TxPoolPriceBumpFlag = cli.Uint64Flag{ Name: "txpool.pricebump", Usage: "Price bump percentage to replace an already existing transaction", - Value: eth.DefaultConfig.TxPool.PriceBump, + Value: ethconfig.Defaults.TxPool.PriceBump, } TxPoolAccountSlotsFlag = cli.Uint64Flag{ Name: "txpool.accountslots", Usage: "Minimum number of executable transaction slots guaranteed per account", - Value: eth.DefaultConfig.TxPool.AccountSlots, + Value: ethconfig.Defaults.TxPool.AccountSlots, } TxPoolGlobalSlotsFlag = cli.Uint64Flag{ Name: "txpool.globalslots", Usage: "Maximum number of executable transaction slots for all accounts", - Value: eth.DefaultConfig.TxPool.GlobalSlots, + Value: ethconfig.Defaults.TxPool.GlobalSlots, } TxPoolAccountQueueFlag = cli.Uint64Flag{ Name: "txpool.accountqueue", Usage: "Maximum number of non-executable transaction slots permitted per account", - Value: eth.DefaultConfig.TxPool.AccountQueue, + Value: ethconfig.Defaults.TxPool.AccountQueue, } TxPoolGlobalQueueFlag = cli.Uint64Flag{ Name: "txpool.globalqueue", Usage: "Maximum number of non-executable transaction slots for all accounts", - Value: eth.DefaultConfig.TxPool.GlobalQueue, + Value: ethconfig.Defaults.TxPool.GlobalQueue, } TxPoolLifetimeFlag = cli.DurationFlag{ Name: "txpool.lifetime", Usage: "Maximum amount of time non-executable transaction are queued", - Value: eth.DefaultConfig.TxPool.Lifetime, + Value: ethconfig.Defaults.TxPool.Lifetime, } // Performance tuning settings CacheFlag = cli.IntFlag{ @@ -313,6 +319,15 @@ var ( Usage: "Percentage of cache memory allowance to use for trie pruning", Value: 25, } + CacheLogSizeFlag = &cli.IntFlag{ + Name: "cache.blocklogs", + Usage: "Size (in number of blocks) of the log cache for filtering", + Value: ethconfig.Defaults.FilterLogCacheSize, + } + FDLimitFlag = cli.IntFlag{ + Name: "fdlimit", + Usage: "Raise the open file descriptor resource limit (default = system fd limit)", + } // Miner settings 
StakingEnabledFlag = cli.BoolFlag{ Name: "mine", @@ -336,7 +351,7 @@ var ( GasPriceFlag = BigFlag{ Name: "gasprice", Usage: "Minimal gas price to accept for mining a transactions", - Value: eth.DefaultConfig.GasPrice, + Value: ethconfig.Defaults.GasPrice, } ExtraDataFlag = cli.StringFlag{ Name: "extradata", @@ -361,7 +376,12 @@ var ( RPCGlobalGasCapFlag = cli.Uint64Flag{ Name: "rpc.gascap", Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)", - Value: eth.DefaultConfig.RPCGasCap, + Value: ethconfig.Defaults.RPCGasCap, + } + RPCGlobalTxFeeCap = cli.Float64Flag{ + Name: "rpc.txfeecap", + Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)", + Value: ethconfig.Defaults.RPCTxFeeCap, } // Logging and debug settings EthStatsURLFlag = cli.StringFlag{ @@ -543,12 +563,22 @@ var ( GpoBlocksFlag = cli.IntFlag{ Name: "gpoblocks", Usage: "Number of recent blocks to check for gas prices", - Value: eth.DefaultConfig.GPO.Blocks, + Value: ethconfig.Defaults.GPO.Blocks, } GpoPercentileFlag = cli.IntFlag{ Name: "gpopercentile", Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices", - Value: eth.DefaultConfig.GPO.Percentile, + Value: ethconfig.Defaults.GPO.Percentile, + } + GpoMaxGasPriceFlag = cli.Int64Flag{ + Name: "gpo.maxprice", + Usage: "Maximum gas price will be recommended by gpo", + Value: ethconfig.Defaults.GPO.MaxPrice.Int64(), + } + GpoIgnoreGasPriceFlag = cli.Int64Flag{ + Name: "gpo.ignoreprice", + Usage: "Gas price below which gpo will ignore transactions", + Value: ethconfig.Defaults.GPO.IgnorePrice.Int64(), } WhisperEnabledFlag = cli.BoolFlag{ Name: "shh", @@ -801,20 +831,29 @@ func setPrefix(ctx *cli.Context, cfg *node.Config) { // MakeDatabaseHandles raises out the number of allowed file handles per process // for XDC and returns half of the allowance to assign to the database. 
-func MakeDatabaseHandles() int { - limit, err := fdlimit.Current() +func MakeDatabaseHandles(max int) int { + limit, err := fdlimit.Maximum() if err != nil { Fatalf("Failed to retrieve file descriptor allowance: %v", err) } - if limit < 2048 { - if err := fdlimit.Raise(2048); err != nil { - Fatalf("Failed to raise file descriptor allowance: %v", err) - } - } - if limit > 2048 { // cap database file descriptors even if more is available - limit = 2048 + switch { + case max == 0: + // User didn't specify a meaningful value, use system limits + case max < 128: + // User specified something unhealthy, just use system defaults + log.Error("File descriptor limit invalid (<128)", "had", max, "updated", limit) + case max > limit: + // User requested more than the OS allows, notify that we can't allocate it + log.Warn("Requested file descriptors denied by OS", "req", max, "limit", limit) + default: + // User limit is meaningful and within allowed range, use that + limit = max + } + raised, err := fdlimit.Raise(uint64(limit)) + if err != nil { + Fatalf("Failed to raise file descriptor allowance: %v", err) } - return limit / 2 // Leave half for networking and other stuff + return int(raised / 2) // Leave half for networking and other stuff } // MakeAddress converts an account specified directly as a hex encoded string or @@ -844,7 +883,7 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error // setEtherbase retrieves the etherbase either from the directly specified // command line flags or from the keystore if CLI indexed. 
-func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) { +func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *ethconfig.Config) { if ctx.GlobalIsSet(EtherbaseFlag.Name) { account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name)) if err != nil { @@ -973,13 +1012,25 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { } } -func setGPO(ctx *cli.Context, cfg *gasprice.Config) { +func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) { + // If we are running the light client, apply another group + // settings for gas oracle. + if light { + cfg.Blocks = ethconfig.LightClientGPO.Blocks + cfg.Percentile = ethconfig.LightClientGPO.Percentile + } if ctx.GlobalIsSet(GpoBlocksFlag.Name) { cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name) } if ctx.GlobalIsSet(GpoPercentileFlag.Name) { cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name) } + if ctx.GlobalIsSet(GpoMaxGasPriceFlag.Name) { + cfg.MaxPrice = big.NewInt(ctx.GlobalInt64(GpoMaxGasPriceFlag.Name)) + } + if ctx.GlobalIsSet(GpoIgnoreGasPriceFlag.Name) { + cfg.IgnorePrice = big.NewInt(ctx.GlobalInt64(GpoIgnoreGasPriceFlag.Name)) + } } func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { @@ -1015,7 +1066,7 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { } } -func setEthash(ctx *cli.Context, cfg *eth.Config) { +func setEthash(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.GlobalIsSet(EthashCacheDirFlag.Name) { cfg.Ethash.CacheDir = ctx.GlobalString(EthashCacheDirFlag.Name) } @@ -1121,7 +1172,7 @@ func SetXDCXConfig(ctx *cli.Context, cfg *XDCx.Config, XDCDataDir string) { } // SetEthConfig applies eth-related command line flags to the config. 
-func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { +func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag) checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag) @@ -1130,10 +1181,30 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) setEtherbase(ctx, ks, cfg) - setGPO(ctx, &cfg.GPO) + setGPO(ctx, &cfg.GPO, ctx.GlobalString(SyncModeFlag.Name) == "light") setTxPool(ctx, &cfg.TxPool) setEthash(ctx, cfg) + // Cap the cache allowance and tune the garbage collector + mem, err := gopsutil.VirtualMemory() + if err == nil { + if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { + log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) + mem.Total = 2 * 1024 * 1024 * 1024 + } + allowance := int(mem.Total / 1024 / 1024 / 3) + if cache := ctx.Int(CacheFlag.Name); cache > allowance { + log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + ctx.Set(CacheFlag.Name, strconv.Itoa(allowance)) + } + } + // Ensure Go's GC ignores the database cache for trigger percentage + cache := ctx.Int(CacheFlag.Name) + gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + + log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + godebug.SetGCPercent(int(gogc)) + switch { case ctx.GlobalIsSet(SyncModeFlag.Name): cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) @@ -1155,7 +1226,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) { cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 } - cfg.DatabaseHandles = MakeDatabaseHandles() + 
cfg.DatabaseHandles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name)) if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) @@ -1171,12 +1242,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(DocRootFlag.Name) { cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) } + if ctx.GlobalIsSet(RPCGlobalTxFeeCap.Name) { + cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCap.Name) + } if ctx.GlobalIsSet(ExtraDataFlag.Name) { cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name)) } if ctx.GlobalIsSet(GasPriceFlag.Name) { cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name) } + if ctx.IsSet(CacheLogSizeFlag.Name) { + cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name) + } if ctx.GlobalIsSet(VMEnableDebugFlag.Name) { // TODO(fjl): force-enable this in --dev mode cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name) @@ -1241,7 +1318,7 @@ func SetupNetwork(ctx *cli.Context) { func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { var ( cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 - handles = MakeDatabaseHandles() + handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name)) ) name := "chaindata" if ctx.GlobalBool(LightModeFlag.Name) { @@ -1283,12 +1360,12 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai engine = ethash.NewFaker() if !ctx.GlobalBool(FakePoWFlag.Name) { engine = ethash.New(ethash.Config{ - CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir), - CachesInMem: eth.DefaultConfig.Ethash.CachesInMem, - CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk, - DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir), - DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem, - DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk, + CacheDir: 
stack.ResolvePath(ethconfig.Defaults.Ethash.CacheDir), + CachesInMem: ethconfig.Defaults.Ethash.CachesInMem, + CachesOnDisk: ethconfig.Defaults.Ethash.CachesOnDisk, + DatasetDir: stack.ResolvePath(ethconfig.Defaults.Ethash.DatasetDir), + DatasetsInMem: ethconfig.Defaults.Ethash.DatasetsInMem, + DatasetsOnDisk: ethconfig.Defaults.Ethash.DatasetsOnDisk, }) } Fatalf("Only support XDPoS consensus") @@ -1298,8 +1375,8 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai } cache := &core.CacheConfig{ Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive", - TrieNodeLimit: eth.DefaultConfig.TrieCache, - TrieTimeLimit: eth.DefaultConfig.TrieTimeout, + TrieNodeLimit: ethconfig.Defaults.TrieCache, + TrieTimeLimit: ethconfig.Defaults.TrieTimeout, } if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 @@ -1376,6 +1453,19 @@ func WalkMatch(root, pattern string) ([]string, error) { return matches, nil } +// RegisterFilterAPI adds the eth log filtering RPC API to the node. 
+func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem { + isLightClient := ethcfg.SyncMode == downloader.LightSync + filterSystem := filters.NewFilterSystem(backend, filters.Config{ + LogCacheSize: ethcfg.FilterLogCacheSize, + }) + stack.RegisterAPIs([]rpc.API{{ + Namespace: "eth", + Service: filters.NewFilterAPI(filterSystem, isLightClient), + }}) + return filterSystem +} + func SetupMetrics(ctx *cli.Context) { if metrics.Enabled { log.Info("Enabling metrics collection") diff --git a/cmd/utils/utils.go b/cmd/utils/utils.go index 94d2eb387ce8..a329fc8c4d2e 100644 --- a/cmd/utils/utils.go +++ b/cmd/utils/utils.go @@ -5,6 +5,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/XDCxlending" "github.com/XinFinOrg/XDPoSChain/eth" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/ethstats" "github.com/XinFinOrg/XDPoSChain/les" "github.com/XinFinOrg/XDPoSChain/node" @@ -12,7 +13,7 @@ import ( ) // RegisterEthService adds an Ethereum client to the stack. -func RegisterEthService(stack *node.Node, cfg *eth.Config) { +func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) { var err error if cfg.SyncMode == downloader.LightSync { err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { @@ -46,8 +47,7 @@ func RegisterShhService(stack *node.Node, cfg *whisper.Config) { } } -// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to -// th egiven node. +// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to the node. 
func RegisterEthStatsService(stack *node.Node, url string) { if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { // Retrieve both eth and les services diff --git a/common/constants/constants.go.testnet b/common/constants/constants.go.testnet index a84373fc1cca..a7e9fb64ba46 100644 --- a/common/constants/constants.go.testnet +++ b/common/constants/constants.go.testnet @@ -54,7 +54,7 @@ var ShanghaiBlock = big.NewInt(61290000) // Target 31st March 2024 var Eip1559Block = big.NewInt(9999999999) var TIPXDCXTestnet = big.NewInt(23779191) -var IsTestnet bool = false +var IsTestnet bool = true var Enable0xPrefix bool = true var StoreRewardFolder string var RollbackHash Hash diff --git a/common/countdown/countdown_test.go b/common/countdown/countdown_test.go index fb5356dfc94f..6f1b0e10225e 100644 --- a/common/countdown/countdown_test.go +++ b/common/countdown/countdown_test.go @@ -1,7 +1,7 @@ package countdown import ( - "fmt" + "errors" "testing" "time" @@ -76,7 +76,7 @@ func TestCountdownShouldResetEvenIfErrored(t *testing.T) { called := make(chan int) OnTimeoutFn := func(time.Time, interface{}) error { called <- 1 - return fmt.Errorf("ERROR!") + return errors.New("ERROR!") } countdown := NewCountDown(5000 * time.Millisecond) diff --git a/common/fdlimit/fdlimit_freebsd.go b/common/fdlimit/fdlimit_bsd.go similarity index 86% rename from common/fdlimit/fdlimit_freebsd.go rename to common/fdlimit/fdlimit_bsd.go index c126b0c26583..a3a6902c0925 100644 --- a/common/fdlimit/fdlimit_freebsd.go +++ b/common/fdlimit/fdlimit_bsd.go @@ -14,23 +14,24 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-// +build freebsd +//go:build freebsd || dragonfly +// +build freebsd dragonfly package fdlimit import "syscall" // This file is largely identical to fdlimit_unix.go, -// but Rlimit fields have type int64 on FreeBSD so it needs +// but Rlimit fields have type int64 on *BSD so it needs // an extra conversion. // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. -func Raise(max uint64) error { +func Raise(max uint64) (uint64, error) { // Get the current limit var limit syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { - return err + return 0, err } // Try to update the limit to the max allowance limit.Cur = limit.Max @@ -38,9 +39,12 @@ func Raise(max uint64) error { limit.Cur = int64(max) } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { - return err + return 0, err + } + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err } - return nil + return uint64(limit.Cur), nil } // Current retrieves the number of file descriptors allowed to be opened by this diff --git a/common/fdlimit/fdlimit_darwin.go b/common/fdlimit/fdlimit_darwin.go new file mode 100644 index 000000000000..88dd0f56cbc3 --- /dev/null +++ b/common/fdlimit/fdlimit_darwin.go @@ -0,0 +1,71 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package fdlimit + +import "syscall" + +// hardlimit is the number of file descriptors allowed at max by the kernel. +const hardlimit = 10240 + +// Raise tries to maximize the file descriptor allowance of this process +// to the maximum hard-limit allowed by the OS. +// Returns the size it was set to (may differ from the desired 'max') +func Raise(max uint64) (uint64, error) { + // Get the current limit + var limit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + // Try to update the limit to the max allowance + limit.Cur = limit.Max + if limit.Cur > max { + limit.Cur = max + } + if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + // MacOS can silently apply further caps, so retrieve the actually set limit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + return limit.Cur, nil +} + +// Current retrieves the number of file descriptors allowed to be opened by this +// process. +func Current() (int, error) { + var limit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + return int(limit.Cur), nil +} + +// Maximum retrieves the maximum number of file descriptors this process is +// allowed to request for itself. 
+func Maximum() (int, error) { + // Retrieve the maximum allowed by dynamic OS limits + var limit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + // Cap it to OPEN_MAX (10240) because macos is a special snowflake + if limit.Max > hardlimit { + limit.Max = hardlimit + } + return int(limit.Max), nil +} diff --git a/common/fdlimit/fdlimit_test.go b/common/fdlimit/fdlimit_test.go index a9ee9ab36a9b..9fd5e9fc3cbd 100644 --- a/common/fdlimit/fdlimit_test.go +++ b/common/fdlimit/fdlimit_test.go @@ -17,7 +17,6 @@ package fdlimit import ( - "fmt" "testing" ) @@ -30,13 +29,13 @@ func TestFileDescriptorLimits(t *testing.T) { t.Fatal(err) } if hardlimit < target { - t.Skip(fmt.Sprintf("system limit is less than desired test target: %d < %d", hardlimit, target)) + t.Skipf("system limit is less than desired test target: %d < %d", hardlimit, target) } if limit, err := Current(); err != nil || limit <= 0 { t.Fatalf("failed to retrieve file descriptor limit (%d): %v", limit, err) } - if err := Raise(uint64(target)); err != nil { + if _, err := Raise(uint64(target)); err != nil { t.Fatalf("failed to raise file allowance") } if limit, err := Current(); err != nil || limit < target { diff --git a/common/fdlimit/fdlimit_unix.go b/common/fdlimit/fdlimit_unix.go index a258132353cd..a1f388ebb78d 100644 --- a/common/fdlimit/fdlimit_unix.go +++ b/common/fdlimit/fdlimit_unix.go @@ -14,7 +14,8 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build linux darwin netbsd openbsd solaris +//go:build linux || netbsd || openbsd || solaris +// +build linux netbsd openbsd solaris package fdlimit @@ -22,11 +23,12 @@ import "syscall" // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. 
-func Raise(max uint64) error { +// Returns the size it was set to (may differ from the desired 'max') +func Raise(max uint64) (uint64, error) { // Get the current limit var limit syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { - return err + return 0, err } // Try to update the limit to the max allowance limit.Cur = limit.Max @@ -34,9 +36,13 @@ func Raise(max uint64) error { limit.Cur = max } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { - return err + return 0, err + } + // MacOS can silently apply further caps, so retrieve the actually set limit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err } - return nil + return limit.Cur, nil } // Current retrieves the number of file descriptors allowed to be opened by this diff --git a/common/fdlimit/fdlimit_windows.go b/common/fdlimit/fdlimit_windows.go index 863c58bedfab..f472153662e6 100644 --- a/common/fdlimit/fdlimit_windows.go +++ b/common/fdlimit/fdlimit_windows.go @@ -16,28 +16,31 @@ package fdlimit -import "errors" +import "fmt" + +// hardlimit is the number of file descriptors allowed at max by the kernel. +const hardlimit = 16384 // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. -func Raise(max uint64) error { +func Raise(max uint64) (uint64, error) { // This method is NOP by design: // * Linux/Darwin counterparts need to manually increase per process limits // * On Windows Go uses the CreateFile API, which is limited to 16K files, non // changeable from within a running process // This way we can always "request" raising the limits, which will either have // or not have effect based on the platform we're running on. 
- if max > 16384 { - return errors.New("file descriptor limit (16384) reached") + if max > hardlimit { + return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit) } - return nil + return max, nil } // Current retrieves the number of file descriptors allowed to be opened by this // process. func Current() (int, error) { // Please see Raise for the reason why we use hard coded 16K as the limit - return 16384, nil + return hardlimit, nil } // Maximum retrieves the maximum number of file descriptors this process is diff --git a/common/lru/basiclru.go b/common/lru/basiclru.go index a429157fe50a..7386c77840a9 100644 --- a/common/lru/basiclru.go +++ b/common/lru/basiclru.go @@ -115,9 +115,7 @@ func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) { // Purge empties the cache. func (c *BasicLRU[K, V]) Purge() { c.list.init() - for k := range c.items { - delete(c.items, k) - } + clear(c.items) } // Remove drops an item from the cache. Returns true if the key was present in cache. @@ -174,7 +172,7 @@ func (l *list[T]) init() { l.root.prev = &l.root } -// push adds an element to the front of the list. +// pushElem adds an element to the front of the list. 
func (l *list[T]) pushElem(e *listElem[T]) { e.prev = &l.root e.next = l.root.next diff --git a/common/types.go b/common/types.go index 905c7d5a4ecc..c7abab289b9a 100644 --- a/common/types.go +++ b/common/types.go @@ -50,6 +50,20 @@ const ( XDCZApplyMethod = "0xc6b32f34" ) +var ( + BlockSignersBinary = Address{19: 0x89} // xdc0000000000000000000000000000000000000089 + MasternodeVotingSMCBinary = Address{19: 0x88} // xdc0000000000000000000000000000000000000088 + RandomizeSMCBinary = Address{19: 0x90} // xdc0000000000000000000000000000000000000090 + FoudationAddrBinary = Address{19: 0x68} // xdc0000000000000000000000000000000000000068 + TeamAddrBinary = Address{19: 0x99} // xdc0000000000000000000000000000000000000099 + XDCXAddrBinary = Address{19: 0x91} // xdc0000000000000000000000000000000000000091 + TradingStateAddrBinary = Address{19: 0x92} // xdc0000000000000000000000000000000000000092 + XDCXLendingAddressBinary = Address{19: 0x93} // xdc0000000000000000000000000000000000000093 + XDCXLendingFinalizedTradeAddressBinary = Address{19: 0x94} // xdc0000000000000000000000000000000000000094 + XDCNativeAddressBinary = Address{19: 0x01} // xdc0000000000000000000000000000000000000001 + LendingLockAddressBinary = Address{19: 0x11} // xdc0000000000000000000000000000000000000011 +) + var ( hashT = reflect.TypeOf(Hash{}) addressT = reflect.TypeOf(Address{}) @@ -256,7 +270,7 @@ func (a *Address) Set(other Address) { // MarshalText returns the hex representation of a. func (a Address) MarshalText() ([]byte, error) { // Handle '0x' or 'xdc' prefix here. 
- if (Enable0xPrefix) { + if Enable0xPrefix { return hexutil.Bytes(a[:]).MarshalText() } else { return hexutil.Bytes(a[:]).MarshalXDCText() diff --git a/common/types_test.go b/common/types_test.go index 1ec2bfc26c31..7621a6e4b8ef 100644 --- a/common/types_test.go +++ b/common/types_test.go @@ -167,3 +167,39 @@ func TestRemoveItemInArray(t *testing.T) { t.Error("fail remove item from array address") } } + +var testCases = []struct { + bin Address + str string +}{ + {BlockSignersBinary, BlockSigners}, + {MasternodeVotingSMCBinary, MasternodeVotingSMC}, + {RandomizeSMCBinary, RandomizeSMC}, + {FoudationAddrBinary, FoudationAddr}, + {TeamAddrBinary, TeamAddr}, + {XDCXAddrBinary, XDCXAddr}, + {TradingStateAddrBinary, TradingStateAddr}, + {XDCXLendingAddressBinary, XDCXLendingAddress}, + {XDCXLendingFinalizedTradeAddressBinary, XDCXLendingFinalizedTradeAddress}, + {XDCNativeAddressBinary, XDCNativeAddress}, + {LendingLockAddressBinary, LendingLockAddress}, +} + +func TestBinaryAddressToString(t *testing.T) { + for _, tt := range testCases { + have := tt.bin.String() + want := tt.str + if have != want { + t.Errorf("fail to convert binary address to string address\nwant:%s\nhave:%s", have, want) + } + } +} +func TestStringToBinaryAddress(t *testing.T) { + for _, tt := range testCases { + want := tt.bin + have := HexToAddress(tt.str) + if have != want { + t.Errorf("fail to convert string address to binary address\nwant:%s\nhave:%s", have, want) + } + } +} diff --git a/consensus/XDPoS/XDPoS.go b/consensus/XDPoS/XDPoS.go index 6d5477a1ee61..eb976ca60370 100644 --- a/consensus/XDPoS/XDPoS.go +++ b/consensus/XDPoS/XDPoS.go @@ -17,6 +17,7 @@ package XDPoS import ( + "errors" "fmt" "math/big" @@ -438,7 +439,7 @@ func (x *XDPoS) CalculateMissingRounds(chain consensus.ChainReader, header *type case params.ConsensusEngineVersion2: return x.EngineV2.CalculateMissingRounds(chain, header) default: // Default "v1" - return nil, fmt.Errorf("Not supported in the v1 consensus") + return 
nil, errors.New("Not supported in the v1 consensus") } } @@ -507,7 +508,7 @@ func (x *XDPoS) CacheNoneTIPSigningTxs(header *types.Header, txs []*types.Transa signTxs := []*types.Transaction{} for _, tx := range txs { if tx.IsSigningTransaction() { - var b uint + var b uint64 for _, r := range receipts { if r.TxHash == tx.Hash() { if len(r.PostState) > 0 { @@ -549,3 +550,13 @@ func (x *XDPoS) CacheSigningTxs(hash common.Hash, txs []*types.Transaction) []*t func (x *XDPoS) GetCachedSigningTxs(hash common.Hash) (interface{}, bool) { return x.signingTxsCache.Get(hash) } + +func (x *XDPoS) GetEpochSwitchInfoBetween(chain consensus.ChainReader, begin, end *types.Header) ([]*types.EpochSwitchInfo, error) { + beginBlockVersion := x.config.BlockConsensusVersion(begin.Number, begin.Extra, ExtraFieldCheck) + endBlockVersion := x.config.BlockConsensusVersion(end.Number, end.Extra, ExtraFieldCheck) + if beginBlockVersion == params.ConsensusEngineVersion2 && endBlockVersion == params.ConsensusEngineVersion2 { + return x.EngineV2.GetEpochSwitchInfoBetween(chain, begin, end) + } + // Default "v1" + return nil, errors.New("not supported in the v1 consensus") +} diff --git a/consensus/XDPoS/api.go b/consensus/XDPoS/api.go index a922e57f78f3..fd06703128b3 100644 --- a/consensus/XDPoS/api.go +++ b/consensus/XDPoS/api.go @@ -17,6 +17,7 @@ package XDPoS import ( "encoding/base64" + "errors" "math/big" "github.com/XinFinOrg/XDPoSChain/common" @@ -259,7 +260,7 @@ func (api *API) GetV2BlockByHash(blockHash common.Hash) *V2BlockInfo { func (api *API) NetworkInformation() NetworkInformation { info := NetworkInformation{} info.NetworkId = api.chain.Config().ChainId - info.XDCValidatorAddress = common.HexToAddress(common.MasternodeVotingSMC) + info.XDCValidatorAddress = common.MasternodeVotingSMCBinary if common.IsTestnet { info.LendingAddress = common.HexToAddress(common.LendingRegistrationSMCTestnet) info.RelayerRegistrationAddress = common.HexToAddress(common.RelayerRegistrationSMCTestnet) 
@@ -320,3 +321,26 @@ func calculateSigners(message map[string]SignerTypes, pool map[string]map[common } } } + +func (api *API) GetEpochNumbersBetween(begin, end *rpc.BlockNumber) ([]uint64, error) { + beginHeader := api.getHeaderFromApiBlockNum(begin) + if beginHeader == nil { + return nil, errors.New("illegal begin block number") + } + endHeader := api.getHeaderFromApiBlockNum(end) + if endHeader == nil { + return nil, errors.New("illegal end block number") + } + if beginHeader.Number.Cmp(endHeader.Number) > 0 { + return nil, errors.New("illegal begin and end block number, begin > end") + } + epochSwitchInfos, err := api.XDPoS.GetEpochSwitchInfoBetween(api.chain, beginHeader, endHeader) + if err != nil { + return nil, err + } + epochSwitchNumbers := make([]uint64, len(epochSwitchInfos)) + for i, info := range epochSwitchInfos { + epochSwitchNumbers[i] = info.EpochSwitchBlockInfo.Number.Uint64() + } + return epochSwitchNumbers, nil +} diff --git a/consensus/XDPoS/engines/engine_v1/engine.go b/consensus/XDPoS/engines/engine_v1/engine.go index 7fce2e6a8676..c67dda874494 100644 --- a/consensus/XDPoS/engines/engine_v1/engine.go +++ b/consensus/XDPoS/engines/engine_v1/engine.go @@ -438,16 +438,6 @@ func (x *XDPoS_v1) YourTurn(chain consensus.ChainReader, parent *types.Header, s func (x *XDPoS_v1) yourTurn(chain consensus.ChainReader, parent *types.Header, signer common.Address) (int, int, int, bool, error) { masternodes := x.GetMasternodes(chain, parent) - - // if common.IsTestnet { - // // Only three mns hard code for XDC testnet. 
- // masternodes = []common.Address{ - // common.HexToAddress("0x3Ea0A3555f9B1dE983572BfF6444aeb1899eC58C"), - // common.HexToAddress("0x4F7900282F3d371d585ab1361205B0940aB1789C"), - // common.HexToAddress("0x942a5885A8844Ee5587C8AC5e371Fc39FFE61896"), - // } - // } - snap, err := x.GetSnapshot(chain, parent) if err != nil { log.Warn("Failed when trying to commit new work", "err", err) @@ -685,7 +675,7 @@ func (x *XDPoS_v1) GetValidator(creator common.Address, chain consensus.ChainRea if no%epoch == 0 { cpHeader = header } else { - return common.Address{}, fmt.Errorf("couldn't find checkpoint header") + return common.Address{}, errors.New("couldn't find checkpoint header") } } m, err := getM1M2FromCheckpointHeader(cpHeader, header, chain.Config()) diff --git a/consensus/XDPoS/engines/engine_v2/engine.go b/consensus/XDPoS/engines/engine_v2/engine.go index 9cc25d7fab76..c2235f5d32a9 100644 --- a/consensus/XDPoS/engines/engine_v2/engine.go +++ b/consensus/XDPoS/engines/engine_v2/engine.go @@ -661,7 +661,7 @@ func (x *XDPoS_v2) VerifyTimeoutMessage(chain consensus.ChainReader, timeoutMsg } if len(snap.NextEpochCandidates) == 0 { log.Error("[VerifyTimeoutMessage] cannot find NextEpochCandidates from snapshot", "messageGapNumber", timeoutMsg.GapNumber) - return false, fmt.Errorf("Empty master node lists from snapshot") + return false, errors.New("Empty master node lists from snapshot") } verified, signer, err := x.verifyMsgSignature(types.TimeoutSigHash(&types.TimeoutForSign{ @@ -748,7 +748,7 @@ func (x *XDPoS_v2) VerifyBlockInfo(blockChainReader consensus.ChainReader, block // If blockHeader present, then its value shall consistent with what's provided in the blockInfo if blockHeader.Hash() != blockInfo.Hash { log.Warn("[VerifyBlockInfo] BlockHeader and blockInfo mismatch", "BlockInfoHash", blockInfo.Hash.Hex(), "BlockHeaderHash", blockHeader.Hash()) - return fmt.Errorf("[VerifyBlockInfo] Provided blockheader does not match what's in the blockInfo") + return 
errors.New("[VerifyBlockInfo] Provided blockheader does not match what's in the blockInfo") } } @@ -761,7 +761,7 @@ func (x *XDPoS_v2) VerifyBlockInfo(blockChainReader consensus.ChainReader, block if blockInfo.Number.Cmp(x.config.V2.SwitchBlock) == 0 { if blockInfo.Round != 0 { log.Error("[VerifyBlockInfo] Switch block round is not 0", "BlockInfoHash", blockInfo.Hash.Hex(), "BlockInfoNum", blockInfo.Number, "BlockInfoRound", blockInfo.Round, "blockHeaderNum", blockHeader.Number) - return fmt.Errorf("[VerifyBlockInfo] switch block round have to be 0") + return errors.New("[VerifyBlockInfo] switch block round have to be 0") } return nil } @@ -789,7 +789,7 @@ func (x *XDPoS_v2) verifyQC(blockChainReader consensus.ChainReader, quorumCert * epochInfo, err := x.getEpochSwitchInfo(blockChainReader, parentHeader, quorumCert.ProposedBlockInfo.Hash) if err != nil { log.Error("[verifyQC] Error when getting epoch switch Info to verify QC", "Error", err) - return fmt.Errorf("Fail to verify QC due to failure in getting epoch switch info") + return errors.New("Fail to verify QC due to failure in getting epoch switch info") } signatures, duplicates := UniqueSignatures(quorumCert.Signatures) @@ -821,12 +821,12 @@ func (x *XDPoS_v2) verifyQC(blockChainReader consensus.ChainReader, quorumCert * }), sig, epochInfo.Masternodes) if err != nil { log.Error("[verifyQC] Error while verfying QC message signatures", "Error", err) - haveError = fmt.Errorf("Error while verfying QC message signatures") + haveError = errors.New("Error while verfying QC message signatures") return } if !verified { log.Warn("[verifyQC] Signature not verified doing QC verification", "QC", quorumCert) - haveError = fmt.Errorf("Fail to verify QC due to signature mis-match") + haveError = errors.New("Fail to verify QC due to signature mis-match") return } }(signature) diff --git a/consensus/XDPoS/engines/engine_v2/epochSwitch.go b/consensus/XDPoS/engines/engine_v2/epochSwitch.go index 981c46ff9a06..abb35bde0926 100644 
--- a/consensus/XDPoS/engines/engine_v2/epochSwitch.go +++ b/consensus/XDPoS/engines/engine_v2/epochSwitch.go @@ -157,3 +157,34 @@ func (x *XDPoS_v2) IsEpochSwitch(header *types.Header) (bool, uint64, error) { log.Debug("[IsEpochSwitch]", "is", parentRound < epochStartRound, "parentRound", parentRound, "round", round, "number", header.Number.Uint64(), "epochNum", epochNum, "hash", header.Hash()) return parentRound < epochStartRound, epochNum, nil } + +// GetEpochSwitchInfoBetween get epoch switch between begin and end headers +// Search backwardly from end number to begin number +func (x *XDPoS_v2) GetEpochSwitchInfoBetween(chain consensus.ChainReader, begin, end *types.Header) ([]*types.EpochSwitchInfo, error) { + infos := make([]*types.EpochSwitchInfo, 0) + // after the first iteration, it becomes nil since epoch switch info does not have header info + iteratorHeader := end + // after the first iteration, it becomes the parent hash of the epoch switch block + iteratorHash := end.Hash() + iteratorNum := end.Number + // when iterator is strictly > begin number, do the search + for iteratorNum.Cmp(begin.Number) > 0 { + epochSwitchInfo, err := x.getEpochSwitchInfo(chain, iteratorHeader, iteratorHash) + if err != nil { + log.Error("[GetEpochSwitchInfoBetween] Adaptor v2 getEpochSwitchInfo has error, potentially bug", "err", err) + return nil, err + } + iteratorHeader = nil + iteratorHash = epochSwitchInfo.EpochSwitchParentBlockInfo.Hash + iteratorNum = epochSwitchInfo.EpochSwitchBlockInfo.Number + if iteratorNum.Cmp(begin.Number) >= 0 { + infos = append(infos, epochSwitchInfo) + } + + } + // reverse the array + for i := 0; i < len(infos)/2; i++ { + infos[i], infos[len(infos)-1-i] = infos[len(infos)-1-i], infos[i] + } + return infos, nil +} diff --git a/consensus/XDPoS/engines/engine_v2/forensics.go b/consensus/XDPoS/engines/engine_v2/forensics.go index 1c4542ab0e81..45f88097d6b3 100644 --- a/consensus/XDPoS/engines/engine_v2/forensics.go +++ 
b/consensus/XDPoS/engines/engine_v2/forensics.go @@ -2,6 +2,7 @@ package engine_v2 import ( "encoding/json" + "errors" "fmt" "math/big" "reflect" @@ -49,7 +50,7 @@ func (f *Forensics) SetCommittedQCs(headers []types.Header, incomingQC types.Quo // highestCommitQCs is an array, assign the parentBlockQc and its child as well as its grandchild QC into this array for forensics purposes. if len(headers) != NUM_OF_FORENSICS_QC-1 { log.Error("[SetCommittedQcs] Received input length not equal to 2", len(headers)) - return fmt.Errorf("received headers length not equal to 2 ") + return errors.New("received headers length not equal to 2 ") } var committedQCs []types.QuorumCert @@ -64,11 +65,11 @@ func (f *Forensics) SetCommittedQCs(headers []types.Header, incomingQC types.Quo if i != 0 { if decodedExtraField.QuorumCert.ProposedBlockInfo.Hash != headers[i-1].Hash() { log.Error("[SetCommittedQCs] Headers shall be on the same chain and in the right order", "parentHash", h.ParentHash.Hex(), "headers[i-1].Hash()", headers[i-1].Hash().Hex()) - return fmt.Errorf("headers shall be on the same chain and in the right order") + return errors.New("headers shall be on the same chain and in the right order") } else if i == len(headers)-1 { // The last header shall be pointed by the incoming QC if incomingQC.ProposedBlockInfo.Hash != h.Hash() { log.Error("[SetCommittedQCs] incomingQc is not pointing at the last header received", "hash", h.Hash().Hex(), "incomingQC.ProposedBlockInfo.Hash", incomingQC.ProposedBlockInfo.Hash.Hex()) - return fmt.Errorf("incomingQc is not pointing at the last header received") + return errors.New("incomingQc is not pointing at the last header received") } } } @@ -91,7 +92,7 @@ func (f *Forensics) ProcessForensics(chain consensus.ChainReader, engine *XDPoS_ highestCommittedQCs := f.HighestCommittedQCs if len(highestCommittedQCs) != NUM_OF_FORENSICS_QC { log.Error("[ProcessForensics] HighestCommittedQCs value not set", "incomingQcProposedBlockHash", 
incomingQC.ProposedBlockInfo.Hash, "incomingQcProposedBlockNumber", incomingQC.ProposedBlockInfo.Number.Uint64(), "incomingQcProposedBlockRound", incomingQC.ProposedBlockInfo.Round) - return fmt.Errorf("HighestCommittedQCs value not set") + return errors.New("HighestCommittedQCs value not set") } // Find the QC1 and QC2. We only care 2 parents in front of the incomingQC. The returned value contains QC1, QC2 and QC3(the incomingQC) incomingQuorunCerts, err := f.findAncestorQCs(chain, incomingQC, 2) @@ -163,7 +164,7 @@ func (f *Forensics) SendForensicProof(chain consensus.ChainReader, engine *XDPoS if ancestorBlock == nil { log.Error("[SendForensicProof] Unable to find the ancestor block by its hash", "Hash", ancestorHash) - return fmt.Errorf("Can't find ancestor block via hash") + return errors.New("Can't find ancestor block via hash") } content, err := json.Marshal(&types.ForensicsContent{ @@ -209,7 +210,7 @@ func (f *Forensics) findAncestorQCs(chain consensus.ChainReader, currentQc types parentHeader := chain.GetHeaderByHash(parentHash) if parentHeader == nil { log.Error("[findAncestorQCs] Forensics findAncestorQCs unable to find its parent block header", "BlockNum", parentHeader.Number.Int64(), "ParentHash", parentHash.Hex()) - return nil, fmt.Errorf("unable to find parent block header in forensics") + return nil, errors.New("unable to find parent block header in forensics") } var decodedExtraField types.ExtraFields_v2 err := utils.DecodeBytesExtraFields(parentHeader.Extra, &decodedExtraField) @@ -318,7 +319,7 @@ func (f *Forensics) findAncestorQcThroughRound(chain consensus.ChainReader, high } ancestorQC = *decodedExtraField.QuorumCert } - return ancestorQC, lowerRoundQCs, higherRoundQCs, fmt.Errorf("[findAncestorQcThroughRound] Could not find ancestor QC") + return ancestorQC, lowerRoundQCs, higherRoundQCs, errors.New("[findAncestorQcThroughRound] Could not find ancestor QC") } func (f *Forensics) FindAncestorBlockHash(chain consensus.ChainReader, 
firstBlockInfo *types.BlockInfo, secondBlockInfo *types.BlockInfo) (common.Hash, []string, []string, error) { @@ -398,7 +399,7 @@ func (f *Forensics) ProcessVoteEquivocation(chain consensus.ChainReader, engine highestCommittedQCs := f.HighestCommittedQCs if len(highestCommittedQCs) != NUM_OF_FORENSICS_QC { log.Error("[ProcessVoteEquivocation] HighestCommittedQCs value not set", "incomingVoteProposedBlockHash", incomingVote.ProposedBlockInfo.Hash, "incomingVoteProposedBlockNumber", incomingVote.ProposedBlockInfo.Number.Uint64(), "incomingVoteProposedBlockRound", incomingVote.ProposedBlockInfo.Round) - return fmt.Errorf("HighestCommittedQCs value not set") + return errors.New("HighestCommittedQCs value not set") } if incomingVote.ProposedBlockInfo.Round < highestCommittedQCs[NUM_OF_FORENSICS_QC-1].ProposedBlockInfo.Round { log.Debug("Received a too old vote in forensics", "vote", incomingVote) diff --git a/consensus/XDPoS/engines/engine_v2/snapshot.go b/consensus/XDPoS/engines/engine_v2/snapshot.go index 78ce0aff55c6..ccae59841278 100644 --- a/consensus/XDPoS/engines/engine_v2/snapshot.go +++ b/consensus/XDPoS/engines/engine_v2/snapshot.go @@ -64,7 +64,7 @@ func (s *SnapshotV2) GetMappedCandidates() map[common.Address]struct{} { func (s *SnapshotV2) IsCandidates(address common.Address) bool { for _, n := range s.NextEpochCandidates { - if n.String() == address.String() { + if n == address { return true } } diff --git a/consensus/XDPoS/engines/engine_v2/timeout.go b/consensus/XDPoS/engines/engine_v2/timeout.go index 39d8100b2402..2f077b2a0018 100644 --- a/consensus/XDPoS/engines/engine_v2/timeout.go +++ b/consensus/XDPoS/engines/engine_v2/timeout.go @@ -1,6 +1,7 @@ package engine_v2 import ( + "errors" "fmt" "strconv" "strings" @@ -99,7 +100,7 @@ func (x *XDPoS_v2) verifyTC(chain consensus.ChainReader, timeoutCert *types.Time } if snap == nil || len(snap.NextEpochCandidates) == 0 { log.Error("[verifyTC] Something wrong with the snapshot from gapNumber", 
"messageGapNumber", timeoutCert.GapNumber, "snapshot", snap) - return fmt.Errorf("empty master node lists from snapshot") + return errors.New("empty master node lists from snapshot") } signatures, duplicates := UniqueSignatures(timeoutCert.Signatures) @@ -145,7 +146,7 @@ func (x *XDPoS_v2) verifyTC(chain consensus.ChainReader, timeoutCert *types.Time haveError = fmt.Errorf("error while verifying TC message signatures, %s", err) } else { log.Warn("[verifyTC] Signature not verified doing TC verification", "timeoutCert.Round", timeoutCert.Round, "timeoutCert.GapNumber", timeoutCert.GapNumber, "Signatures len", len(signatures)) - haveError = fmt.Errorf("fail to verify TC due to signature mis-match") + haveError = errors.New("fail to verify TC due to signature mis-match") } } mutex.Unlock() // Unlock after modifying haveError diff --git a/consensus/XDPoS/engines/engine_v2/utils.go b/consensus/XDPoS/engines/engine_v2/utils.go index e8006951ea14..20198816280b 100644 --- a/consensus/XDPoS/engines/engine_v2/utils.go +++ b/consensus/XDPoS/engines/engine_v2/utils.go @@ -1,6 +1,7 @@ package engine_v2 import ( + "errors" "fmt" "github.com/XinFinOrg/XDPoSChain/accounts" @@ -105,7 +106,7 @@ func (x *XDPoS_v2) signSignature(signingHash common.Hash) (types.Signature, erro func (x *XDPoS_v2) verifyMsgSignature(signedHashToBeVerified common.Hash, signature types.Signature, masternodes []common.Address) (bool, common.Address, error) { var signerAddress common.Address if len(masternodes) == 0 { - return false, signerAddress, fmt.Errorf("Empty masternode list detected when verifying message signatures") + return false, signerAddress, errors.New("empty masternode list detected when verifying message signatures") } // Recover the public key and the Ethereum address pubkey, err := crypto.Ecrecover(signedHashToBeVerified.Bytes(), signature) diff --git a/consensus/XDPoS/engines/engine_v2/vote.go b/consensus/XDPoS/engines/engine_v2/vote.go index 1ec2d4b24e42..de79585297af 100644 ---
a/consensus/XDPoS/engines/engine_v2/vote.go +++ b/consensus/XDPoS/engines/engine_v2/vote.go @@ -1,6 +1,7 @@ package engine_v2 import ( + "errors" "fmt" "math/big" "strconv" @@ -78,7 +79,7 @@ func (x *XDPoS_v2) voteHandler(chain consensus.ChainReader, voteMsg *types.Vote) epochInfo, err := x.getEpochSwitchInfo(chain, chain.CurrentHeader(), chain.CurrentHeader().Hash()) if err != nil { log.Error("[voteHandler] Error when getting epoch switch Info", "error", err) - return fmt.Errorf("Fail on voteHandler due to failure in getting epoch switch info") + return errors.New("fail on voteHandler due to failure in getting epoch switch info") } certThreshold := x.config.V2.Config(uint64(voteMsg.ProposedBlockInfo.Round)).CertThreshold @@ -177,7 +178,7 @@ func (x *XDPoS_v2) onVotePoolThresholdReached(chain consensus.ChainReader, poole epochInfo, err := x.getEpochSwitchInfo(chain, chain.CurrentHeader(), chain.CurrentHeader().Hash()) if err != nil { log.Error("[voteHandler] Error when getting epoch switch Info", "error", err) - return fmt.Errorf("Fail on voteHandler due to failure in getting epoch switch info") + return errors.New("fail on voteHandler due to failure in getting epoch switch info") } // Skip and wait for the next vote to process again if valid votes is less than what we required diff --git a/consensus/XDPoS/utils/utils.go b/consensus/XDPoS/utils/utils.go index 1b4e816dd1f8..62b394c836d3 100644 --- a/consensus/XDPoS/utils/utils.go +++ b/consensus/XDPoS/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "bytes" + "errors" "fmt" "reflect" "sort" @@ -79,7 +80,7 @@ func CompareSignersLists(list1 []common.Address, list2 []common.Address) bool { // Decode extra fields for consensus version >= 2 (XDPoS 2.0 and future versions) func DecodeBytesExtraFields(b []byte, val interface{}) error { if len(b) == 0 { - return fmt.Errorf("extra field is 0 length") + return errors.New("extra field is 0 length") } switch b[0] { case 2: diff --git
a/consensus/tests/engine_v1_tests/helper.go b/consensus/tests/engine_v1_tests/helper.go index be64d0c050a2..52baa648e00b 100644 --- a/consensus/tests/engine_v1_tests/helper.go +++ b/consensus/tests/engine_v1_tests/helper.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/hex" + "errors" "fmt" "math/big" "math/rand" @@ -144,11 +145,11 @@ func getCommonBackend(t *testing.T, chainConfig *params.ChainConfig) *backends.S // create test backend with smart contract in it contractBackend2 := backends.NewXDCSimulatedBackend(core.GenesisAlloc{ - acc1Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - acc2Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - acc3Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - voterAddr: {Balance: new(big.Int).SetUint64(10000000000)}, - common.HexToAddress(common.MasternodeVotingSMC): {Balance: new(big.Int).SetUint64(1), Code: code, Storage: storage}, // Binding the MasternodeVotingSMC with newly created 'code' for SC execution + acc1Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + acc2Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + acc3Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + voterAddr: {Balance: new(big.Int).SetUint64(10000000000)}, + common.MasternodeVotingSMCBinary: {Balance: new(big.Int).SetUint64(1), Code: code, Storage: storage}, // Binding the MasternodeVotingSMC with newly created 'code' for SC execution }, 10000000, chainConfig) return contractBackend2 @@ -178,9 +179,9 @@ func voteTX(gasLimit uint64, nonce uint64, addr string) (*types.Transaction, err amountInt := new(big.Int) amount, ok := amountInt.SetString("60000", 10) if !ok { - return nil, fmt.Errorf("big int init failed") + return nil, errors.New("big int init failed") } - to := common.HexToAddress(common.MasternodeVotingSMC) + to := common.MasternodeVotingSMCBinary tx := types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data) signedTX, err := types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(chainID)), 
voterKey) @@ -213,7 +214,7 @@ func GetSnapshotSigner(bc *BlockChain, header *types.Header) (signersList, error } func GetCandidateFromCurrentSmartContract(backend bind.ContractBackend, t *testing.T) masterNodes { - addr := common.HexToAddress(common.MasternodeVotingSMC) + addr := common.MasternodeVotingSMCBinary validator, err := contractValidator.NewXDCValidator(addr, backend) if err != nil { t.Fatal(err) @@ -321,7 +322,7 @@ func CreateBlock(blockchain *BlockChain, chainConfig *params.ChainConfig, starti // Sign all the things for v1 block use v1 sigHash function sighash, err := signFn(accounts.Account{Address: signer}, blockchain.Engine().(*XDPoS.XDPoS).SigHash(header).Bytes()) if err != nil { - panic(fmt.Errorf("Error when sign last v1 block hash during test block creation")) + panic(errors.New("Error when sign last v1 block hash during test block creation")) } copy(header.Extra[len(header.Extra)-utils.ExtraSeal:], sighash) } diff --git a/consensus/tests/engine_v2_tests/api_test.go b/consensus/tests/engine_v2_tests/api_test.go index fb99f6f8794f..185f9a40ccd2 100644 --- a/consensus/tests/engine_v2_tests/api_test.go +++ b/consensus/tests/engine_v2_tests/api_test.go @@ -2,6 +2,7 @@ package engine_v2_tests import ( "math/big" + "reflect" "testing" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" @@ -109,3 +110,61 @@ func TestGetMissedRoundsInEpochByBlockNum(t *testing.T) { assert.NotEqual(t, data.MissedRounds[0].Miner, data.MissedRounds[1].Miner) } + +func TestGetEpochNumbersBetween(t *testing.T) { + _, bc, _, _, _ := PrepareXDCTestBlockChainWith128Candidates(t, 1802, params.TestXDPoSMockChainConfig) + + engine := bc.GetBlockChain().Engine().(*XDPoS.XDPoS) + + begin := rpc.BlockNumber(1800) + end := rpc.BlockNumber(1802) + numbers, err := engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.True(t, reflect.DeepEqual([]uint64{1800}, numbers)) + assert.Nil(t, err) + + begin = rpc.BlockNumber(1799) + end = 
rpc.BlockNumber(1802) + numbers, err = engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.True(t, reflect.DeepEqual([]uint64{1800}, numbers)) + assert.Nil(t, err) + + begin = rpc.BlockNumber(1799) + end = rpc.BlockNumber(1802) + numbers, err = engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.True(t, reflect.DeepEqual([]uint64{1800}, numbers)) + assert.Nil(t, err) + + begin = rpc.BlockNumber(901) + end = rpc.BlockNumber(1802) + numbers, err = engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.True(t, reflect.DeepEqual([]uint64{901, 1800}, numbers)) + assert.Nil(t, err) + + // 900 is V1, not V2, so error + begin = rpc.BlockNumber(900) + end = rpc.BlockNumber(1802) + numbers, err = engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.Nil(t, numbers) + assert.EqualError(t, err, "not supported in the v1 consensus") + + // 1803 not exist + begin = rpc.BlockNumber(901) + end = rpc.BlockNumber(1803) + numbers, err = engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.Nil(t, numbers) + assert.EqualError(t, err, "illegal end block number") + + // 1803 not exist + begin = rpc.BlockNumber(1803) + end = rpc.BlockNumber(1803) + numbers, err = engine.APIs(bc.GetBlockChain())[0].Service.(*XDPoS.API).GetEpochNumbersBetween(&begin, &end) + + assert.Nil(t, numbers) + assert.EqualError(t, err, "illegal begin block number") +} diff --git a/consensus/tests/engine_v2_tests/helper.go b/consensus/tests/engine_v2_tests/helper.go index 2856b55f9f86..454f482d7172 100644 --- a/consensus/tests/engine_v2_tests/helper.go +++ b/consensus/tests/engine_v2_tests/helper.go @@ -5,6 +5,7 @@ import ( "context" "crypto/ecdsa" "encoding/hex" + "errors" "fmt" "math/big" "math/rand" @@ -103,9 +104,9 @@ func voteTX(gasLimit uint64, nonce uint64, addr 
string) (*types.Transaction, err amountInt := new(big.Int) amount, ok := amountInt.SetString("60000", 10) if !ok { - return nil, fmt.Errorf("big int init failed") + return nil, errors.New("big int init failed") } - to := common.HexToAddress(common.MasternodeVotingSMC) + to := common.MasternodeVotingSMCBinary tx := types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data) signedTX, err := types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(chainID)), voterKey) @@ -189,11 +190,11 @@ func getCommonBackend(t *testing.T, chainConfig *params.ChainConfig) *backends.S // create test backend with smart contract in it contractBackend2 := backends.NewXDCSimulatedBackend(core.GenesisAlloc{ - acc1Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - acc2Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - acc3Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - voterAddr: {Balance: new(big.Int).SetUint64(10000000000)}, - common.HexToAddress(common.MasternodeVotingSMC): {Balance: new(big.Int).SetUint64(1), Code: code, Storage: storage}, // Binding the MasternodeVotingSMC with newly created 'code' for SC execution + acc1Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + acc2Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + acc3Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + voterAddr: {Balance: new(big.Int).SetUint64(10000000000)}, + common.MasternodeVotingSMCBinary: {Balance: new(big.Int).SetUint64(1), Code: code, Storage: storage}, // Binding the MasternodeVotingSMC with newly created 'code' for SC execution }, 10000000, chainConfig) return contractBackend2 @@ -273,18 +274,18 @@ func getMultiCandidatesBackend(t *testing.T, chainConfig *params.ChainConfig, n // create test backend with smart contract in it contractBackend2 := backends.NewXDCSimulatedBackend(core.GenesisAlloc{ - acc1Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - acc2Addr: {Balance: new(big.Int).SetUint64(10000000000)}, - acc3Addr: {Balance: 
new(big.Int).SetUint64(10000000000)}, - voterAddr: {Balance: new(big.Int).SetUint64(10000000000)}, - common.HexToAddress(common.MasternodeVotingSMC): {Balance: new(big.Int).SetUint64(1), Code: code, Storage: storage}, // Binding the MasternodeVotingSMC with newly created 'code' for SC execution + acc1Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + acc2Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + acc3Addr: {Balance: new(big.Int).SetUint64(10000000000)}, + voterAddr: {Balance: new(big.Int).SetUint64(10000000000)}, + common.MasternodeVotingSMCBinary: {Balance: new(big.Int).SetUint64(1), Code: code, Storage: storage}, // Binding the MasternodeVotingSMC with newly created 'code' for SC execution }, 10000000, chainConfig) return contractBackend2 } func signingTxWithKey(header *types.Header, nonce uint64, privateKey *ecdsa.PrivateKey) (*types.Transaction, error) { - tx := contracts.CreateTxSign(header.Number, header.Hash(), nonce, common.HexToAddress(common.BlockSigners)) + tx := contracts.CreateTxSign(header.Number, header.Hash(), nonce, common.BlockSignersBinary) s := types.LatestSignerForChainID(big.NewInt(chainID)) h := s.Hash(tx) sig, err := crypto.Sign(h[:], privateKey) @@ -299,7 +300,7 @@ func signingTxWithKey(header *types.Header, nonce uint64, privateKey *ecdsa.Priv } func signingTxWithSignerFn(header *types.Header, nonce uint64, signer common.Address, signFn func(account accounts.Account, hash []byte) ([]byte, error)) (*types.Transaction, error) { - tx := contracts.CreateTxSign(header.Number, header.Hash(), nonce, common.HexToAddress(common.BlockSigners)) + tx := contracts.CreateTxSign(header.Number, header.Hash(), nonce, common.BlockSignersBinary) s := types.LatestSignerForChainID(big.NewInt(chainID)) h := s.Hash(tx) sig, err := signFn(accounts.Account{Address: signer}, h[:]) @@ -335,7 +336,7 @@ func GetSnapshotSigner(bc *BlockChain, header *types.Header) (signersList, error } func GetCandidateFromCurrentSmartContract(backend 
bind.ContractBackend, t *testing.T) masterNodes { - addr := common.HexToAddress(common.MasternodeVotingSMC) + addr := common.MasternodeVotingSMCBinary validator, err := contractValidator.NewXDCValidator(addr, backend) if err != nil { t.Fatal(err) @@ -633,7 +634,7 @@ func CreateBlock(blockchain *BlockChain, chainConfig *params.ChainConfig, starti // Sign all the things for v1 block use v1 sigHash function sighash, err := signFn(accounts.Account{Address: signer}, blockchain.Engine().(*XDPoS.XDPoS).SigHash(header).Bytes()) if err != nil { - panic(fmt.Errorf("Error when sign last v1 block hash during test block creation")) + panic(errors.New("Error when sign last v1 block hash during test block creation")) } copy(header.Extra[len(header.Extra)-utils.ExtraSeal:], sighash) } @@ -737,7 +738,7 @@ func findSignerAndSignFn(bc *BlockChain, header *types.Header, signer common.Add var decodedExtraField types.ExtraFields_v2 err := utils.DecodeBytesExtraFields(header.Extra, &decodedExtraField) if err != nil { - panic(fmt.Errorf("fail to seal header for v2 block")) + panic(errors.New("fail to seal header for v2 block")) } round := decodedExtraField.Round masterNodes := getMasternodesList(signer) @@ -757,7 +758,7 @@ func findSignerAndSignFn(bc *BlockChain, header *types.Header, signer common.Add } addressedSignFn = signFn if err != nil { - panic(fmt.Errorf("Error trying to use one of the pre-defined private key to sign")) + panic(errors.New("Error trying to use one of the pre-defined private key to sign")) } } diff --git a/console/bridge.go b/console/bridge.go index 80606a687862..80f809533678 100644 --- a/console/bridge.go +++ b/console/bridge.go @@ -18,6 +18,7 @@ package console import ( "encoding/json" + "errors" "fmt" "io" "reflect" @@ -75,18 +76,18 @@ func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) { return nil, err } if password != confirm { - return nil, fmt.Errorf("passwords don't match!") + return nil, errors.New("passwords don't match!") } // A single 
string password was specified, use that case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil: password = call.Argument(0).ToString().String() default: - return nil, fmt.Errorf("expected 0 or 1 string argument") + return nil, errors.New("expected 0 or 1 string argument") } // Password acquired, execute the call and return newAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("newAccount")) if !callable { - return nil, fmt.Errorf("jeth.newAccount is not callable") + return nil, errors.New("jeth.newAccount is not callable") } ret, err := newAccount(goja.Null(), call.VM.ToValue(password)) if err != nil { @@ -100,7 +101,7 @@ func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) { func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) { // Make sure we have a wallet specified to open if call.Argument(0).ToObject(call.VM).ClassName() != "String" { - return nil, fmt.Errorf("first argument must be the wallet URL to open") + return nil, errors.New("first argument must be the wallet URL to open") } wallet := call.Argument(0) @@ -113,7 +114,7 @@ func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) { // Open the wallet and return if successful in itself openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet")) if !callable { - return nil, fmt.Errorf("jeth.openWallet is not callable") + return nil, errors.New("jeth.openWallet is not callable") } val, err := openWallet(goja.Null(), wallet, passwd) if err == nil { @@ -147,7 +148,7 @@ func (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, erro } openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet")) if !callable { - return nil, fmt.Errorf("jeth.openWallet is not callable") + return nil, errors.New("jeth.openWallet is not callable") } return openWallet(goja.Null(), wallet, call.VM.ToValue(input)) } @@ -168,7 +169,7 @@ func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) { } openWallet, 
callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet")) if !callable { - return nil, fmt.Errorf("jeth.openWallet is not callable") + return nil, errors.New("jeth.openWallet is not callable") } return openWallet(goja.Null(), wallet, call.VM.ToValue(input)) } @@ -180,7 +181,7 @@ func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) { func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { // Make sure we have an account specified to unlock. if call.Argument(0).ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("first argument must be the account to unlock") + return nil, errors.New("first argument must be the account to unlock") } account := call.Argument(0) @@ -195,7 +196,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { passwd = call.VM.ToValue(input) } else { if call.Argument(1).ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("password must be a string") + return nil, errors.New("password must be a string") } passwd = call.Argument(1) } @@ -204,7 +205,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { duration := goja.Null() if !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) { if !isNumber(call.Argument(2)) { - return nil, fmt.Errorf("unlock duration must be a number") + return nil, errors.New("unlock duration must be a number") } duration = call.Argument(2) } @@ -212,7 +213,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { // Send the request to the backend and return. 
unlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount")) if !callable { - return nil, fmt.Errorf("jeth.unlockAccount is not callable") + return nil, errors.New("jeth.unlockAccount is not callable") } return unlockAccount(goja.Null(), account, passwd, duration) } @@ -228,10 +229,10 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { ) if message.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("first argument must be the message to sign") + return nil, errors.New("first argument must be the message to sign") } if account.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("second argument must be the account to sign with") + return nil, errors.New("second argument must be the account to sign with") } // if the password is not given or null ask the user and ensure password is a string @@ -243,13 +244,13 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { } passwd = call.VM.ToValue(input) } else if passwd.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("third argument must be the password to unlock the account") + return nil, errors.New("third argument must be the password to unlock the account") } // Send the request to the backend and return sign, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount")) if !callable { - return nil, fmt.Errorf("jeth.unlockAccount is not callable") + return nil, errors.New("jeth.unlockAccount is not callable") } return sign(goja.Null(), message, account, passwd) } @@ -257,7 +258,7 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { // Sleep will block the console for the specified number of seconds. 
func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) { if !isNumber(call.Argument(0)) { - return nil, fmt.Errorf("usage: sleep()") + return nil, errors.New("usage: sleep()") } sleep := call.Argument(0).ToFloat() time.Sleep(time.Duration(sleep * float64(time.Second))) @@ -274,17 +275,17 @@ func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) { ) nArgs := len(call.Arguments) if nArgs == 0 { - return nil, fmt.Errorf("usage: sleepBlocks([, max sleep in seconds])") + return nil, errors.New("usage: sleepBlocks([, max sleep in seconds])") } if nArgs >= 1 { if !isNumber(call.Argument(0)) { - return nil, fmt.Errorf("expected number as first argument") + return nil, errors.New("expected number as first argument") } blocks = call.Argument(0).ToInteger() } if nArgs >= 2 { if isNumber(call.Argument(1)) { - return nil, fmt.Errorf("expected number as second argument") + return nil, errors.New("expected number as second argument") } sleep = call.Argument(1).ToInteger() } @@ -361,7 +362,7 @@ func (b *bridge) Send(call jsre.Call) (goja.Value, error) { JSON := call.VM.Get("JSON").ToObject(call.VM) parse, callable := goja.AssertFunction(JSON.Get("parse")) if !callable { - return nil, fmt.Errorf("JSON.parse is not a function") + return nil, errors.New("JSON.parse is not a function") } resultVal, err := parse(goja.Null(), call.VM.ToValue(string(result))) if err != nil { diff --git a/console/console_test.go b/console/console_test.go index 04a1f5a2e49d..9433026db868 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -31,6 +31,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/consensus/ethash" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/eth" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/internal/jsre" "github.com/XinFinOrg/XDPoSChain/node" ) @@ -84,7 +85,7 @@ type tester struct { // newTester creates a test environment based on which the console can operate. 
// Please ensure you call Close() on the returned tester to avoid leaks. -func newTester(t *testing.T, confOverride func(*eth.Config)) *tester { +func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester { // Create a temporary storage for the node keys and initialize it workspace, err := os.MkdirTemp("", "console-tester-") if err != nil { @@ -96,7 +97,7 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester { if err != nil { t.Fatalf("failed to create node: %v", err) } - ethConf := ð.Config{ + ethConf := ðconfig.Config{ Genesis: core.DeveloperGenesisBlock(15, common.Address{}), Etherbase: common.HexToAddress(testAddress), Ethash: ethash.Config{ diff --git a/contracts/chequebook/api.go b/contracts/chequebook/api.go deleted file mode 100644 index 1503caa6999d..000000000000 --- a/contracts/chequebook/api.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package chequebook - -import ( - "errors" - "math/big" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -const Version = "1.0" - -var errNoChequebook = errors.New("no chequebook") - -type Api struct { - chequebookf func() *Chequebook -} - -func NewApi(ch func() *Chequebook) *Api { - return &Api{ch} -} - -func (self *Api) Balance() (string, error) { - ch := self.chequebookf() - if ch == nil { - return "", errNoChequebook - } - return ch.Balance().String(), nil -} - -func (self *Api) Issue(beneficiary common.Address, amount *big.Int) (cheque *Cheque, err error) { - ch := self.chequebookf() - if ch == nil { - return nil, errNoChequebook - } - return ch.Issue(beneficiary, amount) -} - -func (self *Api) Cash(cheque *Cheque) (txhash string, err error) { - ch := self.chequebookf() - if ch == nil { - return "", errNoChequebook - } - return ch.Cash(cheque) -} - -func (self *Api) Deposit(amount *big.Int) (txhash string, err error) { - ch := self.chequebookf() - if ch == nil { - return "", errNoChequebook - } - return ch.Deposit(amount) -} diff --git a/contracts/chequebook/cheque.go b/contracts/chequebook/cheque.go deleted file mode 100644 index 8917d6ab5a82..000000000000 --- a/contracts/chequebook/cheque.go +++ /dev/null @@ -1,640 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package chequebook package wraps the 'chequebook' Ethereum smart contract. -// -// The functions in this package allow using chequebook for -// issuing, receiving, verifying cheques in ether; (auto)cashing cheques in ether -// as well as (auto)depositing ether to the chequebook contract. -package chequebook - -//go:generate abigen --sol contract/chequebook.sol --exc contract/mortal.sol:mortal,contract/owned.sol:owned --pkg contract --out contract/chequebook.go -//go:generate go run ./gencode.go - -import ( - "bytes" - "context" - "crypto/ecdsa" - "encoding/json" - "fmt" - "math/big" - "os" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/common/hexutil" - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook/contract" - "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/services/swap/swap" -) - -// TODO(zelig): watch peer solvency and notify of bouncing cheques -// TODO(zelig): enable paying with cheque by signing off - -// Some functionality requires interacting with the blockchain: -// * setting current balance on peer's chequebook -// * sending the transaction to cash the cheque -// * depositing ether to the chequebook -// * watching incoming ether - -var ( - gasToCash = uint64(2000000) // gas cost of a cash transaction using chequebook - // gasToDeploy = uint64(3000000) -) - -// Backend wraps all methods required for chequebook operation. 
-type Backend interface { - bind.ContractBackend - TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) - BalanceAt(ctx context.Context, address common.Address, blockNum *big.Int) (*big.Int, error) -} - -// Cheque represents a payment promise to a single beneficiary. -type Cheque struct { - Contract common.Address // address of chequebook, needed to avoid cross-contract submission - Beneficiary common.Address - Amount *big.Int // cumulative amount of all funds sent - Sig []byte // signature Sign(Keccak256(contract, beneficiary, amount), prvKey) -} - -func (self *Cheque) String() string { - return fmt.Sprintf("contract: %s, beneficiary: %s, amount: %v, signature: %x", self.Contract.Hex(), self.Beneficiary.Hex(), self.Amount, self.Sig) -} - -type Params struct { - ContractCode, ContractAbi string -} - -var ContractParams = &Params{contract.ChequebookBin, contract.ChequebookABI} - -// Chequebook can create and sign cheques from a single contract to multiple beneficiaries. -// It is the outgoing payment handler for peer to peer micropayments. 
-type Chequebook struct { - path string // path to chequebook file - prvKey *ecdsa.PrivateKey // private key to sign cheque with - lock sync.Mutex // - backend Backend // blockchain API - quit chan bool // when closed causes autodeposit to stop - owner common.Address // owner address (derived from pubkey) - contract *contract.Chequebook // abigen binding - session *contract.ChequebookSession // abigen binding with Tx Opts - - // persisted fields - balance *big.Int // not synced with blockchain - contractAddr common.Address // contract address - sent map[common.Address]*big.Int //tallies for beneficiaries - - txhash string // tx hash of last deposit tx - threshold *big.Int // threshold that triggers autodeposit if not nil - buffer *big.Int // buffer to keep on top of balance for fork protection - - log log.Logger // contextual logger with the contract address embedded -} - -func (self *Chequebook) String() string { - return fmt.Sprintf("contract: %s, owner: %s, balance: %v, signer: %x", self.contractAddr.Hex(), self.owner.Hex(), self.balance, self.prvKey.PublicKey) -} - -// NewChequebook creates a new Chequebook. 
-func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.PrivateKey, backend Backend) (self *Chequebook, err error) { - balance := new(big.Int) - sent := make(map[common.Address]*big.Int) - - chbook, err := contract.NewChequebook(contractAddr, backend) - if err != nil { - return nil, err - } - transactOpts := bind.NewKeyedTransactor(prvKey) - session := &contract.ChequebookSession{ - Contract: chbook, - TransactOpts: *transactOpts, - } - - self = &Chequebook{ - prvKey: prvKey, - balance: balance, - contractAddr: contractAddr, - sent: sent, - path: path, - backend: backend, - owner: transactOpts.From, - contract: chbook, - session: session, - log: log.New("contract", contractAddr), - } - - if (contractAddr != common.Address{}) { - self.setBalanceFromBlockChain() - self.log.Trace("New chequebook initialised", "owner", self.owner, "balance", self.balance) - } - return -} - -func (self *Chequebook) setBalanceFromBlockChain() { - balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil) - if err != nil { - log.Error("Failed to retrieve chequebook balance", "err", err) - } else { - self.balance.Set(balance) - } -} - -// LoadChequebook loads a chequebook from disk (file path). -func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, checkBalance bool) (self *Chequebook, err error) { - var data []byte - data, err = os.ReadFile(path) - if err != nil { - return - } - self, _ = NewChequebook(path, common.Address{}, prvKey, backend) - - err = json.Unmarshal(data, self) - if err != nil { - return nil, err - } - if checkBalance { - self.setBalanceFromBlockChain() - } - log.Trace("Loaded chequebook from disk", "path", path) - - return -} - -// chequebookFile is the JSON representation of a chequebook. -type chequebookFile struct { - Balance string - Contract string - Owner string - Sent map[string]string -} - -// UnmarshalJSON deserialises a chequebook. 
-func (self *Chequebook) UnmarshalJSON(data []byte) error { - var file chequebookFile - err := json.Unmarshal(data, &file) - if err != nil { - return err - } - _, ok := self.balance.SetString(file.Balance, 10) - if !ok { - return fmt.Errorf("cumulative amount sent: unable to convert string to big integer: %v", file.Balance) - } - self.contractAddr = common.HexToAddress(file.Contract) - for addr, sent := range file.Sent { - self.sent[common.HexToAddress(addr)], ok = new(big.Int).SetString(sent, 10) - if !ok { - return fmt.Errorf("beneficiary %v cumulative amount sent: unable to convert string to big integer: %v", addr, sent) - } - } - return nil -} - -// MarshalJSON serialises a chequebook. -func (self *Chequebook) MarshalJSON() ([]byte, error) { - var file = &chequebookFile{ - Balance: self.balance.String(), - Contract: self.contractAddr.Hex(), - Owner: self.owner.Hex(), - Sent: make(map[string]string), - } - for addr, sent := range self.sent { - file.Sent[addr.Hex()] = sent.String() - } - return json.Marshal(file) -} - -// Save persists the chequebook on disk, remembering balance, contract address and -// cumulative amount of funds sent for each beneficiary. -func (self *Chequebook) Save() (err error) { - data, err := json.MarshalIndent(self, "", " ") - if err != nil { - return err - } - self.log.Trace("Saving chequebook to disk", self.path) - - return os.WriteFile(self.path, data, os.ModePerm) -} - -// Stop quits the autodeposit go routine to terminate -func (self *Chequebook) Stop() { - defer self.lock.Unlock() - self.lock.Lock() - if self.quit != nil { - close(self.quit) - self.quit = nil - } -} - -// Issue creates a cheque signed by the chequebook owner's private key. The -// signer commits to a contract (one that they own), a beneficiary and amount. 
-func (self *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (ch *Cheque, err error) { - defer self.lock.Unlock() - self.lock.Lock() - - if amount.Sign() <= 0 { - return nil, fmt.Errorf("amount must be greater than zero (%v)", amount) - } - if self.balance.Cmp(amount) < 0 { - err = fmt.Errorf("insufficient funds to issue cheque for amount: %v. balance: %v", amount, self.balance) - } else { - var sig []byte - sent, found := self.sent[beneficiary] - if !found { - sent = new(big.Int) - self.sent[beneficiary] = sent - } - sum := new(big.Int).Set(sent) - sum.Add(sum, amount) - - sig, err = crypto.Sign(sigHash(self.contractAddr, beneficiary, sum), self.prvKey) - if err == nil { - ch = &Cheque{ - Contract: self.contractAddr, - Beneficiary: beneficiary, - Amount: sum, - Sig: sig, - } - sent.Set(sum) - self.balance.Sub(self.balance, amount) // subtract amount from balance - } - } - - // auto deposit if threshold is set and balance is less then threshold - // note this is called even if issuing cheque fails - // so we reattempt depositing - if self.threshold != nil { - if self.balance.Cmp(self.threshold) < 0 { - send := new(big.Int).Sub(self.buffer, self.balance) - self.deposit(send) - } - } - - return -} - -// Cash is a convenience method to cash any cheque. -func (self *Chequebook) Cash(ch *Cheque) (txhash string, err error) { - return ch.Cash(self.session) -} - -// data to sign: contract address, beneficiary, cumulative amount of funds ever sent -func sigHash(contract, beneficiary common.Address, sum *big.Int) []byte { - bigamount := sum.Bytes() - if len(bigamount) > 32 { - return nil - } - var amount32 [32]byte - copy(amount32[32-len(bigamount):32], bigamount) - input := append(contract.Bytes(), beneficiary.Bytes()...) - input = append(input, amount32[:]...) - return crypto.Keccak256(input) -} - -// Balance returns the current balance of the chequebook. 
-func (self *Chequebook) Balance() *big.Int { - defer self.lock.Unlock() - self.lock.Lock() - return new(big.Int).Set(self.balance) -} - -// Owner returns the owner account of the chequebook. -func (self *Chequebook) Owner() common.Address { - return self.owner -} - -// Address returns the on-chain contract address of the chequebook. -func (self *Chequebook) Address() common.Address { - return self.contractAddr -} - -// Deposit deposits money to the chequebook account. -func (self *Chequebook) Deposit(amount *big.Int) (string, error) { - defer self.lock.Unlock() - self.lock.Lock() - return self.deposit(amount) -} - -// deposit deposits amount to the chequebook account. -// The caller must hold self.lock. -func (self *Chequebook) deposit(amount *big.Int) (string, error) { - // since the amount is variable here, we do not use sessions - depositTransactor := bind.NewKeyedTransactor(self.prvKey) - depositTransactor.Value = amount - chbookRaw := &contract.ChequebookRaw{Contract: self.contract} - tx, err := chbookRaw.Transfer(depositTransactor) - if err != nil { - self.log.Warn("Failed to fund chequebook", "amount", amount, "balance", self.balance, "target", self.buffer, "err", err) - return "", err - } - // assume that transaction is actually successful, we add the amount to balance right away - self.balance.Add(self.balance, amount) - self.log.Trace("Deposited funds to chequebook", "amount", amount, "balance", self.balance, "target", self.buffer) - return tx.Hash().Hex(), nil -} - -// AutoDeposit (re)sets interval time and amount which triggers sending funds to the -// chequebook. Contract backend needs to be set if threshold is not less than buffer, then -// deposit will be triggered on every new cheque issued. 
-func (self *Chequebook) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) { - defer self.lock.Unlock() - self.lock.Lock() - self.threshold = threshold - self.buffer = buffer - self.autoDeposit(interval) -} - -// autoDeposit starts a goroutine that periodically sends funds to the chequebook -// contract caller holds the lock the go routine terminates if Chequebook.quit is closed. -func (self *Chequebook) autoDeposit(interval time.Duration) { - if self.quit != nil { - close(self.quit) - self.quit = nil - } - // if threshold >= balance autodeposit after every cheque issued - if interval == time.Duration(0) || self.threshold != nil && self.buffer != nil && self.threshold.Cmp(self.buffer) >= 0 { - return - } - - ticker := time.NewTicker(interval) - self.quit = make(chan bool) - quit := self.quit - - go func() { - for { - select { - case <-quit: - return - case <-ticker.C: - self.lock.Lock() - if self.balance.Cmp(self.buffer) < 0 { - amount := new(big.Int).Sub(self.buffer, self.balance) - txhash, err := self.deposit(amount) - if err == nil { - self.txhash = txhash - } - } - self.lock.Unlock() - } - } - }() -} - -// Outbox can issue cheques from a single contract to a single beneficiary. -type Outbox struct { - chequeBook *Chequebook - beneficiary common.Address -} - -// NewOutbox creates an outbox. -func NewOutbox(chbook *Chequebook, beneficiary common.Address) *Outbox { - return &Outbox{chbook, beneficiary} -} - -// Issue creates cheque. -func (self *Outbox) Issue(amount *big.Int) (swap.Promise, error) { - return self.chequeBook.Issue(self.beneficiary, amount) -} - -// AutoDeposit enables auto-deposits on the underlying chequebook. -func (self *Outbox) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) { - self.chequeBook.AutoDeposit(interval, threshold, buffer) -} - -// Stop helps satisfy the swap.OutPayment interface. -func (self *Outbox) Stop() {} - -// String implements fmt.Stringer. 
-func (self *Outbox) String() string { - return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", self.chequeBook.Address().Hex(), self.beneficiary.Hex(), self.chequeBook.Balance()) -} - -// Inbox can deposit, verify and cash cheques from a single contract to a single -// beneficiary. It is the incoming payment handler for peer to peer micropayments. -type Inbox struct { - lock sync.Mutex - contract common.Address // peer's chequebook contract - beneficiary common.Address // local peer's receiving address - sender common.Address // local peer's address to send cashing tx from - signer *ecdsa.PublicKey // peer's public key - txhash string // tx hash of last cashing tx - session *contract.ChequebookSession // abi contract backend with tx opts - quit chan bool // when closed causes autocash to stop - maxUncashed *big.Int // threshold that triggers autocashing - cashed *big.Int // cumulative amount cashed - cheque *Cheque // last cheque, nil if none yet received - log log.Logger // contextual logger with the contract address embedded -} - -// NewInbox creates an Inbox. An Inboxes is not persisted, the cumulative sum is updated -// from blockchain when first cheque is received. 
-func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address, signer *ecdsa.PublicKey, abigen bind.ContractBackend) (self *Inbox, err error) { - if signer == nil { - return nil, fmt.Errorf("signer is null") - } - chbook, err := contract.NewChequebook(contractAddr, abigen) - if err != nil { - return nil, err - } - transactOpts := bind.NewKeyedTransactor(prvKey) - transactOpts.GasLimit = gasToCash - session := &contract.ChequebookSession{ - Contract: chbook, - TransactOpts: *transactOpts, - } - sender := transactOpts.From - - self = &Inbox{ - contract: contractAddr, - beneficiary: beneficiary, - sender: sender, - signer: signer, - session: session, - cashed: new(big.Int).Set(common.Big0), - log: log.New("contract", contractAddr), - } - self.log.Trace("New chequebook inbox initialized", "beneficiary", self.beneficiary, "signer", hexutil.Bytes(crypto.FromECDSAPub(signer))) - return -} - -func (self *Inbox) String() string { - return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", self.contract.Hex(), self.beneficiary.Hex(), self.cheque.Amount) -} - -// Stop quits the autocash goroutine. -func (self *Inbox) Stop() { - defer self.lock.Unlock() - self.lock.Lock() - if self.quit != nil { - close(self.quit) - self.quit = nil - } -} - -// Cash attempts to cash the current cheque. -func (self *Inbox) Cash() (txhash string, err error) { - if self.cheque != nil { - txhash, err = self.cheque.Cash(self.session) - self.log.Trace("Cashing in chequebook cheque", "amount", self.cheque.Amount, "beneficiary", self.beneficiary) - self.cashed = self.cheque.Amount - } - return -} - -// AutoCash (re)sets maximum time and amount which triggers cashing of the last uncashed -// cheque if maxUncashed is set to 0, then autocash on receipt. 
-func (self *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) { - defer self.lock.Unlock() - self.lock.Lock() - self.maxUncashed = maxUncashed - self.autoCash(cashInterval) -} - -// autoCash starts a loop that periodically clears the last cheque -// if the peer is trusted. Clearing period could be 24h or a week. -// The caller must hold self.lock. -func (self *Inbox) autoCash(cashInterval time.Duration) { - if self.quit != nil { - close(self.quit) - self.quit = nil - } - // if maxUncashed is set to 0, then autocash on receipt - if cashInterval == time.Duration(0) || self.maxUncashed != nil && self.maxUncashed.Sign() == 0 { - return - } - - ticker := time.NewTicker(cashInterval) - self.quit = make(chan bool) - quit := self.quit - - go func() { - for { - select { - case <-quit: - return - case <-ticker.C: - self.lock.Lock() - if self.cheque != nil && self.cheque.Amount.Cmp(self.cashed) != 0 { - txhash, err := self.Cash() - if err == nil { - self.txhash = txhash - } - } - self.lock.Unlock() - } - } - }() -} - -// Receive is called to deposit the latest cheque to the incoming Inbox. -// The given promise must be a *Cheque. 
-func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) { - ch := promise.(*Cheque) - - defer self.lock.Unlock() - self.lock.Lock() - - var sum *big.Int - if self.cheque == nil { - // the sum is checked against the blockchain once a cheque is received - tally, err := self.session.Sent(self.beneficiary) - if err != nil { - return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err) - } - sum = tally - } else { - sum = self.cheque.Amount - } - - amount, err := ch.Verify(self.signer, self.contract, self.beneficiary, sum) - var uncashed *big.Int - if err == nil { - self.cheque = ch - - if self.maxUncashed != nil { - uncashed = new(big.Int).Sub(ch.Amount, self.cashed) - if self.maxUncashed.Cmp(uncashed) < 0 { - self.Cash() - } - } - self.log.Trace("Received cheque in chequebook inbox", "amount", amount, "uncashed", uncashed) - } - - return amount, err -} - -// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature. -func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) { - log.Trace("Verifying chequebook cheque", "cheque", self, "sum", sum) - if sum == nil { - return nil, fmt.Errorf("invalid amount") - } - - if self.Beneficiary != beneficiary { - return nil, fmt.Errorf("beneficiary mismatch: %v != %v", self.Beneficiary.Hex(), beneficiary.Hex()) - } - if self.Contract != contract { - return nil, fmt.Errorf("contract mismatch: %v != %v", self.Contract.Hex(), contract.Hex()) - } - - amount := new(big.Int).Set(self.Amount) - if sum != nil { - amount.Sub(amount, sum) - if amount.Sign() <= 0 { - return nil, fmt.Errorf("incorrect amount: %v <= 0", amount) - } - } - - pubKey, err := crypto.SigToPub(sigHash(self.Contract, beneficiary, self.Amount), self.Sig) - if err != nil { - return nil, fmt.Errorf("invalid signature: %v", err) - } - if !bytes.Equal(crypto.FromECDSAPub(pubKey), crypto.FromECDSAPub(signerKey)) { - return nil, fmt.Errorf("signer 
mismatch: %x != %x", crypto.FromECDSAPub(pubKey), crypto.FromECDSAPub(signerKey)) - } - return amount, nil -} - -// v/r/s representation of signature -func sig2vrs(sig []byte) (v byte, r, s [32]byte) { - v = sig[64] + 27 - copy(r[:], sig[:32]) - copy(s[:], sig[32:64]) - return -} - -// Cash cashes the cheque by sending an Ethereum transaction. -func (self *Cheque) Cash(session *contract.ChequebookSession) (string, error) { - v, r, s := sig2vrs(self.Sig) - tx, err := session.Cash(self.Beneficiary, self.Amount, v, r, s) - if err != nil { - return "", err - } - return tx.Hash().Hex(), nil -} - -// ValidateCode checks that the on-chain code at address matches the expected chequebook -// contract code. This is used to detect suicided chequebooks. -func ValidateCode(ctx context.Context, b Backend, address common.Address) (ok bool, err error) { - code, err := b.CodeAt(ctx, address, nil) - if err != nil { - return false, err - } - return bytes.Equal(code, common.FromHex(contract.ContractDeployedCode)), nil -} diff --git a/contracts/chequebook/cheque_test.go b/contracts/chequebook/cheque_test.go deleted file mode 100644 index ee1704b7badf..000000000000 --- a/contracts/chequebook/cheque_test.go +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . - -package chequebook - -import ( - "crypto/ecdsa" - "math/big" - "os" - "path/filepath" - "testing" - "time" - - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind/backends" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook/contract" - "github.com/XinFinOrg/XDPoSChain/core" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/params" -) - -var ( - key0, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key1, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - key2, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - addr0 = crypto.PubkeyToAddress(key0.PublicKey) - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) -) - -func newTestBackend() *backends.SimulatedBackend { - return backends.NewXDCSimulatedBackend(core.GenesisAlloc{ - addr0: {Balance: big.NewInt(1000000000)}, - addr1: {Balance: big.NewInt(1000000000)}, - addr2: {Balance: big.NewInt(1000000000)}, - }, 10000000, params.TestXDPoSMockChainConfig) -} - -func deploy(prvKey *ecdsa.PrivateKey, amount *big.Int, backend *backends.SimulatedBackend) (common.Address, error) { - deployTransactor := bind.NewKeyedTransactor(prvKey) - deployTransactor.Value = amount - addr, _, _, err := contract.DeployChequebook(deployTransactor, backend) - if err != nil { - return common.Address{}, err - } - backend.Commit() - return addr, nil -} - -func TestIssueAndReceive(t *testing.T) { - path := filepath.Join(os.TempDir(), "chequebook-test.json") - backend := newTestBackend() - addr0, err := deploy(key0, big.NewInt(0), backend) - if err != nil { - t.Fatalf("deploy contract: expected no error, got %v", err) - } - chbook, err := NewChequebook(path, addr0, key0, backend) - if err != nil { - t.Fatalf("expected no error, got %v", 
err) - } - chbook.sent[addr1] = new(big.Int).SetUint64(42) - amount := common.Big1 - - if _, err = chbook.Issue(addr1, amount); err == nil { - t.Fatalf("expected insufficient funds error, got none") - } - - chbook.balance = new(big.Int).Set(common.Big1) - if chbook.Balance().Cmp(common.Big1) != 0 { - t.Fatalf("expected: %v, got %v", "0", chbook.Balance()) - } - - ch, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - if chbook.Balance().Sign() != 0 { - t.Errorf("expected: %v, got %v", "0", chbook.Balance()) - } - - chbox, err := NewInbox(key1, addr0, addr1, &key0.PublicKey, backend) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - received, err := chbox.Receive(ch) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - if received.Cmp(big.NewInt(43)) != 0 { - t.Errorf("expected: %v, got %v", "43", received) - } - -} - -func TestCheckbookFile(t *testing.T) { - path := filepath.Join(os.TempDir(), "chequebook-test.json") - backend := newTestBackend() - chbook, err := NewChequebook(path, addr0, key0, backend) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - chbook.sent[addr1] = new(big.Int).SetUint64(42) - chbook.balance = new(big.Int).Set(common.Big1) - - chbook.Save() - - chbook, err = LoadChequebook(path, key0, backend, false) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - if chbook.Balance().Cmp(common.Big1) != 0 { - t.Errorf("expected: %v, got %v", "0", chbook.Balance()) - } - - var ch *Cheque - if ch, err = chbook.Issue(addr1, common.Big1); err != nil { - t.Fatalf("expected no error, got %v", err) - } - if ch.Amount.Cmp(new(big.Int).SetUint64(43)) != 0 { - t.Errorf("expected: %v, got %v", "0", ch.Amount) - } - - err = chbook.Save() - if err != nil { - t.Fatalf("expected no error, got %v", err) - } -} - -func TestVerifyErrors(t *testing.T) { - path0 := filepath.Join(os.TempDir(), "chequebook-test-0.json") - backend := 
newTestBackend() - contr0, err := deploy(key0, common.Big2, backend) - if err != nil { - t.Errorf("expected no error, got %v", err) - } - chbook0, err := NewChequebook(path0, contr0, key0, backend) - if err != nil { - t.Errorf("expected no error, got %v", err) - } - - path1 := filepath.Join(os.TempDir(), "chequebook-test-1.json") - contr1, _ := deploy(key1, common.Big2, backend) - chbook1, err := NewChequebook(path1, contr1, key1, backend) - if err != nil { - t.Errorf("expected no error, got %v", err) - } - - chbook0.sent[addr1] = new(big.Int).SetUint64(42) - chbook0.balance = new(big.Int).Set(common.Big2) - chbook1.balance = new(big.Int).Set(common.Big1) - amount := common.Big1 - ch0, err := chbook0.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - chbox, err := NewInbox(key1, contr0, addr1, &key0.PublicKey, backend) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - received, err := chbox.Receive(ch0) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - if received.Cmp(big.NewInt(43)) != 0 { - t.Errorf("expected: %v, got %v", "43", received) - } - - ch1, err := chbook0.Issue(addr2, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - received, err = chbox.Receive(ch1) - t.Logf("correct error: %v", err) - if err == nil { - t.Fatalf("expected receiver error, got none and value %v", received) - } - - ch2, err := chbook1.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - received, err = chbox.Receive(ch2) - t.Logf("correct error: %v", err) - if err == nil { - t.Fatalf("expected sender error, got none and value %v", received) - } - - _, err = chbook1.Issue(addr1, new(big.Int).SetInt64(-1)) - t.Logf("correct error: %v", err) - if err == nil { - t.Fatalf("expected incorrect amount error, got none") - } - - received, err = chbox.Receive(ch0) - t.Logf("correct error: %v", err) - if err == nil { - t.Fatalf("expected incorrect 
amount error, got none and value %v", received) - } - -} - -func TestDeposit(t *testing.T) { - path0 := filepath.Join(os.TempDir(), "chequebook-test-0.json") - backend := newTestBackend() - contr0, _ := deploy(key0, new(big.Int), backend) - - chbook, err := NewChequebook(path0, contr0, key0, backend) - if err != nil { - t.Errorf("expected no error, got %v", err) - } - - balance := new(big.Int).SetUint64(42) - chbook.Deposit(balance) - backend.Commit() - if chbook.Balance().Cmp(balance) != 0 { - t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) - } - - amount := common.Big1 - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - exp := new(big.Int).SetUint64(41) - if chbook.Balance().Cmp(exp) != 0 { - t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) - } - - // autodeposit on each issue - chbook.AutoDeposit(0, balance, balance) - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - if chbook.Balance().Cmp(balance) != 0 { - t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) - } - - // autodeposit off - chbook.AutoDeposit(0, common.Big0, balance) - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - exp = new(big.Int).SetUint64(40) - if chbook.Balance().Cmp(exp) != 0 { - t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) - } - - // autodeposit every 200ms if new cheque issued - interval := 200 * time.Millisecond - chbook.AutoDeposit(interval, common.Big1, balance) - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, 
got %v", err) - } - backend.Commit() - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - exp = new(big.Int).SetUint64(38) - if chbook.Balance().Cmp(exp) != 0 { - t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) - } - - time.Sleep(3 * interval) - backend.Commit() - if chbook.Balance().Cmp(balance) != 0 { - t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) - } - - exp = new(big.Int).SetUint64(40) - chbook.AutoDeposit(4*interval, exp, balance) - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - time.Sleep(3 * interval) - backend.Commit() - if chbook.Balance().Cmp(exp) != 0 { - t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) - } - - _, err = chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - time.Sleep(1 * interval) - backend.Commit() - - if chbook.Balance().Cmp(balance) != 0 { - t.Fatalf("expected balance %v, got %v", balance, chbook.Balance()) - } - - chbook.AutoDeposit(1*interval, common.Big0, balance) - chbook.Stop() - - _, err = chbook.Issue(addr1, common.Big1) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - _, err = chbook.Issue(addr1, common.Big2) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - time.Sleep(1 * interval) - backend.Commit() - - exp = new(big.Int).SetUint64(39) - if chbook.Balance().Cmp(exp) != 0 { - t.Fatalf("expected balance %v, got %v", exp, chbook.Balance()) - } - -} - -func TestCash(t *testing.T) { - path := filepath.Join(os.TempDir(), "chequebook-test.json") - backend := newTestBackend() - contr0, _ := deploy(key0, common.Big2, backend) - - chbook, err := NewChequebook(path, contr0, key0, backend) - if err != nil { - t.Errorf("expected 
no error, got %v", err) - } - chbook.sent[addr1] = new(big.Int).SetUint64(42) - amount := common.Big1 - chbook.balance = new(big.Int).Set(common.Big1) - ch, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - chbox, err := NewInbox(key1, contr0, addr1, &key0.PublicKey, backend) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // cashing latest cheque - if _, err = chbox.Receive(ch); err != nil { - t.Fatalf("expected no error, got %v", err) - } - if _, err = ch.Cash(chbook.session); err != nil { - t.Fatal("Cash failed:", err) - } - backend.Commit() - - chbook.balance = new(big.Int).Set(common.Big3) - ch0, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - ch1, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - interval := 10 * time.Millisecond - // setting autocash with interval of 10ms - chbox.AutoCash(interval, nil) - _, err = chbox.Receive(ch0) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - _, err = chbox.Receive(ch1) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - // after 3x interval time and 2 cheques received, exactly one cashing tx is sent - time.Sleep(4 * interval) - backend.Commit() - - // after stopping autocash no more tx are sent - ch2, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - chbox.Stop() - _, err = chbox.Receive(ch2) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - time.Sleep(2 * interval) - backend.Commit() - - // autocash below 1 - chbook.balance = big.NewInt(2) - chbox.AutoCash(0, common.Big1) - - ch3, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - ch4, err := chbook.Issue(addr1, amount) - if 
err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - _, err = chbox.Receive(ch3) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - _, err = chbox.Receive(ch4) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - // autochash on receipt when maxUncashed is 0 - chbook.balance = new(big.Int).Set(common.Big2) - chbox.AutoCash(0, common.Big0) - - ch5, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - ch6, err := chbook.Issue(addr1, amount) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - _, err = chbox.Receive(ch5) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - - _, err = chbox.Receive(ch6) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - backend.Commit() - -} diff --git a/contracts/chequebook/contract/chequebook.go b/contracts/chequebook/contract/chequebook.go deleted file mode 100644 index cee0c2f3da9f..000000000000 --- a/contracts/chequebook/contract/chequebook.go +++ /dev/null @@ -1,367 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contract - -import ( - "math/big" - "strings" - - ethereum "github.com/XinFinOrg/XDPoSChain" - "github.com/XinFinOrg/XDPoSChain/accounts/abi" - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/event" -) - -// ChequebookABI is the input ABI used to generate the binding from. 
-const ChequebookABI = "[{\"constant\":false,\"inputs\":[],\"name\":\"kill\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"address\"}],\"name\":\"sent\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"beneficiary\",\"type\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\"},{\"name\":\"sig_v\",\"type\":\"uint8\"},{\"name\":\"sig_r\",\"type\":\"bytes32\"},{\"name\":\"sig_s\",\"type\":\"bytes32\"}],\"name\":\"cash\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"payable\":true,\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"deadbeat\",\"type\":\"address\"}],\"name\":\"Overdraft\",\"type\":\"event\"}]" - -// ChequebookBin is the compiled bytecode used for deploying new contracts. 
-const ChequebookBin = `0x606060405260008054600160a060020a033316600160a060020a03199091161790556102ec806100306000396000f3006060604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166341c0e1b581146100585780637bf786f81461006b578063fbf788d61461009c575b005b341561006357600080fd5b6100566100ca565b341561007657600080fd5b61008a600160a060020a03600435166100f1565b60405190815260200160405180910390f35b34156100a757600080fd5b610056600160a060020a036004351660243560ff60443516606435608435610103565b60005433600160a060020a03908116911614156100ef57600054600160a060020a0316ff5b565b60016020526000908152604090205481565b600160a060020a0385166000908152600160205260408120548190861161012957600080fd5b3087876040516c01000000000000000000000000600160a060020a03948516810282529290931690910260148301526028820152604801604051809103902091506001828686866040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f115156101cf57600080fd5b505060206040510351600054600160a060020a039081169116146101f257600080fd5b50600160a060020a03808716600090815260016020526040902054860390301631811161026257600160a060020a0387166000818152600160205260409081902088905582156108fc0290839051600060405180830381858888f19350505050151561025d57600080fd5b6102b7565b6000547f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f97890600160a060020a0316604051600160a060020a03909116815260200160405180910390a186600160a060020a0316ff5b505050505050505600a165627a7a72305820533e856fc37e3d64d1706bcc7dfb6b1d490c8d566ea498d9d01ec08965a896ca0029` - -// DeployChequebook deploys a new Ethereum contract, binding an instance of Chequebook to it. 
-func DeployChequebook(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Chequebook, error) { - parsed, err := abi.JSON(strings.NewReader(ChequebookABI)) - if err != nil { - return common.Address{}, nil, nil, err - } - address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(ChequebookBin), backend) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}, ChequebookFilterer: ChequebookFilterer{contract: contract}}, nil -} - -// Chequebook is an auto generated Go binding around an Ethereum contract. -type Chequebook struct { - ChequebookCaller // Read-only binding to the contract - ChequebookTransactor // Write-only binding to the contract - ChequebookFilterer // Log filterer for contract events -} - -// ChequebookCaller is an auto generated read-only Go binding around an Ethereum contract. -type ChequebookCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// ChequebookTransactor is an auto generated write-only Go binding around an Ethereum contract. -type ChequebookTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// ChequebookFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type ChequebookFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// ChequebookSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. 
-type ChequebookSession struct { - Contract *Chequebook // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// ChequebookCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type ChequebookCallerSession struct { - Contract *ChequebookCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// ChequebookTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type ChequebookTransactorSession struct { - Contract *ChequebookTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// ChequebookRaw is an auto generated low-level Go binding around an Ethereum contract. -type ChequebookRaw struct { - Contract *Chequebook // Generic contract binding to access the raw methods on -} - -// ChequebookCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type ChequebookCallerRaw struct { - Contract *ChequebookCaller // Generic read-only contract binding to access the raw methods on -} - -// ChequebookTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type ChequebookTransactorRaw struct { - Contract *ChequebookTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewChequebook creates a new instance of Chequebook, bound to a specific deployed contract. 
-func NewChequebook(address common.Address, backend bind.ContractBackend) (*Chequebook, error) { - contract, err := bindChequebook(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}, ChequebookFilterer: ChequebookFilterer{contract: contract}}, nil -} - -// NewChequebookCaller creates a new read-only instance of Chequebook, bound to a specific deployed contract. -func NewChequebookCaller(address common.Address, caller bind.ContractCaller) (*ChequebookCaller, error) { - contract, err := bindChequebook(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &ChequebookCaller{contract: contract}, nil -} - -// NewChequebookTransactor creates a new write-only instance of Chequebook, bound to a specific deployed contract. -func NewChequebookTransactor(address common.Address, transactor bind.ContractTransactor) (*ChequebookTransactor, error) { - contract, err := bindChequebook(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &ChequebookTransactor{contract: contract}, nil -} - -// NewChequebookFilterer creates a new log filterer instance of Chequebook, bound to a specific deployed contract. -func NewChequebookFilterer(address common.Address, filterer bind.ContractFilterer) (*ChequebookFilterer, error) { - contract, err := bindChequebook(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &ChequebookFilterer{contract: contract}, nil -} - -// bindChequebook binds a generic wrapper to an already deployed contract. 
-func bindChequebook(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(ChequebookABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Chequebook *ChequebookRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { - return _Chequebook.Contract.ChequebookCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Chequebook *ChequebookRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Chequebook.Contract.ChequebookTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Chequebook *ChequebookRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _Chequebook.Contract.ChequebookTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Chequebook *ChequebookCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { - return _Chequebook.Contract.contract.Call(opts, result, method, params...) 
-} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Chequebook *ChequebookTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Chequebook.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Chequebook *ChequebookTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _Chequebook.Contract.contract.Transact(opts, method, params...) -} - -// Sent is a free data retrieval call binding the contract method 0x7bf786f8. -// -// Solidity: function sent( address) constant returns(uint256) -func (_Chequebook *ChequebookCaller) Sent(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { - var ( - ret0 = new(*big.Int) - ) - out := ret0 - err := _Chequebook.contract.Call(opts, out, "sent", arg0) - return *ret0, err -} - -// Sent is a free data retrieval call binding the contract method 0x7bf786f8. -// -// Solidity: function sent( address) constant returns(uint256) -func (_Chequebook *ChequebookSession) Sent(arg0 common.Address) (*big.Int, error) { - return _Chequebook.Contract.Sent(&_Chequebook.CallOpts, arg0) -} - -// Sent is a free data retrieval call binding the contract method 0x7bf786f8. -// -// Solidity: function sent( address) constant returns(uint256) -func (_Chequebook *ChequebookCallerSession) Sent(arg0 common.Address) (*big.Int, error) { - return _Chequebook.Contract.Sent(&_Chequebook.CallOpts, arg0) -} - -// Cash is a paid mutator transaction binding the contract method 0xfbf788d6. 
-// -// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns() -func (_Chequebook *ChequebookTransactor) Cash(opts *bind.TransactOpts, beneficiary common.Address, amount *big.Int, sig_v uint8, sig_r [32]byte, sig_s [32]byte) (*types.Transaction, error) { - return _Chequebook.contract.Transact(opts, "cash", beneficiary, amount, sig_v, sig_r, sig_s) -} - -// Cash is a paid mutator transaction binding the contract method 0xfbf788d6. -// -// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns() -func (_Chequebook *ChequebookSession) Cash(beneficiary common.Address, amount *big.Int, sig_v uint8, sig_r [32]byte, sig_s [32]byte) (*types.Transaction, error) { - return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sig_v, sig_r, sig_s) -} - -// Cash is a paid mutator transaction binding the contract method 0xfbf788d6. -// -// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns() -func (_Chequebook *ChequebookTransactorSession) Cash(beneficiary common.Address, amount *big.Int, sig_v uint8, sig_r [32]byte, sig_s [32]byte) (*types.Transaction, error) { - return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sig_v, sig_r, sig_s) -} - -// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5. -// -// Solidity: function kill() returns() -func (_Chequebook *ChequebookTransactor) Kill(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Chequebook.contract.Transact(opts, "kill") -} - -// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5. -// -// Solidity: function kill() returns() -func (_Chequebook *ChequebookSession) Kill() (*types.Transaction, error) { - return _Chequebook.Contract.Kill(&_Chequebook.TransactOpts) -} - -// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5. 
-// -// Solidity: function kill() returns() -func (_Chequebook *ChequebookTransactorSession) Kill() (*types.Transaction, error) { - return _Chequebook.Contract.Kill(&_Chequebook.TransactOpts) -} - -// ChequebookOverdraftIterator is returned from FilterOverdraft and is used to iterate over the raw logs and unpacked data for Overdraft events raised by the Chequebook contract. -type ChequebookOverdraftIterator struct { - Event *ChequebookOverdraft // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *ChequebookOverdraftIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(ChequebookOverdraft) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(ChequebookOverdraft) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error retruned any retrieval or parsing error occurred during filtering. -func (it *ChequebookOverdraftIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *ChequebookOverdraftIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// ChequebookOverdraft represents a Overdraft event raised by the Chequebook contract. -type ChequebookOverdraft struct { - Deadbeat common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterOverdraft is a free log retrieval operation binding the contract event 0x2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f978. 
-// -// Solidity: event Overdraft(deadbeat address) -func (_Chequebook *ChequebookFilterer) FilterOverdraft(opts *bind.FilterOpts) (*ChequebookOverdraftIterator, error) { - - logs, sub, err := _Chequebook.contract.FilterLogs(opts, "Overdraft") - if err != nil { - return nil, err - } - return &ChequebookOverdraftIterator{contract: _Chequebook.contract, event: "Overdraft", logs: logs, sub: sub}, nil -} - -// WatchOverdraft is a free log subscription operation binding the contract event 0x2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f978. -// -// Solidity: event Overdraft(deadbeat address) -func (_Chequebook *ChequebookFilterer) WatchOverdraft(opts *bind.WatchOpts, sink chan<- *ChequebookOverdraft) (event.Subscription, error) { - - logs, sub, err := _Chequebook.contract.WatchLogs(opts, "Overdraft") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(ChequebookOverdraft) - if err := _Chequebook.contract.UnpackLog(event, "Overdraft", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} diff --git a/contracts/chequebook/contract/chequebook.sol b/contracts/chequebook/contract/chequebook.sol deleted file mode 100644 index c386cceed856..000000000000 --- a/contracts/chequebook/contract/chequebook.sol +++ /dev/null @@ -1,47 +0,0 @@ -pragma solidity ^0.4.18; - -import "./mortal.sol"; - -/// @title Chequebook for Ethereum micropayments -/// @author Daniel A. 
Nagy -contract chequebook is mortal { - // Cumulative paid amount in wei to each beneficiary - mapping (address => uint256) public sent; - - /// @notice Overdraft event - event Overdraft(address deadbeat); - - // Allow sending ether to the chequebook. - function() public payable { } - - /// @notice Cash cheque - /// - /// @param beneficiary beneficiary address - /// @param amount cumulative amount in wei - /// @param sig_v signature parameter v - /// @param sig_r signature parameter r - /// @param sig_s signature parameter s - /// The digital signature is calculated on the concatenated triplet of contract address, beneficiary address and cumulative amount - function cash(address beneficiary, uint256 amount, uint8 sig_v, bytes32 sig_r, bytes32 sig_s) public { - // Check if the cheque is old. - // Only cheques that are more recent than the last cashed one are considered. - require(amount > sent[beneficiary]); - // Check the digital signature of the cheque. - bytes32 hash = keccak256(address(this), beneficiary, amount); - require(owner == ecrecover(hash, sig_v, sig_r, sig_s)); - // Attempt sending the difference between the cumulative amount on the cheque - // and the cumulative amount on the last cashed cheque to beneficiary. - uint256 diff = amount - sent[beneficiary]; - if (diff <= this.balance) { - // update the cumulative amount before sending - sent[beneficiary] = amount; - beneficiary.transfer(diff); - } else { - // Upon failure, punish owner for writing a bounced cheque. - // owner.sendToDebtorsPrison(); - Overdraft(owner); - // Compensate beneficiary. - selfdestruct(beneficiary); - } - } -} diff --git a/contracts/chequebook/contract/code.go b/contracts/chequebook/contract/code.go deleted file mode 100644 index d837a9d60114..000000000000 --- a/contracts/chequebook/contract/code.go +++ /dev/null @@ -1,5 +0,0 @@ -package contract - -// ContractDeployedCode is used to detect suicides. This constant needs to be -// updated when the contract code is changed. 
-const ContractDeployedCode = "0x6060604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166341c0e1b581146100585780637bf786f81461006b578063fbf788d61461009c575b005b341561006357600080fd5b6100566100ca565b341561007657600080fd5b61008a600160a060020a03600435166100f1565b60405190815260200160405180910390f35b34156100a757600080fd5b610056600160a060020a036004351660243560ff60443516606435608435610103565b60005433600160a060020a03908116911614156100ef57600054600160a060020a0316ff5b565b60016020526000908152604090205481565b600160a060020a0385166000908152600160205260408120548190861161012957600080fd5b3087876040516c01000000000000000000000000600160a060020a03948516810282529290931690910260148301526028820152604801604051809103902091506001828686866040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f115156101cf57600080fd5b505060206040510351600054600160a060020a039081169116146101f257600080fd5b50600160a060020a03808716600090815260016020526040902054860390301631811161026257600160a060020a0387166000818152600160205260409081902088905582156108fc0290839051600060405180830381858888f19350505050151561025d57600080fd5b6102b7565b6000547f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f97890600160a060020a0316604051600160a060020a03909116815260200160405180910390a186600160a060020a0316ff5b505050505050505600a165627a7a72305820533e856fc37e3d64d1706bcc7dfb6b1d490c8d566ea498d9d01ec08965a896ca0029" diff --git a/contracts/chequebook/contract/mortal.sol b/contracts/chequebook/contract/mortal.sol deleted file mode 100644 index c43f1e4f7951..000000000000 --- a/contracts/chequebook/contract/mortal.sol +++ /dev/null @@ -1,10 +0,0 @@ -pragma solidity ^0.4.0; - -import "./owned.sol"; - -contract mortal is owned { - function kill() public { - if (msg.sender == owner) - selfdestruct(owner); - } -} diff --git a/contracts/chequebook/contract/owned.sol 
b/contracts/chequebook/contract/owned.sol deleted file mode 100644 index ee9860d343af..000000000000 --- a/contracts/chequebook/contract/owned.sol +++ /dev/null @@ -1,15 +0,0 @@ -pragma solidity ^0.4.0; - -contract owned { - address owner; - - modifier onlyowner() { - if (msg.sender == owner) { - _; - } - } - - function owned() public { - owner = msg.sender; - } -} diff --git a/contracts/chequebook/gencode.go b/contracts/chequebook/gencode.go deleted file mode 100644 index 49590f6b933d..000000000000 --- a/contracts/chequebook/gencode.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build none -// +build none - -// This program generates contract/code.go, which contains the chequebook code -// after deployment. 
-package main - -import ( - "fmt" - "math/big" - "os" - - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind/backends" - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook/contract" - "github.com/XinFinOrg/XDPoSChain/core" - "github.com/XinFinOrg/XDPoSChain/crypto" -) - -var ( - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAlloc = core.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(500000000000)}, - } -) - -func main() { - backend := backends.NewSimulatedBackend(testAlloc) - auth := bind.NewKeyedTransactor(testKey) - - // Deploy the contract, get the code. - addr, _, _, err := contract.DeployChequebook(auth, backend) - if err != nil { - panic(err) - } - backend.Commit() - code, err := backend.CodeAt(nil, addr, nil) - if err != nil { - panic(err) - } - if len(code) == 0 { - panic("empty code") - } - - // Write the output file. - content := fmt.Sprintf(`package contract - -// ContractDeployedCode is used to detect suicides. This constant needs to be -// updated when the contract code is changed. -const ContractDeployedCode = "%#x" -`, code) - if err := os.WriteFile("contract/code.go", []byte(content), 0644); err != nil { - panic(err) - } -} diff --git a/contracts/utils.go b/contracts/utils.go index 8f605a5e862e..9af44e052af7 100644 --- a/contracts/utils.go +++ b/contracts/utils.go @@ -87,7 +87,7 @@ func CreateTransactionSign(chainConfig *params.ChainConfig, pool *core.TxPool, m // Create and send tx to smart contract for sign validate block. 
nonce := pool.Nonce(account.Address) - tx := CreateTxSign(block.Number(), block.Hash(), nonce, common.HexToAddress(common.BlockSigners)) + tx := CreateTxSign(block.Number(), block.Hash(), nonce, common.BlockSignersBinary) txSigned, err := wallet.SignTx(account, tx, chainConfig.ChainId) if err != nil { log.Error("Fail to create tx sign", "error", err) @@ -112,7 +112,7 @@ func CreateTransactionSign(chainConfig *params.ChainConfig, pool *core.TxPool, m // Only process when private key empty in state db. // Save randomize key into state db. randomizeKeyValue := RandStringByte(32) - tx, err := BuildTxSecretRandomize(nonce+1, common.HexToAddress(common.RandomizeSMC), chainConfig.XDPoS.Epoch, randomizeKeyValue) + tx, err := BuildTxSecretRandomize(nonce+1, common.RandomizeSMCBinary, chainConfig.XDPoS.Epoch, randomizeKeyValue) if err != nil { log.Error("Fail to get tx opening for randomize", "error", err) return err @@ -141,7 +141,7 @@ func CreateTransactionSign(chainConfig *params.ChainConfig, pool *core.TxPool, m return err } - tx, err := BuildTxOpeningRandomize(nonce+1, common.HexToAddress(common.RandomizeSMC), randomizeKeyValue) + tx, err := BuildTxOpeningRandomize(nonce+1, common.RandomizeSMCBinary, randomizeKeyValue) if err != nil { log.Error("Fail to get tx opening for randomize", "error", err) return err @@ -232,7 +232,7 @@ func GetSignersByExecutingEVM(addrBlockSigner common.Address, client bind.Contra // Get random from randomize contract. 
func GetRandomizeFromContract(client bind.ContractBackend, addrMasternode common.Address) (int64, error) { - randomize, err := randomizeContract.NewXDCRandomize(common.HexToAddress(common.RandomizeSMC), client) + randomize, err := randomizeContract.NewXDCRandomize(common.RandomizeSMCBinary, client) if err != nil { log.Error("Fail to get instance of randomize", "error", err) } @@ -407,7 +407,7 @@ func CalculateRewardForSigner(chainReward *big.Int, signers map[common.Address]* log.Info("Signers data", "totalSigner", totalSigner, "totalReward", chainReward) for addr, signer := range signers { - log.Info("Signer reward", "signer", addr, "sign", signer.Sign, "reward", signer.Reward) + log.Debug("Signer reward", "signer", addr, "sign", signer.Sign, "reward", signer.Reward) } return resultSigners, nil diff --git a/contracts/validator/validator_test.go b/contracts/validator/validator_test.go index 047f34deac75..6ebe924b5cf2 100644 --- a/contracts/validator/validator_test.go +++ b/contracts/validator/validator_test.go @@ -150,7 +150,7 @@ func TestRewardBalance(t *testing.T) { logCaps[i] = &logCap{accounts[randIndex].From.String(), randCap} } - foundationAddr := common.HexToAddress(common.FoudationAddr) + foundationAddr := common.FoudationAddrBinary totalReward := new(big.Int).SetInt64(15 * 1000) rewards, err := GetRewardBalancesRate(foundationAddr, acc3Addr, totalReward, baseValidator) if err != nil { @@ -309,13 +309,13 @@ func TestStatedbUtils(t *testing.T) { return true } contractBackend.ForEachStorageAt(ctx, validatorAddress, nil, f) - genesisAlloc[common.HexToAddress(common.MasternodeVotingSMC)] = core.GenesisAccount{ + genesisAlloc[common.MasternodeVotingSMCBinary] = core.GenesisAccount{ Balance: validatorCap, Code: code, Storage: storage, } contractBackendForValidator := backends.NewXDCSimulatedBackend(genesisAlloc, 10000000, params.TestXDPoSMockChainConfig) - validator, err := NewValidator(transactOpts, common.HexToAddress(common.MasternodeVotingSMC), 
contractBackendForValidator) + validator, err := NewValidator(transactOpts, common.MasternodeVotingSMCBinary, contractBackendForValidator) if err != nil { t.Fatalf("can't get validator object: %v", err) } @@ -379,4 +379,4 @@ func TestStatedbUtils(t *testing.T) { t.Fatalf("cap should not be zero") } } -} \ No newline at end of file +} diff --git a/core/bench_test.go b/core/bench_test.go index 7cfed07f45a0..1129142b1af6 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -235,12 +235,12 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { ReceiptHash: types.EmptyRootHash, } hash = header.Hash() - WriteHeader(db, header) - WriteCanonicalHash(db, hash, n) + rawdb.WriteHeader(db, header) + rawdb.WriteCanonicalHash(db, hash, n) WriteTd(db, hash, n, big.NewInt(int64(n+1))) if full || n == 0 { block := types.NewBlockWithHeader(header) - WriteBody(db, hash, n, block.Body()) + rawdb.WriteBody(db, hash, n, block.Body()) WriteBlockReceipts(db, hash, n, nil) } } diff --git a/core/block_validator.go b/core/block_validator.go index a4144aa495e9..e713342d9c62 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -17,6 +17,7 @@ package core import ( + "errors" "fmt" "github.com/XinFinOrg/XDPoSChain/XDCx/tradingstate" @@ -113,7 +114,7 @@ func (v *BlockValidator) ValidateTradingOrder(statedb *state.StateDB, XDCxStated } XDCXService := XDPoSEngine.GetXDCXService() if XDCXService == nil { - return fmt.Errorf("XDCx not found") + return errors.New("XDCx not found") } log.Debug("verify matching transaction found a TxMatches Batch", "numTxMatches", len(txMatchBatch.Data)) tradingResult := map[common.Hash]tradingstate.MatchingResult{} @@ -149,11 +150,11 @@ func (v *BlockValidator) ValidateLendingOrder(statedb *state.StateDB, lendingSta } XDCXService := XDPoSEngine.GetXDCXService() if XDCXService == nil { - return fmt.Errorf("XDCx not found") + return errors.New("XDCx not found") } lendingService := XDPoSEngine.GetLendingService() if lendingService 
== nil { - return fmt.Errorf("lendingService not found") + return errors.New("lendingService not found") } log.Debug("verify lendingItem ", "numItems", len(batch.Data)) lendingResult := map[common.Hash]lendingstate.MatchingResult{} diff --git a/core/blockchain.go b/core/blockchain.go index d9d20c43450c..006e94fd0e05 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -32,6 +32,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/lru" "github.com/XinFinOrg/XDPoSChain/common/mclock" "github.com/XinFinOrg/XDPoSChain/common/prque" "github.com/XinFinOrg/XDPoSChain/common/sort" @@ -39,6 +40,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/utils" contractValidator "github.com/XinFinOrg/XDPoSChain/contracts/validator/contract" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/core/vm" @@ -51,7 +53,6 @@ import ( "github.com/XinFinOrg/XDPoSChain/params" "github.com/XinFinOrg/XDPoSChain/rlp" "github.com/XinFinOrg/XDPoSChain/trie" - lru "github.com/hashicorp/golang-lru" ) var ( @@ -139,37 +140,40 @@ type BlockChain struct { stateCache state.Database // State database to reuse between imports (contains state cache) - bodyCache *lru.Cache // Cache for the most recent block bodies - bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format - blockCache *lru.Cache // Cache for the most recent entire blocks - futureBlocks *lru.Cache // future blocks are blocks added for later processing - resultProcess *lru.Cache // Cache for processed blocks - calculatingBlock *lru.Cache // Cache for processing blocks - downloadingBlock *lru.Cache // Cache for downloading blocks (avoid duplication from fetcher) - quit chan 
struct{} // blockchain quit channel - running int32 // running must be called atomically - // procInterrupt must be atomically called - procInterrupt int32 // interrupt signaler for block processing + bodyCache *lru.Cache[common.Hash, *types.Body] // Cache for the most recent block bodies + bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue] // Cache for the most recent block bodies in RLP encoded format + blockCache *lru.Cache[common.Hash, *types.Block] // Cache for the most recent entire blocks + resultProcess *lru.Cache[common.Hash, *ResultProcessBlock] // Cache for processed blocks + calculatingBlock *lru.Cache[common.Hash, *CalculatedBlock] // Cache for processing blocks + downloadingBlock *lru.Cache[common.Hash, struct{}] // Cache for downloading blocks (avoid duplication from fetcher) + badBlocks *lru.Cache[common.Hash, *types.Header] // Bad block cache + + // future blocks are blocks added for later processing + futureBlocks *lru.Cache[common.Hash, *types.Block] + wg sync.WaitGroup // chain processing wait group for shutting down + quit chan struct{} // shutdown signal, closed in Stop. + running int32 // 0 if chain is running, 1 when stopped + procInterrupt int32 // interrupt signaler for block processing engine consensus.Engine processor Processor // block processor interface validator Validator // block and state validator interface vmConfig vm.Config - badBlocks *lru.Cache // Bad block cache shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. IPCEndpoint string Client bind.ContractBackend // Global ipc client instance. 
+ // Blocks hash array by block number // cache field for tracking finality purpose, can't use for tracking block vs block relationship - blocksHashCache *lru.Cache + blocksHashCache *lru.Cache[uint64, []common.Hash] - resultTrade *lru.Cache // trades result: key - takerOrderHash, value: trades corresponding to takerOrder - rejectedOrders *lru.Cache // rejected orders: key - takerOrderHash, value: rejected orders corresponding to takerOrder - resultLendingTrade *lru.Cache - rejectedLendingItem *lru.Cache - finalizedTrade *lru.Cache // include both trades which force update to closed/liquidated by the protocol + resultTrade *lru.Cache[common.Hash, interface{}] // trades result: key - takerOrderHash, value: trades corresponding to takerOrder + rejectedOrders *lru.Cache[common.Hash, interface{}] // rejected orders: key - takerOrderHash, value: rejected orders corresponding to takerOrder + resultLendingTrade *lru.Cache[common.Hash, interface{}] + rejectedLendingItem *lru.Cache[common.Hash, interface{}] + finalizedTrade *lru.Cache[common.Hash, interface{}] // include both trades which force update to closed/liquidated by the protocol } // NewBlockChain returns a fully initialised block chain using information @@ -182,24 +186,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par TrieTimeLimit: 5 * time.Minute, } } - bodyCache, _ := lru.New(bodyCacheLimit) - bodyRLPCache, _ := lru.New(bodyCacheLimit) - blockCache, _ := lru.New(blockCacheLimit) - blocksHashCache, _ := lru.New(blocksHashCacheLimit) - futureBlocks, _ := lru.New(maxFutureBlocks) - badBlocks, _ := lru.New(badBlockLimit) - resultProcess, _ := lru.New(blockCacheLimit) - preparingBlock, _ := lru.New(blockCacheLimit) - downloadingBlock, _ := lru.New(blockCacheLimit) - - // for XDCx - resultTrade, _ := lru.New(tradingstate.OrderCacheLimit) - rejectedOrders, _ := lru.New(tradingstate.OrderCacheLimit) - - // XDCxlending - resultLendingTrade, _ := lru.New(tradingstate.OrderCacheLimit) - 
rejectedLendingItem, _ := lru.New(tradingstate.OrderCacheLimit) - finalizedTrade, _ := lru.New(tradingstate.OrderCacheLimit) + bc := &BlockChain{ chainConfig: chainConfig, cacheConfig: cacheConfig, @@ -207,22 +194,22 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par triegc: prque.New(nil), stateCache: state.NewDatabase(db), quit: make(chan struct{}), - bodyCache: bodyCache, - bodyRLPCache: bodyRLPCache, - blockCache: blockCache, - futureBlocks: futureBlocks, - resultProcess: resultProcess, - calculatingBlock: preparingBlock, - downloadingBlock: downloadingBlock, + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), + resultProcess: lru.NewCache[common.Hash, *ResultProcessBlock](blockCacheLimit), + calculatingBlock: lru.NewCache[common.Hash, *CalculatedBlock](blockCacheLimit), + downloadingBlock: lru.NewCache[common.Hash, struct{}](blockCacheLimit), engine: engine, vmConfig: vmConfig, - badBlocks: badBlocks, - blocksHashCache: blocksHashCache, - resultTrade: resultTrade, - rejectedOrders: rejectedOrders, - resultLendingTrade: resultLendingTrade, - rejectedLendingItem: rejectedLendingItem, - finalizedTrade: finalizedTrade, + badBlocks: lru.NewCache[common.Hash, *types.Header](badBlockLimit), + blocksHashCache: lru.NewCache[uint64, []common.Hash](blocksHashCacheLimit), + resultTrade: lru.NewCache[common.Hash, interface{}](tradingstate.OrderCacheLimit), + rejectedOrders: lru.NewCache[common.Hash, interface{}](tradingstate.OrderCacheLimit), + resultLendingTrade: lru.NewCache[common.Hash, interface{}](tradingstate.OrderCacheLimit), + rejectedLendingItem: lru.NewCache[common.Hash, interface{}](tradingstate.OrderCacheLimit), + finalizedTrade: lru.NewCache[common.Hash, 
interface{}](tradingstate.OrderCacheLimit), } bc.SetValidator(NewBlockValidator(chainConfig, bc, engine)) bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine)) @@ -437,9 +424,7 @@ func (bc *BlockChain) SetHead(head uint64) error { } currentBlock := bc.CurrentBlock() currentFastBlock := bc.CurrentFastBlock() - if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil { - log.Crit("Failed to reset head full block", "err", err) - } + rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil { log.Crit("Failed to reset head fast block", "err", err) } @@ -586,9 +571,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { log.Crit("Failed to write genesis block TD", "err", err) } - if err := WriteBlock(bc.db, genesis); err != nil { - log.Crit("Failed to write genesis block", "err", err) - } + rawdb.WriteBlock(bc.db, genesis) bc.genesisBlock = genesis bc.insert(bc.genesisBlock, false) bc.currentBlock.Store(bc.genesisBlock) @@ -681,20 +664,18 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { // // Note, this function assumes that the `mu` mutex is held! 
func (bc *BlockChain) insert(block *types.Block, writeBlock bool) { + + blockHash := block.Hash() + blockNumberU64 := block.NumberU64() + // If the block is on a side chain or an unknown one, force other heads onto it too - updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash() + updateHeads := GetCanonicalHash(bc.db, blockNumberU64) != blockHash // Add the block to the canonical chain number scheme and mark as the head - if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil { - log.Crit("Failed to insert block number", "err", err) - } - if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil { - log.Crit("Failed to insert head block hash", "err", err) - } + rawdb.WriteCanonicalHash(bc.db, blockHash, blockNumberU64) + rawdb.WriteHeadBlockHash(bc.db, blockHash) if writeBlock { - if err := WriteBlock(bc.db, block); err != nil { - log.Crit("Failed to insert block", "err", err) - } + rawdb.WriteBlock(bc.db, block) } bc.currentBlock.Store(block) @@ -702,7 +683,7 @@ func (bc *BlockChain) insert(block *types.Block, writeBlock bool) { if bc.chainConfig.XDPoS != nil && !bc.chainConfig.IsTIPSigning(block.Number()) { engine, ok := bc.Engine().(*XDPoS.XDPoS) if ok { - engine.CacheNoneTIPSigningTxs(block.Header(), block.Transactions(), bc.GetReceiptsByHash(block.Hash())) + engine.CacheNoneTIPSigningTxs(block.Header(), block.Transactions(), bc.GetReceiptsByHash(blockHash)) } } @@ -710,7 +691,7 @@ func (bc *BlockChain) insert(block *types.Block, writeBlock bool) { if updateHeads { bc.hc.SetCurrentHeader(block.Header()) - if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { + if err := WriteHeadFastBlockHash(bc.db, blockHash); err != nil { log.Crit("Failed to insert head fast block hash", "err", err) } bc.currentFastBlock.Store(block) @@ -727,8 +708,7 @@ func (bc *BlockChain) Genesis() *types.Block { func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { // Short circuit if the body's already in the cache, 
retrieve otherwise if cached, ok := bc.bodyCache.Get(hash); ok { - body := cached.(*types.Body) - return body + return cached } body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash)) if body == nil { @@ -744,7 +724,7 @@ func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := bc.bodyRLPCache.Get(hash); ok { - return cached.(rlp.RawValue) + return cached } body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash)) if len(body) == 0 { @@ -801,7 +781,7 @@ func (bc *BlockChain) HasBlockAndFullState(hash common.Hash, number uint64) bool func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { // Short circuit if the block's already in the cache, retrieve otherwise if block, ok := bc.blockCache.Get(hash); ok { - return block.(*types.Block) + return block } block := GetBlock(bc.db, hash, number) if block == nil { @@ -854,7 +834,7 @@ func (bc *BlockChain) GetBlocksHashCache(number uint64) []common.Hash { cached, ok := bc.blocksHashCache.Get(number) if ok { - return cached.([]common.Hash) + return cached } return nil } @@ -987,7 +967,7 @@ func (bc *BlockChain) procFutureBlocks() { blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) for _, hash := range bc.futureBlocks.Keys() { if block, exist := bc.futureBlocks.Peek(hash); exist { - blocks = append(blocks, block.(*types.Block)) + blocks = append(blocks, block) } } if len(blocks) > 0 { @@ -1044,7 +1024,7 @@ func (bc *BlockChain) Rollback(chain []common.Hash) { if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) bc.currentBlock.Store(newBlock) - WriteHeadBlockHash(bc.db, newBlock.Hash()) + rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash()) } } } @@ -1134,9 +1114,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ 
return i, fmt.Errorf("failed to set receipts data: %v", err) } // Write all the data out into the database - if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { - return i, fmt.Errorf("failed to write block body: %v", err) - } + rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { return i, fmt.Errorf("failed to write block receipts: %v", err) } @@ -1196,9 +1174,7 @@ func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (e if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { return err } - if err := WriteBlock(bc.db, block); err != nil { - return err - } + rawdb.WriteBlock(bc.db, block) return nil } @@ -1226,9 +1202,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } // Write other block data using a batch. batch := bc.db.NewBatch() - if err := WriteBlock(batch, block); err != nil { - return NonStatTy, err - } + rawdb.WriteBlock(batch, block) root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) if err != nil { return NonStatTy, err @@ -1504,7 +1478,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, [] for i, block := range chain { headers[i] = block.Header() seals[i] = verifySeals - bc.downloadingBlock.Add(block.Hash(), true) + bc.downloadingBlock.Add(block.Hash(), struct{}{}) } abort, results := bc.engine.VerifyHeaders(bc, headers, seals) defer close(abort) @@ -1818,11 +1792,11 @@ func (bc *BlockChain) getResultBlock(block *types.Block, verifiedM2 bool) (*Resu if verifiedM2 { if result, check := bc.resultProcess.Get(block.HashNoValidator()); check { log.Debug("Get result block from cache ", "number", block.NumberU64(), "hash", block.Hash(), "hash no validator", block.HashNoValidator()) - return result.(*ResultProcessBlock), nil + return result, nil } log.Debug("Not found cache prepare block ", 
"number", block.NumberU64(), "hash", block.Hash(), "validator", block.HashNoValidator()) if calculatedBlock, _ := bc.calculatingBlock.Get(block.HashNoValidator()); calculatedBlock != nil { - calculatedBlock.(*CalculatedBlock).stop = true + calculatedBlock.stop = true } } calculatedBlock = &CalculatedBlock{block, false} @@ -2020,7 +1994,7 @@ func (bc *BlockChain) UpdateBlocksHashCache(block *types.Block) []common.Hash { cached, ok := bc.blocksHashCache.Get(blockNumber) if ok { - hashArr := cached.([]common.Hash) + hashArr := cached hashArr = append(hashArr, block.Hash()) bc.blocksHashCache.Remove(blockNumber) bc.blocksHashCache.Add(blockNumber, hashArr) @@ -2203,10 +2177,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } } if oldBlock == nil { - return fmt.Errorf("Invalid old chain") + return errors.New("Invalid old chain") } if newBlock == nil { - return fmt.Errorf("Invalid new chain") + return errors.New("Invalid new chain") } for { @@ -2222,10 +2196,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) if oldBlock == nil { - return fmt.Errorf("Invalid old chain") + return errors.New("Invalid old chain") } if newBlock == nil { - return fmt.Errorf("Invalid new chain") + return errors.New("Invalid new chain") } } // Ensure XDPoS engine committed block will be not reverted @@ -2353,8 +2327,7 @@ type BadBlockArgs struct { func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) for _, hash := range bc.badBlocks.Keys() { - if hdr, exist := bc.badBlocks.Peek(hash); exist { - header := hdr.(*types.Header) + if header, exist := bc.badBlocks.Peek(hash); exist { headers = append(headers, BadBlockArgs{header.Hash(), header}) } } @@ -2568,7 +2541,7 @@ func (bc *BlockChain) UpdateM1() error { if err != nil { return err } - addr := 
common.HexToAddress(common.MasternodeVotingSMC) + addr := common.MasternodeVotingSMCBinary validator, err := contractValidator.NewXDCValidator(addr, client) if err != nil { return err @@ -2584,6 +2557,8 @@ func (bc *BlockChain) UpdateM1() error { if err != nil { return err } + } else if stateDB == nil { + return errors.New("nil stateDB in UpdateM1") } else { candidates = state.GetCandidates(stateDB) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index c19a68dcc129..87b58bf3ad0a 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -130,7 +130,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { } blockchain.mu.Lock() WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash()))) - WriteBlock(blockchain.db, block) + rawdb.WriteBlock(blockchain.db, block) statedb.Commit(true) blockchain.mu.Unlock() } @@ -148,7 +148,7 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error // Manually insert the header into the database, but don't reorganise (allows subsequent testing) blockchain.mu.Lock() WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash))) - WriteHeader(blockchain.db, header) + rawdb.WriteHeader(blockchain.db, header) blockchain.mu.Unlock() } return nil @@ -1367,7 +1367,7 @@ func TestBlocksHashCacheUpdate(t *testing.T) { } cachedAt, _ := chain.blocksHashCache.Get(uint64(3)) - if len(cachedAt.([]common.Hash)) != 2 { + if len(cachedAt) != 2 { t.Error("BlocksHashCache doesn't add new cache after concating new fork ") } }) diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 7be107be85b4..dd6466ab3770 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -18,6 +18,7 @@ package core import ( "encoding/binary" + "errors" "fmt" "sync" "sync/atomic" @@ -357,7 +358,7 @@ func (c *ChainIndexer) 
processSection(section uint64, lastHead common.Hash) (com if header == nil { return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4]) } else if header.ParentHash != lastHead { - return common.Hash{}, fmt.Errorf("chain reorged during section processing") + return common.Hash{}, errors.New("chain reorged during section processing") } c.backend.Process(header) lastHead = header.Hash() diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index c042a8319da0..4d44f3018a6f 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -18,12 +18,13 @@ package core import ( "fmt" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" "math/big" "math/rand" "testing" "time" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/types" ) @@ -94,8 +95,8 @@ func testChainIndexer(t *testing.T, count int) { if number > 0 { header.ParentHash = GetCanonicalHash(db, number-1) } - WriteHeader(db, header) - WriteCanonicalHash(db, header.Hash(), number) + rawdb.WriteHeader(db, header) + rawdb.WriteCanonicalHash(db, header.Hash(), number) } // Start indexer with an already existing chain for i := uint64(0); i <= 100; i++ { diff --git a/core/chain_makers.go b/core/chain_makers.go index 348d68d1687a..7a4012945f7d 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -113,6 +113,15 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { } } +// AddUncheckedTx forcefully adds a transaction to the block without any +// validation. +// +// AddUncheckedTx will cause consensus failures when used during real +// chain processing. This is best used in conjunction with raw block insertion. +func (b *BlockGen) AddUncheckedTx(tx *types.Transaction) { + b.txs = append(b.txs, tx) +} + // Number returns the block number of the block being generated. 
func (b *BlockGen) Number() *big.Int { return new(big.Int).Set(b.header.Number) @@ -235,6 +244,19 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return blocks, receipts } +// GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize +// genesis block to database first according to the provided genesis specification +// then generate chain on top. +func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) { + db := rawdb.NewMemoryDatabase() + _, err := genesis.Commit(db) + if err != nil { + panic(err) + } + blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(nil), engine, db, n, gen) + return db, blocks, receipts +} + func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { var time *big.Int if parent.Time() == nil { diff --git a/core/database_util.go b/core/database_util.go index 647d6afd3b59..9d48ffec6ce2 100644 --- a/core/database_util.go +++ b/core/database_util.go @@ -356,15 +356,6 @@ func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) return db.Get(key) } -// WriteCanonicalHash stores the canonical hash for the given block number. -func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) error { - key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...) - if err := db.Put(key, hash.Bytes()); err != nil { - log.Crit("Failed to store number to hash mapping", "err", err) - } - return nil -} - // WriteHeadHeaderHash stores the head header's hash. func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) error { if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { @@ -373,14 +364,6 @@ func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) error { return nil } -// WriteHeadBlockHash stores the head block's hash. 
-func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error { - if err := db.Put(headBlockKey, hash.Bytes()); err != nil { - log.Crit("Failed to store last block's hash", "err", err) - } - return nil -} - // WriteHeadFastBlockHash stores the fast head block's hash. func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) error { if err := db.Put(headFastKey, hash.Bytes()); err != nil { @@ -398,44 +381,6 @@ func WriteTrieSyncProgress(db ethdb.KeyValueWriter, count uint64) error { return nil } -// WriteHeader serializes a block header into the database. -func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) error { - data, err := rlp.EncodeToBytes(header) - if err != nil { - return err - } - hash := header.Hash().Bytes() - num := header.Number.Uint64() - encNum := encodeBlockNumber(num) - key := append(blockHashPrefix, hash...) - if err := db.Put(key, encNum); err != nil { - log.Crit("Failed to store hash to number mapping", "err", err) - } - key = append(append(headerPrefix, encNum...), hash...) - if err := db.Put(key, data); err != nil { - log.Crit("Failed to store header", "err", err) - } - return nil -} - -// WriteBody serializes the body of a block into the database. -func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) error { - data, err := rlp.EncodeToBytes(body) - if err != nil { - return err - } - return WriteBodyRLP(db, hash, number, data) -} - -// WriteBodyRLP writes a serialized body of a block into the database. -func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error { - key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if err := db.Put(key, rlp); err != nil { - log.Crit("Failed to store block body", "err", err) - } - return nil -} - // WriteTd serializes the total difficulty of a block into the database. 
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) error { data, err := rlp.EncodeToBytes(td) @@ -449,19 +394,6 @@ func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.I return nil } -// WriteBlock serializes a block into the database, header and body separately. -func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) error { - // Store the body first to retain database consistency - if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { - return err - } - // Store the header too, signaling full block ownership - if err := WriteHeader(db, block.Header()); err != nil { - return err - } - return nil -} - // WriteBlockReceipts stores all the transaction receipts belonging to a block // as a single receipt slice. This is used during chain reorganisations for // rescheduling dropped transactions. diff --git a/core/database_util_test.go b/core/database_util_test.go index ecd843a3e880..a0d5a9ec8371 100644 --- a/core/database_util_test.go +++ b/core/database_util_test.go @@ -38,9 +38,7 @@ func TestHeaderStorage(t *testing.T) { t.Fatalf("Non existent header returned: %v", entry) } // Write and verify the header in the database - if err := WriteHeader(db, header); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } + rawdb.WriteHeader(db, header) if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry == nil { t.Fatalf("Stored header not found") } else if entry.Hash() != header.Hash() { @@ -78,9 +76,7 @@ func TestBodyStorage(t *testing.T) { t.Fatalf("Non existent body returned: %v", entry) } // Write and verify the body in the database - if err := WriteBody(db, hash, 0, body); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } + rawdb.WriteBody(db, hash, 0, body) if entry := GetBody(db, hash, 0); entry == nil { t.Fatalf("Stored body not found") } else if types.DeriveSha(types.Transactions(entry.Transactions)) != 
types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { @@ -124,9 +120,7 @@ func TestBlockStorage(t *testing.T) { t.Fatalf("Non existent body returned: %v", entry) } // Write and verify the block in the database - if err := WriteBlock(db, block); err != nil { - t.Fatalf("Failed to write block into database: %v", err) - } + rawdb.WriteBlock(db, block) if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored block not found") } else if entry.Hash() != block.Hash() { @@ -165,30 +159,22 @@ func TestPartialBlockStorage(t *testing.T) { ReceiptHash: types.EmptyRootHash, }) // Store a header and check that it's not recognized as a block - if err := WriteHeader(db, block.Header()); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } + rawdb.WriteHeader(db, block.Header()) if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) } DeleteHeader(db, block.Hash(), block.NumberU64()) // Store a body and check that it's not recognized as a block - if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } + rawdb.WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) } DeleteBody(db, block.Hash(), block.NumberU64()) // Store a header and a body separately and check reassembly - if err := WriteHeader(db, block.Header()); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } + rawdb.WriteHeader(db, block.Header()) + rawdb.WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) if entry := 
GetBlock(db, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored block not found") } else if entry.Hash() != block.Hash() { @@ -231,9 +217,7 @@ func TestCanonicalMappingStorage(t *testing.T) { t.Fatalf("Non existent canonical mapping returned: %v", entry) } // Write and verify the TD in the database - if err := WriteCanonicalHash(db, hash, number); err != nil { - t.Fatalf("Failed to write canonical mapping into database: %v", err) - } + rawdb.WriteCanonicalHash(db, hash, number) if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) { t.Fatalf("Stored canonical mapping not found") } else if entry != hash { @@ -268,9 +252,7 @@ func TestHeadStorage(t *testing.T) { if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil { t.Fatalf("Failed to write head header hash: %v", err) } - if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil { - t.Fatalf("Failed to write head block hash: %v", err) - } + rawdb.WriteHeadBlockHash(db, blockFull.Hash()) if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil { t.Fatalf("Failed to write fast head block hash: %v", err) } @@ -304,9 +286,7 @@ func TestLookupStorage(t *testing.T) { } } // Insert all the transactions into the database, and verify contents - if err := WriteBlock(db, block); err != nil { - t.Fatalf("failed to write block contents: %v", err) - } + rawdb.WriteBlock(db, block) if err := WriteTxLookupEntries(db, block); err != nil { t.Fatalf("failed to write transactions: %v", err) } diff --git a/core/error.go b/core/error.go index 6268c0dc9abc..5503a6e6f2ef 100644 --- a/core/error.go +++ b/core/error.go @@ -37,6 +37,10 @@ var ( // next one expected based on the local chain. ErrNonceTooHigh = errors.New("nonce too high") + // ErrNonceMax is returned if the nonce of a transaction sender account has + // maximum allowed value and would become invalid if incremented. 
+ ErrNonceMax = errors.New("nonce has max value") + ErrNotXDPoS = errors.New("XDPoS not found in config") ErrNotFoundM1 = errors.New("list M1 not found ") @@ -46,4 +50,7 @@ var ( // ErrTxTypeNotSupported is returned if a transaction is not supported in the // current network configuration. ErrTxTypeNotSupported = types.ErrTxTypeNotSupported + + // ErrGasUintOverflow is returned when calculating gas usage. + ErrGasUintOverflow = errors.New("gas uint64 overflow") ) diff --git a/core/events.go b/core/events.go index 60dc8d7ddd36..bf7e7027e5c9 100644 --- a/core/events.go +++ b/core/events.go @@ -30,11 +30,6 @@ type OrderTxPreEvent struct{ Tx *types.OrderTransaction } // LendingTxPreEvent is posted when a order transaction enters the order transaction pool. type LendingTxPreEvent struct{ Tx *types.LendingTransaction } -// PendingLogsEvent is posted pre mining and notifies of pending logs. -type PendingLogsEvent struct { - Logs []*types.Log -} - // PendingStateEvent is posted pre mining and notifies of pending state changes. type PendingStateEvent struct{} diff --git a/core/genesis.go b/core/genesis.go index 79a73f34dbac..2156ddc7259d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -141,10 +141,10 @@ func (e *GenesisMismatchError) Error() string { // SetupGenesisBlock writes or updates the genesis block in db. // The block that will be used is: // -// genesis == nil genesis != nil -// +------------------------------------------ -// db has no genesis | main-net default | genesis -// db has genesis | from DB | genesis (if compatible) +// genesis == nil genesis != nil +// +------------------------------------------ +// db has no genesis | main-net default | genesis +// db has genesis | from DB | genesis (if compatible) // // The stored chain configuration will be updated if it is compatible (i.e. does not // specify a fork block below the local head block). 
In case of a conflict, the @@ -200,7 +200,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig // are returned to the caller unless we're already at block zero. height := GetBlockNumber(db, GetHeadHeaderHash(db)) if height == missingNumber { - return newcfg, stored, fmt.Errorf("missing block number for head header hash") + return newcfg, stored, errors.New("missing block number for head header hash") } compatErr := storedcfg.CheckCompatible(newcfg, height) if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { @@ -275,23 +275,17 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { block := g.ToBlock(db) if block.Number().Sign() != 0 { - return nil, fmt.Errorf("can't commit genesis block with number > 0") + return nil, errors.New("can't commit genesis block with number > 0") } if err := WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty); err != nil { return nil, err } - if err := WriteBlock(db, block); err != nil { - return nil, err - } + rawdb.WriteBlock(db, block) if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), nil); err != nil { return nil, err } - if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - return nil, err - } - if err := WriteHeadBlockHash(db, block.Hash()); err != nil { - return nil, err - } + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) if err := WriteHeadHeaderHash(db, block.Hash()); err != nil { return nil, err } diff --git a/core/headerchain.go b/core/headerchain.go index 0dbc47e1a845..424bfb687d6c 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -27,12 +27,13 @@ import ( "time" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/lru" "github.com/XinFinOrg/XDPoSChain/consensus" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" 
"github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/params" - lru "github.com/hashicorp/golang-lru" ) const ( @@ -55,9 +56,9 @@ type HeaderChain struct { currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) - headerCache *lru.Cache // Cache for the most recent block headers - tdCache *lru.Cache // Cache for the most recent block total difficulties - numberCache *lru.Cache // Cache for the most recent block numbers + headerCache *lru.Cache[common.Hash, *types.Header] + tdCache *lru.Cache[common.Hash, *big.Int] // most recent total difficulties + numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers procInterrupt func() bool @@ -66,14 +67,11 @@ type HeaderChain struct { } // NewHeaderChain creates a new HeaderChain structure. -// getValidator should return the parent's validator -// procInterrupt points to the parent's interrupt semaphore -// wg points to the parent's shutdown wait group +// +// getValidator should return the parent's validator +// procInterrupt points to the parent's interrupt semaphore +// wg points to the parent's shutdown wait group func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) { - headerCache, _ := lru.New(headerCacheLimit) - tdCache, _ := lru.New(tdCacheLimit) - numberCache, _ := lru.New(numberCacheLimit) - // Seed a fast but crypto originating random generator seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) if err != nil { @@ -83,9 +81,9 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c hc := &HeaderChain{ config: config, chainDb: chainDb, - headerCache: headerCache, - tdCache: tdCache, - numberCache: numberCache, + headerCache: lru.NewCache[common.Hash, 
*types.Header](headerCacheLimit), + tdCache: lru.NewCache[common.Hash, *big.Int](tdCacheLimit), + numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), procInterrupt: procInterrupt, rand: mrand.New(mrand.NewSource(seed.Int64())), engine: engine, @@ -111,7 +109,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c // from the cache or database func (hc *HeaderChain) GetBlockNumber(hash common.Hash) uint64 { if cached, ok := hc.numberCache.Get(hash); ok { - return cached.(uint64) + return cached } number := GetBlockNumber(hc.chainDb, hash) if number != missingNumber { @@ -147,9 +145,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er if err := hc.WriteTd(hash, number, externTd); err != nil { log.Crit("Failed to write header total difficulty", "err", err) } - if err := WriteHeader(hc.chainDb, header); err != nil { - log.Crit("Failed to write header content", "err", err) - } + rawdb.WriteHeader(hc.chainDb, header) // If the total difficulty is higher than our known, add it to the canonical chain // Second clause in the if statement reduces the vulnerability to selfish mining. 
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf @@ -169,16 +165,14 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er headHeader = hc.GetHeader(headHash, headNumber) ) for GetCanonicalHash(hc.chainDb, headNumber) != headHash { - WriteCanonicalHash(hc.chainDb, headHash, headNumber) + rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber) headHash = headHeader.ParentHash headNumber = headHeader.Number.Uint64() - 1 headHeader = hc.GetHeader(headHash, headNumber) } // Extend the canonical chain with the new header - if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil { - log.Crit("Failed to insert header number", "err", err) - } + rawdb.WriteCanonicalHash(hc.chainDb, hash, number) if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil { log.Crit("Failed to insert head header hash", "err", err) } @@ -314,7 +308,7 @@ func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []co func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { // Short circuit if the td's already in the cache, retrieve otherwise if cached, ok := hc.tdCache.Get(hash); ok { - return cached.(*big.Int) + return cached } td := GetTd(hc.chainDb, hash, number) if td == nil { @@ -346,7 +340,7 @@ func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) err func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { // Short circuit if the header's already in the cache, retrieve otherwise if header, ok := hc.headerCache.Get(hash); ok { - return header.(*types.Header) + return header } header := GetHeader(hc.chainDb, hash, number) if header == nil { diff --git a/core/lending_pool.go b/core/lending_pool.go index fc0deb18215b..89aac0665f36 100644 --- a/core/lending_pool.go +++ b/core/lending_pool.go @@ -435,7 +435,7 @@ func (pool *LendingPool) validateNewLending(cloneStateDb *state.StateDB, cloneLe validCollateral := false collateralList := 
lendingstate.GetCollaterals(cloneStateDb, tx.RelayerAddress(), tx.LendingToken(), tx.Term()) for _, collateral := range collateralList { - if tx.CollateralToken().String() == collateral.String() { + if tx.CollateralToken() == collateral { validCollateral = true break } @@ -476,10 +476,10 @@ func (pool *LendingPool) validateRepayLending(cloneStateDb *state.StateDB, clone if lendingTrade == lendingstate.EmptyLendingTrade { return ErrInvalidLendingTradeID } - if tx.UserAddress().String() != lendingTrade.Borrower.String() { + if tx.UserAddress() != lendingTrade.Borrower { return ErrInvalidLendingUserAddress } - if tx.RelayerAddress().String() != lendingTrade.BorrowingRelayer.String() { + if tx.RelayerAddress() != lendingTrade.BorrowingRelayer { return ErrInvalidLendingRelayer } if err := pool.validateBalance(cloneStateDb, cloneLendingStateDb, tx, tx.CollateralToken()); err != nil { @@ -499,10 +499,10 @@ func (pool *LendingPool) validateTopupLending(cloneStateDb *state.StateDB, clone if lendingTrade == lendingstate.EmptyLendingTrade { return ErrInvalidLendingTradeID } - if tx.UserAddress().String() != lendingTrade.Borrower.String() { + if tx.UserAddress() != lendingTrade.Borrower { return ErrInvalidLendingUserAddress } - if tx.RelayerAddress().String() != lendingTrade.BorrowingRelayer.String() { + if tx.RelayerAddress() != lendingTrade.BorrowingRelayer { return ErrInvalidLendingRelayer } if err := pool.validateBalance(cloneStateDb, cloneLendingStateDb, tx, lendingTrade.CollateralToken); err != nil { @@ -519,7 +519,7 @@ func (pool *LendingPool) validateBalance(cloneStateDb *state.StateDB, cloneLendi XDCXServ := XDPoSEngine.GetXDCXService() lendingServ := XDPoSEngine.GetLendingService() if XDCXServ == nil { - return fmt.Errorf("XDCx not found in order validation") + return errors.New("XDCx not found in order validation") } lendingTokenDecimal, err := XDCXServ.GetTokenDecimal(pool.chain, cloneStateDb, tx.LendingToken()) if err != nil { @@ -554,10 +554,10 @@ func (pool 
*LendingPool) validateBalance(cloneStateDb *state.StateDB, cloneLendi } } if lendTokenXDCPrice == nil || lendTokenXDCPrice.Sign() == 0 { - if tx.LendingToken().String() == common.XDCNativeAddress { + if tx.LendingToken() == common.XDCNativeAddressBinary { lendTokenXDCPrice = common.BasePrice } else { - lendTokenXDCPrice, err = lendingServ.GetMediumTradePriceBeforeEpoch(pool.chain, cloneStateDb, cloneTradingStateDb, tx.LendingToken(), common.HexToAddress(common.XDCNativeAddress)) + lendTokenXDCPrice, err = lendingServ.GetMediumTradePriceBeforeEpoch(pool.chain, cloneStateDb, cloneTradingStateDb, tx.LendingToken(), common.XDCNativeAddressBinary) if err != nil { return err } diff --git a/core/lending_pool_test.go b/core/lending_pool_test.go index 2d8104403f57..5ffe82d58c34 100644 --- a/core/lending_pool_test.go +++ b/core/lending_pool_test.go @@ -3,6 +3,13 @@ package core import ( "context" "fmt" + "log" + "math/big" + "strconv" + "strings" + "testing" + "time" + "github.com/XinFinOrg/XDPoSChain/XDCxlending/lendingstate" "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/types" @@ -10,12 +17,6 @@ import ( "github.com/XinFinOrg/XDPoSChain/crypto/sha3" "github.com/XinFinOrg/XDPoSChain/ethclient" "github.com/XinFinOrg/XDPoSChain/rpc" - "log" - "math/big" - "strconv" - "strings" - "testing" - "time" ) type LendingMsg struct { @@ -197,7 +198,7 @@ func TestSendLending(t *testing.T) { testSendLending(key, nonce, USDAddress, common.Address{}, new(big.Int).Mul(_1E8, big.NewInt(1000)), interestRate, lendingstate.Investing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) - testSendLending(key, nonce, USDAddress, common.HexToAddress(common.XDCNativeAddress), new(big.Int).Mul(_1E8, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, USDAddress, common.XDCNativeAddressBinary, new(big.Int).Mul(_1E8, 
big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) @@ -206,7 +207,7 @@ func TestSendLending(t *testing.T) { testSendLending(key, nonce, BTCAddress, common.Address{}, new(big.Int).Mul(_1E18, big.NewInt(1)), interestRate, lendingstate.Investing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) - testSendLending(key, nonce, BTCAddress, common.HexToAddress(common.XDCNativeAddress), new(big.Int).Mul(_1E18, big.NewInt(1)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, BTCAddress, common.XDCNativeAddressBinary, new(big.Int).Mul(_1E18, big.NewInt(1)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) @@ -221,19 +222,19 @@ func TestSendLending(t *testing.T) { // lendToken: XDC, collateral: BTC // amount 1000 XDC - testSendLending(key, nonce, common.HexToAddress(common.XDCNativeAddress), common.Address{}, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Investing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, common.XDCNativeAddressBinary, common.Address{}, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Investing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) - testSendLending(key, nonce, common.HexToAddress(common.XDCNativeAddress), BTCAddress, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, common.XDCNativeAddressBinary, BTCAddress, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) // 
lendToken: XDC, collateral: ETH // amount 1000 XDC - testSendLending(key, nonce, common.HexToAddress(common.XDCNativeAddress), common.Address{}, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Investing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, common.XDCNativeAddressBinary, common.Address{}, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Investing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) - testSendLending(key, nonce, common.HexToAddress(common.XDCNativeAddress), ETHAddress, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, common.XDCNativeAddressBinary, ETHAddress, new(big.Int).Mul(_1E18, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") nonce++ time.Sleep(time.Second) } @@ -282,6 +283,6 @@ func TestRecallLending(t *testing.T) { t.Error("fail to get nonce") t.FailNow() } - testSendLending(key, nonce, USDAddress, common.HexToAddress(common.XDCNativeAddress), new(big.Int).Mul(_1E8, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") + testSendLending(key, nonce, USDAddress, common.XDCNativeAddressBinary, new(big.Int).Mul(_1E8, big.NewInt(1000)), interestRate, lendingstate.Borrowing, lendingstate.LendingStatusNew, true, 0, 0, common.Hash{}, "") time.Sleep(2 * time.Second) } diff --git a/core/order_pool.go b/core/order_pool.go index c8708149b89c..dfade0ecd448 100644 --- a/core/order_pool.go +++ b/core/order_pool.go @@ -468,7 +468,7 @@ func (pool *OrderPool) validateOrder(tx *types.OrderTransaction) error { } XDCXServ := XDPoSEngine.GetXDCXService() if XDCXServ == nil { - return fmt.Errorf("XDCx not found in order validation") + return errors.New("XDCx not found in 
order validation") } baseDecimal, err := XDCXServ.GetTokenDecimal(pool.chain, cloneStateDb, tx.BaseToken()) if err != nil { diff --git a/core/order_pool_test.go b/core/order_pool_test.go index ea11b265bca7..1f03c692eefe 100644 --- a/core/order_pool_test.go +++ b/core/order_pool_test.go @@ -2,17 +2,18 @@ package core import ( "context" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/ethclient" - "github.com/XinFinOrg/XDPoSChain/rpc" "log" "math/big" "strconv" "strings" "testing" "time" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/crypto" + "github.com/XinFinOrg/XDPoSChain/ethclient" + "github.com/XinFinOrg/XDPoSChain/rpc" ) type OrderMsg struct { @@ -89,7 +90,7 @@ func testSendOrder(t *testing.T, amount, price *big.Int, side string, status str Price: price, ExchangeAddress: common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e"), UserAddress: crypto.PubkeyToAddress(privateKey.PublicKey), - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: BTCAddress, Status: status, Side: side, @@ -124,7 +125,7 @@ func testSendOrderXDCUSD(t *testing.T, amount, price *big.Int, side string, stat Price: price, ExchangeAddress: common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e"), UserAddress: crypto.PubkeyToAddress(privateKey.PublicKey), - BaseToken: common.HexToAddress(common.XDCNativeAddress), + BaseToken: common.XDCNativeAddressBinary, QuoteToken: USDAddress, Status: status, Side: side, @@ -194,7 +195,7 @@ func testSendOrderXDCBTC(t *testing.T, amount, price *big.Int, side string, stat Price: price, ExchangeAddress: common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e"), UserAddress: crypto.PubkeyToAddress(privateKey.PublicKey), - BaseToken: common.HexToAddress(common.XDCNativeAddress), + 
BaseToken: common.XDCNativeAddressBinary, QuoteToken: BTCAddress, Status: status, Side: side, diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go new file mode 100644 index 000000000000..6790fe141c58 --- /dev/null +++ b/core/rawdb/accessors_chain.go @@ -0,0 +1,321 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "bytes" + "encoding/binary" + "errors" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/ethdb" + "github.com/XinFinOrg/XDPoSChain/log" + "github.com/XinFinOrg/XDPoSChain/params" + "github.com/XinFinOrg/XDPoSChain/rlp" +) + +// WriteCanonicalHash stores the hash assigned to a canonical block number. +func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { + if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil { + log.Crit("Failed to store number to hash mapping", "err", err) + } +} + +// WriteHeaderNumber stores the hash->number mapping. 
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { + key := headerNumberKey(hash) + enc := encodeBlockNumber(number) + if err := db.Put(key, enc); err != nil { + log.Crit("Failed to store hash to number mapping", "err", err) + } +} + +// ReadHeaderNumber returns the header number assigned to a hash. +func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 { + data, _ := db.Get(headerNumberKey(hash)) + if len(data) != 8 { + return nil + } + number := binary.BigEndian.Uint64(data) + return &number +} + +// WriteHeadBlockHash stores the head block's hash. +func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Put(headBlockKey, hash.Bytes()); err != nil { + log.Crit("Failed to store last block's hash", "err", err) + } +} + +// WriteHeader stores a block header into the database and also stores the hash- +// to-number mapping. +func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) { + var ( + hash = header.Hash() + number = header.Number.Uint64() + ) + // Write the hash -> number mapping + WriteHeaderNumber(db, hash, number) + + // Write the encoded header + data, err := rlp.EncodeToBytes(header) + if err != nil { + log.Crit("Failed to RLP encode header", "err", err) + } + key := headerKey(number, hash) + if err := db.Put(key, data); err != nil { + log.Crit("Failed to store header", "err", err) + } +} + +// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. +func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + // First try to look up the data in ancient database. Extra hash + // comparison is necessary since ancient database only maintains + // the canonical data. + data, _ := db.Ancient(freezerBodiesTable, number) + if len(data) > 0 { + h, _ := db.Ancient(freezerHashTable, number) + if common.BytesToHash(h) == hash { + return data + } + } + // Then try to look up the data in leveldb. 
+ data, _ = db.Get(blockBodyKey(number, hash)) + if len(data) > 0 { + return data + } + // In the background freezer is moving data from leveldb to flatten files. + // So during the first check for ancient db, the data is not yet in there, + // but when we reach into leveldb, the data was already moved. That would + // result in a not found error. + data, _ = db.Ancient(freezerBodiesTable, number) + if len(data) > 0 { + h, _ := db.Ancient(freezerHashTable, number) + if common.BytesToHash(h) == hash { + return data + } + } + return nil // Can't find the data anywhere. +} + +// WriteBodyRLP stores an RLP encoded block body into the database. +func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) { + if err := db.Put(blockBodyKey(number, hash), rlp); err != nil { + log.Crit("Failed to store block body", "err", err) + } +} + +// ReadBody retrieves the block body corresponding to the hash. +func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body { + data := ReadBodyRLP(db, hash, number) + if len(data) == 0 { + return nil + } + body := new(types.Body) + if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + log.Error("Invalid block body RLP", "hash", hash, "err", err) + return nil + } + return body +} + +// WriteBody stores a block body into the database. +func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) { + data, err := rlp.EncodeToBytes(body) + if err != nil { + log.Crit("Failed to RLP encode body", "err", err) + } + WriteBodyRLP(db, hash, number, data) +} + +// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding. +func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + // First try to look up the data in ancient database. Extra hash + // comparison is necessary since ancient database only maintains + // the canonical data. 
+ data, _ := db.Ancient(freezerReceiptTable, number) + if len(data) > 0 { + h, _ := db.Ancient(freezerHashTable, number) + if common.BytesToHash(h) == hash { + return data + } + } + // Then try to look up the data in leveldb. + data, _ = db.Get(blockReceiptsKey(number, hash)) + if len(data) > 0 { + return data + } + // In the background freezer is moving data from leveldb to flatten files. + // So during the first check for ancient db, the data is not yet in there, + // but when we reach into leveldb, the data was already moved. That would + // result in a not found error. + data, _ = db.Ancient(freezerReceiptTable, number) + if len(data) > 0 { + h, _ := db.Ancient(freezerHashTable, number) + if common.BytesToHash(h) == hash { + return data + } + } + return nil // Can't find the data anywhere. +} + +// ReadRawReceipts retrieves all the transaction receipts belonging to a block. +// The receipt metadata fields are not guaranteed to be populated, so they +// should not be used. Use ReadReceipts instead if the metadata is needed. +func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts { + // Retrieve the flattened receipt slice + data := ReadReceiptsRLP(db, hash, number) + if len(data) == 0 { + return nil + } + // Convert the receipts from their storage form to their internal representation + storageReceipts := []*types.ReceiptForStorage{} + if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { + log.Error("Invalid receipt array RLP", "hash", hash, "err", err) + return nil + } + receipts := make(types.Receipts, len(storageReceipts)) + for i, storageReceipt := range storageReceipts { + receipts[i] = (*types.Receipt)(storageReceipt) + } + return receipts +} + +// ReadReceipts retrieves all the transaction receipts belonging to a block, including +// its correspoinding metadata fields. If it is unable to populate these metadata +// fields then nil is returned. 
+// +// The current implementation populates these metadata fields by reading the receipts' +// corresponding block body, so if the block body is not found it will return nil even +// if the receipt itself is stored. +func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts { + // We're deriving many fields from the block body, retrieve beside the receipt + receipts := ReadRawReceipts(db, hash, number) + if receipts == nil { + return nil + } + body := ReadBody(db, hash, number) + if body == nil { + log.Error("Missing body but have receipt", "hash", hash, "number", number) + return nil + } + if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil { + log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) + return nil + } + return receipts +} + +// WriteReceipts stores all the transaction receipts belonging to a block. +func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) { + // Convert the receipts into their storage form and serialize them + storageReceipts := make([]*types.ReceiptForStorage, len(receipts)) + for i, receipt := range receipts { + storageReceipts[i] = (*types.ReceiptForStorage)(receipt) + } + bytes, err := rlp.EncodeToBytes(storageReceipts) + if err != nil { + log.Crit("Failed to encode block receipts", "err", err) + } + // Store the flattened receipt slice + if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil { + log.Crit("Failed to store block receipts", "err", err) + } +} + +// storedReceiptRLP is the storage encoding of a receipt. +// Re-definition in core/types/receipt.go. 
+type storedReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Bloom types.Bloom + TxHash common.Hash + ContractAddress common.Address + Logs []*types.LogForStorage + GasUsed uint64 +} + +// ReceiptLogs is a barebone version of ReceiptForStorage which only keeps +// the list of logs. When decoding a stored receipt into this object we +// avoid creating the bloom filter. +type receiptLogs struct { + Logs []*types.Log +} + +// DecodeRLP implements rlp.Decoder. +func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error { + var stored storedReceiptRLP + if err := s.Decode(&stored); err != nil { + return err + } + r.Logs = make([]*types.Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*types.Log)(log) + } + return nil +} + +// DeriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc. +func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error { + logIndex := uint(0) + if len(txs) != len(receipts) { + return errors.New("transaction and receipt count mismatch") + } + for i := 0; i < len(receipts); i++ { + txHash := txs[i].Hash() + // The derived log fields can simply be set from the block and transaction + for j := 0; j < len(receipts[i].Logs); j++ { + receipts[i].Logs[j].BlockNumber = number + receipts[i].Logs[j].BlockHash = hash + receipts[i].Logs[j].TxHash = txHash + receipts[i].Logs[j].TxIndex = uint(i) + receipts[i].Logs[j].Index = logIndex + logIndex++ + } + } + return nil +} + +// ReadLogs retrieves the logs for all transactions in a block. In case +// receipts is not found, a nil is returned. +// Note: ReadLogs does not derive unstored log fields. 
+func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log { + // Retrieve the flattened receipt slice + data := ReadReceiptsRLP(db, hash, number) + if len(data) == 0 { + return nil + } + receipts := []*receiptLogs{} + if err := rlp.DecodeBytes(data, &receipts); err != nil { + log.Error("Invalid receipt array RLP", "hash", hash, "err", err) + return nil + } + + logs := make([][]*types.Log, len(receipts)) + for i, receipt := range receipts { + logs[i] = receipt.Logs + } + return logs +} + +// WriteBlock serializes a block into the database, header and body separately. +func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) { + WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) + WriteHeader(db, block.Header()) +} diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go new file mode 100644 index 000000000000..a514c5857fdc --- /dev/null +++ b/core/rawdb/accessors_chain_test.go @@ -0,0 +1,239 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package rawdb + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "math/big" + "testing" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/params" + "github.com/XinFinOrg/XDPoSChain/rlp" +) + +type fullLogRLP struct { + Address common.Address + Topics []common.Hash + Data []byte + BlockNumber uint64 + TxHash common.Hash + TxIndex uint + BlockHash common.Hash + Index uint +} + +func newFullLogRLP(l *types.Log) *fullLogRLP { + return &fullLogRLP{ + Address: l.Address, + Topics: l.Topics, + Data: l.Data, + BlockNumber: l.BlockNumber, + TxHash: l.TxHash, + TxIndex: l.TxIndex, + BlockHash: l.BlockHash, + Index: l.Index, + } +} + +// Tests that logs associated with a single block can be retrieved. +func TestReadLogs(t *testing.T) { + db := NewMemoryDatabase() + + // Create a live block since we need metadata to reconstruct the receipt + tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil) + tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil) + + body := &types.Body{Transactions: types.Transactions{tx1, tx2}} + + // Create the two receipts to manage afterwards + receipt1 := &types.Receipt{ + Status: types.ReceiptStatusFailed, + CumulativeGasUsed: 1, + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x11})}, + {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + TxHash: tx1.Hash(), + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: 111111, + } + receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1}) + + receipt2 := &types.Receipt{ + PostState: common.Hash{2}.Bytes(), + CumulativeGasUsed: 2, + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x22})}, + {Address: common.BytesToAddress([]byte{0x02, 0x22})}, + }, + TxHash: tx2.Hash(), + ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), + GasUsed: 222222, + } + receipt2.Bloom = 
types.CreateBloom(types.Receipts{receipt2}) + receipts := []*types.Receipt{receipt1, receipt2} + + hash := common.BytesToHash([]byte{0x03, 0x14}) + // Check that no receipt entries are in a pristine database + if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 { + t.Fatalf("non existent receipts returned: %v", rs) + } + // Insert the body that corresponds to the receipts + WriteBody(db, hash, 0, body) + + // Insert the receipt slice into the database and check presence + WriteReceipts(db, hash, 0, receipts) + + logs := ReadLogs(db, hash, 0) + if len(logs) == 0 { + t.Fatalf("no logs returned") + } + if have, want := len(logs), 2; have != want { + t.Fatalf("unexpected number of logs returned, have %d want %d", have, want) + } + if have, want := len(logs[0]), 2; have != want { + t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want) + } + if have, want := len(logs[1]), 2; have != want { + t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want) + } + + for i, pr := range receipts { + for j, pl := range pr.Logs { + rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j])) + if err != nil { + t.Fatal(err) + } + rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(rlpHave, rlpWant) { + t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant)) + } + } + } +} + +func TestDeriveLogFields(t *testing.T) { + // Create a few transactions to have receipts for + to2 := common.HexToAddress("0x2") + to3 := common.HexToAddress("0x3") + txs := types.Transactions{ + types.NewTx(&types.LegacyTx{ + Nonce: 1, + Value: big.NewInt(1), + Gas: 1, + GasPrice: big.NewInt(1), + }), + types.NewTx(&types.LegacyTx{ + To: &to2, + Nonce: 2, + Value: big.NewInt(2), + Gas: 2, + GasPrice: big.NewInt(2), + }), + types.NewTx(&types.AccessListTx{ + To: &to3, + Nonce: 3, + Value: big.NewInt(3), + Gas: 3, + GasPrice: 
big.NewInt(3), + }), + } + // Create the corresponding receipts + receipts := []*receiptLogs{ + { + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x11})}, + {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + }, + { + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x22})}, + {Address: common.BytesToAddress([]byte{0x02, 0x22})}, + }, + }, + { + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x33})}, + {Address: common.BytesToAddress([]byte{0x03, 0x33})}, + }, + }, + } + + // Derive log metadata fields + number := big.NewInt(1) + hash := common.BytesToHash([]byte{0x03, 0x14}) + if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil { + t.Fatal(err) + } + + // Iterate over all the computed fields and check that they're correct + logIndex := uint(0) + for i := range receipts { + for j := range receipts[i].Logs { + if receipts[i].Logs[j].BlockNumber != number.Uint64() { + t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64()) + } + if receipts[i].Logs[j].BlockHash != hash { + t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String()) + } + if receipts[i].Logs[j].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String()) + } + if receipts[i].Logs[j].TxIndex != uint(i) { + t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i) + } + if receipts[i].Logs[j].Index != logIndex { + t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex) + } + logIndex++ + } + } +} + +func BenchmarkDecodeRLPLogs(b *testing.B) { + // Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269 + buf, err := ioutil.ReadFile("testdata/stored_receipts.bin") + if err != nil { + b.Fatal(err) + } + 
b.Run("ReceiptForStorage", func(b *testing.B) { + b.ReportAllocs() + var r []*types.ReceiptForStorage + for i := 0; i < b.N; i++ { + if err := rlp.DecodeBytes(buf, &r); err != nil { + b.Fatal(err) + } + } + }) + b.Run("rlpLogs", func(b *testing.B) { + b.ReportAllocs() + var r []*receiptLogs + for i := 0; i < b.N; i++ { + if err := rlp.DecodeBytes(buf, &r); err != nil { + b.Fatal(err) + } + } + }) +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go new file mode 100644 index 000000000000..0a08eeed69ed --- /dev/null +++ b/core/rawdb/schema.go @@ -0,0 +1,79 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package rawdb contains a collection of low level database accessors. +package rawdb + +import ( + "encoding/binary" + + "github.com/XinFinOrg/XDPoSChain/common" +) + +// The fields below define the low level database schema prefixing. +var ( + headBlockKey = []byte("LastBlock") + // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). 
+ headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header + headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash + headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian) + + blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body + blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts +) + +const ( + // freezerHashTable indicates the name of the freezer canonical hash table. + freezerHashTable = "hashes" + + // freezerBodiesTable indicates the name of the freezer block body table. + freezerBodiesTable = "bodies" + + // freezerReceiptTable indicates the name of the freezer receipts table. + freezerReceiptTable = "receipts" +) + +// encodeBlockNumber encodes a block number as big endian uint64 +func encodeBlockNumber(number uint64) []byte { + enc := make([]byte, 8) + binary.BigEndian.PutUint64(enc, number) + return enc +} + +// headerKey = headerPrefix + num (uint64 big endian) + hash +func headerKey(number uint64, hash common.Hash) []byte { + return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + +// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix +func headerHashKey(number uint64) []byte { + return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...) +} + +// headerNumberKey = headerNumberPrefix + hash +func headerNumberKey(hash common.Hash) []byte { + return append(headerNumberPrefix, hash.Bytes()...) +} + +// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash +func blockBodyKey(number uint64, hash common.Hash) []byte { + return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) 
+} + +// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash +func blockReceiptsKey(number uint64, hash common.Hash) []byte { + return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} diff --git a/core/state/database.go b/core/state/database.go index 03e4a67ac3ba..06348bb85f50 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -20,9 +20,9 @@ import ( "fmt" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/lru" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/trie" - lru "github.com/hashicorp/golang-lru" ) const ( @@ -107,16 +107,15 @@ func NewDatabase(db ethdb.Database) Database { // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a // large memory cache. func NewDatabaseWithCache(db ethdb.Database, cache int) Database { - csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ db: trie.NewDatabaseWithCache(db, cache), - codeSizeCache: csc, + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), } } type cachingDB struct { db *trie.Database - codeSizeCache *lru.Cache + codeSizeCache *lru.Cache[common.Hash, int] } // OpenTrie opens the main account trie at a specific root hash. @@ -151,7 +150,7 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error // ContractCodeSize retrieves a particular contracts code's size. 
func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { - return cached.(int), nil + return cached, nil } code, err := db.ContractCode(addrHash, codeHash) return len(code), err diff --git a/core/state/statedb.go b/core/state/statedb.go index 15ca9ca67efc..bac5106f5b96 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -796,6 +796,6 @@ func (s *StateDB) GetOwner(candidate common.Address) common.Address { // validatorsState[_candidate].owner; locValidatorsState := GetLocMappingAtKey(candidate.Hash(), slot) locCandidateOwner := locValidatorsState.Add(locValidatorsState, new(big.Int).SetUint64(uint64(0))) - ret := s.GetState(common.HexToAddress(common.MasternodeVotingSMC), common.BigToHash(locCandidateOwner)) + ret := s.GetState(common.MasternodeVotingSMCBinary, common.BigToHash(locCandidateOwner)) return common.HexToAddress(ret.Hex()) } diff --git a/core/state/statedb_utils.go b/core/state/statedb_utils.go index a9c210b569ea..7bd2c4d355cc 100644 --- a/core/state/statedb_utils.go +++ b/core/state/statedb_utils.go @@ -20,7 +20,7 @@ func GetSigners(statedb *StateDB, block *types.Block) []common.Address { slot := slotBlockSignerMapping["blockSigners"] keys := []common.Hash{} keyArrSlot := GetLocMappingAtKey(block.Hash(), slot) - arrSlot := statedb.GetState(common.HexToAddress(common.BlockSigners), common.BigToHash(keyArrSlot)) + arrSlot := statedb.GetState(common.BlockSignersBinary, common.BigToHash(keyArrSlot)) arrLength := arrSlot.Big().Uint64() for i := uint64(0); i < arrLength; i++ { key := GetLocDynamicArrAtElement(common.BigToHash(keyArrSlot), i, 1) @@ -28,7 +28,7 @@ func GetSigners(statedb *StateDB, block *types.Block) []common.Address { } rets := []common.Address{} for _, key := range keys { - ret := statedb.GetState(common.HexToAddress(common.BlockSigners), key) + ret := statedb.GetState(common.BlockSignersBinary, key) rets = append(rets, 
common.HexToAddress(ret.Hex())) } @@ -45,7 +45,7 @@ var ( func GetSecret(statedb *StateDB, address common.Address) [][32]byte { slot := slotRandomizeMapping["randomSecret"] locSecret := GetLocMappingAtKey(address.Hash(), slot) - arrLength := statedb.GetState(common.HexToAddress(common.RandomizeSMC), common.BigToHash(locSecret)) + arrLength := statedb.GetState(common.RandomizeSMCBinary, common.BigToHash(locSecret)) keys := []common.Hash{} for i := uint64(0); i < arrLength.Big().Uint64(); i++ { key := GetLocDynamicArrAtElement(common.BigToHash(locSecret), i, 1) @@ -53,7 +53,7 @@ func GetSecret(statedb *StateDB, address common.Address) [][32]byte { } rets := [][32]byte{} for _, key := range keys { - ret := statedb.GetState(common.HexToAddress(common.RandomizeSMC), key) + ret := statedb.GetState(common.RandomizeSMCBinary, key) rets = append(rets, ret) } return rets @@ -62,7 +62,7 @@ func GetSecret(statedb *StateDB, address common.Address) [][32]byte { func GetOpening(statedb *StateDB, address common.Address) [32]byte { slot := slotRandomizeMapping["randomOpening"] locOpening := GetLocMappingAtKey(address.Hash(), slot) - ret := statedb.GetState(common.HexToAddress(common.RandomizeSMC), common.BigToHash(locOpening)) + ret := statedb.GetState(common.RandomizeSMCBinary, common.BigToHash(locOpening)) return ret } @@ -92,13 +92,13 @@ var ( func GetCandidates(statedb *StateDB) []common.Address { slot := slotValidatorMapping["candidates"] slotHash := common.BigToHash(new(big.Int).SetUint64(slot)) - arrLength := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), slotHash) + arrLength := statedb.GetState(common.MasternodeVotingSMCBinary, slotHash) count := arrLength.Big().Uint64() rets := make([]common.Address, 0, count) for i := uint64(0); i < count; i++ { key := GetLocDynamicArrAtElement(slotHash, i, 1) - ret := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), key) + ret := statedb.GetState(common.MasternodeVotingSMCBinary, key) if !ret.IsZero() 
{ rets = append(rets, common.HexToAddress(ret.Hex())) } @@ -112,7 +112,7 @@ func GetCandidateOwner(statedb *StateDB, candidate common.Address) common.Addres // validatorsState[_candidate].owner; locValidatorsState := GetLocMappingAtKey(candidate.Hash(), slot) locCandidateOwner := locValidatorsState.Add(locValidatorsState, new(big.Int).SetUint64(uint64(0))) - ret := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), common.BigToHash(locCandidateOwner)) + ret := statedb.GetState(common.MasternodeVotingSMCBinary, common.BigToHash(locCandidateOwner)) return common.HexToAddress(ret.Hex()) } @@ -121,7 +121,7 @@ func GetCandidateCap(statedb *StateDB, candidate common.Address) *big.Int { // validatorsState[_candidate].cap; locValidatorsState := GetLocMappingAtKey(candidate.Hash(), slot) locCandidateCap := locValidatorsState.Add(locValidatorsState, new(big.Int).SetUint64(uint64(1))) - ret := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), common.BigToHash(locCandidateCap)) + ret := statedb.GetState(common.MasternodeVotingSMCBinary, common.BigToHash(locCandidateCap)) return ret.Big() } @@ -129,7 +129,7 @@ func GetVoters(statedb *StateDB, candidate common.Address) []common.Address { //mapping(address => address[]) voters; slot := slotValidatorMapping["voters"] locVoters := GetLocMappingAtKey(candidate.Hash(), slot) - arrLength := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), common.BigToHash(locVoters)) + arrLength := statedb.GetState(common.MasternodeVotingSMCBinary, common.BigToHash(locVoters)) keys := []common.Hash{} for i := uint64(0); i < arrLength.Big().Uint64(); i++ { key := GetLocDynamicArrAtElement(common.BigToHash(locVoters), i, 1) @@ -137,7 +137,7 @@ func GetVoters(statedb *StateDB, candidate common.Address) []common.Address { } rets := []common.Address{} for _, key := range keys { - ret := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), key) + ret := 
statedb.GetState(common.MasternodeVotingSMCBinary, key) rets = append(rets, common.HexToAddress(ret.Hex())) } @@ -149,6 +149,6 @@ func GetVoterCap(statedb *StateDB, candidate, voter common.Address) *big.Int { locValidatorsState := GetLocMappingAtKey(candidate.Hash(), slot) locCandidateVoters := locValidatorsState.Add(locValidatorsState, new(big.Int).SetUint64(uint64(2))) retByte := crypto.Keccak256(voter.Hash().Bytes(), common.BigToHash(locCandidateVoters).Bytes()) - ret := statedb.GetState(common.HexToAddress(common.MasternodeVotingSMC), common.BytesToHash(retByte)) + ret := statedb.GetState(common.MasternodeVotingSMCBinary, common.BytesToHash(retByte)) return ret.Big() } diff --git a/core/state_processor.go b/core/state_processor.go index cc5697e069e5..e4bab6797bc1 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -80,7 +80,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, tra misc.ApplyDAOHardFork(statedb) } if common.TIPSigning.Cmp(header.Number) == 0 { - statedb.DeleteAddress(common.HexToAddress(common.BlockSigners)) + statedb.DeleteAddress(common.BlockSignersBinary) } parentState := statedb.Copy() InitSignerInTransactions(p.config, header, block.Transactions()) @@ -146,7 +146,7 @@ func (p *StateProcessor) ProcessBlockNoValidator(cBlock *CalculatedBlock, stated misc.ApplyDAOHardFork(statedb) } if common.TIPSigning.Cmp(header.Number) == 0 { - statedb.DeleteAddress(common.HexToAddress(common.BlockSigners)) + statedb.DeleteAddress(common.BlockSignersBinary) } if cBlock.stop { return nil, nil, 0, ErrStopPreparingBlock @@ -215,13 +215,14 @@ func (p *StateProcessor) ProcessBlockNoValidator(cBlock *CalculatedBlock, stated // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
func ApplyTransaction(config *params.ChainConfig, tokensFee map[common.Address]*big.Int, bc *BlockChain, author *common.Address, gp *GasPool, statedb *state.StateDB, XDCxState *tradingstate.TradingStateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error, bool) { - if tx.To() != nil && tx.To().String() == common.BlockSigners && config.IsTIPSigning(header.Number) { + to := tx.To() + if to != nil && *to == common.BlockSignersBinary && config.IsTIPSigning(header.Number) { return ApplySignTransaction(config, statedb, header, tx, usedGas) } - if tx.To() != nil && tx.To().String() == common.TradingStateAddr && config.IsTIPXDCXReceiver(header.Number) { + if to != nil && *to == common.TradingStateAddrBinary && config.IsTIPXDCXReceiver(header.Number) { return ApplyEmptyTransaction(config, statedb, header, tx, usedGas) } - if tx.To() != nil && tx.To().String() == common.XDCXLendingAddress && config.IsTIPXDCXReceiver(header.Number) { + if to != nil && *to == common.XDCXLendingAddressBinary && config.IsTIPXDCXReceiver(header.Number) { return ApplyEmptyTransaction(config, statedb, header, tx, usedGas) } if tx.IsTradingTransaction() && config.IsTIPXDCXReceiver(header.Number) { @@ -233,8 +234,8 @@ func ApplyTransaction(config *params.ChainConfig, tokensFee map[common.Address]* } var balanceFee *big.Int - if tx.To() != nil { - if value, ok := tokensFee[*tx.To()]; ok { + if to != nil { + if value, ok := tokensFee[*to]; ok { balanceFee = value } } @@ -441,7 +442,7 @@ func ApplyTransaction(config *params.ChainConfig, tokensFee map[common.Address]* receipt.BlockNumber = header.Number receipt.TransactionIndex = uint(statedb.TxIndex()) if balanceFee != nil && failed { - state.PayFeeWithTRC21TxFail(statedb, msg.From(), *tx.To()) + state.PayFeeWithTRC21TxFail(statedb, msg.From(), *to) } return receipt, gas, err, balanceFee != nil } @@ -473,7 +474,7 @@ func ApplySignTransaction(config *params.ChainConfig, statedb *state.StateDB, he 
// if the transaction created a contract, store the creation address in the receipt. // Set the receipt logs and create a bloom for filtering log := &types.Log{} - log.Address = common.HexToAddress(common.BlockSigners) + log.Address = common.BlockSignersBinary log.BlockNumber = header.Number.Uint64() statedb.AddLog(log) receipt.Logs = statedb.GetLogs(tx.Hash()) diff --git a/core/state_transition.go b/core/state_transition.go index 48d6ebcae4ec..3e1120ceb986 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -18,6 +18,7 @@ package core import ( "errors" + "fmt" "math" "math/big" @@ -100,13 +101,13 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, } // Make sure we don't exceed uint64 for all data combinations if (math.MaxUint64-gas)/params.TxDataNonZeroGas < nz { - return 0, vm.ErrOutOfGas + return 0, ErrGasUintOverflow } gas += nz * params.TxDataNonZeroGas z := uint64(len(data)) - nz if (math.MaxUint64-gas)/params.TxDataZeroGas < z { - return 0, vm.ErrOutOfGas + return 0, ErrGasUintOverflow } gas += z * params.TxDataZeroGas } @@ -169,15 +170,6 @@ func (st *StateTransition) to() vm.AccountRef { return reference } -func (st *StateTransition) useGas(amount uint64) error { - if st.gas < amount { - return vm.ErrOutOfGas - } - st.gas -= amount - - return nil -} - func (st *StateTransition) buyGas() error { var ( state = st.state @@ -205,16 +197,19 @@ func (st *StateTransition) buyGas() error { } func (st *StateTransition) preCheck() error { - msg := st.msg - sender := st.from() - // Make sure this transaction's nonce is correct - if msg.CheckNonce() { - nonce := st.state.GetNonce(sender.Address()) - if nonce < msg.Nonce() { - return ErrNonceTooHigh - } else if nonce > msg.Nonce() { - return ErrNonceTooLow + if st.msg.CheckNonce() { + // Make sure this transaction's nonce is correct. 
+ stNonce := st.state.GetNonce(st.from().Address()) + if msgNonce := st.msg.Nonce(); stNonce < msgNonce { + return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, + st.msg.From().Hex(), msgNonce, stNonce) + } else if stNonce > msgNonce { + return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, + st.msg.From().Hex(), msgNonce, stNonce) + } else if stNonce+1 < stNonce { + return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax, + st.msg.From().Hex(), stNonce) } } return st.buyGas() @@ -238,9 +233,10 @@ func (st *StateTransition) TransitionDb(owner common.Address) (ret []byte, usedG if err != nil { return nil, 0, false, err, nil } - if err = st.useGas(gas); err != nil { - return nil, 0, false, err, nil + if st.gas < gas { + return nil, 0, false, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas), nil } + st.gas -= gas if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsEIP1559 { st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList()) @@ -286,7 +282,7 @@ func (st *StateTransition) TransitionDb(owner common.Address) (ret []byte, usedG st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) } - return ret, st.gasUsed(), vmerr != nil, err, vmerr + return ret, st.gasUsed(), vmerr != nil, nil, vmerr } func (st *StateTransition) refundGas() { diff --git a/core/tx_list.go b/core/tx_list.go index 5f623806d609..3e746ff3518e 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -21,6 +21,8 @@ import ( "math" "math/big" "sort" + "sync" + "sync/atomic" "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/types" @@ -450,9 +452,10 @@ func (h *priceHeap) Pop() interface{} { // in txpool but only interested in the remote part. It means only remote transactions // will be considered for tracking, sorting, eviction, etc. 
type txPricedList struct { - all *txLookup // Pointer to the map of all transactions - remotes *priceHeap // Heap of prices of all the stored **remote** transactions - stales int // Number of stale price points to (re-heap trigger) + all *txLookup // Pointer to the map of all transactions + remotes *priceHeap // Heap of prices of all the stored **remote** transactions + stales int64 // Number of stale price points to (re-heap trigger) + reheapMu sync.Mutex // Mutex asserts that only one routine is reheaping the list } // newTxPricedList creates a new price-sorted transaction heap. @@ -476,8 +479,8 @@ func (l *txPricedList) Put(tx *types.Transaction, local bool) { // the heap if a large enough ratio of transactions go stale. func (l *txPricedList) Removed(count int) { // Bump the stale counter, but exit if still too low (< 25%) - l.stales += count - if l.stales <= len(*l.remotes)/4 { + stales := atomic.AddInt64(&l.stales, int64(count)) + if int(stales) <= len(*l.remotes)/4 { return } // Seems we've reached a critical number of stale transactions, reheap @@ -515,7 +518,7 @@ func (l *txPricedList) Underpriced(tx *types.Transaction) bool { for len(*l.remotes) > 0 { head := []*types.Transaction(*l.remotes)[0] if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated - l.stales-- + atomic.AddInt64(&l.stales, -1) heap.Pop(l.remotes) continue } @@ -541,7 +544,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) // Discard stale transactions if found during cleanup tx := heap.Pop(l.remotes).(*types.Transaction) if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated - l.stales-- + atomic.AddInt64(&l.stales, -1) continue } // Non stale transaction found, discard it @@ -560,9 +563,12 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) // Reheap forcibly rebuilds the heap based on the current remote transaction set. 
func (l *txPricedList) Reheap() { + l.reheapMu.Lock() + defer l.reheapMu.Unlock() reheap := make(priceHeap, 0, l.all.RemoteCount()) - l.stales, l.remotes = 0, &reheap + atomic.StoreInt64(&l.stales, 0) + l.remotes = &reheap l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { *l.remotes = append(*l.remotes, tx) return true diff --git a/core/tx_pool.go b/core/tx_pool.go index 64359a972bbc..7a3aa8cd1efe 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -23,6 +23,7 @@ import ( "math/big" "sort" "sync" + "sync/atomic" "time" "github.com/XinFinOrg/XDPoSChain/common" @@ -282,6 +283,7 @@ type TxPool struct { reorgDoneCh chan chan struct{} reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop wg sync.WaitGroup // tracks loop, scheduleReorgLoop + initDoneCh chan struct{} // is closed once the pool is initialized (for tests) eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. IsSigner func(address common.Address) bool @@ -314,6 +316,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block queueTxEventCh: make(chan *types.Transaction), reorgDoneCh: make(chan chan struct{}), reorgShutdownCh: make(chan struct{}), + initDoneCh: make(chan struct{}), gasPrice: new(big.Int).SetUint64(config.PriceLimit), trc21FeeCapacity: map[common.Address]*big.Int{}, } @@ -368,6 +371,8 @@ func (pool *TxPool) loop() { defer evict.Stop() defer journal.Stop() + // Notify tests that the init phase is done + close(pool.initDoneCh) for { select { // Handle ChainHeadEvent @@ -386,8 +391,8 @@ func (pool *TxPool) loop() { case <-report.C: pool.mu.RLock() pending, queued := pool.stats() - stales := pool.priced.stales pool.mu.RUnlock() + stales := int(atomic.LoadInt64(&pool.priced.stales)) if pending != prevPending || queued != prevQueued || stales != prevStales { log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) diff --git a/core/tx_pool_test.go 
b/core/tx_pool_test.go index dbbdd24baa83..62e1d70a61da 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -22,13 +22,13 @@ import ( "math/big" "math/rand" "os" + "sync/atomic" "testing" "time" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/core/rawdb" - - "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/crypto" @@ -69,7 +69,7 @@ func (bc *testBlockChain) Config() *params.ChainConfig { func (bc *testBlockChain) CurrentBlock() *types.Block { return types.NewBlock(&types.Header{ - GasLimit: bc.gasLimit, + GasLimit: atomic.LoadUint64(&bc.gasLimit), }, nil, nil, nil) } @@ -110,6 +110,8 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) { key, _ := crypto.GenerateKey() pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + // wait for the pool to initialize + <-pool.initDoneCh return pool, key } @@ -572,7 +574,7 @@ func TestTransactionDropping(t *testing.T) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) } // Reduce the block gas limit, check that invalidated transactions are dropped - pool.chain.(*testBlockChain).gasLimit = 100 + atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100) <-pool.requestReset(nil, nil) if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { diff --git a/core/types/consensus_v2_test.go b/core/types/consensus_v2_test.go index 55687f150b5b..890dcba628d4 100644 --- a/core/types/consensus_v2_test.go +++ b/core/types/consensus_v2_test.go @@ -1,6 +1,7 @@ package types import ( + "errors" "fmt" "math/big" "reflect" @@ -15,11 +16,11 @@ import ( // Decode extra fields for consensus version >= 2 (XDPoS 2.0 and future versions) func DecodeBytesExtraFields(b []byte, val interface{}) error { if len(b) == 0 { - return fmt.Errorf("extra field is 0 length") + return errors.New("extra field is 0 
length") } switch b[0] { case 1: - return fmt.Errorf("consensus version 1 is not applicable for decoding extra fields") + return errors.New("consensus version 1 is not applicable for decoding extra fields") case 2: return rlp.DecodeBytes(b[1:], val) default: diff --git a/core/types/gen_log_json.go b/core/types/gen_log_json.go index cb0071938dce..c9bdff5b3801 100644 --- a/core/types/gen_log_json.go +++ b/core/types/gen_log_json.go @@ -12,6 +12,7 @@ import ( var _ = (*logMarshaling)(nil) +// MarshalJSON marshals as JSON. func (l Log) MarshalJSON() ([]byte, error) { type Log struct { Address common.Address `json:"address" gencodec:"required"` @@ -19,9 +20,9 @@ func (l Log) MarshalJSON() ([]byte, error) { Data hexutil.Bytes `json:"data" gencodec:"required"` BlockNumber hexutil.Uint64 `json:"blockNumber"` TxHash common.Hash `json:"transactionHash" gencodec:"required"` - TxIndex hexutil.Uint `json:"transactionIndex" gencodec:"required"` + TxIndex hexutil.Uint `json:"transactionIndex"` BlockHash common.Hash `json:"blockHash"` - Index hexutil.Uint `json:"logIndex" gencodec:"required"` + Index hexutil.Uint `json:"logIndex"` Removed bool `json:"removed"` } var enc Log @@ -37,6 +38,7 @@ func (l Log) MarshalJSON() ([]byte, error) { return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. 
func (l *Log) UnmarshalJSON(input []byte) error { type Log struct { Address *common.Address `json:"address" gencodec:"required"` @@ -44,9 +46,9 @@ func (l *Log) UnmarshalJSON(input []byte) error { Data *hexutil.Bytes `json:"data" gencodec:"required"` BlockNumber *hexutil.Uint64 `json:"blockNumber"` TxHash *common.Hash `json:"transactionHash" gencodec:"required"` - TxIndex *hexutil.Uint `json:"transactionIndex" gencodec:"required"` + TxIndex *hexutil.Uint `json:"transactionIndex"` BlockHash *common.Hash `json:"blockHash"` - Index *hexutil.Uint `json:"logIndex" gencodec:"required"` + Index *hexutil.Uint `json:"logIndex"` Removed *bool `json:"removed"` } var dec Log @@ -72,17 +74,15 @@ func (l *Log) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'transactionHash' for Log") } l.TxHash = *dec.TxHash - if dec.TxIndex == nil { - return errors.New("missing required field 'transactionIndex' for Log") + if dec.TxIndex != nil { + l.TxIndex = uint(*dec.TxIndex) } - l.TxIndex = uint(*dec.TxIndex) if dec.BlockHash != nil { l.BlockHash = *dec.BlockHash } - if dec.Index == nil { - return errors.New("missing required field 'logIndex' for Log") + if dec.Index != nil { + l.Index = uint(*dec.Index) } - l.Index = uint(*dec.Index) if dec.Removed != nil { l.Removed = *dec.Removed } diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go index b72ef0270d3d..89379a823a16 100644 --- a/core/types/gen_receipt_json.go +++ b/core/types/gen_receipt_json.go @@ -18,7 +18,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { type Receipt struct { Type hexutil.Uint64 `json:"type,omitempty"` PostState hexutil.Bytes `json:"root"` - Status hexutil.Uint `json:"status"` + Status hexutil.Uint64 `json:"status"` CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` Bloom Bloom `json:"logsBloom" gencodec:"required"` Logs []*Log `json:"logs" gencodec:"required"` @@ -32,7 +32,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { var 
enc Receipt enc.Type = hexutil.Uint64(r.Type) enc.PostState = r.PostState - enc.Status = hexutil.Uint(r.Status) + enc.Status = hexutil.Uint64(r.Status) enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed) enc.Bloom = r.Bloom enc.Logs = r.Logs @@ -50,7 +50,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { type Receipt struct { Type *hexutil.Uint64 `json:"type,omitempty"` PostState *hexutil.Bytes `json:"root"` - Status *hexutil.Uint `json:"status"` + Status *hexutil.Uint64 `json:"status"` CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` Bloom *Bloom `json:"logsBloom" gencodec:"required"` Logs []*Log `json:"logs" gencodec:"required"` @@ -72,7 +72,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { r.PostState = *dec.PostState } if dec.Status != nil { - r.Status = uint(*dec.Status) + r.Status = uint64(*dec.Status) } if dec.CumulativeGasUsed == nil { return errors.New("missing required field 'cumulativeGasUsed' for Receipt") diff --git a/core/types/log.go b/core/types/log.go index d459d2892f61..cf1dbc4ecb58 100644 --- a/core/types/log.go +++ b/core/types/log.go @@ -45,11 +45,11 @@ type Log struct { // hash of the transaction TxHash common.Hash `json:"transactionHash" gencodec:"required"` // index of the transaction in the block - TxIndex uint `json:"transactionIndex" gencodec:"required"` + TxIndex uint `json:"transactionIndex"` // hash of the block in which the transaction was included BlockHash common.Hash `json:"blockHash"` // index of the log in the receipt - Index uint `json:"logIndex" gencodec:"required"` + Index uint `json:"logIndex"` // The Removed field is true if this log was reverted due to a chain reorganisation. // You must pay attention to this field if you receive logs through a filter query. 
diff --git a/core/types/log_test.go b/core/types/log_test.go index b6ab7810b766..fbd26f72ff83 100644 --- a/core/types/log_test.go +++ b/core/types/log_test.go @@ -18,7 +18,7 @@ package types import ( "encoding/json" - "fmt" + "errors" "reflect" "testing" @@ -97,7 +97,7 @@ var unmarshalLogTests = map[string]struct { }, "missing data": { input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615","0x000000000000000000000000f9dff387dcb5cc4cca5b91adb07a95f54e9f1bb6"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, - wantError: fmt.Errorf("missing required field 'data' for Log"), + wantError: errors.New("missing required field 'data' for Log"), }, } diff --git a/core/types/receipt.go b/core/types/receipt.go index d1b3cc46bf37..76507b521064 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -26,6 +26,8 @@ import ( "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/common/hexutil" + "github.com/XinFinOrg/XDPoSChain/crypto" + "github.com/XinFinOrg/XDPoSChain/params" "github.com/XinFinOrg/XDPoSChain/rlp" ) @@ -41,10 +43,10 @@ var errEmptyTypedReceipt = errors.New("empty typed receipt bytes") const ( // ReceiptStatusFailed is the status code of a transaction if execution failed. - ReceiptStatusFailed = uint(0) + ReceiptStatusFailed = uint64(0) // ReceiptStatusSuccessful is the status code of a transaction if execution succeeded. - ReceiptStatusSuccessful = uint(1) + ReceiptStatusSuccessful = uint64(1) ) // Receipt represents the results of a transaction. 
@@ -52,7 +54,7 @@ type Receipt struct { // Consensus fields: These fields are defined by the Yellow Paper Type uint8 `json:"type,omitempty"` PostState []byte `json:"root"` - Status uint `json:"status"` + Status uint64 `json:"status"` CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` Bloom Bloom `json:"logsBloom" gencodec:"required"` Logs []*Log `json:"logs" gencodec:"required"` @@ -73,7 +75,7 @@ type Receipt struct { type receiptMarshaling struct { Type hexutil.Uint64 PostState hexutil.Bytes - Status hexutil.Uint + Status hexutil.Uint64 CumulativeGasUsed hexutil.Uint64 GasUsed hexutil.Uint64 BlockNumber *hexutil.Big @@ -328,3 +330,47 @@ func (r Receipts) GetRlp(i int) []byte { } return bytes } + +// DeriveFields fills the receipts with their computed fields based on consensus +// data and contextual infos like containing block and transactions. +func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error { + signer := MakeSigner(config, new(big.Int).SetUint64(number)) + + logIndex := uint(0) + if len(txs) != len(r) { + return errors.New("transaction and receipt count mismatch") + } + for i := 0; i < len(r); i++ { + // The transaction type and hash can be retrieved from the transaction itself + r[i].Type = txs[i].Type() + r[i].TxHash = txs[i].Hash() + + // block location fields + r[i].BlockHash = hash + r[i].BlockNumber = new(big.Int).SetUint64(number) + r[i].TransactionIndex = uint(i) + + // The contract address can be derived from the transaction itself + if txs[i].To() == nil { + // Deriving the signer is expensive, only do if it's actually needed + from, _ := Sender(signer, txs[i]) + r[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) + } + // The used gas can be calculated based on previous r + if i == 0 { + r[i].GasUsed = r[i].CumulativeGasUsed + } else { + r[i].GasUsed = r[i].CumulativeGasUsed - r[i-1].CumulativeGasUsed + } + // The derived log fields can simply be set 
from the block and transaction + for j := 0; j < len(r[i].Logs); j++ { + r[i].Logs[j].BlockNumber = number + r[i].Logs[j].BlockHash = hash + r[i].Logs[j].TxHash = r[i].TxHash + r[i].Logs[j].TxIndex = uint(i) + r[i].Logs[j].Index = logIndex + logIndex++ + } + } + return nil +} diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 82fec06c9667..1ca5a2864774 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -18,11 +18,14 @@ package types import ( "bytes" + "math" "math/big" "reflect" "testing" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/crypto" + "github.com/XinFinOrg/XDPoSChain/params" "github.com/XinFinOrg/XDPoSChain/rlp" ) @@ -141,6 +144,169 @@ func encodeAsV3StoredReceiptRLP(want *Receipt) ([]byte, error) { return rlp.EncodeToBytes(stored) } +// Tests that receipt data can be correctly derived from the contextual infos +func TestDeriveFields(t *testing.T) { + // Create a few transactions to have receipts for + to2 := common.HexToAddress("0x2") + to3 := common.HexToAddress("0x3") + txs := Transactions{ + NewTx(&LegacyTx{ + Nonce: 1, + Value: big.NewInt(1), + Gas: 1, + GasPrice: big.NewInt(1), + }), + NewTx(&LegacyTx{ + To: &to2, + Nonce: 2, + Value: big.NewInt(2), + Gas: 2, + GasPrice: big.NewInt(2), + }), + NewTx(&AccessListTx{ + To: &to3, + Nonce: 3, + Value: big.NewInt(3), + Gas: 3, + GasPrice: big.NewInt(3), + }), + } + // Create the corresponding receipts + receipts := Receipts{ + &Receipt{ + Status: ReceiptStatusFailed, + CumulativeGasUsed: 1, + Logs: []*Log{ + {Address: common.BytesToAddress([]byte{0x11})}, + {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + TxHash: txs[0].Hash(), + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: 1, + }, + &Receipt{ + PostState: common.Hash{2}.Bytes(), + CumulativeGasUsed: 3, + Logs: []*Log{ + {Address: common.BytesToAddress([]byte{0x22})}, + {Address: common.BytesToAddress([]byte{0x02, 0x22})}, + }, + 
TxHash: txs[1].Hash(), + ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), + GasUsed: 2, + }, + &Receipt{ + Type: AccessListTxType, + PostState: common.Hash{3}.Bytes(), + CumulativeGasUsed: 6, + Logs: []*Log{ + {Address: common.BytesToAddress([]byte{0x33})}, + {Address: common.BytesToAddress([]byte{0x03, 0x33})}, + }, + TxHash: txs[2].Hash(), + ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), + GasUsed: 3, + }, + } + // Clear all the computed fields and re-derive them + number := big.NewInt(1) + hash := common.BytesToHash([]byte{0x03, 0x14}) + + clearComputedFieldsOnReceipts(t, receipts) + if err := receipts.DeriveFields(params.TestChainConfig, hash, number.Uint64(), txs); err != nil { + t.Fatalf("DeriveFields(...) = %v, want ", err) + } + // Iterate over all the computed fields and check that they're correct + signer := MakeSigner(params.TestChainConfig, number) + + logIndex := uint(0) + for i := range receipts { + if receipts[i].Type != txs[i].Type() { + t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type()) + } + if receipts[i].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String()) + } + if receipts[i].BlockHash != hash { + t.Errorf("receipts[%d].BlockHash = %s, want %s", i, receipts[i].BlockHash.String(), hash.String()) + } + if receipts[i].BlockNumber.Cmp(number) != 0 { + t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, receipts[i].BlockNumber.String(), number.String()) + } + if receipts[i].TransactionIndex != uint(i) { + t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, receipts[i].TransactionIndex, i) + } + if receipts[i].GasUsed != txs[i].Gas() { + t.Errorf("receipts[%d].GasUsed = %d, want %d", i, receipts[i].GasUsed, txs[i].Gas()) + } + if txs[i].To() != nil && receipts[i].ContractAddress != (common.Address{}) { + t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), 
(common.Address{}).String()) + } + from, _ := Sender(signer, txs[i]) + contractAddress := crypto.CreateAddress(from, txs[i].Nonce()) + if txs[i].To() == nil && receipts[i].ContractAddress != contractAddress { + t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), contractAddress.String()) + } + for j := range receipts[i].Logs { + if receipts[i].Logs[j].BlockNumber != number.Uint64() { + t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64()) + } + if receipts[i].Logs[j].BlockHash != hash { + t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String()) + } + if receipts[i].Logs[j].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String()) + } + if receipts[i].Logs[j].TxIndex != uint(i) { + t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i) + } + if receipts[i].Logs[j].Index != logIndex { + t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex) + } + logIndex++ + } + } +} + +func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) { + t.Helper() + + for _, receipt := range receipts { + clearComputedFieldsOnReceipt(t, receipt) + } +} + +func clearComputedFieldsOnReceipt(t *testing.T, receipt *Receipt) { + t.Helper() + + receipt.TxHash = common.Hash{} + receipt.BlockHash = common.Hash{} + receipt.BlockNumber = big.NewInt(math.MaxUint32) + receipt.TransactionIndex = math.MaxUint32 + receipt.ContractAddress = common.Address{} + receipt.GasUsed = 0 + + clearComputedFieldsOnLogs(t, receipt.Logs) +} + +func clearComputedFieldsOnLogs(t *testing.T, logs []*Log) { + t.Helper() + + for _, log := range logs { + clearComputedFieldsOnLog(t, log) + } +} + +func clearComputedFieldsOnLog(t *testing.T, log *Log) { + t.Helper() + + 
log.BlockNumber = math.MaxUint32 + log.BlockHash = common.Hash{} + log.TxHash = common.Hash{} + log.TxIndex = math.MaxUint32 + log.Index = math.MaxUint32 +} + // TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt // rlp decoder, which failed due to a shadowing error. func TestTypedReceiptEncodingDecoding(t *testing.T) { diff --git a/core/types/transaction.go b/core/types/transaction.go index 983162943c52..1c02d70537c3 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -32,7 +32,6 @@ import ( ) //go:generate gencodec -type txdata -field-override txdataMarshaling -out gen_tx_json.go - var ( ErrInvalidSig = errors.New("invalid transaction v, r, s values") ErrUnexpectedProtection = errors.New("transaction type does not supported EIP-155 protected signatures") @@ -45,11 +44,11 @@ var ( errVYParityMissing = errors.New("missing 'yParity' or 'v' field in transaction") errEmptyTypedTx = errors.New("empty typed transaction bytes") errNoSigner = errors.New("missing signing methods") - skipNonceDestinationAddress = map[string]bool{ - common.XDCXAddr: true, - common.TradingStateAddr: true, - common.XDCXLendingAddress: true, - common.XDCXLendingFinalizedTradeAddress: true, + skipNonceDestinationAddress = map[common.Address]bool{ + common.XDCXAddrBinary: true, + common.TradingStateAddrBinary: true, + common.XDCXLendingAddressBinary: true, + common.XDCXLendingFinalizedTradeAddressBinary: true, } ) @@ -406,121 +405,68 @@ func (tx *Transaction) TxCost(number *big.Int) *big.Int { } func (tx *Transaction) IsSpecialTransaction() bool { - if tx.To() == nil { - return false - } - toBytes := tx.To().Bytes() - randomizeSMCBytes := common.HexToAddress(common.RandomizeSMC).Bytes() - blockSignersBytes := common.HexToAddress(common.BlockSigners).Bytes() - return bytes.Equal(toBytes, randomizeSMCBytes) || bytes.Equal(toBytes, blockSignersBytes) + to := tx.To() + return to != nil && (*to == common.RandomizeSMCBinary || *to == 
common.BlockSignersBinary) } func (tx *Transaction) IsTradingTransaction() bool { - if tx.To() == nil { - return false - } - - if tx.To().String() != common.XDCXAddr { - return false - } - - return true + to := tx.To() + return to != nil && *to == common.XDCXAddrBinary } func (tx *Transaction) IsLendingTransaction() bool { - if tx.To() == nil { - return false - } - - if tx.To().String() != common.XDCXLendingAddress { - return false - } - return true + to := tx.To() + return to != nil && *to == common.XDCXLendingAddressBinary } func (tx *Transaction) IsLendingFinalizedTradeTransaction() bool { - if tx.To() == nil { - return false - } - - if tx.To().String() != common.XDCXLendingFinalizedTradeAddress { - return false - } - return true + to := tx.To() + return to != nil && *to == common.XDCXLendingFinalizedTradeAddressBinary } func (tx *Transaction) IsSkipNonceTransaction() bool { - if tx.To() == nil { - return false - } - if skip := skipNonceDestinationAddress[tx.To().String()]; skip { - return true - } - return false + to := tx.To() + return to != nil && skipNonceDestinationAddress[*to] } func (tx *Transaction) IsSigningTransaction() bool { - if tx.To() == nil { - return false - } - - if tx.To().String() != common.BlockSigners { - return false - } - - method := common.ToHex(tx.Data()[0:4]) - - if method != common.SignMethod { + to := tx.To() + if to == nil || *to != common.BlockSignersBinary { return false } - - if len(tx.Data()) != (32*2 + 4) { + data := tx.Data() + if len(data) != (32*2 + 4) { return false } - - return true + method := common.ToHex(data[0:4]) + return method == common.SignMethod } func (tx *Transaction) IsVotingTransaction() (bool, *common.Address) { - if tx.To() == nil { + to := tx.To() + if to == nil || *to != common.MasternodeVotingSMCBinary { return false, nil } - b := (tx.To().String() == common.MasternodeVotingSMC) - - if !b { - return b, nil - } - - method := common.ToHex(tx.Data()[0:4]) - if b = (method == common.VoteMethod); b { - addr := 
tx.Data()[len(tx.Data())-20:] - m := common.BytesToAddress(addr) - return b, &m - } - - if b = (method == common.UnvoteMethod); b { - addr := tx.Data()[len(tx.Data())-32-20 : len(tx.Data())-32] - m := common.BytesToAddress(addr) - return b, &m - } - - if b = (method == common.ProposeMethod); b { - addr := tx.Data()[len(tx.Data())-20:] - m := common.BytesToAddress(addr) - return b, &m + var end int + data := tx.Data() + method := common.ToHex(data[0:4]) + if method == common.VoteMethod || method == common.ProposeMethod || method == common.ResignMethod { + end = len(data) + } else if method == common.UnvoteMethod { + end = len(data) - 32 + } else { + return false, nil } - if b = (method == common.ResignMethod); b { - addr := tx.Data()[len(tx.Data())-20:] - m := common.BytesToAddress(addr) - return b, &m - } + addr := data[end-20 : end] + m := common.BytesToAddress(addr) + return true, &m - return b, nil } func (tx *Transaction) IsXDCXApplyTransaction() bool { - if tx.To() == nil { + to := tx.To() + if to == nil { return false } @@ -528,26 +474,22 @@ func (tx *Transaction) IsXDCXApplyTransaction() bool { if common.IsTestnet { addr = common.XDCXListingSMCTestNet } - if tx.To().String() != addr.String() { - return false - } - - method := common.ToHex(tx.Data()[0:4]) - - if method != common.XDCXApplyMethod { + if *to != addr { return false } - + data := tx.Data() // 4 bytes for function name // 32 bytes for 1 parameter - if len(tx.Data()) != (32 + 4) { + if len(data) != (32 + 4) { return false } - return true + method := common.ToHex(data[0:4]) + return method == common.XDCXApplyMethod } func (tx *Transaction) IsXDCZApplyTransaction() bool { - if tx.To() == nil { + to := tx.To() + if to == nil { return false } @@ -555,22 +497,17 @@ func (tx *Transaction) IsXDCZApplyTransaction() bool { if common.IsTestnet { addr = common.TRC21IssuerSMCTestNet } - if tx.To().String() != addr.String() { + if *to != addr { return false } - - method := common.ToHex(tx.Data()[0:4]) - if 
method != common.XDCZApplyMethod { - return false - } - + data := tx.Data() // 4 bytes for function name // 32 bytes for 1 parameter - if len(tx.Data()) != (32 + 4) { + if len(data) != (32 + 4) { return false } - - return true + method := common.ToHex(data[0:4]) + return method == common.XDCZApplyMethod } func (tx *Transaction) String() string { @@ -841,3 +778,8 @@ func (m Message) CheckNonce() bool { return m.checkNonce } func (m Message) AccessList() AccessList { return m.accessList } func (m *Message) SetNonce(nonce uint64) { m.nonce = nonce } + +func (m *Message) SetBalanceTokenFeeForCall() { + m.balanceTokenFee = new(big.Int).SetUint64(m.gasLimit) + m.balanceTokenFee.Mul(m.balanceTokenFee, m.gasPrice) +} diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 500dec7227f9..b5b84be08ce9 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -540,7 +540,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } if orig.AccessList() != nil { if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) { - return fmt.Errorf("access list wrong!") + return errors.New("access list wrong!") } } return nil diff --git a/core/vm/access_list_tracer.go b/core/vm/access_list_tracer.go index 1093af7c4d76..97dd59fac8ce 100644 --- a/core/vm/access_list_tracer.go +++ b/core/vm/access_list_tracer.go @@ -96,7 +96,7 @@ func (al accessList) equal(other accessList) bool { func (al accessList) accessList() types.AccessList { acl := make(types.AccessList, 0, len(al)) for addr, slots := range al { - tuple := types.AccessTuple{Address: addr} + tuple := types.AccessTuple{Address: addr, StorageKeys: []common.Hash{}} for slot := range slots { tuple.StorageKeys = append(tuple.StorageKeys, slot) } @@ -166,6 +166,11 @@ func (*AccessListTracer) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {} +func (*AccessListTracer) 
CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} + +func (*AccessListTracer) CaptureExit(output []byte, gasUsed uint64, err error) {} + // AccessList returns the current accesslist maintained by the tracer. func (a *AccessListTracer) AccessList() types.AccessList { return a.list.accessList() diff --git a/core/vm/eips.go b/core/vm/eips.go index 69cca508b3af..0d7cc71f119f 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -24,26 +24,24 @@ import ( "github.com/holiman/uint256" ) +var activators = map[int]func(*JumpTable){ + 3855: enable3855, + 3198: enable3198, + 2929: enable2929, + 2200: enable2200, + 1884: enable1884, + 1344: enable1344, +} + // EnableEIP enables the given EIP on the config. // This operation writes in-place, and callers need to ensure that the globally // defined jump tables are not polluted. func EnableEIP(eipNum int, jt *JumpTable) error { - switch eipNum { - case 3855: - enable3855(jt) - case 3198: - enable3198(jt) - case 2929: - enable2929(jt) - case 2200: - enable2200(jt) - case 1884: - enable1884(jt) - case 1344: - enable1344(jt) - default: + enablerFn, ok := activators[eipNum] + if !ok { return fmt.Errorf("undefined eip %d", eipNum) } + enablerFn(jt) return nil } diff --git a/core/vm/errors.go b/core/vm/errors.go index 236e22568b58..02ce2a678b34 100644 --- a/core/vm/errors.go +++ b/core/vm/errors.go @@ -34,10 +34,12 @@ var ( ErrWriteProtection = errors.New("write protection") ErrReturnDataOutOfBounds = errors.New("return data out of bounds") ErrGasUintOverflow = errors.New("gas uint64 overflow") + ErrNonceUintOverflow = errors.New("nonce uint64 overflow") + ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") // errStopToken is an internal token indicating interpreter loop termination, // never returned to outside callers. 
- errStopToken = errors.New("stop token") + errStopToken = errors.New("stop token") ) // ErrStackUnderflow wraps an evm error when the items on the stack less diff --git a/core/vm/evm.go b/core/vm/evm.go index cdc0f9a48aeb..4f61fc3c80a0 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -232,32 +232,47 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas _, isPrecompile := evm.precompile2(addr) if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer - if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil) + if evm.vmConfig.Debug { + if evm.depth == 0 { + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) + evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil) + } else { + evm.vmConfig.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) + evm.vmConfig.Tracer.CaptureExit(ret, 0, nil) + } } return nil, gas, nil } evm.StateDB.CreateAccount(addr) } evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value) + + // Capture the tracer start/end events in debug mode + if evm.vmConfig.Debug { + if evm.depth == 0 { + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) + + defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters + evm.vmConfig.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) + }(gas, time.Now()) + } else { + // Handle tracer events for entering and exiting a call frame + evm.vmConfig.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) + defer func(startGas uint64) { + evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err) + }(gas) + } + } + // Initialise a new contract and set the code that is to be used by the EVM. 
// The contract is a scoped environment for this execution context only. contract := NewContract(caller, to, value, gas) contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) // Even if the account has no code, we need to continue because it might be a precompile - start := time.Now() - // Capture the tracer start/end events in debug mode - if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - - defer func() { // Lazy evaluation of the parameters - evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) - }() - } ret, err = run(evm, contract, input, false) + gas = contract.Gas // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally @@ -265,10 +280,10 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != ErrExecutionReverted { - contract.UseGas(contract.Gas) + gas = 0 } } - return ret, contract.Gas, err + return ret, gas, err } // CallCode executes the contract associated with the addr with the given input @@ -294,19 +309,29 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, snapshot = evm.StateDB.Snapshot() to = AccountRef(caller.Address()) ) + + // Invoke tracer hooks that signal entering/exiting a call frame + if evm.vmConfig.Debug { + evm.vmConfig.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value) + defer func(startGas uint64) { + evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err) + }(gas) + } + // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. 
contract := NewContract(caller, to, value, gas) contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) ret, err = run(evm, contract, input, false) + gas = contract.Gas if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != ErrExecutionReverted { - contract.UseGas(contract.Gas) + gas = 0 } } - return ret, contract.Gas, err + return ret, gas, err } // DelegateCall executes the contract associated with the addr with the given input @@ -323,18 +348,28 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by snapshot = evm.StateDB.Snapshot() to = AccountRef(caller.Address()) ) + + // Invoke tracer hooks that signal entering/exiting a call frame + if evm.vmConfig.Debug { + evm.vmConfig.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, nil) + defer func(startGas uint64) { + evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err) + }(gas) + } + // Initialise a new contract and make initialise the delegate values contract := NewContract(caller, to, nil, gas).AsDelegate() contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) ret, err = run(evm, contract, input, false) + gas = contract.Gas if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != ErrExecutionReverted { - contract.UseGas(contract.Gas) + gas = 0 } } - return ret, contract.Gas, err + return ret, gas, err } // StaticCall executes the contract associated with the addr with the given input @@ -363,17 +398,26 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte evm.StateDB.AddBalance(addr, big.NewInt(0)) } + // Invoke tracer hooks that signal entering/exiting a call frame + if evm.vmConfig.Debug { + evm.vmConfig.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil) + defer func(startGas uint64) { + evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err) + }(gas) + } + // When an error was returned by the EVM or when setting the creation code 
// above we revert to the snapshot and consume any gas remaining. Additionally // when we're in Homestead this also counts for code storage gas errors. ret, err = run(evm, contract, input, evm.ChainConfig().IsTIPXDCXCancellationFee(evm.BlockNumber)) + gas = contract.Gas if err != nil { evm.StateDB.RevertToSnapshot(snapshot) if err != ErrExecutionReverted { - contract.UseGas(contract.Gas) + gas = 0 } } - return ret, contract.Gas, err + return ret, gas, err } type codeAndHash struct { @@ -389,7 +433,7 @@ func (c *codeAndHash) Hash() common.Hash { } // create creates a new contract using code as deployment code. -func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) { +func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address, typ OpCode) ([]byte, common.Address, uint64, error) { // Depth check execution. Fail if we're trying to execute above the // limit. if evm.depth > int(params.CallCreateDepth) { @@ -399,6 +443,9 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, return nil, common.Address{}, gas, ErrInsufficientBalance } nonce := evm.StateDB.GetNonce(caller.Address()) + if nonce+1 < nonce { + return nil, common.Address{}, gas, ErrNonceUintOverflow + } evm.StateDB.SetNonce(caller.Address(), nonce+1) // We add this to the access list _before_ taking a snapshot. 
Even if the creation fails, // the access-list change should not be rolled back @@ -423,8 +470,12 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) - if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) + if evm.vmConfig.Debug { + if evm.depth == 0 { + evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) + } else { + evm.vmConfig.Tracer.CaptureEnter(typ, caller.Address(), address, codeAndHash.code, gas, value) + } } start := time.Now() @@ -435,6 +486,11 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, err = ErrMaxCodeSizeExceeded } + // Reject code starting with 0xEF if EIP-3541 is enabled. + if err == nil && len(ret) >= 1 && ret[0] == 0xEF && evm.chainRules.IsEIP1559 { + err = ErrInvalidCode + } + // if the contract creation ran successfully and no errors were returned // calculate the gas required to store the code. If the code could not // be stored due to not enough gas set an error and let it be handled @@ -458,8 +514,12 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - if evm.vmConfig.Debug && evm.depth == 0 { - evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) + if evm.vmConfig.Debug { + if evm.depth == 0 { + evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) + } else { + evm.vmConfig.Tracer.CaptureExit(ret, gas-contract.Gas, err) + } } return ret, address, contract.Gas, err } @@ -467,7 +527,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // Create creates a new contract using code as deployment code. 
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address())) - return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr) + return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr, CREATE) } // Create2 creates a new contract using code as deployment code. @@ -477,7 +537,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { codeAndHash := &codeAndHash{code: code} contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), codeAndHash.Hash().Bytes()) - return evm.create(caller, codeAndHash, gas, endowment, contractAddr) + return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2) } // ChainConfig returns the environment's chain configuration diff --git a/core/vm/gen_structlog.go b/core/vm/gen_structlog.go index 8a689c1ce1b1..74c4ec39b351 100644 --- a/core/vm/gen_structlog.go +++ b/core/vm/gen_structlog.go @@ -4,51 +4,40 @@ package vm import ( "encoding/json" - "math/big" "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/common/hexutil" - "github.com/XinFinOrg/XDPoSChain/common/math" + "github.com/holiman/uint256" ) -var _ = (*structLogMarshaling)(nil) - // MarshalJSON marshals as JSON. 
func (s StructLog) MarshalJSON() ([]byte, error) { type StructLog struct { Pc uint64 `json:"pc"` Op OpCode `json:"op"` - Gas math.HexOrDecimal64 `json:"gas"` - GasCost math.HexOrDecimal64 `json:"gasCost"` - Memory hexutil.Bytes `json:"memory"` + Gas uint64 `json:"gas"` + GasCost uint64 `json:"gasCost"` + Memory []byte `json:"memory"` MemorySize int `json:"memSize"` - Stack []*math.HexOrDecimal256 `json:"stack"` + Stack []uint256.Int `json:"stack"` + ReturnData []byte `json:"returnData"` Storage map[common.Hash]common.Hash `json:"-"` Depth int `json:"depth"` RefundCounter uint64 `json:"refund"` Err error `json:"-"` - OpName string `json:"opName"` - ErrorString string `json:"error"` } var enc StructLog enc.Pc = s.Pc enc.Op = s.Op - enc.Gas = math.HexOrDecimal64(s.Gas) - enc.GasCost = math.HexOrDecimal64(s.GasCost) + enc.Gas = s.Gas + enc.GasCost = s.GasCost enc.Memory = s.Memory enc.MemorySize = s.MemorySize - if s.Stack != nil { - enc.Stack = make([]*math.HexOrDecimal256, len(s.Stack)) - for k, v := range s.Stack { - enc.Stack[k] = (*math.HexOrDecimal256)(v) - } - } + enc.Stack = s.Stack + enc.ReturnData = s.ReturnData enc.Storage = s.Storage enc.Depth = s.Depth enc.RefundCounter = s.RefundCounter enc.Err = s.Err - enc.OpName = s.OpName() - enc.ErrorString = s.ErrorString() return json.Marshal(&enc) } @@ -57,11 +46,12 @@ func (s *StructLog) UnmarshalJSON(input []byte) error { type StructLog struct { Pc *uint64 `json:"pc"` Op *OpCode `json:"op"` - Gas *math.HexOrDecimal64 `json:"gas"` - GasCost *math.HexOrDecimal64 `json:"gasCost"` - Memory *hexutil.Bytes `json:"memory"` + Gas *uint64 `json:"gas"` + GasCost *uint64 `json:"gasCost"` + Memory []byte `json:"memory"` MemorySize *int `json:"memSize"` - Stack []*math.HexOrDecimal256 `json:"stack"` + Stack []uint256.Int `json:"stack"` + ReturnData []byte `json:"returnData"` Storage map[common.Hash]common.Hash `json:"-"` Depth *int `json:"depth"` RefundCounter *uint64 `json:"refund"` @@ -78,22 +68,22 @@ func (s *StructLog) 
UnmarshalJSON(input []byte) error { s.Op = *dec.Op } if dec.Gas != nil { - s.Gas = uint64(*dec.Gas) + s.Gas = *dec.Gas } if dec.GasCost != nil { - s.GasCost = uint64(*dec.GasCost) + s.GasCost = *dec.GasCost } if dec.Memory != nil { - s.Memory = *dec.Memory + s.Memory = dec.Memory } if dec.MemorySize != nil { s.MemorySize = *dec.MemorySize } if dec.Stack != nil { - s.Stack = make([]*big.Int, len(dec.Stack)) - for k, v := range dec.Stack { - s.Stack[k] = (*big.Int)(v) - } + s.Stack = dec.Stack + } + if dec.ReturnData != nil { + s.ReturnData = dec.ReturnData } if dec.Storage != nil { s.Storage = dec.Storage diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 39f0032abe84..b2bf296c1faf 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -821,6 +821,10 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(common.Address(beneficiary.Bytes20()), balance) interpreter.evm.StateDB.Suicide(scope.Contract.Address()) + if interpreter.cfg.Debug { + interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) + interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) + } return nil, errStopToken } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 9d16be1a664c..6e71411ac143 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -26,9 +26,9 @@ import ( // Config are the configuration options for the Interpreter type Config struct { - Debug bool // Enables debugging - Tracer Tracer // Opcode logger - EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages + Debug bool // Enables debugging + Tracer EVMLogger // Opcode logger + EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages JumpTable *JumpTable // EVM instruction table, automatically populated if unset @@ -181,9 +181,9 @@ func (in 
*EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( pc = uint64(0) // program counter cost uint64 // copies used by tracer - pcCopy uint64 // needed for the deferred Tracer - gasCopy uint64 // for Tracer to log gas remaining before execution - logged bool // deferred Tracer should ignore already logged steps + pcCopy uint64 // needed for the deferred EVMLogger + gasCopy uint64 // for EVMLogger to log gas remaining before execution + logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function ) contract.Input = input diff --git a/core/vm/logger.go b/core/vm/logger.go index a6a995eef942..e8294e3d674a 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -29,6 +29,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/common/math" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/params" + "github.com/holiman/uint256" ) // Storage represents a contract's storage. @@ -40,18 +41,17 @@ func (s Storage) Copy() Storage { for key, value := range s { cpy[key] = value } - return cpy } // LogConfig are the configuration options for structured logger the EVM type LogConfig struct { - DisableMemory bool // disable memory capture - DisableStack bool // disable stack capture - DisableStorage bool // disable storage capture - Debug bool // print output during capture end - Limit int // maximum length of output, but zero means unlimited - + EnableMemory bool // enable memory capture + DisableStack bool // disable stack capture + DisableStorage bool // disable storage capture + EnableReturnData bool // enable return data capture + Debug bool // print output during capture end + Limit int // maximum length of output, but zero means unlimited // Chain overrides, can be used to execute a trace using future fork rules Overrides *params.ChainConfig `json:"overrides,omitempty"` } @@ -67,7 +67,8 @@ type StructLog struct { GasCost uint64 `json:"gasCost"` Memory []byte `json:"memory"` 
MemorySize int `json:"memSize"` - Stack []*big.Int `json:"stack"` + Stack []uint256.Int `json:"stack"` + ReturnData []byte `json:"returnData"` Storage map[common.Hash]common.Hash `json:"-"` Depth int `json:"depth"` RefundCounter uint64 `json:"refund"` @@ -76,10 +77,10 @@ type StructLog struct { // overrides for gencodec type structLogMarshaling struct { - Stack []*math.HexOrDecimal256 Gas math.HexOrDecimal64 GasCost math.HexOrDecimal64 Memory hexutil.Bytes + ReturnData hexutil.Bytes OpName string `json:"opName"` // adds call to OpName() in MarshalJSON ErrorString string `json:"error"` // adds call to ErrorString() in MarshalJSON } @@ -97,19 +98,21 @@ func (s *StructLog) ErrorString() string { return "" } -// Tracer is used to collect execution traces from an EVM transaction +// EVMLogger is used to collect execution traces from an EVM transaction // execution. CaptureState is called for each step of the VM with the // current VM state. // Note that reference types are actual VM data structures; make copies // if you need to retain them beyond the current call. -type Tracer interface { +type EVMLogger interface { CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) + CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) + CaptureExit(output []byte, gasUsed uint64, err error) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) } -// StructLogger is an EVM state logger and implements Tracer. +// StructLogger is an EVM state logger and implements EVMLogger. 
// // StructLogger can capture state based on the given Log configuration and also keeps // a track record of modified storage which is used in reporting snapshots of the @@ -117,16 +120,16 @@ type Tracer interface { type StructLogger struct { cfg LogConfig - logs []StructLog - changedValues map[common.Address]Storage - output []byte - err error + storage map[common.Address]Storage + logs []StructLog + output []byte + err error } // NewStructLogger returns a new logger func NewStructLogger(cfg *LogConfig) *StructLogger { logger := &StructLogger{ - changedValues: make(map[common.Address]Storage), + storage: make(map[common.Address]Storage), } if cfg != nil { logger.cfg = *cfg @@ -134,7 +137,15 @@ func NewStructLogger(cfg *LogConfig) *StructLogger { return logger } -// CaptureStart implements the Tracer interface to initialize the tracing operation. +// Reset clears the data held by the logger. +func (l *StructLogger) Reset() { + l.storage = make(map[common.Address]Storage) + l.output = make([]byte, 0) + l.logs = l.logs[:0] + l.err = nil +} + +// CaptureStart implements the EVMLogger interface to initialize the tracing operation. func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } @@ -149,48 +160,57 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { return } - - // initialise new changed values storage container for this contract - // if not present. - if l.changedValues[contract.Address()] == nil { - l.changedValues[contract.Address()] = make(Storage) - } - - // capture SSTORE opcodes and determine the changed value and store - // it in the local storage container. 
- if op == SSTORE && stack.len() >= 2 { - var ( - value = common.Hash(stack.data[stack.len()-2].Bytes32()) - address = common.Hash(stack.data[stack.len()-1].Bytes32()) - ) - l.changedValues[contract.Address()][address] = value - } // Copy a snapshot of the current memory state to a new buffer var mem []byte - if !l.cfg.DisableMemory { + if l.cfg.EnableMemory { mem = make([]byte, len(memory.Data())) copy(mem, memory.Data()) } // Copy a snapshot of the current stack state to a new buffer - var stck []*big.Int + var stck []uint256.Int if !l.cfg.DisableStack { - stck = make([]*big.Int, len(stack.Data())) + stck = make([]uint256.Int, len(stack.Data())) for i, item := range stack.Data() { - stck[i] = new(big.Int).Set(item.ToBig()) + stck[i] = item } } // Copy a snapshot of the current storage to a new container var storage Storage - if !l.cfg.DisableStorage { - storage = l.changedValues[contract.Address()].Copy() + if !l.cfg.DisableStorage && (op == SLOAD || op == SSTORE) { + // initialise new changed values storage container for this contract + // if not present. + if l.storage[contract.Address()] == nil { + l.storage[contract.Address()] = make(Storage) + } + // capture SLOAD opcodes and record the read entry in the local storage + if op == SLOAD && stack.len() >= 1 { + var ( + address = common.Hash(stack.data[stack.len()-1].Bytes32()) + value = env.StateDB.GetState(contract.Address(), address) + ) + l.storage[contract.Address()][address] = value + storage = l.storage[contract.Address()].Copy() + } else if op == SSTORE && stack.len() >= 2 { + // capture SSTORE opcodes and record the written entry in the local storage. 
+ var ( + value = common.Hash(stack.data[stack.len()-2].Bytes32()) + address = common.Hash(stack.data[stack.len()-1].Bytes32()) + ) + l.storage[contract.Address()][address] = value + storage = l.storage[contract.Address()].Copy() + } + } + var rdata []byte + if l.cfg.EnableReturnData { + rdata = make([]byte, len(rData)) + copy(rdata, rData) } // create a new snapshot of the EVM. - log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, storage, depth, env.StateDB.GetRefund(), err} - + log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, env.StateDB.GetRefund(), err} l.logs = append(l.logs, log) } -// CaptureFault implements the Tracer interface to trace an execution fault +// CaptureFault implements the EVMLogger interface to trace an execution fault // while running an opcode. func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { } @@ -207,6 +227,11 @@ func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration } } +func (l *StructLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} + +func (l *StructLogger) CaptureExit(output []byte, gasUsed uint64, err error) {} + // StructLogs returns the captured log entries. 
func (l *StructLogger) StructLogs() []StructLog { return l.logs } @@ -228,7 +253,7 @@ func WriteTrace(writer io.Writer, logs []StructLog) { if len(log.Stack) > 0 { fmt.Fprintln(writer, "Stack:") for i := len(log.Stack) - 1; i >= 0; i-- { - fmt.Fprintf(writer, "%08d %x\n", len(log.Stack)-i-1, math.PaddedBigBytes(log.Stack[i], 32)) + fmt.Fprintf(writer, "%08d %s\n", len(log.Stack)-i-1, log.Stack[i].Hex()) } } if len(log.Memory) > 0 { @@ -241,6 +266,10 @@ func WriteTrace(writer io.Writer, logs []StructLog) { fmt.Fprintf(writer, "%x: %x\n", h, item) } } + if len(log.ReturnData) > 0 { + fmt.Fprintln(writer, "ReturnData:") + fmt.Fprint(writer, hex.Dump(log.ReturnData)) + } fmt.Fprintln(writer) } } @@ -300,7 +329,7 @@ func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64 // format stack var a []string for _, elem := range stack.data { - a = append(a, fmt.Sprintf("%v", elem.String())) + a = append(a, elem.Hex()) } b := fmt.Sprintf("[%v]", strings.Join(a, ",")) fmt.Fprintf(t.out, "%10v |", b) @@ -320,3 +349,8 @@ func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, e fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n", output, gasUsed, err) } + +func (t *mdLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} + +func (t *mdLogger) CaptureExit(output []byte, gasUsed uint64, err error) {} diff --git a/core/vm/logger_json.go b/core/vm/logger_json.go index 64bb3a9fe9e1..b2778a1b511a 100644 --- a/core/vm/logger_json.go +++ b/core/vm/logger_json.go @@ -57,21 +57,18 @@ func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint Gas: gas, GasCost: cost, MemorySize: memory.Len(), - Storage: nil, Depth: depth, RefundCounter: env.StateDB.GetRefund(), Err: err, } - if !l.cfg.DisableMemory { + if l.cfg.EnableMemory { log.Memory = memory.Data() } if !l.cfg.DisableStack { - //TODO(@holiman) improve this - logstack := 
make([]*big.Int, len(stack.Data())) - for i, item := range stack.Data() { - logstack[i] = item.ToBig() - } - log.Stack = logstack + log.Stack = stack.data + } + if l.cfg.EnableReturnData { + log.ReturnData = rData } l.encoder.Encode(log) } @@ -90,3 +87,8 @@ func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, } l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, errMsg}) } + +func (l *JSONLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} + +func (l *JSONLogger) CaptureExit(output []byte, gasUsed uint64, err error) {} diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go index ef5fb125100c..f668d068fb75 100644 --- a/core/vm/logger_test.go +++ b/core/vm/logger_test.go @@ -63,11 +63,11 @@ func TestStoreCapture(t *testing.T) { scope.Stack.push(new(uint256.Int)) var index common.Hash logger.CaptureState(env, 0, SSTORE, 0, 0, scope, nil, 0, nil) - if len(logger.changedValues[contract.Address()]) == 0 { - t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.changedValues[contract.Address()])) + if len(logger.storage[contract.Address()]) == 0 { + t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.storage[contract.Address()])) } exp := common.BigToHash(big.NewInt(1)) - if logger.changedValues[contract.Address()][index] != exp { - t.Errorf("expected %x, got %x", exp, logger.changedValues[contract.Address()][index]) + if logger.storage[contract.Address()][index] != exp { + t.Errorf("expected %x, got %x", exp, logger.storage[contract.Address()][index]) } } diff --git a/crypto/crypto.go b/crypto/crypto.go index 2872bb098bfe..f8387cb73317 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -142,11 +142,11 @@ func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) { // The priv.D must < N if priv.D.Cmp(secp256k1N) >= 0 { - return nil, 
fmt.Errorf("invalid private key, >=N") + return nil, errors.New("invalid private key, >=N") } // The priv.D must not be zero or negative. if priv.D.Sign() <= 0 { - return nil, fmt.Errorf("invalid private key, zero or negative") + return nil, errors.New("invalid private key, zero or negative") } priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d) @@ -205,7 +205,7 @@ func LoadECDSA(file string) (*ecdsa.PrivateKey, error) { if err != nil { return nil, err } else if n != len(buf) { - return nil, fmt.Errorf("key file too short, want 64 hex characters") + return nil, errors.New("key file too short, want 64 hex characters") } if err := checkKeyFileEnd(r); err != nil { return nil, err diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go index 1474181482b6..bb1c8d2ff4ac 100644 --- a/crypto/ecies/ecies.go +++ b/crypto/ecies/ecies.go @@ -35,6 +35,7 @@ import ( "crypto/elliptic" "crypto/hmac" "crypto/subtle" + "errors" "fmt" "hash" "io" @@ -42,12 +43,12 @@ import ( ) var ( - ErrImport = fmt.Errorf("ecies: failed to import key") - ErrInvalidCurve = fmt.Errorf("ecies: invalid elliptic curve") - ErrInvalidParams = fmt.Errorf("ecies: invalid ECIES parameters") - ErrInvalidPublicKey = fmt.Errorf("ecies: invalid public key") - ErrSharedKeyIsPointAtInfinity = fmt.Errorf("ecies: shared key is point at infinity") - ErrSharedKeyTooBig = fmt.Errorf("ecies: shared key params are too big") + ErrImport = errors.New("ecies: failed to import key") + ErrInvalidCurve = errors.New("ecies: invalid elliptic curve") + ErrInvalidParams = errors.New("ecies: invalid ECIES parameters") + ErrInvalidPublicKey = errors.New("ecies: invalid public key") + ErrSharedKeyIsPointAtInfinity = errors.New("ecies: shared key is point at infinity") + ErrSharedKeyTooBig = errors.New("ecies: shared key params are too big") ) // PublicKey is a representation of an elliptic curve public key. 
@@ -138,9 +139,9 @@ func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []b } var ( - ErrKeyDataTooLong = fmt.Errorf("ecies: can't supply requested key data") - ErrSharedTooLong = fmt.Errorf("ecies: shared secret is too long") - ErrInvalidMessage = fmt.Errorf("ecies: invalid message") + ErrKeyDataTooLong = errors.New("ecies: can't supply requested key data") + ErrSharedTooLong = errors.New("ecies: shared secret is too long") + ErrInvalidMessage = errors.New("ecies: invalid message") ) var ( diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index 15f5b392e568..0d3fb3bbaccc 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -35,6 +35,7 @@ import ( "crypto/rand" "crypto/sha256" "encoding/hex" + "errors" "fmt" "math/big" "testing" @@ -64,7 +65,7 @@ func TestKDF(t *testing.T) { } } -var ErrBadSharedKeys = fmt.Errorf("ecies: shared keys don't match") +var ErrBadSharedKeys = errors.New("ecies: shared keys don't match") // cmpParams compares a set of ECIES parameters. We assume, as per the // docs, that AES is the only supported symmetric encryption algorithm. 
diff --git a/crypto/ecies/params.go b/crypto/ecies/params.go index 969cc4a3bdba..bd5969f1a91d 100644 --- a/crypto/ecies/params.go +++ b/crypto/ecies/params.go @@ -39,7 +39,7 @@ import ( "crypto/elliptic" "crypto/sha256" "crypto/sha512" - "fmt" + "errors" "hash" ethcrypto "github.com/XinFinOrg/XDPoSChain/crypto" @@ -47,8 +47,8 @@ import ( var ( DefaultCurve = ethcrypto.S256() - ErrUnsupportedECDHAlgorithm = fmt.Errorf("ecies: unsupported ECDH algorithm") - ErrUnsupportedECIESParameters = fmt.Errorf("ecies: unsupported ECIES parameters") + ErrUnsupportedECDHAlgorithm = errors.New("ecies: unsupported ECDH algorithm") + ErrUnsupportedECIESParameters = errors.New("ecies: unsupported ECIES parameters") ) type ECIESParams struct { diff --git a/eth/api_backend.go b/eth/api_backend.go index c087ec2d3804..f9a585ba5e7f 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "math/big" "os" "path/filepath" @@ -40,6 +39,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/contracts" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/bloombits" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/state" stateDatabase "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" @@ -159,6 +159,17 @@ func (b *EthApiBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ return b.eth.blockchain.GetBlockByHash(hash), nil } +// GetBody returns body of a block. It does not resolve special block numbers. 
+func (b *EthApiBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if number < 0 || hash == (common.Hash{}) { + return nil, errors.New("invalid arguments; expect hash and no special block numbers") + } + if body := b.eth.blockchain.GetBody(hash); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (b *EthApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) @@ -180,6 +191,10 @@ func (b *EthApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r return nil, errors.New("invalid arguments; neither block nor hash specified") } +func (b *EthApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + return b.eth.miner.PendingBlockAndReceipts() +} + func (b *EthApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error) { // Pending state is only known by the miner if blockNr == rpc.PendingBlockNumber { @@ -229,16 +244,8 @@ func (b *EthApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash) return core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)), nil } -func (b *EthApiBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) { - receipts := core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)) - if receipts == nil { - return nil, nil - } - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - logs[i] = receipt.Logs - } - return logs, nil +func (b *EthApiBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + return rawdb.ReadLogs(b.eth.chainDb, hash, number), nil } func (b *EthApiBackend) GetTd(blockHash common.Hash) *big.Int { @@ -259,6 +266,10 @@ func 
(b *EthApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven return b.eth.BlockChain().SubscribeRemovedLogsEvent(ch) } +func (b *EthApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + return b.eth.miner.SubscribePendingLogs(ch) +} + func (b *EthApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return b.eth.BlockChain().SubscribeChainEvent(ch) } @@ -356,6 +367,10 @@ func (b *EthApiBackend) AccountManager() *accounts.Manager { return b.eth.AccountManager() } +func (b *EthApiBackend) RPCTxFeeCap() float64 { + return b.eth.config.RPCTxFeeCap +} + func (b *EthApiBackend) BloomStatus() (uint64, uint64) { sections, _, _ := b.eth.bloomIndexer.Sections() return params.BloomBitsBlocks, sections @@ -439,7 +454,11 @@ func (b *EthApiBackend) GetVotersRewards(masternodeAddr common.Address) map[comm state, err := chain.StateAt(lastCheckpointBlock.Root()) if err != nil { - fmt.Println("ERROR Trying to getting state at", lastCheckpointNumber, " Error ", err) + log.Error("fail to get state in GetVotersRewards", "lastCheckpointNumber", lastCheckpointNumber, "err", err) + return nil + } + if state == nil { + log.Error("fail to get state in GetVotersRewards", "lastCheckpointNumber", lastCheckpointNumber) + return nil + } @@ -499,7 +518,11 @@ func (b *EthApiBackend) GetVotersCap(checkpoint *big.Int, masterAddr common.Addr state, err := chain.StateAt(checkpointBlock.Root()) if err != nil { - fmt.Println("ERROR Trying to getting state at", checkpoint, " Error ", err) + log.Error("fail to get state in GetVotersCap", "checkpoint", checkpoint, "err", err) + return nil + } + if state == nil { + log.Error("fail to get state in GetVotersCap", "checkpoint", checkpoint) + return nil + } @@ -529,9 +552,12 @@ func (b *EthApiBackend) GetEpochDuration() *big.Int { func (b *EthApiBackend) GetMasternodesCap(checkpoint uint64) map[common.Address]*big.Int { checkpointBlock := b.eth.blockchain.GetBlockByNumber(checkpoint) state, err :=
b.eth.blockchain.StateAt(checkpointBlock.Root()) if err != nil { - fmt.Println("ERROR Trying to getting state at", checkpoint, " Error ", err) + log.Error("fail to get state in GetMasternodesCap", "checkpoint", checkpoint, "err", err) + return nil + } + if state == nil { + log.Error("fail to get state in GetMasternodesCap", "checkpoint", checkpoint) + return nil + } diff --git a/eth/api_tracer.go b/eth/api_tracer.go index 78788e41bb1f..d0f5e8f14b63 100644 --- a/eth/api_tracer.go +++ b/eth/api_tracer.go @@ -62,13 +62,6 @@ type TraceConfig struct { Reexec *uint64 } -// txTraceContext is the contextual infos about a transaction before it gets run. -type txTraceContext struct { - index int // Index of the transaction within the block - hash common.Hash // Hash of the transaction - block common.Hash // Hash of the block containing the transaction -} - // TraceCallConfig is the config for traceCall API. It holds one more // field to override the state for tracing. type TraceCallConfig struct { @@ -106,6 +99,32 @@ type txTraceTask struct { index int // Transaction offset in the block } +// blockByNumber is the wrapper of the chain access function offered by the backend. +// It will return an error if the block is not found. +func (api *PrivateDebugAPI) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + block, err := api.eth.ApiBackend.BlockByNumber(ctx, number) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("block #%d not found", number) + } + return block, nil +} + +// blockByHash is the wrapper of the chain access function offered by the backend. +// It will return an error if the block is not found.
+func (api *PrivateDebugAPI) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + block, err := api.eth.ApiBackend.BlockByHash(ctx, hash) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("block %s not found", hash.Hex()) + } + return block, nil +} + // TraceChain returns the structured logs created during the execution of EVM // between two blocks (excluding start) and returns them as a JSON object. func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) { @@ -223,10 +242,10 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl } } msg, _ := tx.AsMessage(signer, balacne, task.block.Number()) - txctx := &txTraceContext{ - index: i, - hash: tx.Hash(), - block: task.block.Hash(), + txctx := &tracers.Context{ + BlockHash: task.block.Hash(), + TxIndex: i, + TxHash: tx.Hash(), } vmctx := core.NewEVMContext(msg, task.block.Header(), api.eth.blockchain, nil) @@ -469,10 +488,10 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, } } msg, _ := txs[task.index].AsMessage(signer, balacne, block.Number()) - txctx := &txTraceContext{ - index: task.index, - hash: txs[task.index].Hash(), - block: blockHash, + txctx := &tracers.Context{ + BlockHash: blockHash, + TxIndex: task.index, + TxHash: txs[task.index].Hash(), } vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil) @@ -621,10 +640,10 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha return nil, err } - txctx := &txTraceContext{ - index: int(index), - hash: hash, - block: blockHash, + txctx := &tracers.Context{ + BlockHash: blockHash, + TxIndex: int(index), + TxHash: hash, } return api.traceTx(ctx, msg, txctx, vmctx, statedb, config) } @@ -633,14 +652,14 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha // created during the execution of EVM if 
the given transaction was added on // top of the provided block and returns them as a JSON object. // You can provide -2 as a block number to trace on top of the pending block. -func (api *PrivateDebugAPI) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceCallConfig) (interface{}, error) { +func (api *PrivateDebugAPI) TraceCall(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceCallConfig) (interface{}, error) { // Try to retrieve the specified block var ( err error block *types.Block ) if hash, ok := blockNrOrHash.Hash(); ok { - block, err = api.eth.ApiBackend.BlockByHash(ctx, hash) + block, err = api.blockByHash(ctx, hash) } else if number, ok := blockNrOrHash.Number(); ok { if number == rpc.PendingBlockNumber { // We don't have access to the miner here. For tracing 'future' transactions, @@ -650,7 +669,7 @@ func (api *PrivateDebugAPI) TraceCall(ctx context.Context, args ethapi.CallArgs, // of what the next actual block is likely to contain. return nil, errors.New("tracing on top of pending is not supported") } - block, err = api.eth.ApiBackend.BlockByNumber(ctx, number) + block, err = api.blockByNumber(ctx, number) } else { return nil, errors.New("invalid arguments; neither block nor hash specified") } @@ -679,20 +698,22 @@ func (api *PrivateDebugAPI) TraceCall(ctx context.Context, args ethapi.CallArgs, if config != nil { traceConfig = &config.TraceConfig } - return api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, traceConfig) + return api.traceTx(ctx, msg, new(tracers.Context), vmctx, statedb, traceConfig) } // traceTx configures a new tracer according to the provided configuration, and // executes the given message in the provided environment. The return value will // be tracer dependent. 
-func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, txctx *txTraceContext, vmctx vm.Context, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, txctx *tracers.Context, vmctx vm.Context, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { // Assemble the structured logger or the JavaScript tracer var ( - tracer vm.Tracer + tracer vm.EVMLogger err error ) switch { - case config != nil && config.Tracer != nil: + case config == nil: + tracer = vm.NewStructLogger(nil) + case config.Tracer != nil: // Define a meaningful timeout of a single transaction trace timeout := defaultTraceTimeout if config.Timeout != nil { @@ -700,20 +721,19 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, t return nil, err } } - // Constuct the JavaScript tracer to execute with - if tracer, err = tracers.New(*config.Tracer); err != nil { + if t, err := tracers.New(*config.Tracer, txctx); err != nil { return nil, err + } else { + deadlineCtx, cancel := context.WithTimeout(ctx, timeout) + go func() { + <-deadlineCtx.Done() + if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { + t.Stop(errors.New("execution timeout")) + } + }() + defer cancel() + tracer = t } - // Handle timeouts and RPC cancellations - deadlineCtx, cancel := context.WithTimeout(ctx, timeout) - go func() { - <-deadlineCtx.Done() - tracer.(*tracers.Tracer).Stop(errors.New("execution timeout")) - }() - defer cancel() - - case config == nil: - tracer = vm.NewStructLogger(nil) default: tracer = vm.NewStructLogger(config.LogConfig) @@ -722,7 +742,7 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, t vmenv := vm.NewEVM(vmctx, statedb, nil, api.config, vm.Config{Debug: true, Tracer: tracer}) // Call Prepare to clear out the statedb access list - statedb.Prepare(txctx.hash, txctx.block, txctx.index) + 
statedb.Prepare(txctx.TxHash, txctx.BlockHash, txctx.TxIndex) owner := common.Address{} ret, gas, failed, err, _ := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()), owner) @@ -739,7 +759,7 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, t StructLogs: ethapi.FormatLogs(tracer.StructLogs()), }, nil - case *tracers.Tracer: + case tracers.Tracer: return tracer.GetResult() default: @@ -765,7 +785,7 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree // Recompute transactions up to the target index. feeCapacity := state.GetTRC21FeeCapacityFromState(statedb) if common.TIPSigning.Cmp(block.Header().Number) == 0 { - statedb.DeleteAddress(common.HexToAddress(common.BlockSigners)) + statedb.DeleteAddress(common.BlockSignersBinary) } core.InitSignerInTransactions(api.config, block.Header(), block.Transactions()) balanceUpdated := map[common.Address]*big.Int{} diff --git a/eth/backend.go b/eth/backend.go index 91c60d40d9ee..45e1bd95ec1a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -25,15 +25,11 @@ import ( "sync" "sync/atomic" + "github.com/XinFinOrg/XDPoSChain/XDCx" "github.com/XinFinOrg/XDPoSChain/XDCxlending" - - "github.com/XinFinOrg/XDPoSChain/common/hexutil" - "github.com/XinFinOrg/XDPoSChain/eth/filters" - "github.com/XinFinOrg/XDPoSChain/eth/hooks" - "github.com/XinFinOrg/XDPoSChain/rlp" - "github.com/XinFinOrg/XDPoSChain/accounts" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/hexutil" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS" "github.com/XinFinOrg/XDPoSChain/consensus/XDPoS/utils" @@ -41,12 +37,13 @@ import ( "github.com/XinFinOrg/XDPoSChain/contracts" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/bloombits" - - "github.com/XinFinOrg/XDPoSChain/XDCx" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/core/vm" 
"github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" + "github.com/XinFinOrg/XDPoSChain/eth/filters" "github.com/XinFinOrg/XDPoSChain/eth/gasprice" + "github.com/XinFinOrg/XDPoSChain/eth/hooks" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/internal/ethapi" @@ -55,6 +52,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/node" "github.com/XinFinOrg/XDPoSChain/p2p" "github.com/XinFinOrg/XDPoSChain/params" + "github.com/XinFinOrg/XDPoSChain/rlp" "github.com/XinFinOrg/XDPoSChain/rpc" ) @@ -67,7 +65,7 @@ type LesServer interface { // Ethereum implements the Ethereum full node service. type Ethereum struct { - config *Config + config *ethconfig.Config chainConfig *params.ChainConfig // Channel for shutting down the service @@ -112,7 +110,7 @@ func (s *Ethereum) AddLesServer(ls LesServer) { // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) -func New(ctx *node.ServiceContext, config *Config, XDCXServ *XDCx.XDCX, lendingServ *XDCxlending.Lending) (*Ethereum, error) { +func New(ctx *node.ServiceContext, config *ethconfig.Config, XDCXServ *XDCx.XDCX, lendingServ *XDCxlending.Lending) (*Ethereum, error) { if config.SyncMode == downloader.LightSync { return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum") } @@ -191,11 +189,14 @@ func New(ctx *node.ServiceContext, config *Config, XDCXServ *XDCx.XDCX, lendingS eth.txPool = core.NewTxPool(config.TxPool, eth.chainConfig, eth.blockchain) eth.orderPool = core.NewOrderPool(eth.chainConfig, eth.blockchain) eth.lendingPool = core.NewLendingPool(eth.chainConfig, eth.blockchain) - if common.RollbackHash != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") { + if common.RollbackHash != (common.Hash{}) { curBlock := eth.blockchain.CurrentBlock() + if curBlock == nil { + log.Warn("not find current block when 
rollback") + } prevBlock := eth.blockchain.GetBlockByHash(common.RollbackHash) - if curBlock.NumberU64() > prevBlock.NumberU64() { + if curBlock != nil && prevBlock != nil && curBlock.NumberU64() > prevBlock.NumberU64() { for ; curBlock != nil && curBlock.NumberU64() != prevBlock.NumberU64(); curBlock = eth.blockchain.GetBlock(curBlock.ParentHash(), curBlock.NumberU64()-1) { eth.blockchain.Rollback([]common.Hash{curBlock.Hash()}) } @@ -207,6 +208,8 @@ func New(ctx *node.ServiceContext, config *Config, XDCXServ *XDCx.XDCX, lendingS log.Crit("Err Rollback", "err", err) return nil, err } + } else { + log.Error("skip SetHead because target block is nil when rollback") } } @@ -326,7 +329,7 @@ func makeExtraData(extra []byte) []byte { } // CreateDB creates the chain database. -func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Database, error) { +func CreateDB(ctx *node.ServiceContext, config *ethconfig.Config, name string) (ethdb.Database, error) { db, err := ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles) if err != nil { return nil, err @@ -399,7 +402,7 @@ func (s *Ethereum) APIs() []rpc.API { }, { Namespace: "eth", Version: "1.0", - Service: filters.NewPublicFilterAPI(s.ApiBackend, false), + Service: filters.NewFilterAPI(filters.NewFilterSystem(s.ApiBackend, filters.Config{LogCacheSize: s.config.FilterLogCacheSize}), false), Public: true, }, { Namespace: "admin", @@ -447,7 +450,7 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) { return etherbase, nil } } - return common.Address{}, fmt.Errorf("etherbase must be explicitly specified") + return common.Address{}, errors.New("etherbase must be explicitly specified") } // set in js console via admin interface or wrapper from cli flags @@ -475,33 +478,11 @@ func (s *Ethereum) ValidateMasternode() (bool, error) { return false, nil } } else { - return false, fmt.Errorf("Only verify masternode permission in XDPoS protocol") + return false, errors.New("Only verify 
masternode permission in XDPoS protocol") } return true, nil } -// ValidateMasternodeTestNet checks if node's address is in set of masternodes in Testnet -func (s *Ethereum) ValidateMasternodeTestnet() (bool, error) { - eb, err := s.Etherbase() - if err != nil { - return false, err - } - if s.chainConfig.XDPoS == nil { - return false, fmt.Errorf("Only verify masternode permission in XDPoS protocol") - } - masternodes := []common.Address{ - common.HexToAddress("0x3Ea0A3555f9B1dE983572BfF6444aeb1899eC58C"), - common.HexToAddress("0x4F7900282F3d371d585ab1361205B0940aB1789C"), - common.HexToAddress("0x942a5885A8844Ee5587C8AC5e371Fc39FFE61896"), - } - for _, m := range masternodes { - if m == eb { - return true, nil - } - } - return false, nil -} - func (s *Ethereum) StartStaking(local bool) error { eb, err := s.Etherbase() if err != nil { diff --git a/eth/bft/bft_handler_test.go b/eth/bft/bft_handler_test.go index 5426fc5699ac..11d052ba3f72 100644 --- a/eth/bft/bft_handler_test.go +++ b/eth/bft/bft_handler_test.go @@ -1,7 +1,7 @@ package bft import ( - "fmt" + "errors" "math/big" "sync/atomic" "testing" @@ -102,7 +102,7 @@ func TestNotBoardcastInvalidVote(t *testing.T) { targetVotes := 0 tester.bfter.consensus.verifyVote = func(chain consensus.ChainReader, vote *types.Vote) (bool, error) { - return false, fmt.Errorf("This is invalid vote") + return false, errors.New("This is invalid vote") } tester.bfter.consensus.voteHandler = func(chain consensus.ChainReader, vote *types.Vote) error { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 1df5ab80a5cf..bbf5889d0e9f 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -1347,7 +1347,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { defer tester.terminate() for i, tt := range tests { - // Register a new peer and ensure it's presence + // Register a new peer and ensure its presence id := fmt.Sprintf("test %d", i) if err := 
tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil { t.Fatalf("test %d: failed to register new peer: %v", i, err) diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index b9f16e673f94..7f55ce733574 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -235,8 +235,7 @@ func (q *queue) ShouldThrottleReceipts() bool { } // resultSlots calculates the number of results slots available for requests -// whilst adhering to both the item and the memory limit too of the results -// cache. +// whilst adhering to both the item and the memory limits of the result cache. func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int { // Calculate the maximum length capped by the memory limit limit := len(q.resultCache) @@ -349,7 +348,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { } // Results retrieves and permanently removes a batch of fetch results from -// the cache. the result slice will be empty if the queue has been closed. +// the cache. The result slice will be empty if the queue has been closed. func (q *queue) Results(block bool) []*fetchResult { q.lock.Lock() defer q.lock.Unlock() @@ -511,7 +510,6 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common index := int(header.Number.Int64() - int64(q.resultOffset)) if index >= len(q.resultCache) || index < 0 { log.Error("index allocation went beyond available resultCache space", "index", index, "len.resultCache", len(q.resultCache), "blockNum", header.Number.Int64(), "resultOffset", q.resultOffset) - common.Report("index allocation went beyond available resultCache space") return nil, false, errInvalidChain } if q.resultCache[index] == nil { @@ -566,26 +564,29 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common // CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue. 
func (q *queue) CancelHeaders(request *fetchRequest) { + q.lock.Lock() + defer q.lock.Unlock() q.cancel(request, q.headerTaskQueue, q.headerPendPool) } // CancelBodies aborts a body fetch request, returning all pending headers to the // task queue. func (q *queue) CancelBodies(request *fetchRequest) { + q.lock.Lock() + defer q.lock.Unlock() q.cancel(request, q.blockTaskQueue, q.blockPendPool) } // CancelReceipts aborts a body fetch request, returning all pending headers to // the task queue. func (q *queue) CancelReceipts(request *fetchRequest) { + q.lock.Lock() + defer q.lock.Unlock() q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) } // Cancel aborts a fetch request, returning all pending hashes to the task queue. func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - if request.From > 0 { taskQueue.Push(request.From, -int64(request.From)) } diff --git a/eth/config.go b/eth/ethconfig/config.go similarity index 67% rename from eth/config.go rename to eth/ethconfig/config.go index 73a95aa6756f..73cc4e7cddbe 100644 --- a/eth/config.go +++ b/eth/ethconfig/config.go @@ -14,7 +14,8 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package eth +// Package ethconfig contains the configuration of the ETH and LES protocols. +package ethconfig import ( "math/big" @@ -33,8 +34,24 @@ import ( "github.com/XinFinOrg/XDPoSChain/params" ) -// DefaultConfig contains default settings for use on the Ethereum main net. -var DefaultConfig = Config{ +// FullNodeGPO contains default gasprice oracle settings for full node. +var FullNodeGPO = gasprice.Config{ + Blocks: 20, + Percentile: 60, + MaxPrice: gasprice.DefaultMaxPrice, + IgnorePrice: gasprice.DefaultIgnorePrice, +} + +// LightClientGPO contains default gasprice oracle settings for light client. 
+var LightClientGPO = gasprice.Config{ + Blocks: 2, + Percentile: 60, + MaxPrice: gasprice.DefaultMaxPrice, + IgnorePrice: gasprice.DefaultIgnorePrice, +} + +// Defaults contains default settings for use on the Ethereum main net. +var Defaults = Config{ SyncMode: downloader.FullSync, Ethash: ethash.Config{ CacheDir: "ethash", @@ -43,19 +60,18 @@ var DefaultConfig = Config{ DatasetsInMem: 1, DatasetsOnDisk: 2, }, - NetworkId: 88, - LightPeers: 100, - DatabaseCache: 768, - TrieCache: 256, - TrieTimeout: 5 * time.Minute, - GasPrice: big.NewInt(0.25 * params.Shannon), - - TxPool: core.DefaultTxPoolConfig, - RPCGasCap: 25000000, - GPO: gasprice.Config{ - Blocks: 20, - Percentile: 60, - }, + NetworkId: 88, + LightPeers: 100, + DatabaseCache: 768, + TrieCache: 256, + TrieTimeout: 5 * time.Minute, + FilterLogCacheSize: 32, + GasPrice: big.NewInt(0.25 * params.Shannon), + + TxPool: core.DefaultTxPoolConfig, + RPCGasCap: 25000000, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether } func init() { @@ -66,14 +82,15 @@ func init() { } } if runtime.GOOS == "windows" { - DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash") + Defaults.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash") } else { - DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash") + Defaults.Ethash.DatasetDir = filepath.Join(home, ".ethash") } } //go:generate gencodec -type Config -field-override configMarshaling -formats toml -out gen_config.go +// Config contains configuration options for of the ETH and LES protocols. type Config struct { // The genesis block, which is inserted if the database is empty. // If nil, the Ethereum main net block is used. @@ -95,6 +112,9 @@ type Config struct { TrieCache int TrieTimeout time.Duration + // This is the number of blocks for which logs will be cached in the filter system. 
+ FilterLogCacheSize int + // Mining-related options Etherbase common.Address `toml:",omitempty"` MinerThreads int `toml:",omitempty"` @@ -118,6 +138,10 @@ type Config struct { // RPCGasCap is the global gas cap for eth-call variants. RPCGasCap uint64 + + // RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for + // send-transction variants. The unit is ether. + RPCTxFeeCap float64 } type configMarshaling struct { diff --git a/eth/gen_config.go b/eth/ethconfig/gen_config.go similarity index 92% rename from eth/gen_config.go rename to eth/ethconfig/gen_config.go index 1959ee559d53..6b27542f1933 100644 --- a/eth/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -1,6 +1,6 @@ // Code generated by github.com/fjl/gencodec. DO NOT EDIT. -package eth +package ethconfig import ( "math/big" @@ -31,12 +31,14 @@ func (c Config) MarshalTOML() (interface{}, error) { MinerThreads int `toml:",omitempty"` ExtraData []byte `toml:",omitempty"` GasPrice *big.Int + FilterLogCacheSize int Ethash ethash.Config TxPool core.TxPoolConfig GPO gasprice.Config EnablePreimageRecording bool DocRoot string `toml:"-"` RPCGasCap uint64 + RPCTxFeeCap float64 } var enc Config enc.Genesis = c.Genesis @@ -54,12 +56,14 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.MinerThreads = c.MinerThreads enc.ExtraData = c.ExtraData enc.GasPrice = c.GasPrice + enc.FilterLogCacheSize = c.FilterLogCacheSize enc.Ethash = c.Ethash enc.TxPool = c.TxPool enc.GPO = c.GPO enc.EnablePreimageRecording = c.EnablePreimageRecording enc.DocRoot = c.DocRoot enc.RPCGasCap = c.RPCGasCap + enc.RPCTxFeeCap = c.RPCTxFeeCap return &enc, nil } @@ -81,12 +85,14 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { MinerThreads *int `toml:",omitempty"` ExtraData []byte `toml:",omitempty"` GasPrice *big.Int + FilterLogCacheSize *int Ethash *ethash.Config TxPool *core.TxPoolConfig GPO *gasprice.Config EnablePreimageRecording *bool DocRoot *string `toml:"-"` RPCGasCap *uint64 + RPCTxFeeCap 
*float64 } var dec Config if err := unmarshal(&dec); err != nil { @@ -137,6 +143,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.GasPrice != nil { c.GasPrice = dec.GasPrice } + if dec.FilterLogCacheSize != nil { + c.FilterLogCacheSize = *dec.FilterLogCacheSize + } if dec.Ethash != nil { c.Ethash = *dec.Ethash } @@ -155,5 +164,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.RPCGasCap != nil { c.RPCGasCap = *dec.RPCGasCap } + if dec.RPCTxFeeCap != nil { + c.RPCTxFeeCap = *dec.RPCTxFeeCap + } return nil } diff --git a/eth/filters/api.go b/eth/filters/api.go index 952598046d2c..7762ee199286 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -29,8 +29,8 @@ import ( "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/common/hexutil" "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/ethdb" - "github.com/XinFinOrg/XDPoSChain/event" + + // "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/rpc" ) @@ -41,10 +41,6 @@ var ( // The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0 const maxTopics = 4 -var ( - deadline = 5 * time.Minute // consider a filter inactive if it has not been polled for within deadline -) - // filter is a helper struct that holds meta information over the filter type // and associated subscription in the event system. type filter struct { @@ -56,49 +52,55 @@ type filter struct { s *Subscription // associated subscription in event system } -// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various +// FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various // information related to the Ethereum protocol such als blocks, transactions and logs. 
-type PublicFilterAPI struct { - backend Backend - mux *event.TypeMux - quit chan struct{} - chainDb ethdb.Database +type FilterAPI struct { + sys *FilterSystem events *EventSystem filtersMu sync.Mutex filters map[rpc.ID]*filter + timeout time.Duration } -// NewPublicFilterAPI returns a new PublicFilterAPI instance. -func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI { - api := &PublicFilterAPI{ - backend: backend, - mux: backend.EventMux(), - chainDb: backend.ChainDb(), - events: NewEventSystem(backend.EventMux(), backend, lightMode), +// NewFilterAPI returns a new FilterAPI instance. +func NewFilterAPI(system *FilterSystem, lightMode bool) *FilterAPI { + api := &FilterAPI{ + sys: system, + events: NewEventSystem(system, lightMode), filters: make(map[rpc.ID]*filter), + timeout: system.cfg.Timeout, } - go api.timeoutLoop() + go api.timeoutLoop(system.cfg.Timeout) return api } // timeoutLoop runs every 5 minutes and deletes filters that have not been recently used. // Tt is started when the api is created. -func (api *PublicFilterAPI) timeoutLoop() { - ticker := time.NewTicker(5 * time.Minute) +func (api *FilterAPI) timeoutLoop(timeout time.Duration) { + var toUninstall []*Subscription + ticker := time.NewTicker(timeout) for { <-ticker.C api.filtersMu.Lock() for id, f := range api.filters { select { case <-f.deadline.C: - f.s.Unsubscribe() + toUninstall = append(toUninstall, f.s) delete(api.filters, id) default: continue } } api.filtersMu.Unlock() + + // Unsubscribes are processed outside the lock to avoid the following scenario: + // event loop attempts broadcasting events to still active filters while + // Unsubscribe is waiting for it to process the uninstall request. + for _, s := range toUninstall { + s.Unsubscribe() + } + toUninstall = nil } } @@ -109,14 +111,14 @@ func (api *PublicFilterAPI) timeoutLoop() { // `eth_getFilterChanges` polling method that is also used for log filters. 
// // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter -func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID { +func (api *FilterAPI) NewPendingTransactionFilter() rpc.ID { var ( pendingTxs = make(chan []common.Hash) pendingTxSub = api.events.SubscribePendingTxs(pendingTxs) ) api.filtersMu.Lock() - api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(deadline), hashes: make([]common.Hash, 0), s: pendingTxSub} + api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub} api.filtersMu.Unlock() go func() { @@ -142,7 +144,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID { // NewPendingTransactions creates a subscription that is triggered each time a transaction // enters the transaction pool and was signed from one of the transactions this nodes manages. -func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) { +func (api *FilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -179,14 +181,14 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su // It is part of the filter package since polling goes with eth_getFilterChanges. 
// // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newblockfilter -func (api *PublicFilterAPI) NewBlockFilter() rpc.ID { +func (api *FilterAPI) NewBlockFilter() rpc.ID { var ( headers = make(chan *types.Header) headerSub = api.events.SubscribeNewHeads(headers) ) api.filtersMu.Lock() - api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(deadline), hashes: make([]common.Hash, 0), s: headerSub} + api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub} api.filtersMu.Unlock() go func() { @@ -211,7 +213,7 @@ func (api *PublicFilterAPI) NewBlockFilter() rpc.ID { } // NewHeads send a notification each time a new (header) block is appended to the chain. -func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { +func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -241,7 +243,7 @@ func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, er } // Logs creates a subscription that fires for all new log that match the given filter criteria. -func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) { +func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -295,7 +297,7 @@ type FilterCriteria ethereum.FilterQuery // In case "fromBlock" > "toBlock" an error is returned. 
// // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter -func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { +func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { logs := make(chan []*types.Log) logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs) if err != nil { @@ -303,7 +305,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { } api.filtersMu.Lock() - api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*types.Log, 0), s: logsSub} + api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub} api.filtersMu.Unlock() go func() { @@ -330,7 +332,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { // GetLogs returns logs matching the given argument that are stored within the state. // // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs -func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { +func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { if len(crit.Topics) > maxTopics { return nil, errExceedMaxTopics } @@ -338,7 +340,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([ var filter *Filter if crit.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics) + filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics) } else { // Convert the RPC block numbers into internal representations begin := rpc.LatestBlockNumber.Int64() @@ -350,7 +352,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([ end = crit.ToBlock.Int64() } // Construct the range filter - filter = NewRangeFilter(api.backend, begin, end, 
crit.Addresses, crit.Topics) + filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics) } // Run the filter and return all the logs logs, err := filter.Logs(ctx) @@ -363,7 +365,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([ // UninstallFilter removes the filter with the given filter id. // // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_uninstallfilter -func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool { +func (api *FilterAPI) UninstallFilter(id rpc.ID) bool { api.filtersMu.Lock() f, found := api.filters[id] if found { @@ -381,19 +383,19 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool { // If the filter could not be found an empty array of logs is returned. // // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs -func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) { +func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) { api.filtersMu.Lock() f, found := api.filters[id] api.filtersMu.Unlock() if !found || f.typ != LogsSubscription { - return nil, fmt.Errorf("filter not found") + return nil, errors.New("filter not found") } var filter *Filter if f.crit.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics) + filter = api.sys.NewBlockFilter(*f.crit.BlockHash, f.crit.Addresses, f.crit.Topics) } else { // Convert the RPC block numbers into internal representations begin := rpc.LatestBlockNumber.Int64() @@ -405,7 +407,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty end = f.crit.ToBlock.Int64() } // Construct the range filter - filter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics) + filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics) } // Run the filter and return all the logs logs, err := 
filter.Logs(ctx) @@ -422,7 +424,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty // (pending)Log filters return []Log. // // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterchanges -func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { +func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { api.filtersMu.Lock() defer api.filtersMu.Unlock() @@ -432,21 +434,21 @@ func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { // receive timer value and reset timer <-f.deadline.C } - f.deadline.Reset(deadline) + f.deadline.Reset(api.timeout) switch f.typ { case PendingTransactionsSubscription, BlocksSubscription: hashes := f.hashes f.hashes = nil return returnHashes(hashes), nil - case LogsSubscription: + case LogsSubscription, MinedAndPendingLogsSubscription: logs := f.logs f.logs = nil return returnLogs(logs), nil } } - return []interface{}{}, fmt.Errorf("filter not found") + return []interface{}{}, errors.New("filter not found") } // returnHashes is a helper that will return an empty hash array case the given hash array is nil, @@ -485,7 +487,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { if raw.BlockHash != nil { if raw.FromBlock != nil || raw.ToBlock != nil { // BlockHash is mutually exclusive with FromBlock/ToBlock criteria - return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") + return errors.New("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") } args.BlockHash = raw.BlockHash } else { @@ -558,11 +560,11 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { } args.Topics[i] = append(args.Topics[i], parsed) } else { - return fmt.Errorf("invalid topic(s)") + return errors.New("invalid topic(s)") } } default: - return fmt.Errorf("invalid topic(s)") + return errors.New("invalid topic(s)") } } } diff --git a/eth/filters/bench_test.go 
b/eth/filters/bench_test.go index 98635b9de7b9..f465d67f7069 100644 --- a/eth/filters/bench_test.go +++ b/eth/filters/bench_test.go @@ -30,7 +30,6 @@ import ( "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/ethdb" - "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/node" ) @@ -124,21 +123,25 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { b.Log("Running filter benchmarks...") start = time.Now() - mux := new(event.TypeMux) - var backend *testBackend + + var ( + backend *testBackend + sys *FilterSystem + ) for i := 0; i < benchFilterCnt; i++ { if i%20 == 0 { db.Close() db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "") - backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)} + backend = &testBackend{db: db, sections: cnt} + sys = NewFilterSystem(backend, Config{}) } var addr common.Address addr[0] = byte(i) addr[1] = byte(i / 256) - filter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil) + filter := sys.NewRangeFilter(0, int64(cnt*sectionSize-1), []common.Address{addr}, nil) if _, err := filter.Logs(context.Background()); err != nil { - b.Error("filter.Find error:", err) + b.Error("filter.Logs error:", err) } } d = time.Since(start) @@ -188,11 +191,11 @@ func BenchmarkNoBloomBits(b *testing.B) { clearBloomBits(db) + _, sys := newTestFilterSystem(b, db, Config{}) + b.Log("Running filter benchmarks...") start := time.Now() - mux := new(event.TypeMux) - backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)} - filter := NewRangeFilter(backend, 0, int64(headNum), []common.Address{{}}, nil) + filter := sys.NewRangeFilter(0, int64(headNum), []common.Address{{}}, nil) filter.Logs(context.Background()) d := time.Since(start) b.Log("Finished running filter benchmarks") diff --git a/eth/filters/filter.go 
b/eth/filters/filter.go index 4d47bafc0fc7..3defe854f86f 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -22,36 +22,15 @@ import ( "math/big" "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/bloombits" "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/ethdb" - "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/rpc" ) -type Backend interface { - ChainDb() ethdb.Database - EventMux() *event.TypeMux - HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) - HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) - GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) - GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) - - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription - SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription - SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - - BloomStatus() (uint64, uint64) - ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) -} - // Filter can be used to retrieve and filter logs. type Filter struct { - backend Backend + sys *FilterSystem - db ethdb.Database addresses []common.Address topics [][]common.Hash @@ -63,7 +42,7 @@ type Filter struct { // NewRangeFilter creates a new filter which uses a bloom filter on blocks to // figure out whether a particular block is interesting or not. -func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { +func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { // Flatten the address and topic filter clauses into a single bloombits filter // system. 
Since the bloombits are not positional, nil topics are permitted, // which get flattened into a nil byte slice. @@ -82,10 +61,10 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres } filters = append(filters, filter) } - size, _ := backend.BloomStatus() + size, _ := sys.backend.BloomStatus() // Create a generic filter and convert it into a range filter - filter := newFilter(backend, addresses, topics) + filter := newFilter(sys, addresses, topics) filter.matcher = bloombits.NewMatcher(size, filters) filter.begin = begin @@ -96,21 +75,20 @@ func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Addres // NewBlockFilter creates a new filter which directly inspects the contents of // a block to figure out whether it is interesting or not. -func NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { +func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { // Create a generic filter and convert it into a block filter - filter := newFilter(backend, addresses, topics) + filter := newFilter(sys, addresses, topics) filter.block = block return filter } // newFilter creates a generic filter that can either filter based on a block hash, // or based on range queries. The search criteria needs to be explicitly set. 
-func newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter { +func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter { return &Filter{ - backend: backend, + sys: sys, addresses: addresses, topics: topics, - db: backend.ChainDb(), } } @@ -119,7 +97,7 @@ func newFilter(backend Backend, addresses []common.Address, topics [][]common.Ha func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // If we're doing singleton block filtering, execute and return if f.block != (common.Hash{}) { - header, err := f.backend.HeaderByHash(ctx, f.block) + header, err := f.sys.backend.HeaderByHash(ctx, f.block) if err != nil { return nil, err } @@ -128,26 +106,35 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } return f.blockLogs(ctx, header) } + // Short-cut if all we care about is pending logs + if f.begin == rpc.PendingBlockNumber.Int64() { + if f.end != rpc.PendingBlockNumber.Int64() { + return nil, errors.New("invalid block range") + } + return f.pendingLogs() + } // Figure out the limits of the filter range - header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + header, _ := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) if header == nil { return nil, nil } - head := header.Number.Uint64() - - if f.begin == -1 { + var ( + head = header.Number.Uint64() + end = uint64(f.end) + pending = f.end == rpc.PendingBlockNumber.Int64() + ) + if f.begin == rpc.LatestBlockNumber.Int64() { f.begin = int64(head) } - end := uint64(f.end) - if f.end == -1 { + if f.end == rpc.LatestBlockNumber.Int64() || f.end == rpc.PendingBlockNumber.Int64() { end = head } // Gather all indexed logs, and finish with non indexed ones var ( - logs []*types.Log - err error + logs []*types.Log + err error + size, sections = f.sys.backend.BloomStatus() ) - size, sections := f.backend.BloomStatus() if indexed := sections * size; indexed > uint64(f.begin) { if indexed > end { logs, err = 
f.indexedLogs(ctx, end) @@ -160,6 +147,13 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } rest, err := f.unindexedLogs(ctx, end) logs = append(logs, rest...) + if pending { + pendingLogs, err := f.pendingLogs() + if err != nil { + return nil, err + } + logs = append(logs, pendingLogs...) + } return logs, err } @@ -175,7 +169,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err } defer session.Close() - f.backend.ServiceFilter(ctx, session) + f.sys.backend.ServiceFilter(ctx, session) // Iterate over the matches until exhausted or context closed var logs []*types.Log @@ -194,11 +188,11 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err f.begin = int64(number) + 1 // Retrieve the suggested block and pull any truly matching logs - header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) + header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) if header == nil || err != nil { return logs, err } - found, err := f.checkMatches(ctx, header) + found, err := f.blockLogs(ctx, header) if err != nil { return logs, err } @@ -216,7 +210,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e var logs []*types.Log for ; f.begin <= int64(end); f.begin++ { - header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) + header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { return logs, err } @@ -230,45 +224,58 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e } // blockLogs returns the logs matching the filter criteria within a single block. 
-func (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { +func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) { if bloomFilter(header.Bloom, f.addresses, f.topics) { - found, err := f.checkMatches(ctx, header) - if err != nil { - return logs, err - } - logs = append(logs, found...) + return f.checkMatches(ctx, header) } - return logs, nil + return nil, nil } // checkMatches checks if the receipts belonging to the given header contain any log events that // match the filter criteria. This function is called when the bloom filter signals a potential match. -func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { - // Get the logs of the block - logsList, err := f.backend.GetLogs(ctx, header.Hash()) +// skipFilter signals all logs of the given block are requested. +func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) { + hash := header.Hash() + // Logs in cache are partially filled with context data + // such as tx index, block hash, etc. + // Notably tx hash is NOT filled in because it needs + // access to block body data. + cached, err := f.sys.cachedLogElem(ctx, hash, header.Number.Uint64()) if err != nil { return nil, err } - var unfiltered []*types.Log - for _, logs := range logsList { - unfiltered = append(unfiltered, logs...) + logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics) + if len(logs) == 0 { + return nil, nil } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - if len(logs) > 0 { - // We have matching logs, check if we need to resolve full logs via the light client - if logs[0].TxHash == (common.Hash{}) { - receipts, err := f.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil, err - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs...) 
- } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - } + // Most backends will deliver un-derived logs, but check nevertheless. + if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) { return logs, nil } + + body, err := f.sys.cachedGetBody(ctx, cached, hash, header.Number.Uint64()) + if err != nil { + return nil, err + } + for i, log := range logs { + // Copy log not to modify cache elements + logcopy := *log + logcopy.TxHash = body.Transactions[logcopy.TxIndex].Hash() + logs[i] = &logcopy + } + return logs, nil +} + +// pendingLogs returns the logs matching the filter criteria within the pending block. +func (f *Filter) pendingLogs() ([]*types.Log, error) { + block, receipts := f.sys.backend.PendingBlockAndReceipts() + if bloomFilter(block.Bloom(), f.addresses, f.topics) { + var unfiltered []*types.Log + for _, r := range receipts { + unfiltered = append(unfiltered, r.Logs...) + } + return filterLogs(unfiltered, nil, nil, f.addresses, f.topics), nil + } return nil, nil } diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 2d91b771ef9a..ad592154efa9 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -23,17 +23,123 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" ethereum "github.com/XinFinOrg/XDPoSChain" "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/lru" "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/bloombits" "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/rpc" ) +// Config represents the configuration of the filter system. 
+type Config struct { + LogCacheSize int // maximum number of cached blocks (default: 32) + Timeout time.Duration // how long filters stay active (default: 5min) +} + +func (cfg Config) withDefaults() Config { + if cfg.Timeout == 0 { + cfg.Timeout = 5 * time.Minute + } + if cfg.LogCacheSize == 0 { + cfg.LogCacheSize = 32 + } + return cfg +} + +type Backend interface { + ChainDb() ethdb.Database + HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) + HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) + GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) + GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) + GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) + PendingBlockAndReceipts() (*types.Block, types.Receipts) + + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription + SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription + SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription + + BloomStatus() (uint64, uint64) + ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) +} + +// FilterSystem holds resources shared by all filters. +type FilterSystem struct { + backend Backend + logsCache *lru.Cache[common.Hash, *logCacheElem] + cfg *Config +} + +// NewFilterSystem creates a filter system. +func NewFilterSystem(backend Backend, config Config) *FilterSystem { + config = config.withDefaults() + return &FilterSystem{ + backend: backend, + logsCache: lru.NewCache[common.Hash, *logCacheElem](config.LogCacheSize), + cfg: &config, + } +} + +type logCacheElem struct { + logs []*types.Log + body atomic.Pointer[types.Body] +} + +// cachedLogElem loads block logs from the backend and caches the result. 
+func (sys *FilterSystem) cachedLogElem(ctx context.Context, blockHash common.Hash, number uint64) (*logCacheElem, error) { + cached, ok := sys.logsCache.Get(blockHash) + if ok { + return cached, nil + } + + logs, err := sys.backend.GetLogs(ctx, blockHash, number) + if err != nil { + return nil, err + } + if logs == nil { + return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString()) + } + // Database logs are un-derived. + // Fill in whatever we can (txHash is inaccessible at this point). + flattened := make([]*types.Log, 0) + var logIdx uint + for i, txLogs := range logs { + for _, log := range txLogs { + log.BlockHash = blockHash + log.BlockNumber = number + log.TxIndex = uint(i) + log.Index = logIdx + logIdx++ + flattened = append(flattened, log) + } + } + elem := &logCacheElem{logs: flattened} + sys.logsCache.Add(blockHash, elem) + return elem, nil +} + +func (sys *FilterSystem) cachedGetBody(ctx context.Context, elem *logCacheElem, hash common.Hash, number uint64) (*types.Body, error) { + if body := elem.body.Load(); body != nil { + return body, nil + } + body, err := sys.backend.GetBody(ctx, hash, rpc.BlockNumber(number)) + if err != nil { + return nil, err + } + elem.body.Store(body) + return body, nil +} + // Type determines the kind of filter and is used to put the filter in to // the correct bucket when added. type Type byte @@ -68,10 +174,6 @@ const ( chainEvChanSize = 10 ) -var ( - ErrInvalidSubscriptionID = errors.New("invalid id") -) - type subscription struct { id rpc.ID typ Type @@ -87,25 +189,26 @@ type subscription struct { // EventSystem creates subscriptions, processes events and broadcasts them to the // subscription which match the subscription criteria. 
type EventSystem struct { - mux *event.TypeMux backend Backend + sys *FilterSystem lightMode bool lastHead *types.Header // Subscriptions - txsSub event.Subscription // Subscription for new transaction event - logsSub event.Subscription // Subscription for new log event - rmLogsSub event.Subscription // Subscription for removed log event - chainSub event.Subscription // Subscription for new chain event - pendingLogSub *event.TypeMuxSubscription // Subscription for pending log event + txsSub event.Subscription // Subscription for new transaction event + logsSub event.Subscription // Subscription for new log event + rmLogsSub event.Subscription // Subscription for removed log event + pendingLogsSub event.Subscription // Subscription for pending log event + chainSub event.Subscription // Subscription for new chain event // Channels - install chan *subscription // install filter for event notification - uninstall chan *subscription // remove filter for event notification - txsCh chan core.NewTxsEvent // Channel to receive new transactions event - logsCh chan []*types.Log // Channel to receive new log event - rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event - chainCh chan core.ChainEvent // Channel to receive new chain event + install chan *subscription // install filter for event notification + uninstall chan *subscription // remove filter for event notification + txsCh chan core.NewTxsEvent // Channel to receive new transactions event + logsCh chan []*types.Log // Channel to receive new log event + pendingLogsCh chan []*types.Log // Channel to receive new log event + rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event + chainCh chan core.ChainEvent // Channel to receive new chain event } // NewEventSystem creates a new manager that listens for event on the given mux, @@ -114,17 +217,18 @@ type EventSystem struct { // // The returned manager has a loop that needs to be stopped with the Stop function // or by stopping 
the given mux. -func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem { +func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem { m := &EventSystem{ - mux: mux, - backend: backend, - lightMode: lightMode, - install: make(chan *subscription), - uninstall: make(chan *subscription), - txsCh: make(chan core.NewTxsEvent, txChanSize), - logsCh: make(chan []*types.Log, logsChanSize), - rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize), - chainCh: make(chan core.ChainEvent, chainEvChanSize), + sys: sys, + backend: sys.backend, + lightMode: lightMode, + install: make(chan *subscription), + uninstall: make(chan *subscription), + txsCh: make(chan core.NewTxsEvent, txChanSize), + logsCh: make(chan []*types.Log, logsChanSize), + rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize), + pendingLogsCh: make(chan []*types.Log, logsChanSize), + chainCh: make(chan core.ChainEvent, chainEvChanSize), } // Subscribe events @@ -132,12 +236,10 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh) m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh) m.chainSub = m.backend.SubscribeChainEvent(m.chainCh) - // TODO(rjl493456442): use feed to subscribe pending log event - m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{}) + m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh) // Make sure none of the subscriptions are empty - if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || - m.pendingLogSub.Closed() { + if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil { log.Crit("Subscribe for event system failed") } @@ -227,7 +329,7 @@ func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*typ if from >= 0 && to == rpc.LatestBlockNumber { return es.subscribeLogs(crit, logs), nil } - return nil, fmt.Errorf("invalid from 
and to block combination: from > to") + return nil, errors.New("invalid from and to block combination: from > to") } // subscribeMinedPendingLogs creates a subscription that returned mined and @@ -315,58 +417,67 @@ func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscript type filterIndex map[Type]map[rpc.ID]*subscription -// broadcast event to filters that match criteria. -func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) { - if ev == nil { +func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) { + if len(ev) == 0 { return } + for _, f := range filters[LogsSubscription] { + matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) + if len(matchedLogs) > 0 { + f.logs <- matchedLogs + } + } +} - switch e := ev.(type) { - case []*types.Log: - if len(e) > 0 { - for _, f := range filters[LogsSubscription] { - if matchedLogs := filterLogs(e, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } +func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) { + if len(ev) == 0 { + return + } + for _, f := range filters[PendingLogsSubscription] { + matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) + if len(matchedLogs) > 0 { + f.logs <- matchedLogs } - case core.RemovedLogsEvent: - for _, f := range filters[LogsSubscription] { - if matchedLogs := filterLogs(e.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 { - f.logs <- matchedLogs - } + } +} + +func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) { + for _, f := range filters[LogsSubscription] { + matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) + if len(matchedLogs) > 0 { + f.logs <- matchedLogs 
} - case *event.TypeMuxEvent: - if muxe, ok := e.Data.(core.PendingLogsEvent); ok { - for _, f := range filters[PendingLogsSubscription] { - if e.Time.After(f.created) { - if matchedLogs := filterLogs(muxe.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 { - f.logs <- matchedLogs - } + } +} + +func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) { + hashes := make([]common.Hash, 0, len(ev.Txs)) + for _, tx := range ev.Txs { + hashes = append(hashes, tx.Hash()) + } + for _, f := range filters[PendingTransactionsSubscription] { + f.hashes <- hashes + } +} + +func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) { + for _, f := range filters[BlocksSubscription] { + f.headers <- ev.Block.Header() + } + if es.lightMode && len(filters[LogsSubscription]) > 0 { + es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) { + for _, f := range filters[LogsSubscription] { + if f.logsCrit.FromBlock != nil && header.Number.Cmp(f.logsCrit.FromBlock) < 0 { + continue } - } - } - case core.NewTxsEvent: - hashes := make([]common.Hash, 0, len(e.Txs)) - for _, tx := range e.Txs { - hashes = append(hashes, tx.Hash()) - } - for _, f := range filters[PendingTransactionsSubscription] { - f.hashes <- hashes - } - case core.ChainEvent: - for _, f := range filters[BlocksSubscription] { - f.headers <- e.Block.Header() - } - if es.lightMode && len(filters[LogsSubscription]) > 0 { - es.lightFilterNewHead(e.Block.Header(), func(header *types.Header, remove bool) { - for _, f := range filters[LogsSubscription] { - if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { - f.logs <- matchedLogs - } + if f.logsCrit.ToBlock != nil && header.Number.Cmp(f.logsCrit.ToBlock) > 0 { + continue } - }) - } + if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { + 
f.logs <- matchedLogs + } + } + }) } } @@ -405,52 +516,49 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func // filter logs of a single header in light client mode func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log { - if bloomFilter(header.Bloom, addresses, topics) { - // Get the logs of the block - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - logsList, err := es.backend.GetLogs(ctx, header.Hash()) - if err != nil { - return nil - } - var unfiltered []*types.Log - for _, logs := range logsList { - for _, log := range logs { - logcopy := *log - logcopy.Removed = remove - unfiltered = append(unfiltered, &logcopy) - } - } - logs := filterLogs(unfiltered, nil, nil, addresses, topics) - if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) { - // We have matching but non-derived logs - receipts, err := es.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - for _, log := range receipt.Logs { - logcopy := *log - logcopy.Removed = remove - unfiltered = append(unfiltered, &logcopy) - } - } - logs = filterLogs(unfiltered, nil, nil, addresses, topics) - } + if !bloomFilter(header.Bloom, addresses, topics) { + return nil + } + // Get the logs of the block + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + cached, err := es.sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64()) + if err != nil { + return nil + } + unfiltered := append([]*types.Log{}, cached.logs...) 
+ for i, log := range unfiltered { + // Don't modify in-cache elements + logcopy := *log + logcopy.Removed = remove + // Swap copy in-place + unfiltered[i] = &logcopy + } + logs := filterLogs(unfiltered, nil, nil, addresses, topics) + // Txhash is already resolved + if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) { return logs } - return nil + // Resolve txhash + body, err := es.sys.cachedGetBody(ctx, cached, header.Hash(), header.Number.Uint64()) + if err != nil { + return nil + } + for _, log := range logs { + // logs are already copied, safe to modify + log.TxHash = body.Transactions[log.TxIndex].Hash() + } + return logs } // eventLoop (un)installs filters and processes mux events. func (es *EventSystem) eventLoop() { // Ensure all subscriptions get cleaned up defer func() { - es.pendingLogSub.Unsubscribe() es.txsSub.Unsubscribe() es.logsSub.Unsubscribe() es.rmLogsSub.Unsubscribe() + es.pendingLogsSub.Unsubscribe() es.chainSub.Unsubscribe() }() @@ -461,20 +569,16 @@ func (es *EventSystem) eventLoop() { for { select { - // Handle subscribed events case ev := <-es.txsCh: - es.broadcast(index, ev) + es.handleTxsEvent(index, ev) case ev := <-es.logsCh: - es.broadcast(index, ev) + es.handleLogs(index, ev) case ev := <-es.rmLogsCh: - es.broadcast(index, ev) + es.handleRemovedLogs(index, ev) + case ev := <-es.pendingLogsCh: + es.handlePendingLogs(index, ev) case ev := <-es.chainCh: - es.broadcast(index, ev) - case ev, active := <-es.pendingLogSub.Chan(): - if !active { // system stopped - return - } - es.broadcast(index, ev) + es.handleChainEvent(index, ev) case f := <-es.install: if f.typ == MinedAndPendingLogsSubscription { diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index fb4f7e7b8bc2..2610376d3bf3 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -18,10 +18,12 @@ package filters import ( "context" + "errors" "fmt" "math/big" "math/rand" "reflect" + "runtime" "testing" "time" @@ 
-32,6 +34,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/core/bloombits" "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/crypto" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/params" @@ -39,23 +42,20 @@ import ( ) type testBackend struct { - mux *event.TypeMux - db ethdb.Database - sections uint64 - txFeed *event.Feed - rmLogsFeed *event.Feed - logsFeed *event.Feed - chainFeed *event.Feed + mux *event.TypeMux + db ethdb.Database + sections uint64 + txFeed event.Feed + logsFeed event.Feed + rmLogsFeed event.Feed + pendingLogsFeed event.Feed + chainFeed event.Feed } func (b *testBackend) ChainDb() ethdb.Database { return b.db } -func (b *testBackend) EventMux() *event.TypeMux { - return b.mux -} - func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { var hash common.Hash var num uint64 @@ -74,22 +74,27 @@ func (b *testBackend) HeaderByHash(ctx context.Context, blockHash common.Hash) ( return core.GetHeader(b.db, blockHash, num), nil } +func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) { number := core.GetBlockNumber(b.db, blockHash) return core.GetBlockReceipts(b.db, blockHash, number), nil } -func (b *testBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) { - number := core.GetBlockNumber(b.db, blockHash) - receipts := core.GetBlockReceipts(b.db, blockHash, number) - - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - logs[i] = receipt.Logs - } +func (b *testBackend) GetLogs(ctx context.Context, 
hash common.Hash, number uint64) ([][]*types.Log, error) { + logs := rawdb.ReadLogs(b.db, hash, number) return logs, nil } +func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + return nil, nil +} + func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { return b.txFeed.Subscribe(ch) } @@ -102,6 +107,10 @@ func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript return b.logsFeed.Subscribe(ch) } +func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + return b.pendingLogsFeed.Subscribe(ch) +} + func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return b.chainFeed.Subscribe(ch) } @@ -137,6 +146,12 @@ func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.Matc }() } +func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) { + backend := &testBackend{db: db} + sys := NewFilterSystem(backend, cfg) + return backend, sys +} + // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. 
// It creates multiple subscriptions: // - one at the start and should receive all posted chain events and a second (blockHashes) @@ -146,17 +161,12 @@ func TestBlockSubscription(t *testing.T) { t.Parallel() var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) - genesis = new(core.Genesis).MustCommit(db) - chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) - chainEvents = []core.ChainEvent{} + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + genesis = new(core.Genesis).MustCommit(db) + chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) + chainEvents = []core.ChainEvent{} ) for _, blk := range chain { @@ -191,7 +201,7 @@ func TestBlockSubscription(t *testing.T) { time.Sleep(1 * time.Second) for _, e := range chainEvents { - chainFeed.Send(e) + backend.chainFeed.Send(e) } <-sub0.Err() @@ -203,14 +213,9 @@ func TestPendingTxFilter(t *testing.T) { t.Parallel() var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) transactions = []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), @@ -226,7 +231,7 @@ func TestPendingTxFilter(t *testing.T) { fid0 := 
api.NewPendingTransactionFilter() time.Sleep(1 * time.Second) - txFeed.Send(core.NewTxsEvent{Txs: transactions}) + backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) timeout := time.Now().Add(1 * time.Second) for { @@ -263,14 +268,9 @@ func TestPendingTxFilter(t *testing.T) { // If not it must return an error. func TestLogFilterCreation(t *testing.T) { var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) testCases = []struct { crit FilterCriteria @@ -312,14 +312,9 @@ func TestInvalidLogFilterCreation(t *testing.T) { t.Parallel() var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) ) // different situations where log filter creation should fail. 
@@ -339,15 +334,10 @@ func TestInvalidLogFilterCreation(t *testing.T) { func TestInvalidGetLogsRequest(t *testing.T) { var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) - blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") ) // Reason: Cannot specify both BlockHash and FromBlock/ToBlock) @@ -369,14 +359,9 @@ func TestLogFilter(t *testing.T) { t.Parallel() var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -386,7 +371,7 @@ func TestLogFilter(t *testing.T) { secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") - // posted twice, once as vm.Logs and once as core.PendingLogsEvent + // posted twice, once as regular logs and once as pending logs. 
allLogs = []*types.Log{ {Address: firstAddr}, {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, @@ -439,11 +424,11 @@ func TestLogFilter(t *testing.T) { // raise events time.Sleep(1 * time.Second) - if nsend := logsFeed.Send(allLogs); nsend == 0 { - t.Fatal("Shoud have at least one subscription") + if nsend := backend.logsFeed.Send(allLogs); nsend == 0 { + t.Fatal("Logs event not delivered") } - if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil { - t.Fatal(err) + if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 { + t.Fatal("Pending logs event not delivered") } for i, tt := range testCases { @@ -488,14 +473,9 @@ func TestPendingLogsSubscription(t *testing.T) { t.Parallel() var ( - mux = new(event.TypeMux) - db = rawdb.NewMemoryDatabase() - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - api = NewPublicFilterAPI(backend, false) + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -507,26 +487,18 @@ func TestPendingLogsSubscription(t *testing.T) { fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") - allLogs = []core.PendingLogsEvent{ - {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}}, - {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}}, - {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}}, - {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, 
BlockNumber: 3}}}, - {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}}, - {Logs: []*types.Log{ + allLogs = [][]*types.Log{ + {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}, + {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}, + {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}, + {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}, + {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}, + { {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, {Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, - }}, - } - - convertLogs = func(pl []core.PendingLogsEvent) []*types.Log { - var logs []*types.Log - for _, l := range pl { - logs = append(logs, l.Logs...) 
- } - return logs + }, } testCases = []struct { @@ -536,21 +508,52 @@ func TestPendingLogsSubscription(t *testing.T) { sub *Subscription }{ // match all - {ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil}, + { + ethereum.FilterQuery{}, flattenLogs(allLogs), + nil, nil, + }, // match none due to no matching addresses - {ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, + nil, + nil, nil, + }, // match logs based on addresses, ignore topics - {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, + append(flattenLogs(allLogs[:2]), allLogs[5][3]), + nil, nil, + }, // match none due to no matching topics (match with address) - {ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, + nil, nil, nil, + }, // match logs based on addresses and topics - {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, + append(flattenLogs(allLogs[3:5]), allLogs[5][0]), + nil, nil, + }, // match logs based on multiple addresses and "or" topics - {ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: 
[][]common.Hash{{firstTopic, secondTopic}}}, + append(flattenLogs(allLogs[2:5]), allLogs[5][0]), + nil, + nil, + }, // block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes - {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, + append(flattenLogs(allLogs[:2]), allLogs[5][3]), + nil, nil, + }, // multiple pending logs, should match only 2 topics from the logs in block 5 - {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil}, + { + ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, + []*types.Log{allLogs[5][0], allLogs[5][2]}, + nil, nil, + }, } ) @@ -593,10 +596,219 @@ func TestPendingLogsSubscription(t *testing.T) { // raise events time.Sleep(1 * time.Second) - // allLogs are type of core.PendingLogsEvent - for _, l := range allLogs { - if err := mux.Post(l); err != nil { + for _, ev := range allLogs { + backend.pendingLogsFeed.Send(ev) + } +} + +func TestLightFilterLogs(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, true) + signer = types.HomesteadSigner{} + + firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") + secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") + thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") + notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") + firstTopic = 
common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + + // posted twice, once as regular logs and once as pending logs. + allLogs = []*types.Log{ + // Block 1 + {Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0}, + // Block 2 + {Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0}, + {Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1}, + {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2}, + // Block 3 + {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0}, + } + + testCases = []struct { + crit FilterCriteria + expected []*types.Log + id rpc.ID + }{ + // match all + 0: {FilterCriteria{}, allLogs, ""}, + // match none due to no matching addresses + 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, + // match logs based on addresses, ignore topics + 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, + // match logs based on addresses and topics + 3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, + // all logs with block num >= 3 + 4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""}, + // all logs + 5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""}, + // all logs with 1>= block num <=2 and topic secondTopic + 6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, + } + + key, _ = crypto.GenerateKey() + addr = crypto.PubkeyToAddress(key.PublicKey) + genesis = &core.Genesis{Config: 
params.TestChainConfig, + Alloc: core.GenesisAlloc{ + addr: {Balance: big.NewInt(params.Ether)}, + }, + } + receipts = []*types.Receipt{{ + Logs: []*types.Log{allLogs[0]}, + }, { + Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]}, + }, { + Logs: []*types.Log{allLogs[4]}, + }} + ) + + _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 4, func(i int, b *core.BlockGen) { + if i == 0 { + return + } + receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]}) + b.AddUncheckedReceipt(receipts[i-1]) + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: big.NewInt(2100), Data: nil}), signer, key) + b.AddTx(tx) + }) + for i, block := range blocks { + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + if i > 0 { + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]}) + } + } + // create all filters + for i := range testCases { + id, err := api.NewFilter(testCases[i].crit) + if err != nil { t.Fatal(err) } + testCases[i].id = id + } + + // raise events + time.Sleep(1 * time.Second) + for _, block := range blocks { + backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs}) + } + + for i, tt := range testCases { + var fetched []*types.Log + timeout := time.Now().Add(1 * time.Second) + for { // fetch all expected logs + results, err := api.GetFilterChanges(tt.id) + if err != nil { + t.Fatalf("Unable to fetch logs: %v", err) + } + fetched = append(fetched, results.([]*types.Log)...) 
+ if len(fetched) >= len(tt.expected) { + break + } + // check timeout + if time.Now().After(timeout) { + break + } + + time.Sleep(100 * time.Millisecond) + } + + if len(fetched) != len(tt.expected) { + t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) + return + } + + for l := range fetched { + if fetched[l].Removed { + t.Errorf("expected log not to be removed for log %d in case %d", l, i) + } + expected := *tt.expected[l] + blockNum := expected.BlockNumber - 1 + expected.BlockHash = blocks[blockNum].Hash() + expected.TxHash = blocks[blockNum].Transactions()[0].Hash() + if !reflect.DeepEqual(fetched[l], &expected) { + t.Errorf("invalid log on index %d for case %d", l, i) + } + } + } +} + +// TestPendingTxFilterDeadlock tests if the event loop hangs when pending +// txes arrive at the same time that one of multiple filters is timing out. +// Please refer to #22131 for more details. +func TestPendingTxFilterDeadlock(t *testing.T) { + t.Parallel() + timeout := 100 * time.Millisecond + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout}) + api = NewFilterAPI(sys, false) + done = make(chan struct{}) + ) + + go func() { + // Bombard feed with txes until signal was received to stop + i := uint64(0) + for { + select { + case <-done: + return + default: + } + + tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil) + backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}}) + i++ + } + }() + + // Create a bunch of filters that will + // timeout either in 100ms or 200ms + fids := make([]rpc.ID, 20) + for i := 0; i < len(fids); i++ { + fid := api.NewPendingTransactionFilter() + fids[i] = fid + // Wait for at least one tx to arrive in filter + for { + hashes, err := api.GetFilterChanges(fid) + if err != nil { + t.Fatalf("Filter should exist: %v\n", err) + } + if 
len(hashes.([]common.Hash)) > 0 { + break + } + runtime.Gosched() + } + } + + // Wait until filters have timed out + time.Sleep(3 * timeout) + + // If tx loop doesn't consume `done` after a second + // it's hanging. + select { + case done <- struct{}{}: + // Check that all filters have been uninstalled + for _, fid := range fids { + if _, err := api.GetFilterChanges(fid); err == nil { + t.Errorf("Filter %s should have been uninstalled\n", fid) + } + } + case <-time.After(1 * time.Second): + t.Error("Tx sending loop hangs") + } +} + +func flattenLogs(pl [][]*types.Log) []*types.Log { + var logs []*types.Log + for _, l := range pl { + logs = append(logs, l...) } + return logs } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 1b74d21df7e0..489917d17c2b 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -29,7 +29,6 @@ import ( "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/params" ) @@ -50,18 +49,13 @@ func BenchmarkFilters(b *testing.B) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") - mux = new(event.TypeMux) - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = common.BytesToAddress([]byte("jeff")) - addr3 = common.BytesToAddress([]byte("ethereum")) - addr4 = common.BytesToAddress([]byte("random addresses please")) + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + _, sys = newTestFilterSystem(b, db, Config{}) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = 
crypto.PubkeyToAddress(key1.PublicKey) + addr2 = common.BytesToAddress([]byte("jeff")) + addr3 = common.BytesToAddress([]byte("ethereum")) + addr4 = common.BytesToAddress([]byte("random addresses please")) ) defer db.Close() @@ -84,20 +78,16 @@ func BenchmarkFilters(b *testing.B) { } }) for i, block := range chain { - core.WriteBlock(db, block) - if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - b.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { - b.Fatalf("failed to insert block number: %v", err) - } + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil { b.Fatal("error writing block receipts:", err) } } b.ResetTimer() - filter := NewRangeFilter(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil) + filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil) for i := 0; i < b.N; i++ { logs, _ := filter.Logs(context.Background()) @@ -115,15 +105,10 @@ func TestFilters(t *testing.T) { defer os.RemoveAll(dir) var ( - db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") - mux = new(event.TypeMux) - txFeed = new(event.Feed) - rmLogsFeed = new(event.Feed) - logsFeed = new(event.Feed) - chainFeed = new(event.Feed) - backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key1.PublicKey) + db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "") + _, sys = newTestFilterSystem(t, db, Config{}) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key1.PublicKey) hash1 = common.BytesToHash([]byte("topic1")) hash2 = 
common.BytesToHash([]byte("topic2")) @@ -144,6 +129,7 @@ func TestFilters(t *testing.T) { }, } gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(2100), nil)) case 2: receipt := types.NewReceipt(nil, false, 0) receipt.Logs = []*types.Log{ @@ -153,6 +139,7 @@ func TestFilters(t *testing.T) { }, } gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2100), nil)) case 998: receipt := types.NewReceipt(nil, false, 0) receipt.Logs = []*types.Log{ @@ -162,6 +149,7 @@ func TestFilters(t *testing.T) { }, } gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, big.NewInt(2100), nil)) case 999: receipt := types.NewReceipt(nil, false, 0) receipt.Logs = []*types.Log{ @@ -171,29 +159,26 @@ func TestFilters(t *testing.T) { }, } gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, big.NewInt(2100), nil)) } }) for i, block := range chain { - core.WriteBlock(db, block) - if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil { t.Fatal("error writing block receipts:", err) } } - filter := NewRangeFilter(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) + filter := sys.NewRangeFilter(0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) logs, _ := 
filter.Logs(context.Background()) if len(logs) != 4 { t.Error("expected 4 log, got", len(logs)) } - filter = NewRangeFilter(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}) + filter = sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 1 { t.Error("expected 1 log, got", len(logs)) @@ -202,7 +187,7 @@ func TestFilters(t *testing.T) { t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) } - filter = NewRangeFilter(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}}) + filter = sys.NewRangeFilter(990, -1, []common.Address{addr}, [][]common.Hash{{hash3}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 1 { t.Error("expected 1 log, got", len(logs)) @@ -211,7 +196,7 @@ func TestFilters(t *testing.T) { t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) } - filter = NewRangeFilter(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}}) + filter = sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 2 { @@ -219,7 +204,7 @@ func TestFilters(t *testing.T) { } failHash := common.BytesToHash([]byte("fail")) - filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}}) + filter = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 0 { @@ -227,14 +212,14 @@ func TestFilters(t *testing.T) { } failAddr := common.BytesToAddress([]byte("failmenow")) - filter = NewRangeFilter(backend, 0, -1, []common.Address{failAddr}, nil) + filter = sys.NewRangeFilter(0, -1, []common.Address{failAddr}, nil) logs, _ = filter.Logs(context.Background()) if len(logs) != 0 { t.Error("expected 0 log, got", len(logs)) } - filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}}) + filter = sys.NewRangeFilter(0, -1, nil, 
[][]common.Hash{{failHash}, {hash1}}) logs, _ = filter.Logs(context.Background()) if len(logs) != 0 { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 54492790371b..fe3a4b511126 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -24,120 +24,152 @@ import ( "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/internal/ethapi" + "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/params" "github.com/XinFinOrg/XDPoSChain/rpc" ) -var maxPrice = big.NewInt(500 * params.Shannon) +const sampleNumber = 3 // Number of transactions sampled in a block + +var DefaultMaxPrice = big.NewInt(500 * params.GWei) +var DefaultIgnorePrice = big.NewInt(2 * params.Wei) type Config struct { - Blocks int - Percentile int - Default *big.Int `toml:",omitempty"` + Blocks int + Percentile int + Default *big.Int `toml:",omitempty"` + MaxPrice *big.Int `toml:",omitempty"` + IgnorePrice *big.Int `toml:",omitempty"` +} + +// OracleBackend includes all necessary background APIs for oracle. +type OracleBackend interface { + HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) + BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) + ChainConfig() *params.ChainConfig } // Oracle recommends gas prices based on the content of recent // blocks. Suitable for both light and full clients. type Oracle struct { - backend ethapi.Backend - lastHead common.Hash - lastPrice *big.Int - cacheLock sync.RWMutex - fetchLock sync.Mutex - - checkBlocks, maxEmpty, maxBlocks int - percentile int + backend OracleBackend + lastHead common.Hash + lastPrice *big.Int + maxPrice *big.Int + ignorePrice *big.Int + cacheLock sync.RWMutex + fetchLock sync.Mutex + + checkBlocks int + percentile int } -// NewOracle returns a new oracle. 
-func NewOracle(backend ethapi.Backend, params Config) *Oracle { +// NewOracle returns a new gasprice oracle which can recommend suitable +// gasprice for newly created transaction. +func NewOracle(backend OracleBackend, params Config) *Oracle { blocks := params.Blocks if blocks < 1 { blocks = 1 + log.Warn("Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks) } percent := params.Percentile if percent < 0 { percent = 0 + log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) } if percent > 100 { percent = 100 + log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) + } + maxPrice := params.MaxPrice + if maxPrice == nil || maxPrice.Int64() <= 0 { + maxPrice = DefaultMaxPrice + log.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice) + } + ignorePrice := params.IgnorePrice + if ignorePrice == nil || ignorePrice.Int64() <= 0 { + ignorePrice = DefaultIgnorePrice + log.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice) + } else if ignorePrice.Int64() > 0 { + log.Info("Gasprice oracle is ignoring threshold set", "threshold", ignorePrice) } return &Oracle{ backend: backend, lastPrice: params.Default, + maxPrice: maxPrice, + ignorePrice: ignorePrice, checkBlocks: blocks, - maxEmpty: blocks / 2, - maxBlocks: blocks * 5, percentile: percent, } } // SuggestPrice returns the recommended gas price. func (gpo *Oracle) SuggestPrice(ctx context.Context) (*big.Int, error) { - gpo.cacheLock.RLock() - lastHead := gpo.lastHead - lastPrice := gpo.lastPrice - gpo.cacheLock.RUnlock() - head, _ := gpo.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) headHash := head.Hash() + + // If the latest gasprice is still available, return it. 
+ gpo.cacheLock.RLock() + lastHead, lastPrice := gpo.lastHead, gpo.lastPrice + gpo.cacheLock.RUnlock() if headHash == lastHead { return lastPrice, nil } - gpo.fetchLock.Lock() defer gpo.fetchLock.Unlock() - // try checking the cache again, maybe the last fetch fetched what we need + // Try checking the cache again, maybe the last fetch fetched what we need gpo.cacheLock.RLock() - lastHead = gpo.lastHead - lastPrice = gpo.lastPrice + lastHead, lastPrice = gpo.lastHead, gpo.lastPrice gpo.cacheLock.RUnlock() if headHash == lastHead { return lastPrice, nil } - - blockNum := head.Number.Uint64() - ch := make(chan getBlockPricesResult, gpo.checkBlocks) - sent := 0 - exp := 0 - var blockPrices []*big.Int - for sent < gpo.checkBlocks && blockNum > 0 { - go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(blockNum))), blockNum, ch) + var ( + sent, exp int + number = head.Number.Uint64() + result = make(chan getBlockPricesResult, gpo.checkBlocks) + quit = make(chan struct{}) + txPrices []*big.Int + ) + for sent < gpo.checkBlocks && number > 0 { + go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, gpo.ignorePrice, result, quit) sent++ exp++ - blockNum-- + number-- } - maxEmpty := gpo.maxEmpty for exp > 0 { - res := <-ch + res := <-result if res.err != nil { + close(quit) return lastPrice, res.err } exp-- - if res.price != nil { - blockPrices = append(blockPrices, res.price) - continue - } - if maxEmpty > 0 { - maxEmpty-- - continue + // Nothing returned. There are two special cases here: + // - The block is empty + // - All the transactions included are sent by the miner itself. + // In these cases, use the latest calculated price for samping. 
+ if len(res.prices) == 0 { + res.prices = []*big.Int{lastPrice} } - if blockNum > 0 && sent < gpo.maxBlocks { - go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(blockNum))), blockNum, ch) + // Besides, in order to collect enough data for sampling, if nothing + // meaningful returned, try to query more blocks. But the maximum + // is 2*checkBlocks. + if len(res.prices) == 1 && len(txPrices)+1+exp < gpo.checkBlocks*2 && number > 0 { + go gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, gpo.ignorePrice, result, quit) sent++ exp++ - blockNum-- + number-- } + txPrices = append(txPrices, res.prices...) } price := lastPrice - if len(blockPrices) > 0 { - sort.Sort(bigIntArray(blockPrices)) - price = blockPrices[(len(blockPrices)-1)*gpo.percentile/100] + if len(txPrices) > 0 { + sort.Sort(bigIntArray(txPrices)) + price = txPrices[(len(txPrices)-1)*gpo.percentile/100] } - if price.Cmp(maxPrice) > 0 { - price = new(big.Int).Set(maxPrice) + if price.Cmp(gpo.maxPrice) > 0 { + price = new(big.Int).Set(gpo.maxPrice) } // Check gas price min. @@ -154,8 +186,8 @@ func (gpo *Oracle) SuggestPrice(ctx context.Context) (*big.Int, error) { } type getBlockPricesResult struct { - price *big.Int - err error + prices []*big.Int + err error } type transactionsByGasPrice []*types.Transaction @@ -165,27 +197,40 @@ func (t transactionsByGasPrice) Swap(i, j int) { t[i], t[j] = t[j], t[i] } func (t transactionsByGasPrice) Less(i, j int) bool { return t[i].GasPriceCmp(t[j]) < 0 } // getBlockPrices calculates the lowest transaction gas price in a given block -// and sends it to the result channel. If the block is empty, price is nil. -func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, blockNum uint64, ch chan getBlockPricesResult) { +// and sends it to the result channel. 
If the block is empty or all transactions +// are sent by the miner itself(it doesn't make any sense to include this kind of +// transaction prices for sampling), nil gasprice is returned. +func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, blockNum uint64, limit int, ignoreUnder *big.Int, result chan getBlockPricesResult, quit chan struct{}) { block, err := gpo.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum)) if block == nil { - ch <- getBlockPricesResult{nil, err} + select { + case result <- getBlockPricesResult{nil, err}: + case <-quit: + } return } - blockTxs := block.Transactions() txs := make([]*types.Transaction, len(blockTxs)) copy(txs, blockTxs) sort.Sort(transactionsByGasPrice(txs)) + var prices []*big.Int for _, tx := range txs { + if ignoreUnder != nil && tx.GasPrice().Cmp(ignoreUnder) == -1 { + continue + } sender, err := types.Sender(signer, tx) if err == nil && sender != block.Coinbase() { - ch <- getBlockPricesResult{tx.GasPrice(), nil} - return + prices = append(prices, tx.GasPrice()) + if len(prices) >= limit { + break + } } } - ch <- getBlockPricesResult{nil, nil} + select { + case result <- getBlockPricesResult{prices, nil}: + case <-quit: + } } type bigIntArray []*big.Int diff --git a/eth/handler_test.go b/eth/handler_test.go index 14dbcb3b8150..651a33a28c49 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -23,16 +23,16 @@ import ( "testing" "time" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" - "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/consensus/ethash" "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/core/vm" "github.com/XinFinOrg/XDPoSChain/crypto" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/event" 
"github.com/XinFinOrg/XDPoSChain/p2p" "github.com/XinFinOrg/XDPoSChain/params" @@ -477,7 +477,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool genesis = gspec.MustCommit(db) blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{}) ) - pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db) + pm, err := NewProtocolManager(config, downloader.FullSync, ethconfig.Defaults.NetworkId, evmux, new(testTxPool), pow, blockchain, db) if err != nil { t.Fatalf("failed to start test protocol manager: %v", err) } diff --git a/eth/helper_test.go b/eth/helper_test.go index 07c6d92ee166..4963093c1aa0 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -27,15 +27,15 @@ import ( "sync" "testing" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" - "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/consensus/ethash" "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/core/vm" "github.com/XinFinOrg/XDPoSChain/crypto" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/p2p" @@ -68,7 +68,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func panic(err) } - pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db) + pm, err := NewProtocolManager(gspec.Config, mode, ethconfig.Defaults.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db) if err != nil { return nil, nil, err } @@ -183,7 +183,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis 
common.Hash) { msg := &statusData{ ProtocolVersion: uint32(p.version), - NetworkId: DefaultConfig.NetworkId, + NetworkId: ethconfig.Defaults.NetworkId, TD: td, CurrentBlock: head, GenesisBlock: genesis, diff --git a/eth/hooks/engine_v1_hooks.go b/eth/hooks/engine_v1_hooks.go index adb131b2ea2e..65c84d1c30b0 100644 --- a/eth/hooks/engine_v1_hooks.go +++ b/eth/hooks/engine_v1_hooks.go @@ -216,7 +216,7 @@ func AttachConsensusV1Hooks(adaptor *XDPoS.XDPoS, bc *core.BlockChain, chainConf if err != nil { return nil, err } - addr := common.HexToAddress(common.MasternodeVotingSMC) + addr := common.MasternodeVotingSMCBinary validator, err := contractValidator.NewXDCValidator(addr, client) if err != nil { return nil, err @@ -228,11 +228,14 @@ func AttachConsensusV1Hooks(adaptor *XDPoS.XDPoS, bc *core.BlockChain, chainConf ) stateDB, err := bc.StateAt(bc.GetBlockByHash(block).Root()) - candidateAddresses = state.GetCandidates(stateDB) - if err != nil { return nil, err } + if stateDB == nil { + return nil, errors.New("nil stateDB in HookGetSignersFromContract") + } + + candidateAddresses = state.GetCandidates(stateDB) for _, address := range candidateAddresses { v, err := validator.GetCandidateCap(opts, address) if err != nil { @@ -318,9 +321,6 @@ func getValidators(bc *core.BlockChain, masternodes []common.Address) ([]byte, e // Get secrets and opening at epoc block checkpoint. 
var candidates []int64 - if err != nil { - return nil, err - } lenSigners := int64(len(masternodes)) if lenSigners > 0 { for _, addr := range masternodes { diff --git a/eth/hooks/engine_v2_hooks.go b/eth/hooks/engine_v2_hooks.go index 4047014a4d41..f356af27a74c 100644 --- a/eth/hooks/engine_v2_hooks.go +++ b/eth/hooks/engine_v2_hooks.go @@ -2,7 +2,6 @@ package hooks import ( "errors" - "fmt" "math/big" "time" @@ -41,7 +40,7 @@ func AttachConsensusV2Hooks(adaptor *XDPoS.XDPoS, bc *core.BlockChain, chainConf if timeout > 30 { // wait over 30s log.Error("[V2 Hook Penalty] parentHeader is nil, wait too long not writen in to disk", "parentNumber", parentNumber) - return []common.Address{}, fmt.Errorf("parentHeader is nil") + return []common.Address{}, errors.New("parentHeader is nil") } } diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 91788594f210..8c5283cd8b18 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -26,6 +26,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/crypto" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/p2p" "github.com/XinFinOrg/XDPoSChain/rlp" ) @@ -59,7 +60,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) { wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), }, { - code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()}, + code: StatusMsg, data: statusData{10, ethconfig.Defaults.NetworkId, td, head.Hash(), genesis.Hash()}, wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol), }, { @@ -67,7 +68,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) { wantError: errResp(ErrNetworkIdMismatch, "999 (!= 88)"), }, { - code: StatusMsg, data: statusData{uint32(protocol), DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}}, + code: StatusMsg, data: statusData{uint32(protocol), ethconfig.Defaults.NetworkId, td, 
head.Hash(), common.Hash{3}}, wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]), }, } diff --git a/eth/tracers/internal/tracers/4byte_tracer.js b/eth/tracers/internal/tracers/4byte_tracer.js index 462b4ad4cb55..9ec3209f8b18 100644 --- a/eth/tracers/internal/tracers/4byte_tracer.js +++ b/eth/tracers/internal/tracers/4byte_tracer.js @@ -31,48 +31,27 @@ // ids aggregates the 4byte ids found. ids : {}, - // callType returns 'false' for non-calls, or the peek-index for the first param - // after 'value', i.e. meminstart. - callType: function(opstr){ - switch(opstr){ - case "CALL": case "CALLCODE": - // gas, addr, val, memin, meminsz, memout, memoutsz - return 3; // stack ptr to memin - - case "DELEGATECALL": case "STATICCALL": - // gas, addr, memin, meminsz, memout, memoutsz - return 2; // stack ptr to memin - } - return false; - }, - // store save the given indentifier and datasize. store: function(id, size){ var key = "" + toHex(id) + "-" + size; this.ids[key] = this.ids[key] + 1 || 1; }, - // step is invoked for every opcode that the VM executes. - step: function(log, db) { - // Skip any opcodes that are not internal calls - var ct = this.callType(log.op.toString()); - if (!ct) { - return; - } + enter: function(frame) { // Skip any pre-compile invocations, those are just fancy opcodes - if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) { + if (isPrecompiled(frame.getTo())) { return; } - // Gather internal call details - var inSz = log.stack.peek(ct + 1).valueOf(); - if (inSz >= 4) { - var inOff = log.stack.peek(ct).valueOf(); - this.store(log.memory.slice(inOff, inOff + 4), inSz-4); + var input = frame.getInput() + if (input.length >= 4) { + this.store(slice(input, 0, 4), input.length - 4); } }, + exit: function(frameResult) {}, + // fault is invoked when the actual execution of an opcode fails. 
- fault: function(log, db) { }, + fault: function(log, db) {}, // result is invoked when all the opcodes have been iterated over and returns // the final result of the tracing. diff --git a/eth/tracers/internal/tracers/4byte_tracer_legacy.js b/eth/tracers/internal/tracers/4byte_tracer_legacy.js new file mode 100644 index 000000000000..462b4ad4cb55 --- /dev/null +++ b/eth/tracers/internal/tracers/4byte_tracer_legacy.js @@ -0,0 +1,86 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// 4byteTracer searches for 4byte-identifiers, and collects them for post-processing. +// It collects the methods identifiers along with the size of the supplied data, so +// a reversed signature can be matched against the size of the data. +// +// Example: +// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"}) +// { +// 0x27dc297e-128: 1, +// 0x38cc4831-0: 2, +// 0x524f3889-96: 1, +// 0xadf59f99-288: 1, +// 0xc281d19e-0: 1 +// } +{ + // ids aggregates the 4byte ids found. + ids : {}, + + // callType returns 'false' for non-calls, or the peek-index for the first param + // after 'value', i.e. meminstart. 
+ callType: function(opstr){ + switch(opstr){ + case "CALL": case "CALLCODE": + // gas, addr, val, memin, meminsz, memout, memoutsz + return 3; // stack ptr to memin + + case "DELEGATECALL": case "STATICCALL": + // gas, addr, memin, meminsz, memout, memoutsz + return 2; // stack ptr to memin + } + return false; + }, + + // store save the given indentifier and datasize. + store: function(id, size){ + var key = "" + toHex(id) + "-" + size; + this.ids[key] = this.ids[key] + 1 || 1; + }, + + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + // Skip any opcodes that are not internal calls + var ct = this.callType(log.op.toString()); + if (!ct) { + return; + } + // Skip any pre-compile invocations, those are just fancy opcodes + if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) { + return; + } + // Gather internal call details + var inSz = log.stack.peek(ct + 1).valueOf(); + if (inSz >= 4) { + var inOff = log.stack.peek(ct).valueOf(); + this.store(log.memory.slice(inOff, inOff + 4), inSz-4); + } + }, + + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) { }, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + // Save the outer calldata also + if (ctx.input.length >= 4) { + this.store(slice(ctx.input, 0, 4), ctx.input.length-4) + } + return this.ids; + }, +} diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index d974de5c2e50..185967bdb634 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -1,14 +1,16 @@ // Code generated by go-bindata. DO NOT EDIT. 
// sources: -// 4byte_tracer.js (2.933kB) +// 4byte_tracer.js (2.224kB) +// 4byte_tracer_legacy.js (2.933kB) // bigram_tracer.js (1.712kB) -// call_tracer.js (8.643kB) +// call_tracer.js (3.497kB) +// call_tracer_legacy.js (8.956kB) // evmdis_tracer.js (4.195kB) // noop_tracer.js (1.271kB) // opcount_tracer.js (1.372kB) -// prestate_tracer.js (4.234kB) +// prestate_tracer.js (4.287kB) // trigram_tracer.js (1.788kB) -// unigram_tracer.js (1.51kB) +// unigram_tracer.js (1.469kB) package tracers @@ -18,6 +20,7 @@ import ( "crypto/sha256" "fmt" "io" + "io/ioutil" "os" "path/filepath" "strings" @@ -76,7 +79,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var __4byte_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x
2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\x
dd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x35\x1b\x0d\x76\xb9\x16\x2c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x5e\xc8\xf5\xcf\x3a\x9d\xce\xcf\x48\x7d\x16\x61\xf7\xbf\x0a\xbc\x5e\x62\x55\xfc\xda\x2c\xbe\xc3\x07\x78\xe3\x41\x12\x57\xad\x13\x87\x5e\xbd\x4d\xda\xcf\x19\x08\xd7\x3f\x7e\x80\x51\xe5\xb2\x84\xb8\x4d\x92\x1f\x61\xbc\xb1\x2f\x65\x12\x14\x17\x22\x62\xd1\xbb\x43\xec\x79\x6d\xb5\x03\x48\x54\x61\xbd\x87\x51\x27\x0a\xd4\xba\xa3\x4e\x15\x4f\x2d\x9d\x44\x14\x19\x1d\x6b\x67\x97\x56\xdf\x07\x42\x52\x21\xb2\x4a\x2e\xfc\xad\x63\x13\x10\xa6\x56\x54\x52\x6e\xee\x46\xb0\xff\xa1\x
86\xa0\x76\xe1\xd0\xff\xc8\x07\x27\x8f\xfd\xd4\xe2\x0a\x3b\x7f\x85\xdc\x60\x84\x4e\xf0\x47\x8f\xdd\x56\x2d\x56\x0d\xcd\x00\x57\xce\x42\xce\x7f\x05\x5c\x2d\x2e\xde\x1e\x61\xa9\x36\xca\xf3\x23\x52\x92\xf6\x2f\xa2\xae\x9b\xd9\x16\x3c\x3f\xb9\x86\xdc\xc0\x20\x32\x6f\xab\xaa\x48\xda\xc7\xda\xe4\x05\xc5\x19\x9a\x35\xa5\xc7\x15\x3a\x4a\x7a\x99\xe9\xe7\xcb\x11\x9c\x44\x21\xd1\x6f\xcd\xbb\xa3\xce\xeb\x29\x53\xf7\x73\xd9\xc1\x4f\xcd\xff\x06\x00\x00\xff\xff\x8e\xc8\x27\x72\x75\x0b\x00\x00") +var __4byte_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x55\x5b\x6f\x22\x39\x13\x7d\x86\x5f\x71\xc4\x13\x68\x9a\x4b\x73\x09\x97\xf9\x32\x12\xdf\x28\x99\x41\xca\x66\x22\x42\x34\x8a\x56\xfb\x60\xda\xd5\xdd\xde\x18\xbb\x65\xbb\xb9\x6c\x26\xff\x7d\x65\x37\xe4\x36\xbb\xda\x79\x02\xec\xaa\x73\xaa\x4e\x1d\x17\xdd\x2e\x3e\xeb\xe2\x60\x44\x96\x3b\xf4\x7b\xf1\x18\xab\x9c\x90\xe9\x36\xb9\x9c\x0c\x95\x1b\xcc\x4b\x97\x6b\x63\xeb\xdd\x2e\x56\xb9\xb0\x48\x85\x24\x08\x8b\x82\x19\x07\x9d\xc2\xbd\x8b\x97\x62\x6d\x98\x39\x74\xea\xdd\x6e\x95\xf3\x8f\xd7\x1e\x21\x35\x44\xb0\x3a\x75\x3b\x66\x68\x86\x83\x2e\x91\x30\x05\x43\x5c\x58\x67\xc4\xba\x74\x04\xe1\xc0\x14\xef\x6a\x83\x8d\xe6\x22\x3d\x78\x48\xe1\x50\x2a\x4e\x26\x50\x3b\x32\x1b\x7b\xaa\xe3\xcb\xf5\x1d\xae\xc8\x5a\x32\xf8\x42\x8a\x0c\x93\xb8\x29\xd7\x52\x24\xb8\x12\x09\x29\x4b\x60\x16\x85\x3f\xb1\x39\x71\xac\x03\x9c\x4f\xbc\xf4\xa5\xdc\x1e\x4b\xc1\xa5\x2e\x15\x67\x4e\x68\x15\x81\x84\xaf\x1c\x5b\x32\x56\x68\x85\xc1\x89\xea\x08\x18\x41\x1b\x0f\xd2\x64\xce\x37\x60\xa0\x0b\x9f\xd7\x02\x53\x07\x48\xe6\x5e\x52\x7f\x41\x90\x97\xbe\x39\x84\x0a\x34\xb9\x2e\x08\x2e\x67\xce\x77\xbd\x13\x52\x62\x4d\x28\x2d\xa5\xa5\x8c\x3c\xda\xba\x74\xf8\xbe\x58\x7d\xfd\x76\xb7\xc2\xfc\xfa\x1e\xdf\xe7\xcb\xe5\xfc\x7a\x75\xff\x11\x3b\xe1\x72\x5d\x3a\xd0\x96\x2a\x28\xb1\x29\xa4\x20\x8e\x1d\x33\x86\x29\x77\x80\x4e\x3d\xc2\x6f\x17\xcb\xcf\x5f\xe7\xd7\xab\xf9\xff\x17\x57\x8b\xd5\x3d\xb4\xc1\xe5\x62\x75\x7d\x71\x7b\x8b\xcb\x6f\x4b\xcc\x71\x33\x5f\xae\x16\x9f\xef\xae\xe6\x4
b\xdc\xdc\x2d\x6f\xbe\xdd\x5e\x74\x70\x4b\xbe\x2a\xf2\xf9\xff\xad\x79\x1a\xa6\x67\x08\x9c\x1c\x13\xd2\x9e\x94\xb8\xd7\x25\x6c\xae\x4b\xc9\x91\xb3\x2d\xc1\x50\x42\x62\x4b\x1c\x0c\x89\x2e\x0e\xbf\x3c\x54\x8f\xc5\xa4\x56\x59\xe8\xf9\x5f\x0d\x89\x45\x0a\xa5\x5d\x04\x4b\x84\xff\xe5\xce\x15\xb3\x6e\x77\xb7\xdb\x75\x32\x55\x76\xb4\xc9\xba\xb2\x82\xb3\xdd\x4f\x9d\xba\xc7\x1c\xae\x0f\x8e\x56\x86\x25\x64\x60\x89\x99\x24\x27\x1b\x9a\x09\x17\x6d\xc1\x49\x39\x91\x0a\x32\x36\xf2\x26\x45\xa2\xa5\xa4\xc4\x59\x5f\xc1\x26\x04\x16\xda\xba\x76\x61\x74\x42\xd6\x0a\x95\xf9\xc6\xb1\x70\x6f\x02\xb1\x21\x97\x6b\x6e\xf1\x0a\xee\x7d\x37\x56\xfc\x45\x27\x35\x6c\x59\x54\x63\xe4\xcc\xb1\x08\x56\x87\xee\x61\xc8\xdb\x8c\x38\xac\xc8\x14\x73\xa5\xa1\xf0\x96\xd6\x84\x0d\x73\x89\x37\x3b\xcb\x98\x50\xd6\xfd\x04\xe8\x71\x4e\x13\xb9\xd8\xb3\x4d\x21\x69\xe6\xbf\x03\x9f\xc0\x69\x5d\x66\x1d\xe7\x25\x58\x19\xa6\x2c\x4b\xbc\xb9\x9b\x68\xf4\xf6\xfd\x78\x48\xa3\xe9\x98\x06\x23\xce\x7a\x93\xc1\xd9\xb4\x9f\x8e\x06\x93\xb3\x78\x18\xd3\xd9\x34\x1d\x8e\x69\x3a\x1e\xac\xfb\xc9\xe8\x8c\xc6\x6c\xd2\x1b\x0f\xd6\x31\xb1\xde\x24\xe5\xe3\xd1\x38\xa6\x29\xa7\x46\x84\xc7\x00\x6c\x66\x68\xbc\x52\xba\xf1\xd4\xaa\xd8\x1f\xab\x0f\xa0\xb7\xef\x8f\x79\xd2\x9f\x8e\xa9\x1d\xf7\x27\x33\xc4\xd1\xcb\xcd\x60\x92\x24\xc3\xc9\x20\x6e\xf7\x66\xe8\xbf\x3a\x1f\xf5\x87\xe9\x60\x32\x99\xb6\xa7\x67\x6f\x13\x18\x4f\x47\xd3\x74\x3a\x6d\xf7\x27\xef\xa0\x92\xfe\x24\xe6\xf1\x94\x3c\x54\x5c\x1d\x3f\xd5\x1f\xeb\x35\xbf\x70\xb8\x05\xcb\x32\x43\x19\x73\x54\x4d\x2d\x54\x1c\x2e\x52\xbf\x2c\x3a\xf5\x9a\xff\x3e\xc3\xe3\x53\x54\x0f\x39\xd6\x79\xc7\x5b\xef\xeb\x60\x48\xe1\x9f\xa1\x50\xcf\x43\x0e\x8e\xf1\xda\xfb\x59\x74\xea\xb5\x10\x3f\x43\x5a\xaa\x4a\x63\xc1\xa3\x30\xa6\xd6\x63\xbd\x56\xdb\x32\x83\x07\x3a\xe0\x1c\x8d\x06\x3e\xc0\xe9\xaf\xb4\x6f\x0a\xde\xc2\x07\x34\xda\xfe\xc4\x47\x7e\xac\xd7\x6a\x2e\x17\xb6\x23\xb8\xfd\xfd\x81\x0e\x7f\xe0\x1c\x6f\x7f\x7f\x40\x8c\x1f\x3f\x10\x7f\xac\xd7\x42\x99\xa4\x9c\x97\xff\x99\x33\x35\x6c\x43\x2d\x78\xc6\x6e\x17\xb7\x0f\xa
2\x08\x6b\xac\x30\xd4\x4e\xf4\xa6\x08\x8b\x5f\x6d\x75\x12\x56\xa3\x8d\xe0\x72\xed\x57\xaa\x21\xfc\x59\x5a\x87\x94\xa9\xe4\x00\x5d\x24\x9a\x93\xad\xd7\x6a\x22\x45\x53\xd8\x1b\x43\xc7\x64\x5e\x11\x74\x32\x72\x2b\xdd\x6c\xb5\x2a\xa6\x9a\x21\x57\x1a\xe5\xab\x7f\x3a\xb6\x2a\x54\x51\x3a\x9c\xe3\x39\x7c\xe1\x0f\x9a\xad\x13\xa6\xff\xd5\x91\xa4\x32\x97\xe3\xd3\x39\x86\x47\xa0\xd0\x6c\xd0\xb1\x69\xfd\x5b\xae\x02\x23\xf4\x22\x0c\x5b\x11\xde\xa4\xb5\x31\x6c\x1d\x29\x2b\x29\xf6\xc2\xbd\x57\x62\x49\xb6\x94\xae\xf5\x32\xd3\x94\x95\xd2\xf9\x45\xed\x55\x78\xf0\xab\x34\x3f\xee\x56\x96\xb8\x92\x49\xd0\x9e\x92\xd2\x03\xf8\xc7\xc5\xd4\x51\x0b\xa4\xd5\xd6\xab\x85\xfc\x57\x2c\x52\x67\x11\xf8\xfa\x15\x83\x09\x94\x3f\x51\x30\x29\x03\xcd\x51\xdb\x6a\x5d\xae\xc9\x3b\xca\x91\x61\xfe\xff\x42\x6f\x8f\x9e\xaa\xe4\xb4\x01\xce\xe7\xa4\x42\x31\x79\x02\x3e\xbe\x79\xff\xf0\xc2\x3e\xaa\x55\xe7\xaf\x6a\x4a\xdc\xfe\xc5\x01\x27\xf7\xea\xd2\xff\x91\x25\x4c\x4a\xef\x58\x30\x69\xf5\x71\x16\x89\xdb\x77\x7e\x79\x1e\xcf\xc1\xcf\x33\x79\x9f\xde\x1e\xb6\x8e\x3e\xa8\xda\x78\x36\x70\x65\xd9\xa7\xfa\xdf\x01\x00\x00\xff\xff\xf6\xa8\xa1\xb9\xb0\x08\x00\x00") func _4byte_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -92,6 +95,26 @@ func _4byte_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "4byte_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0xa8, 0x46, 0xa2, 0x3a, 0x2b, 0xaa, 0xb9, 0xb9, 0xba, 0xe2, 0x22, 0x10, 0xe, 0xe7, 0x4c, 0x24, 0xfc, 0x4c, 0x85, 0xeb, 0x96, 0x48, 0xe8, 0x7f, 0xc8, 0xe0, 0xd0, 0xd, 0x26, 0xa1, 0xb2}} + return a, nil +} + +var __4byte_tracer_legacyJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14
\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x35\x1b\x0d\x76\xb9\x16\x2c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9
\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x5e\xc8\xf5\xcf\x3a\x9d\xce\xcf\x48\x7d\x16\x61\xf7\xbf\x0a\xbc\x5e\x62\x55\xfc\xda\x2c\xbe\xc3\x07\x78\xe3\x41\x12\x57\xad\x13\x87\x5e\xbd\x4d\xda\xcf\x19\x08\xd7\x3f\x7e\x80\x51\xe5\xb2\x84\xb8\x4d\x92\x1f\x61\xbc\xb1\x2f\x65\x12\x14\x17\x22\x62\xd1\xbb\x43\xec\x79\x6d\xb5\x03\x48\x54\x61\xbd\x87\x51\x27\x0a\xd4\xba\xa3\x4e\x15\x4f\x2d\x9d\x44\x14\x19\x1d\x6b\x67\x97\x56\xdf\x07\x42\x52\x21\xb2\x4a\x2e\xfc\xad\x63\x13\x10\xa6\x56\x54\x52\x6e\xee\x46\xb0\xff\xa1\x86\xa0\x76\xe1\xd0\xff\xc8\x07\x27\x8f\xfd\xd4\xe2\x0a\x3b\x7f\x85\xdc\x60\x84\x4e\xf0\x47\x8f\xdd\x56\x2d\x56\x0d\xcd\x00\x57\xce\x42\xce\x7f\x05\x5c\x2d\x2e\xde\x1e\x61\xa9\x36\xca\xf3\x23\x52\x92\xf6\x2f\xa2\xae\x9b\xd9\x16\x3c\x3f\xb9\x86\xdc\xc0\x20\x32\x6f\xab\xaa\x48\xda\xc7\xda\xe4\x05\xc5\x19\x9a\x35\xa5\xc7\x15\x3a\x4a\x7a\x99\xe9\xe7\xcb\x11\x9c\x44\x21\xd1\x6f\xcd\xbb\xa3\xce\xeb\x29\x53\xf7\x73\xd9\xc1\x4f\xcd\xff\x06\x00\x00\xff\xff\x8e\xc8\x27\x72\x75\x0b\x00\x00") + +func _4byte_tracer_legacyJsBytes() ([]byte, error) { + return bindataRead( + __4byte_tracer_legacyJs, + "4byte_tracer_legacy.js", + ) +} + +func _4byte_tracer_legacyJs() (*asset, error) { + bytes, err := 
_4byte_tracer_legacyJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4byte_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0xc5, 0x48, 0x2d, 0xd9, 0x43, 0x95, 0x93, 0x3b, 0x93, 0x2c, 0x47, 0x8c, 0x84, 0x32, 0x3c, 0x8b, 0x2e, 0xf3, 0x72, 0xc4, 0x57, 0xe6, 0x3a, 0xb3, 0xdf, 0x1d, 0xbf, 0x45, 0x3, 0xfc, 0xa}} return a, nil } @@ -116,7 +139,7 @@ func bigram_tracerJs() (*asset, error) { return a, nil } -var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\x5f\x6f\x1b\xb7\xb2\x7f\x96\x3e\xc5\x24\x0f\xb5\x84\x28\x92\x93\xf4\xf6\x02\x76\xd5\x0b\x5d\x47\x49\x0d\xb8\x71\x60\x2b\x0d\x82\x20\x0f\xd4\xee\xac\xc4\x9a\x4b\x6e\x49\xae\xe4\x3d\xa9\xbf\xfb\xc1\x0c\xb9\xab\xd5\x1f\x3b\x6e\x0f\xce\x41\xcf\x8b\xa0\x5d\xce\x0c\x87\x33\xbf\xf9\xc7\x1d\x8d\xe0\xcc\x14\x95\x95\x8b\xa5\x87\x97\xc7\x2f\xfe\x17\x66\x4b\x84\x85\x79\x8e\x7e\x89\x16\xcb\x1c\x26\xa5\x5f\x1a\xeb\xba\xa3\x11\xcc\x96\xd2\x41\x26\x15\x82\x74\x50\x08\xeb\xc1\x64\xe0\x77\xe8\x95\x9c\x5b\x61\xab\x61\x77\x34\x0a\x3c\x07\x97\x49\x42\x66\x11\xc1\x99\xcc\xaf\x85\xc5\x13\xa8\x4c\x09\x89\xd0\x60\x31\x95\xce\x5b\x39\x2f\x3d\x82\xf4\x20\x74\x3a\x32\x16\x72\x93\xca\xac\x22\x91\xd2\x43\xa9\x53\xb4\xbc\xb5\x47\x9b\xbb\x5a\x8f\xb7\xef\x3e\xc0\x05\x3a\x87\x16\xde\xa2\x46\x2b\x14\xbc\x2f\xe7\x4a\x26\x70\x21\x13\xd4\x0e\x41\x38\x28\xe8\x8d\x5b\x62\x0a\x73\x16\x47\x8c\x6f\x48\x95\xeb\xa8\x0a\xbc\x31\xa5\x4e\x85\x97\x46\x0f\x00\x25\x69\x0e\x2b\xb4\x4e\x1a\x0d\xaf\xea\xad\xa2\xc0\x01\x18\x4b\x42\x7a\xc2\xd3\x01\x2c\x98\x82\xf8\xfa\x20\x74\x05\x4a\xf8\x0d\xeb\x23\x0c\xb2\x39\x77\x0a\x52\xf3\x36\x4b\x53\x20\xf8\xa5\xf0\x74\xea\xb5\x54\x0a\xe6\x08\xa5\xc3\xac\x54\x03\x92\x36\x2f\x3d\x7c\x3c\x9f\xfd\x7c\xf9\x61\x06\x93\x77\x9f\xe0\xe3\xe4\xea\x6a\xf2\x6e\xf6\xe9\x14\xd6\xd2\x2f\x4d\xe9\x01\x57\x18\x44\xc9\xbc\x50\x12\x53\x58\x0b\x6b\x85\xf6\x15\x98\x8c\x24\xfc\x32\xbd\x3a\xfb\x79\xf2\x6e\x36\xf
9\xff\xf3\x8b\xf3\xd9\x27\x30\x16\xde\x9c\xcf\xde\x4d\xaf\xaf\xe1\xcd\xe5\x15\x4c\xe0\xfd\xe4\x6a\x76\x7e\xf6\xe1\x62\x72\x05\xef\x3f\x5c\xbd\xbf\xbc\x9e\x0e\xe1\x1a\x49\x2b\x24\xfe\x6f\xdb\x3c\x63\xef\x59\x84\x14\xbd\x90\xca\xd5\x96\xf8\x64\x4a\x70\x4b\x53\xaa\x14\x96\x62\x85\x60\x31\x41\xb9\xc2\x14\x04\x24\xa6\xa8\x1e\xed\x54\x92\x25\x94\xd1\x0b\x3e\xf3\xbd\x80\x84\xf3\x0c\xb4\xf1\x03\x70\x88\xf0\xe3\xd2\xfb\xe2\x64\x34\x5a\xaf\xd7\xc3\x85\x2e\x87\xc6\x2e\x46\x2a\x88\x73\xa3\x9f\x86\x5d\x92\x99\x08\xa5\x66\x56\x24\x68\xc9\x39\x02\xb2\x92\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x7f\xc2\x60\x14\x1e\xf0\x96\x9e\xbc\x23\xd0\x82\xc5\xc2\x58\xfa\xaf\x54\x8d\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\x90\x8b\x14\x61\x5e\x81\x68\x0b\x1c\xb4\x0f\x43\x30\x0a\xee\x06\xa9\x33\x63\x73\x86\xe5\xb0\xfb\xb5\xdb\x89\x1a\x3a\x2f\x92\x1b\x52\x90\xe4\x27\xa5\xb5\xa8\x3d\x99\xb2\xb4\x4e\xae\x90\x49\x20\xd0\x44\x7b\x4e\x7f\xfd\x05\xf0\x16\x93\x32\x48\xea\x34\x42\x4e\xe0\xf3\xd7\xbb\x2f\x83\x2e\x8b\x4e\xd1\x25\xa8\x53\x4c\xf9\x7c\x37\x0e\xd6\x4b\xb6\x28\xac\xf1\x68\x85\xf0\x5b\xe9\x7c\x8b\x26\xb3\x26\x07\xa1\xc1\x94\x84\xf8\xb6\x75\xa4\xf6\x86\x05\x0a\xfa\xaf\xd1\xb2\x46\xc3\x6e\xa7\x61\x3e\x81\x4c\x28\x87\x71\x5f\xe7\xb1\xa0\xd3\x48\xbd\x32\x37\x24\xd9\x58\x82\xb0\xad\xc0\x14\x89\x49\x63\x30\xd0\x39\x9a\x63\xa0\x1b\x76\x3b\xc4\x77\x02\x59\xa9\x79\xdb\x9e\x32\x8b\x01\xa4\xf3\x3e\x7c\xed\x76\x48\xec\x99\x28\x7c\x69\x91\xed\x89\xd6\x1a\xeb\x40\xe6\x39\xa6\x52\x78\x54\x55\xb7\xd3\x59\x09\x1b\x16\x60\x0c\xca\x2c\x86\x0b\xf4\x53\x7a\xec\xf5\x4f\xbb\x9d\x8e\xcc\xa0\x17\x56\x9f\x8c\xc7\x9c\x7d\x32\xa9\x31\x0d\xe2\x3b\x7e\x29\xdd\x30\x13\xa5\xf2\xcd\xbe\xc4\xd4\xb1\xe8\x4b\xab\xe9\xef\x5d\xd0\xe2\x23\x82\xd1\xaa\x82\x84\xb2\x8c\x98\x53\x78\xba\xca\x79\xcc\xe3\xe1\xdc\x00\x32\xe1\xc8\x84\x32\x83\x35\x42\x61\xf1\x79\xb2\x44\xf2\x9d\x4e\x30\x6a\xe9\x2a\xc7\x4e\x1d\x03\xed\x36\x34\xc5\xd0\x9b\x77\x65\x3e\x47\xdb\xeb\xc3\x77\x70\x7c\x9b\x1d\xf7\x61\x3c\xe6\x3f\xb5\xee\x91\x27\xea\x4b\x52\x4c\x11\x0f\xc
a\xfc\xd7\xde\x4a\xbd\x08\x67\x8d\xba\x9e\x67\x20\x40\xe3\x1a\x12\xa3\x19\xd4\xe4\x95\x39\x4a\xbd\x80\xc4\xa2\xf0\x98\x0e\x40\xa4\x29\x78\x13\x90\xd7\xe0\x6c\x7b\x4b\xf8\xee\x3b\xe8\xd1\x66\x63\x38\x3a\xbb\x9a\x4e\x66\xd3\x23\xf8\xe3\x0f\x08\x6f\x9e\x86\x37\x2f\x9f\xf6\x5b\x9a\x49\x7d\x99\x65\x51\x39\x16\x38\x2c\x10\x6f\x7a\x2f\xfa\xc3\x95\x50\x25\x5e\x66\x41\xcd\x48\x3b\xd5\x29\x8c\x23\xcf\xb3\x5d\x9e\x97\x5b\x3c\xc4\x34\x1a\xc1\xc4\x39\xcc\xe7\x0a\xf7\x03\x32\x46\x2c\x07\xaf\xf3\x94\xb1\x08\x7d\x89\xc9\x0b\x85\x84\xaa\x7a\xd7\x68\x7e\xd6\xb8\xe3\xab\x02\x4f\x00\x00\x4c\x31\xe0\x17\x14\x0b\xfc\xc2\x9b\x9f\xf1\x96\x7d\x54\x9b\x90\x50\x35\x49\x53\x8b\xce\xf5\xfa\xfd\x40\x2e\x75\x51\xfa\x93\x2d\xf2\x1c\x73\x63\xab\xa1\xa3\x84\xd4\xe3\xa3\x0d\xc2\x49\x6b\x9e\x85\x70\xe7\x9a\x78\x22\x52\xdf\x0a\xd7\xdb\x2c\x9d\x19\xe7\x4f\xea\x25\x7a\xa8\xd7\xd8\x16\xc4\x76\x74\x7c\x7b\xb4\x6f\xad\xe3\xfe\x06\x09\x2f\x7e\xe8\x13\xcb\xdd\x69\x83\xef\x26\x4d\x0c\x8b\xd2\x2d\x7b\x0c\xa7\xcd\xea\x26\x15\x8c\xc1\xdb\x12\x0f\xc2\x9f\x21\xb5\x0f\x27\x87\x2a\xa3\x5c\xe2\x6d\x99\x30\xac\x16\x82\x33\x0d\x47\xba\xa0\xcc\xeb\xca\x39\xdb\xdc\x1b\xb3\x8f\xae\x08\xae\xeb\xe9\xc5\x9b\xd7\xd3\xeb\xd9\xd5\x87\xb3\xd9\x51\x0b\x4e\x0a\x33\x4f\x4a\x6d\x9f\x41\xa1\x5e\xf8\x25\xeb\x4f\xe2\xb6\x57\x3f\x13\xcf\xf3\x17\x5f\xc2\x1b\x18\x1f\x08\xf9\xce\xc3\x1c\xf0\xf9\x0b\xcb\xbe\xdb\x37\xdf\x36\x69\x30\xe6\xd7\x00\x22\x53\xdc\xb5\x13\xc7\x81\x58\xcc\xd1\x2f\x4d\xca\xc9\x31\x11\x21\xbf\xd6\x56\x4c\x8d\xc6\x3f\x1f\x91\x93\x8b\x8b\x56\x3c\xf2\xf3\xd9\xe5\xeb\x76\x8c\x1e\xbd\x9e\x5e\x4c\xdf\x4e\x66\xd3\x5d\xda\xeb\xd9\x64\x76\x7e\xc6\x6f\xeb\xf0\x1d\x8d\xe0\xfa\x46\x16\x9c\x65\x39\x77\x99\xbc\xe0\x76\xb1\xd1\xd7\x0d\xc0\x2f\x0d\x35\x62\x36\x16\x91\x4c\xe8\xa4\x4e\xee\xae\x76\x9a\x37\xe4\x32\x53\xc7\xca\x7e\x2a\x68\x03\xb5\xdf\xb8\x51\xba\xf7\x16\xe3\xa6\x69\xcf\x9b\x5a\xaf\x8d\x41\x83\x47\x38\x01\x72\x92\xe9\x3d\xfe\x90\xf0\x7f\x70\x0c\x27\xf0\x22\x66\x92\x07\x52\xd5\x4b\x78\x46\xe2\xff\x42\xc2\x7a\x75\x80\xf3\xef\x9
9\xb6\xbc\x61\xe2\x9a\xdc\x9b\xff\x7c\x3a\x33\xa5\xbf\xcc\xb2\x13\xd8\x35\xe2\xf7\x7b\x46\x6c\xe8\x2f\x50\xef\xd3\xff\xcf\x1e\xfd\x26\xf5\x11\xaa\x4c\x01\x4f\xf6\x20\x12\x12\xcf\x93\x9d\x38\x88\xc6\xe5\x16\x87\xa5\xc1\xf8\x9e\x64\xfb\x72\x1b\xc3\xf7\x65\x8b\x7f\x29\xd9\x1e\x6c\xd5\xa8\x21\xdb\x6e\xc6\x06\x60\xd1\x5b\x89\x2b\x1a\xb7\x8e\x1c\x8b\xa4\xa6\xd5\xac\x85\x4e\x70\x08\x1f\x31\x48\xd4\x88\x9c\x5c\x62\x93\x4b\x3d\x0a\xf7\x7d\xd4\xa8\xc6\x71\x85\x21\x26\xb8\x17\xb5\x08\xb9\xa8\x68\x5c\xc9\x4a\x7d\x53\xc1\x42\x38\x48\x2b\x2d\x72\x99\xb8\x20\x8f\x1b\x5c\x8b\x0b\x61\x59\xac\xc5\xdf\x4b\x74\x34\xfb\x10\x90\x45\xe2\x4b\xa1\x54\x05\x0b\x49\x03\x0c\x71\xf7\x5e\xbe\x3a\x3e\x06\xe7\x65\x81\x3a\x1d\xc0\x0f\xaf\x46\x3f\x7c\x0f\xb6\x54\xd8\x1f\x76\x5b\x69\xbc\x39\x6a\xf4\x06\x2d\x44\xf4\xbc\xc6\xc2\x2f\x7b\x7d\xf8\xe9\x9e\x7a\x70\x4f\x72\x3f\x48\x0b\xcf\xe1\xc5\x97\x21\xe9\x35\xde\xc2\x6d\xf0\x24\xa0\x72\x18\xa5\xd1\xd0\x77\xf9\xfa\xb2\x77\x23\xac\x50\x62\x8e\xfd\x13\x1e\x02\xd9\x56\x6b\x11\xa7\x00\x72\x0a\x14\x4a\x48\x0d\x22\x49\x4c\xa9\x3d\x19\xbe\x6e\xe8\x55\x45\xf9\xfd\xc8\xd7\xf2\x78\x5e\x12\x49\x82\xce\xd5\xe9\x9e\xbd\x46\xea\x88\x9c\xb8\x41\x6a\x27\x53\x6c\x79\x85\xb2\x83\xe1\xd4\x1c\x29\x68\x9c\xac\x05\xe6\xc6\xd1\x26\x73\x84\xb5\xa5\xe1\xc3\x49\x9d\xf0\xf4\x9d\x22\x59\xdb\x81\xd1\x20\x40\x19\x1e\xf9\x39\xc6\x41\xd8\x85\x1b\x86\x7c\x4f\xdb\x52\xce\xd1\x66\x3d\xdc\x06\x72\x1b\xaa\xdc\xe6\xef\xb4\x03\x1a\xf0\x56\x3a\xcf\x5d\x25\x69\x29\x1d\x04\x24\x4b\xbd\x18\x40\x61\x0a\xce\xd3\xdf\x2a\x67\x31\x59\x5f\x4d\x7f\x9d\x5e\x35\xc5\xff\xf1\x4e\xac\xfb\xfe\xa7\xcd\x58\x04\x96\x66\x0e\x8f\xe9\xd3\x03\x8d\xfc\x01\x40\x8d\xef\x01\x14\xc9\xdf\xd4\xc6\xf7\xad\xe3\x28\xe1\xfc\xc6\x31\x0b\x0c\x33\x4d\x5b\x01\x57\x2a\xef\x76\x72\xf7\x6e\x72\x30\x45\x5d\x21\x48\x29\x4e\x3b\x94\xd8\x77\xbb\xed\xad\x85\x4d\xd3\xbd\xc1\xe7\x79\xcb\xc6\x6b\x6e\xb9\x02\x51\x2b\x35\xf0\x7a\xdd\xbb\x89\x50\x0d\x58\x77\x53\x7a\x82\x03\xd5\xef\x4d\xf2\x5b\x08\xf7\xc1\xb1\xd7\x63\xfa\x9b\xcb\xc5\xb9\xf6\xbd\x7
a\xf1\x5c\xc3\x73\xa8\x1f\x28\xa9\xc3\xf3\xad\x28\x3a\x90\x1d\x3b\x29\x2a\xf4\x08\x1b\x11\xa7\xb0\xf3\x8a\x04\x05\x73\xb0\xd1\x2c\xfa\xfd\xe2\x7c\x1c\xa5\x91\xc1\x9e\x58\xf4\x43\xfc\xbd\x14\xca\xf5\x8e\x9b\x66\x21\x9c\xc0\x1b\x2e\x6f\xe3\xa6\xc0\xd5\x15\x90\x78\xb6\xda\x8f\x28\x30\xb0\x45\x6b\xd4\x6c\xe9\x3c\x54\xad\x14\x1f\x94\x10\x45\xc4\xb4\xd1\xf8\x32\x02\xf3\x50\xff\xd9\x69\x13\xc0\xd3\xa6\x21\xc8\x84\x54\xa5\xc5\xa7\xa7\x70\x20\xed\xb8\xd2\x66\x22\x61\x5f\x3a\x04\x9e\x58\x1d\x38\x93\xe3\xd2\xac\x83\x02\x87\x92\xd7\x3e\x38\x1a\x1c\xec\x94\x0f\xbe\x7a\x11\x0e\x4a\x27\x16\xd8\x02\x47\x63\xf0\xda\x51\x07\xc7\xe8\xbf\x0c\x9d\x67\xcd\xe3\x37\x50\x14\x76\xf9\x26\x34\x1e\xc2\xc6\x41\x2f\xef\x75\x39\x35\x11\xf7\x3a\xad\x87\x5a\xd5\xd0\x8a\x34\xc8\xf9\x33\x7e\xff\xf7\x38\x3e\x78\x3e\xfe\x3e\x36\xd0\x76\x69\xc3\x19\xb7\x89\xc3\x49\x37\xed\xcd\xb7\x51\xd0\xac\xde\x07\x80\xfb\x3a\x27\x82\xaa\xfe\x0d\x13\xbf\x81\x2b\x37\x3b\xf4\x54\x58\x5c\x49\x53\x52\x1d\xc3\xff\xa6\xc9\xb0\xe9\xfc\xee\xba\x9d\xbb\x78\x45\xc6\xee\x6b\xdf\x91\xad\x97\xf1\x8a\x37\x34\x4d\xad\x2a\x62\xb8\xc4\xc6\x9b\xb3\x2c\x5c\xbe\x76\x98\xff\x81\xbb\xb2\x18\xef\xde\x14\xd4\x15\xc4\x22\xa5\x2c\x8a\xb4\x6a\xea\xe2\x20\xf4\x23\xb0\x14\x3a\x8d\x33\x89\x48\x53\x49\xf2\x18\x8b\xa4\xa1\x58\x08\xa9\xbb\x07\xcd\xf8\xcd\x62\x7c\x08\x19\x7b\x2d\x6e\xbb\x9e\xc6\x59\x92\x06\x3f\xd6\xb8\xfb\x88\xba\xb9\x13\x4b\xbb\xd7\x7e\xf1\xe6\xd0\x68\x57\xe6\xdc\x10\x83\x58\x09\xa9\x04\x0d\x61\xdc\x68\xe9\x14\x12\x85\x42\x87\xcb\x7e\xcc\xbc\x59\xa1\x75\xdd\x47\x80\xfc\xaf\x60\x7c\x27\x39\xd6\x8f\xd1\x1c\x8f\x8f\xd9\xc7\x46\x6c\x38\xfe\x1b\x25\xbc\x8f\xf0\x6a\x99\x37\x44\x96\xf4\xfc\x1d\x08\xb5\xef\x3e\x2e\xa4\xb8\x75\x22\x9a\x9f\xe0\xb8\xd5\x9e\xff\x5d\x82\x6c\x1f\x62\x17\x4d\x9b\x16\x0f\xef\x8d\x19\x80\x42\xc1\xc3\x52\xfd\x95\xa6\x6e\x4b\x1f\x9a\xdd\xea\xe8\x0d\x8d\xdd\x5e\xf8\xf2\xf5\xd6\x12\xeb\x8b\x90\xd0\xe1\xcf\x11\x35\x48\x8f\x56\xd0\x58\x44\xe8\x8a\x1f\x16\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\xb7\xfc\x54\x9f\xa5\x5e\x0
c\xbb\x9d\xf0\xbe\x15\xef\x89\xbf\xdd\xc4\x7b\x28\x86\xcc\x19\xaf\x06\x9a\x9b\x81\xc4\xdf\x72\xd3\xc8\xd3\xf3\xce\xf5\x00\xad\xd1\xab\x30\x5a\xef\x5c\x06\x30\x63\xbc\x10\xd8\xbd\x73\xa4\x35\x7e\xb7\x05\x70\x26\x5d\x08\x17\xc4\xec\x84\x84\xbf\xdd\x8f\x88\x9a\x81\x82\xe1\xe4\x30\x03\x2d\x1d\x60\xda\xb9\xa0\x20\x62\x7e\x15\x56\x43\x61\x3f\x69\xaf\x86\x57\xf1\xa0\x32\x6f\xd9\x46\xe6\x6c\x9b\xbb\xd3\xc3\x49\xee\xb8\xc6\xe3\xe1\x64\x46\x36\x6f\x00\x7b\x0f\x6b\x7b\xe4\xd8\x27\x79\x28\x55\xb2\xf4\x3a\xb3\xdd\xc3\xca\xd2\x5b\xad\x87\xbf\x7d\xbc\xc8\x86\xb8\xad\xe2\x16\xcd\x21\x21\x31\xcf\x44\xba\x60\xd9\x5a\x40\x40\x75\xd0\x95\x11\x2d\xff\x81\x51\x62\x3b\x7e\xea\x25\xb0\x18\xbe\x43\x70\x43\x4a\xe1\x63\xe6\x5c\xfc\x4b\x47\xd3\xe4\x26\x2e\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xac\xfa\x9b\x33\x3a\x7c\x71\x42\x2b\x49\x62\xf8\xb2\x16\x3e\x72\xf3\xf7\x3e\x2d\x13\xf4\x15\x64\x28\xf8\xd3\x91\x37\x50\x08\xe7\x20\x47\x41\xd3\x69\x56\x2a\x55\x81\xb1\x29\x92\xf0\x66\x5c\xa3\x90\x34\x50\x3a\xb4\x0e\xd6\x4b\x13\xcb\x24\x77\x69\x05\x35\x9d\xd2\x0f\xe2\x8d\x8c\x74\x85\x12\x15\x48\x4f\x25\x39\x1e\xaa\x1d\xa5\xcd\xf7\x1a\xfe\xe8\x63\xa8\xea\xee\x87\x68\x3d\xd8\x6d\xc7\x28\xbf\xa6\xa7\xed\xe8\x8c\x73\xcd\x76\x5c\x6e\xee\xaa\xb6\x83\xb0\x2e\x1b\xdb\x91\xd6\x2e\x42\xdb\xe1\xc4\x2b\xfc\xb4\x1d\x48\xad\x7e\x99\x17\x18\x1c\x0d\x03\x3f\xed\x84\x16\x6b\x19\x63\x2b\x7c\x9d\x6c\xc8\xf9\x69\x10\x01\x43\x5e\xec\x91\x71\x6e\xb0\xa2\x4c\x1c\x6c\xd4\x2a\x2b\xe1\xc5\xe7\x1b\xac\xbe\x1c\xae\x22\x11\x8e\x2d\xba\xa6\x6c\xd4\x90\x0e\x6b\x0f\x04\x72\xa3\x85\x1c\x1f\x9f\x82\xfc\xb1\xcd\x50\x57\x3e\x90\xcf\x9e\xd5\x7b\xb6\xd7\x3f\xcb\x2f\x75\x74\x36\x88\xdf\x59\xef\x6f\x69\x14\x63\x24\xd0\x50\x50\x74\xef\xba\xff\x0c\x00\x00\xff\xff\x00\x24\x55\x1f\xc3\x21\x00\x00") +var _call_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\x5f\x6f\xdb\x38\x0c\x7f\x8e\x3f\x05\xaf\x0f\x4b\x82\x65\x71\xbb\x03\xf6\xd0\x2d\x03\x72\x45\xbb\x05\xe8\xb5\x45\x9a\xde\x50\x14\x7d\x50\x6c\xda\xd6\xa6\x48\x86\x44\x37\xcd\x6d\xfd\xee\x07\x4a\x76\x6a\x67\x59\x6f\x2f\x06\x2c\x92\x3f\xfe\xfb\x51\x54\x1c\xc3\x89\x29\x37\x56\xe6\x05\xc1\xdb\xc3\xb7\x47\xb0\x28\x10\x72\xf3\x06\xa9\x40\x8b\xd5\x0a\xa6\x15\x15\xc6\xba\x28\x8e\x61\x51\x48\x07\x99\x54\x08\xd2\x41\x29\x2c\x81\xc9\x80\x76\xf4\x95\x5c\x5a\x61\x37\xe3\x28\x8e\x83\xcd\x5e\x31\x23\x64\x16\x11\x9c\xc9\x68\x2d\x2c\x1e\xc3\xc6\x54\x90\x08\x0d\x16\x53\xe9\xc8\xca\x65\x45\x08\x92\x40\xe8\x34\x36\x16\x56\x26\x95\xd9\x86\x21\x25\x41\xa5\x53\xb4\xde\x35\xa1\x5d\xb9\x26\x8e\x4f\x17\x37\x70\x8e\xce\xa1\x85\x4f\xa8\xd1\x0a\x05\x57\xd5\x52\xc9\x04\xce\x65\x82\xda\x21\x08\x07\x25\x9f\xb8\x02\x53\x58\x7a\x38\x36\x3c\xe3\x50\xae\xeb\x50\xe0\xcc\x54\x3a\x15\x24\x8d\x1e\x01\x4a\x8e\x1c\x1e\xd0\x3a\x69\x34\xfc\xd9\xb8\xaa\x01\x47\x60\x2c\x83\x0c\x04\x71\x02\x16\x4c\xc9\x76\x43\x10\x7a\x03\x4a\xd0\xb3\xe9\x6f\x14\xe4\x39\xef\x14\xa4\xf6\x6e\x0a\x53\x22\x50\x21\x88\xb3\x5e\x4b\xa5\x60\x89\x50\x39\xcc\x2a\x35\x62\xb4\x65\x45\xf0\x65\xb6\xf8\x7c\x79\xb3\x80\xe9\xc5\x2d\x7c\x99\xce\xe7\xd3\x8b\xc5\xed\x7b\x58\x4b\x2a\x4c\x45\x80\x0f\x18\xa0\xe4\xaa\x54\x12\x53\x58\x0b\x6b\x85\xa6\x0d\x98\x8c\x11\xfe\x3e\x9d\x9f\x7c\x9e\x5e\x2c\xa6\x7f\xcd\xce\x67\x8b\x5b\x30\x16\xce\x66\x8b\x8b\xd3\xeb\x6b\x38\xbb\x9c\xc3\x14\xae\xa6\xf3\xc5\xec\xe4\xe6\x7c\x3a\x87\xab\x9b\xf9\xd5\xe5\xf5\xe9\x18\xae\x91\xa3\x42\xb6\xff\xff\x9a\x67\xbe\x7b\x16\x21\x45\x12\x52\xb9\xa6\x12\xb7\xa6\x02\x57\x98\x4a\xa5\x50\x88\x07\x04\x8b\x09\xca\x07\x4c\x41\x40\x62\xca\xcd\x6f\x37\x95\xb1\x84\x32\x3a\xf7\x39\xff\x92\x90\x30\xcb\x40\x1b\x1a\x81\x43\x84\x0f\x05\x51\x79\x1c\xc7\xeb\xf5\x7a\x9c\xeb\x6a\x6c\x6c\x1e\xab\x00\xe7\xe2\x8f\xe3\x28\x62\xd0\x44\x28\x75\x66\xc5\x0a\x17\x56\x24\x68\xb9\xee\xce\xc3\x6b\x5c\x7b\x21\x64\x2c\x05\xb2\x22\x91\x3a\x87\x15\x52\x61\x52\x07
\x64\xc0\x62\x69\x2c\xd5\x9d\x02\xa9\x33\x63\x57\x9e\x51\x3e\xd8\x25\x37\x46\x6a\x42\xab\x85\x82\x15\x3a\x27\x72\xf4\x2c\x16\x0c\xa6\x9d\x48\xc8\x53\xe6\x7b\xd4\x63\x3f\x8e\x44\xf2\xed\x18\xee\xbe\x3f\xdd\x8f\xa2\x5e\x26\x2a\x45\xc7\x90\x55\xda\x6b\x0d\x94\xc9\x47\x90\x2e\x87\xf0\xfd\x69\x14\xf5\x2c\xba\xae\x38\xa1\xc7\x5a\x1c\xf5\x7a\x71\x0c\x57\x16\x4b\x66\xb9\xa9\x98\x9d\xb5\x73\x1f\x62\xd4\xeb\x3d\x08\x0b\x01\x01\x26\xde\xa0\x47\x9b\x12\x8f\x01\x00\x12\x7a\x1c\xf3\xcf\x88\x4f\x33\x6b\x56\xfe\x94\xcc\x67\x7c\x64\x1f\x63\x3e\x1a\x7a\x21\x19\x2f\x6a\x0b\xc9\x04\xd1\x83\x50\x95\x87\xeb\x1f\x3e\xf6\xe1\xb5\x07\xf5\x67\x63\x32\xd7\x64\xa5\xce\x07\x47\xef\x82\x6a\x2e\x5c\x80\xa9\x55\x97\x32\x9f\x69\xf2\x68\xb9\x70\xc3\xbd\x06\x37\x0e\xd3\xe3\xfd\x06\x2c\xda\x63\x24\x75\x59\xd1\x71\x27\x56\x7f\x14\xa4\xa6\xa2\x20\x7e\x96\x86\x23\x2f\x7e\x8a\x7a\x3d\x99\xc1\x80\x0a\xe9\xc6\xdb\x3e\xdd\x1d\xde\x87\x1f\xf8\x63\x32\xf1\x37\x55\x26\x35\xa6\xa1\xfe\x75\x7b\x6a\x85\x09\xfc\xc2\xf4\x45\x70\xb4\xd6\xd8\x97\xc0\x83\xc2\x3e\x70\x2f\x61\x70\x40\xe5\x10\x18\x9f\x73\xfa\x6d\xc4\xad\x72\x2b\xc0\x8e\x4a\x07\x03\x5e\xbd\xda\x23\x3e\xc0\x47\x4c\x2a\xa6\x26\x58\x7c\x40\x4b\x98\x1e\xc0\x8f\x1f\x35\xed\xea\xfa\xc2\x64\x32\x39\x38\x7c\x3c\x18\xd6\x71\xa4\xa8\x90\xb0\xab\xe3\x63\x88\x38\x46\xaa\xac\x0e\xd9\x66\x52\x0b\x25\xff\xc5\xda\xed\x30\xea\xf1\x4c\x20\x8f\x5a\x6b\x24\xfc\xd8\x06\x64\x26\xbc\x1f\xe5\x0e\xdd\xbd\xc2\x38\x47\x5a\x6c\x4a\x1c\x0c\x5b\x94\x0f\x44\xd8\xca\xcf\xac\x59\x0d\x86\xcf\xb4\xdf\x11\x2f\x4c\x23\xac\x79\xb6\x23\x9f\xf1\x69\xa3\xe2\x09\xdf\xe5\xee\x56\xf1\x93\x70\x83\x61\x8b\xbe\xfd\xa3\x77\xfd\x0e\x07\xb7\x9a\xff\xf0\x34\x0d\x86\x3b\xdd\xf4\xb9\x71\x9e\x61\xda\x26\xbf\x70\x53\x1b\x77\xe7\xa4\xf6\xd2\x65\xd3\xb8\xac\x5c\x31\xe0\xdf\xa6\xc6\x8f\x92\x76\x4b\x3c\x0f\x4d\xd8\x16\x5a\xa1\xfe\x89\x96\x63\x85\x3a\xa7\xa2\x4e\x83\x35\x3e\xc2\x51\xdd\xf5\x56\x73\x76\xbd\x9b\x72\x30\xdc\xe6\x54\x8f\x37\x4c\xf6\x95\x2f\x04\x51\x17\x91\xd5\x7e\x2e\x64\xe3\xab\xa1\xf9\x8e\xdd\x29\x1f
\x07\x77\x1c\x63\xad\xb5\x67\x5a\x42\x34\x0d\x83\xdb\xcd\x7e\x06\xbb\xf4\xd2\xc1\xd0\xc3\xd5\x73\xd8\x32\x6e\x42\x68\xa6\x2c\xb8\xf4\x22\xa6\xa6\x77\xdb\x3f\x99\x9f\x4e\x17\xa7\x7d\x9e\x9a\xbd\x92\xb7\xfd\x26\xa0\x66\x70\x82\x9a\xf1\x67\x4f\x51\xf3\xe1\x6a\xbf\x99\xc0\x51\x93\xd9\xce\x85\xa1\x50\xbf\x39\x6a\x2e\xb3\xbd\xf9\xbe\x68\x00\x77\xf7\x5b\x4f\x2f\x28\x76\x98\xc4\xda\xcc\xa6\x38\x86\x66\x94\xf9\x5d\x60\x51\x10\x3a\x7e\x18\x30\x1b\xcc\xf2\x2b\x26\xbc\x5c\x79\xe9\xf2\x3e\xf6\xaa\x90\xa2\x93\x16\x53\xc8\x24\xaa\x14\x0c\xbf\x10\xf9\xe9\xf1\xd5\x19\xed\x01\x1d\x5a\xc9\x88\x7e\x0f\x8f\xc3\x6b\x56\x32\xa8\x96\x09\xd2\x06\x32\x14\x54\x59\xe4\xf5\x5d\x0a\xe7\x60\x85\x42\x4b\x9d\x67\x95\x52\x1b\x30\x36\x45\x06\x0f\xf7\x8a\xf3\x80\x64\x78\xc1\x5b\x07\xeb\xc2\x40\x6a\x74\xbf\x5e\xea\xa5\x45\x7e\xaf\x8d\xe0\x6b\xe5\x88\x5f\x75\xa5\x12\x1b\x90\x34\x8e\x7a\x4d\x52\xed\xfd\xcc\x99\x6f\x47\xc4\x19\xbe\x10\x7f\x5e\xbe\x4d\x9b\xbb\xdb\xd7\x1f\xf3\x5f\x77\xef\xd6\xdd\xee\x6e\xdc\xe7\xe9\xef\xae\xd7\x66\x82\xba\x3b\xb4\x3d\x57\xdd\x45\xe9\x25\xfe\xaf\xbb\x22\x5b\xdc\xf7\x02\xcf\xe0\xad\x81\xff\x0b\x51\xca\x55\x3b\x27\xb9\x0a\xf1\x78\x2e\x6c\xd5\xfd\x5f\x73\xbf\x71\x17\x07\x5c\x9c\x6f\xb8\xe1\x87\x71\xa8\x51\xcd\x41\xe6\x6d\x38\xb8\xfb\x86\x9b\xfb\xfd\x3c\xad\xa7\xa0\xa5\xd7\x30\xb3\xb9\x3f\x83\xe8\x85\xc5\xbd\x0d\x42\x4e\x0e\xdf\x83\xfc\xd0\x36\xa8\xef\xb0\xf7\x20\x5f\xbf\x6e\x5c\xb6\xe5\x77\xf2\xbe\xb9\xc2\xb6\x0b\x6a\x47\x3e\x6c\x07\x54\x6f\xb4\xa0\x12\xf5\x9e\xa2\xa7\xe8\xbf\x00\x00\x00\xff\xff\x2a\xac\x9f\xff\xa9\x0d\x00\x00") func call_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -132,7 +155,27 @@ func call_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xef, 0x68, 0xda, 0xd8, 0x9, 0xf5, 0xd5, 0x71, 0xa8, 0x8a, 0xfb, 0x30, 0xe8, 0xf0, 0x72, 0x14, 0x36, 0x6b, 0x62, 0x5a, 0x4e, 0xff, 0x16, 0xdc, 0xd3, 0x2c, 0x68, 
0x7b, 0x79, 0x9f, 0xd3}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x42, 0x13, 0x7a, 0x14, 0xbf, 0xa7, 0x49, 0x4f, 0xb4, 0x4f, 0x45, 0x1, 0xbc, 0x9e, 0xd1, 0x8e, 0xc7, 0xee, 0x61, 0xfa, 0x82, 0x52, 0xa4, 0x78, 0xfe, 0xff, 0xb1, 0x68, 0x1d, 0xcc, 0x1d, 0x8e}} + return a, nil +} + +var _call_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x89\x1f\x6a\x09\x51\x24\x39\xe9\xb7\x5f\xc0\xae\x7a\x50\x1d\x25\x35\xe0\xc6\x81\xad\x34\x08\x82\x3c\x50\xbb\xb3\x12\x6b\x8a\xdc\x92\x5c\xc9\xba\xd6\xff\xfb\x61\x86\xdc\xd5\xae\x24\x3b\xbe\x5e\x71\xe8\xbd\x69\x97\x33\xc3\xe1\xcc\x67\x7e\x71\x35\x18\xc0\xb9\xc9\x37\x56\xce\x17\x1e\x5e\x0e\x4f\xfe\x1f\xa6\x0b\x84\xb9\x79\x81\x7e\x81\x16\x8b\x25\x8c\x0b\xbf\x30\xd6\xb5\x07\x03\x98\x2e\xa4\x83\x4c\x2a\x04\xe9\x20\x17\xd6\x83\xc9\xc0\xef\xd0\x2b\x39\xb3\xc2\x6e\xfa\xed\xc1\x20\xf0\x1c\x5c\x26\x09\x99\x45\x04\x67\x32\xbf\x16\x16\x4f\x61\x63\x0a\x48\x84\x06\x8b\xa9\x74\xde\xca\x59\xe1\x11\xa4\x07\xa1\xd3\x81\xb1\xb0\x34\xa9\xcc\x36\x24\x52\x7a\x28\x74\x8a\x96\xb7\xf6\x68\x97\xae\xd4\xe3\xed\xbb\x0f\x70\x89\xce\xa1\x85\xb7\xa8\xd1\x0a\x05\xef\x8b\x99\x92\x09\x5c\xca\x04\xb5\x43\x10\x0e\x72\x7a\xe3\x16\x98\xc2\x8c\xc5\x11\xe3\x1b\x52\xe5\x26\xaa\x02\x6f\x4c\xa1\x53\xe1\xa5\xd1\x3d\x40\x49\x9a\xc3\x0a\xad\x93\x46\xc3\xab\x72\xab\x28\xb0\x07\xc6\x92\x90\x8e\xf0\x74\x00\x0b\x26\x27\xbe\x2e\x08\xbd\x01\x25\xfc\x96\xf5\x09\x06\xd9\x9e\x3b\x05\xa9\x79\x9b\x85\xc9\x11\xfc\x42\x78\x3a\xf5\x5a\x2a\x05\x33\x84\xc2\x61\x56\xa8\x1e\x49\x9b\x15\x1e\x3e\x5e\x4c\x7f\xba\xfa\x30\x85\xf1\xbb\x4f\xf0\x71\x7c\x7d\x3d\x7e\x37\xfd\x74\x06\x6b\xe9\x17\xa6\xf0\x80\x2b\x0c\xa2\xe4\x32\x57\x12\x53\x58\x0b\x6b\x85\xf6\x1b\x30\x19\x49\xf8\x79\x72\x7d\xfe\xd3\xf8\xdd\x74\xfc\xe3\xc5\xe5\xc5\xf4\x13\x18\x0b\x6f\x2e\xa6\xef\x26\x37\x37\xf0\xe6\xea\x1a\xc6\xf0\x7e\x7c\x3d\xbd\x38\xff\x70\x39\xbe\x86\xf7\x1f\xae\xdf\x5f\xdd\x4c\xfa\x70\x83\xa4\x15\x12\xff\xd7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe
5\x4a\x4b\x7c\x32\x05\xb8\x85\x29\x54\x0a\x0b\xb1\x42\xb0\x98\xa0\x5c\x61\x0a\x02\x12\x93\x6f\x9e\xec\x54\x92\x25\x94\xd1\x73\x3e\xf3\x83\x80\x84\x8b\x0c\xb4\xf1\x3d\x70\x88\xf0\xfd\xc2\xfb\xfc\x74\x30\x58\xaf\xd7\xfd\xb9\x2e\xfa\xc6\xce\x07\x2a\x88\x73\x83\x1f\xfa\x6d\x92\x99\x08\xa5\xa6\x56\x24\x68\xc9\x39\x02\xb2\x82\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x77\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xdc\x58\xfa\xad\x54\x89\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\xb0\x14\x29\xc2\x6c\x03\xa2\x2e\xb0\x57\x3f\x0c\xc1\x28\xb8\x1b\xa4\xce\x8c\x5d\x32\x2c\xfb\xed\xdf\xdb\xad\xa8\xa1\xf3\x22\xb9\x25\x05\x49\x7e\x52\x58\x8b\xda\x93\x29\x0b\xeb\xe4\x0a\x99\x04\x02\x4d\xb4\xe7\xe4\x97\x9f\x01\xef\x30\x29\x82\xa4\x56\x25\xe4\x14\x3e\xff\x7e\xff\xa5\xd7\x66\xd1\x29\xba\x04\x75\x8a\x29\x9f\xef\xd6\xc1\x7a\xc1\x16\x85\x35\x1e\xaf\x10\x7e\x2d\x9c\xaf\xd1\x64\xd6\x2c\x41\x68\x30\x05\x21\xbe\x6e\x1d\xa9\xbd\x61\x81\x82\x7e\x6b\xb4\xac\x51\xbf\xdd\xaa\x98\x4f\x21\x13\xca\x61\xdc\xd7\x79\xcc\xe9\x34\x52\xaf\xcc\x2d\x49\x36\x96\x20\x6c\x37\x60\xf2\xc4\xa4\x31\x18\xe8\x1c\xd5\x31\xd0\xf5\xdb\x2d\xe2\x3b\x85\xac\xd0\xbc\x6d\x47\x99\x79\x0f\xd2\x59\x17\x7e\x6f\xb7\x48\xec\xb9\xc8\x7d\x61\x91\xed\x89\xd6\x1a\xeb\x40\x2e\x97\x98\x4a\xe1\x51\x6d\xda\xad\xd6\x4a\xd8\xb0\x00\x23\x50\x66\xde\x9f\xa3\x9f\xd0\x63\xa7\x7b\xd6\x6e\xb5\x64\x06\x9d\xb0\xfa\x6c\x34\xe2\xec\x93\x49\x8d\x69\x10\xdf\xf2\x0b\xe9\xfa\x99\x28\x94\xaf\xf6\x25\xa6\x96\x45\x5f\x58\x4d\x3f\xef\x83\x16\x1f\x11\x8c\x56\x1b\x48\x28\xcb\x88\x19\x85\xa7\xdb\x38\x8f\xcb\x78\x38\xd7\x83\x4c\x38\x32\xa1\xcc\x60\x8d\x90\x5b\x7c\x91\x2c\x90\x7c\xa7\x13\x8c\x5a\xba\x8d\x63\xa7\x8e\x80\x76\xeb\x9b\xbc\xef\xcd\xbb\x62\x39\x43\xdb\xe9\xc2\x37\x30\xbc\xcb\x86\x5d\x18\x8d\xf8\x47\xa9\x7b\xe4\x89\xfa\x92\x14\x93\xc7\x83\x32\xff\x8d\xb7\x52\xcf\xc3\x59\xa3\xae\x17\x19\x08\xd0\xb8\x86\xc4\x68\x06\x35\x79\x65\x86\x52\xcf\x21\xb1\x28\x3c\xa6\x3d\x10\x69\x0a\xde\x04\xe4\x55\x38\x6b\x6e\x09\xdf\x7c\x03\x1d\xda\x6c\x04\xc7\xe7\xd7\x93\xf1\x7
4\x72\x0c\x7f\xfc\x01\xe1\xcd\x51\x78\xf3\xf2\xa8\x5b\xd3\x4c\xea\xab\x2c\x8b\xca\xb1\xc0\x7e\x8e\x78\xdb\x39\xe9\xf6\x57\x42\x15\x78\x95\x05\x35\x23\xed\x44\xa7\x30\x8a\x3c\xcf\x77\x79\x5e\x36\x78\x88\x69\x30\x80\xb1\x73\xb8\x9c\x29\xdc\x0f\xc8\x18\xb1\x1c\xbc\xce\x53\xc6\x22\xf4\x25\x66\x99\x2b\x24\x54\x95\xbb\x46\xf3\xb3\xc6\x2d\xbf\xc9\xf1\x14\x00\xc0\xe4\x3d\x7e\x41\xb1\xc0\x2f\xbc\xf9\x09\xef\xd8\x47\xa5\x09\x09\x55\xe3\x34\xb5\xe8\x5c\xa7\xdb\x0d\xe4\x52\xe7\x85\x3f\x6d\x90\x2f\x71\x69\xec\xa6\xef\x28\x21\x75\xf8\x68\xbd\x70\xd2\x92\x67\x2e\xdc\x85\x26\x9e\x88\xd4\xb7\xc2\x75\xb6\x4b\xe7\xc6\xf9\xd3\x72\x89\x1e\xca\x35\xb6\x05\xb1\x1d\x0f\xef\x8e\xf7\xad\x35\xec\x6e\x91\x70\xf2\x5d\x97\x58\xee\xcf\x2a\x7c\x57\x69\xa2\x9f\x17\x6e\xd1\x61\x38\x6d\x57\xb7\xa9\x60\x04\xde\x16\x78\x10\xfe\x0c\xa9\x7d\x38\x39\x54\x19\xe5\x12\x6f\x8b\x84\x61\x35\x17\x9c\x69\x38\xd2\x05\x65\x5e\x57\xcc\xd8\xe6\xde\x98\x7d\x74\x45\x70\xdd\x4c\x2e\xdf\xbc\x9e\xdc\x4c\xaf\x3f\x9c\x4f\x8f\x6b\x70\x52\x98\x79\x52\xaa\x79\x06\x85\x7a\xee\x17\xac\x3f\x89\x6b\xae\x7e\x26\x9e\x17\x27\x5f\xc2\x1b\x18\x1d\x08\xf9\xd6\xe3\x1c\xf0\xf9\x0b\xcb\xbe\xdf\x37\x5f\x93\x34\x18\xf3\xaf\x41\x92\x37\x4c\x5c\x92\x7b\x53\x12\x3c\xee\xe7\xbf\x18\x54\xe9\x8c\x28\x7e\x14\x4a\xe8\x04\x1f\xd1\x79\x1f\x6b\xf5\xa4\x79\x20\x0f\x2d\xd1\x2f\x4c\xca\x85\x21\x11\xa1\xb6\x94\x08\x4a\x8d\xc6\x7f\x3f\x1b\x8d\x2f\x2f\x6b\xb9\x88\x9f\xcf\xaf\x5e\xd7\xf3\xd3\xf1\xeb\xc9\xe5\xe4\xed\x78\x3a\xd9\xa5\xbd\x99\x8e\xa7\x17\xe7\xfc\xb6\x4c\x5d\x83\x01\xdc\xdc\xca\x9c\x2b\x0c\xe7\x6d\xb3\xcc\xb9\x55\xae\xf4\x75\x3d\xf0\x0b\x43\x4d\xa8\x8d\x05\x34\x13\x3a\x29\x0b\x9b\x2b\x01\xeb\x0d\xc1\xf5\x21\xe7\x9d\xec\x38\xaf\x82\xb0\x74\xef\x2d\xc6\x4d\xd3\x8e\x37\xa5\x5e\x5b\x83\x06\x34\x72\xf2\xe7\x04\xdb\x79\xfa\x21\xe1\x1f\x30\x84\x53\x38\x89\x59\xf4\x91\x34\xfd\x12\x9e\x93\xf8\x3f\x91\xac\x5f\x1d\xe0\xfc\x7b\xa6\xec\xbd\x40\xfb\xef\xa7\x72\x53\xf8\xab\x2c\x3b\x85\x5d\x23\x7e\xbb\x67\xc4\x8a\xfe\x12\xf5\x3e\xfd\xff\xed\xd1\x6f\xd3\x3e\xa1\xc
a\xe4\xf0\x6c\x0f\x22\x21\xe9\x3e\xdb\x89\x83\x68\x5c\x6e\xef\x58\x1a\x8c\x1e\x28\x34\x2f\x9b\x18\x7e\x28\x53\xfe\x47\x85\xe6\x60\x9b\x4a\xcd\x68\xb3\x11\xed\x81\x45\x6f\x25\xae\x68\xd4\x3c\x76\x2c\x92\x1a\x76\xb3\xa6\xf4\xd5\x87\x8f\x18\x24\x6a\x44\x4e\x2e\xb1\xc1\xa7\xfe\x8c\x7b\x5e\x6a\xd2\xe3\xa8\xc6\x10\x13\xdc\x87\x5b\x84\xa5\xd8\xd0\xa8\x96\x15\xfa\x76\x03\x73\xe1\x20\xdd\x68\xb1\x94\x89\x0b\xf2\xb8\xb9\xb7\x38\x17\x96\xc5\x5a\xfc\xad\x40\x47\x73\x1f\x01\x59\x24\xbe\x10\x4a\x6d\x60\x2e\x69\x78\x23\xee\xce\xcb\x57\xc3\x21\x38\x2f\x73\xd4\x69\x0f\xbe\x7b\x35\xf8\xee\x5b\xb0\x85\xc2\x6e\xbf\x5d\x2b\x61\xd5\x51\xa3\x37\x68\x21\xa2\xe7\x35\xe6\x7e\xd1\xe9\xc2\x0f\x0f\xd4\xc2\x07\x0a\xdb\x41\x5a\x78\x01\x27\x5f\xfa\xa4\xd7\xa8\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\x81\xf7\xea\xf5\x55\xe7\x56\x58\xa1\xc4\x0c\xbb\xa7\x3c\x00\xb3\xad\xd6\x22\x4e\x40\xe4\x14\xc8\x95\x90\x1a\x44\x92\x98\x42\x7b\x32\x7c\x39\xcc\xa8\x0d\xe5\xf7\x63\x5f\xca\xe3\x59\x51\x24\x09\x3a\x57\xa6\x7b\xf6\x1a\xa9\x23\x96\xc4\x0d\x52\x3b\x99\x62\xcd\x2b\x94\x1d\x0c\xa7\xe6\x48\x41\xa3\x74\x29\x70\x69\x1c\x6d\x32\x43\x58\x5b\x1a\xbc\x9c\xd4\x09\xdf\x3c\xa4\x48\xd6\x76\x60\x34\x08\x50\x86\xaf\x3b\x38\xc6\x41\xd8\xb9\xeb\x87\x7c\x4f\xdb\x52\xce\xd1\x66\xdd\x6f\x02\xb9\x0e\x55\x1e\x71\x76\x5a\x21\x0d\x78\x27\x9d\xe7\x8e\x9a\xb4\x94\x0e\x02\x92\xa5\x9e\xf7\x20\x37\x39\xe7\xe9\xaf\x95\xb3\x98\xac\xaf\x27\xbf\x4c\xae\xab\xc6\xe7\xe9\x4e\x2c\x67\x9e\xa3\x6a\x24\x04\x4b\xf3\x96\xc7\xf4\xe8\xc0\x10\x73\x00\x50\xa3\x07\x00\x45\xf2\xb7\xb5\xf1\x7d\xed\x38\x4a\x38\xbf\x75\xcc\x1c\xc3\x3c\x57\x57\xc0\x15\xca\xbb\x9d\xdc\xbd\x9b\x1c\x4c\x5e\x56\x08\x52\x8a\xd3\x0e\x25\xf6\xdd\x49\xa3\xb1\xb0\x1d\x38\xb6\xf8\xbc\xa8\xd9\x78\xcd\xed\x66\x20\xaa\xa5\x06\x5e\x2f\xfb\x56\x11\xaa\x01\xeb\x6e\x0a\x4f\x70\xa0\xfa\xbd\x4d\x7e\x73\xe1\x3e\x38\xf6\x7a\x4c\x7f\x33\x39\xbf\xd0\xbe\x53\x2e\x5e\x68\x78\x01\xe5\x03\x25\x75\x78\xd1\x88\xa2\x03\xd9\xb1\x95\xa2\x42\x8f\xb0\x15\x71\x06\x3b\xaf\x48\x50\x30\x07\x1b\xcd\xa2\xdf\x2f\xce\xc
3\x28\x8d\x0c\xf6\xcc\xa2\xef\xe3\x6f\x85\x50\xae\x33\xac\x9a\x85\x70\x02\x6f\xb8\xbc\x8d\xf6\x3a\x49\xe2\x69\xf6\x8e\x67\x35\xb6\x68\x8d\x92\x2d\x74\x82\xe7\x26\xc5\x47\x25\x44\x11\x31\x6d\x54\xbe\x8c\xc0\x3c\xd4\x7b\xb7\xea\x04\x70\x54\x35\x04\x99\x90\xaa\xb0\x78\x74\x06\x07\xd2\x8e\x2b\x6c\x26\x12\xf6\xa5\x43\xe0\x69\xdd\x81\x33\x4b\x5c\x98\x75\x50\xe0\x50\xf2\xda\x07\x47\x85\x83\x9d\xf2\xc1\xd7\x4e\xc2\x41\xe1\xc4\x1c\x6b\xe0\xa8\x0c\x5e\x3a\xea\xe0\x15\xc2\x9f\x86\xce\xf3\xea\xf1\x09\x28\xba\xff\x6b\xe0\xb1\xe3\xe7\xbd\x3e\xa7\x24\xe2\x6e\xa7\xf6\x50\x2a\x1b\x9a\x91\xbf\x97\xe3\x9f\x1c\x61\xbb\xb4\xe1\x68\x4d\xe2\x70\xc0\x6d\x5f\xf3\x75\xf7\x57\xab\x0f\x79\xfe\xa1\x96\x89\x30\xaa\x7f\xc5\xc4\x6f\x71\xca\x5d\x0e\x3d\xe5\x16\x57\xd2\x14\x54\xc0\xf0\x7f\x69\x1c\xae\x5a\xbe\xfb\x76\xeb\x3e\xde\x0b\xb2\xdf\xea\x17\x83\xeb\x45\xbc\xd7\x0e\xdd\x52\xad\x7c\x18\xae\xad\xf1\xba\x30\x0b\x37\xce\x2d\xe6\x7f\xe4\x82\x30\x06\xba\x37\x39\xb5\x03\xb1\x3a\x29\x8b\x22\xdd\x54\x05\xb1\x17\x1a\x11\x58\x08\x9d\xc6\x61\x44\xa4\xa9\x24\x79\x0c\x42\xd2\x50\xcc\x85\xd4\xed\x83\x66\xfc\x6a\x15\x3e\x84\x8c\xbd\xde\xb6\x5e\x48\xe3\x10\x49\x13\x1f\x6b\xdc\x7e\x42\xc1\xdc\x09\xa2\xdd\xbb\xce\x78\x5d\x6a\xb4\x2b\x96\xdc\x09\x83\x58\x09\xa9\x04\x4d\x5f\xdc\x61\xe9\x14\x12\x85\x42\x87\x2f\x1c\x98\x79\xb3\x42\xeb\xda\x4f\x00\xf9\x9f\xc1\xf8\x4e\x56\x2c\x1f\xa3\x39\x9e\x1e\xb3\x4f\x8d\xd8\x70\xfc\x37\x4a\x78\x1f\xe1\x55\x33\x6f\x88\x2c\xe9\xf9\xe3\x17\x6a\xdf\x7e\x5a\x48\x71\xcf\x44\x34\x3f\xc0\xb0\xd6\x97\xff\x5d\x82\x6c\x1f\x62\x97\x55\x7f\x16\x0f\xef\x8d\xe9\x81\x42\xc1\x53\x52\xf9\x69\xaa\xec\x47\x1f\x1b\xda\xca\xe8\x0d\x1d\xdd\x5e\xf8\xf2\x9d\xde\x02\xcb\x1b\x90\xd0\xda\xcf\x10\x35\x48\x8f\x56\xd0\x3c\x44\xe8\x8a\x5f\x53\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\x9f\x36\xa8\x30\x4b\x3d\xef\xb7\x5b\xe1\x7d\x2d\xde\x13\x7f\xb7\x8d\xf7\x50\x01\x99\x33\xde\x09\x54\x57\x02\x89\xbf\xe3\x6e\x91\xc7\xe6\x9d\x7b\x01\x5a\xa3\x57\x61\xa6\xde\xb9\x05\x60\xc6\x78\x13\xb0\x7b\x27\x46\x6b\xfc\xae\x01\x7
0\x26\x9d\x0b\x17\xc4\xec\x84\x84\xbf\xdb\x8f\x88\x92\x81\x82\xe1\xf4\x30\x03\x2d\x1d\x60\xda\xb9\x99\x20\x62\x7e\x15\x56\x43\x3d\x3f\xad\xaf\x86\x57\xf1\xa0\x72\x59\xb3\x8d\x5c\xb2\x6d\xee\xcf\x0e\x27\xb9\x61\x89\xc7\xc3\xc9\x8c\x6c\x5e\x01\xf6\x01\xd6\xfa\xac\xb1\x4f\xf2\x58\xaa\x64\xe9\x65\x66\x7b\x80\x95\xa5\xd7\x5a\x0e\x7f\xf7\x74\x91\x15\x71\x5d\xc5\x06\x4d\x43\x08\xdf\x36\xee\x2d\x1f\x9a\xb4\x68\x50\x89\x84\x65\x73\x35\x1a\x1d\x0d\xef\xaa\x0f\x23\x31\x57\x35\x68\x4a\x25\x42\x64\x84\xf3\x72\x54\xc8\x7f\x62\xdc\xb6\x1e\x83\xe5\x12\x58\x0c\x1f\x70\xb8\x9b\xa5\x10\x34\x33\x6e\x20\x0a\x47\xa3\xe8\x36\xb6\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xa0\xfb\xab\x33\x3a\x7c\xaa\x43\x2b\x49\x62\xf8\x24\x19\xfe\x1d\xc0\x1f\x4a\xb5\x4c\xd0\x6f\x20\x43\xc1\xdf\xdc\xbc\x81\x5c\x38\x07\x4b\x14\x34\xda\x66\x85\x52\x1b\x30\x36\x45\x12\x5e\xcd\x7a\x14\xd6\x06\x0a\x87\xd6\xc1\x7a\x61\x62\xa9\xe5\x16\x2f\xa7\x6e\x55\xfa\x5e\xbc\xce\x91\x2e\x57\x62\x03\xd2\x53\x59\x8f\x87\xaa\x47\x7a\xf5\xa1\x8b\xbf\x96\x19\x32\xf0\x7e\x98\x97\x53\x61\x33\xce\xf9\x35\x3d\x35\x23\x3c\x0e\x45\xcd\xd8\xde\x5e\x74\x35\x03\xb9\x2c\x3d\xcd\x68\xad\x17\xb2\x66\x48\xf2\x0a\x3f\x35\x83\xb1\xd6\x6a\xf3\x02\x23\xa8\x62\xe0\xa7\x9d\xf0\x64\x2d\x63\x7c\x86\xcf\xba\x15\x39\x3f\xf5\x22\x60\xc8\x8b\x1d\x32\xce\x2d\x6e\x28\x9b\x07\x1b\xd5\x4a\x53\x78\xf1\xf9\x16\x37\x5f\x0e\x57\xa2\x08\xc7\x1a\x5d\x55\x7a\xca\xb0\x08\x6b\x8f\x24\x83\x4a\x0b\x39\x1a\x9e\x81\xfc\xbe\xce\x50\x56\x4f\x90\xcf\x9f\x97\x7b\xd6\xd7\x3f\xcb\x2f\x65\x84\x57\x88\xdf\x59\xef\x36\x34\x8a\x31\x12\x68\x28\x28\xda\xf7\xed\x7f\x05\x00\x00\xff\xff\xfb\x65\x93\x4f\xfc\x22\x00\x00") + +func call_tracer_legacyJsBytes() ([]byte, error) { + return bindataRead( + _call_tracer_legacyJs, + "call_tracer_legacy.js", + ) +} + +func call_tracer_legacyJs() (*asset, error) { + bytes, err := call_tracer_legacyJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "call_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: 
time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x46, 0x79, 0xb6, 0xbc, 0xd2, 0xc, 0x25, 0xb1, 0x22, 0x56, 0xef, 0x77, 0xb9, 0x5e, 0x2e, 0xf4, 0xda, 0xb2, 0x2f, 0x53, 0xa4, 0xff, 0xc8, 0xac, 0xbb, 0x75, 0x22, 0x46, 0x59, 0xe3, 0x1d, 0x7d}} return a, nil } @@ -196,7 +239,7 @@ func opcount_tracerJs() (*asset, error) { return a, nil } -var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\x96\xfe\x8a\x41\x5f\x6c\xa3\xae\xdc\x64\x81\x3d\xc0\xb9\x1c\xa0\xba\x6e\x1b\x20\x9b\x04\xb6\x7b\xb9\xdc\x62\x1f\x28\x72\x24\x73\x4d\x93\x02\x49\xd9\xf1\x15\xf9\xdf\x0f\x43\x7d\xf8\xa3\x49\xd3\xdd\x37\x9b\x1c\xfe\xe6\xfb\x37\xa3\xd1\x08\x26\xa6\xdc\x59\x59\x2c\x3d\x9c\xbf\x3f\xfb\x07\x2c\x96\x08\x85\x79\x87\x7e\x89\x16\xab\x35\xa4\x95\x5f\x1a\xeb\xe2\xd1\x08\x16\x4b\xe9\x20\x97\x0a\x41\x3a\x28\x99\xf5\x60\x72\xf0\x27\xf2\x4a\x66\x96\xd9\x5d\x12\x8f\x46\xf5\x9b\x67\xaf\x09\x21\xb7\x88\xe0\x4c\xee\xb7\xcc\xe2\x18\x76\xa6\x02\xce\x34\x58\x14\xd2\x79\x2b\xb3\xca\x23\x48\x0f\x4c\x8b\x91\xb1\xb0\x36\x42\xe6\x3b\x82\x94\x1e\x2a\x2d\xd0\x06\xd5\x1e\xed\xda\xb5\x76\x7c\xbe\xf9\x0a\xd7\xe8\x1c\x5a\xf8\x8c\x1a\x2d\x53\x70\x57\x65\x4a\x72\xb8\x96\x1c\xb5\x43\x60\x0e\x4a\x3a\x71\x4b\x14\x90\x05\x38\x7a\xf8\x89\x4c\x99\x37\xa6\xc0\x27\x53\x69\xc1\xbc\x34\x7a\x08\x28\xc9\x72\xd8\xa0\x75\xd2\x68\xf8\xa5\x55\xd5\x00\x0e\xc1\x58\x02\xe9\x33\x4f\x0e\x58\x30\x25\xbd\x1b\x00\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\xaf\x16\x5f\x6e\xbf\x2e\x20\xbd\x79\x80\xfb\x74\x36\x4b\x6f\x16\x0f\x17\xb0\x95\x7e\x69\x2a\x0f\xb8\xc1\x1a\x4a\xae\x4b\x25\x51\xc0\x96\x59\xcb\xb4\xdf\x81\xc9\x09\xe1\xb7\xe9\x6c\xf2\x25\xbd\x59\xa4\x1f\xae\xae\xaf\x16\x0f\x60\x2c\x7c\xba\x5a\xdc\x4c\xe7\x73\xf8\x74\x3b\x83\x14\xee\xd2\xd9\xe2\x6a\xf2\xf5\x3a\x9d\xc1\xdd\xd7\xd9\xdd\xed\x7c\x9a\xc0\x1c\xc9\x2a\xa4\xf7\xaf\xc7\x3c\x0f\xd9\xb3\x08\x02\x3d\x93\
xca\xb5\x91\x78\x30\x15\xb8\xa5\xa9\x94\x80\x25\xdb\x20\x58\xe4\x28\x37\x28\x80\x01\x37\xe5\xee\xa7\x93\x4a\x58\x4c\x19\x5d\x04\x9f\x5f\x2c\x48\xb8\xca\x41\x1b\x3f\x04\x87\x08\xff\x5c\x7a\x5f\x8e\x47\xa3\xed\x76\x9b\x14\xba\x4a\x8c\x2d\x46\xaa\x86\x73\xa3\x7f\x25\x31\x61\x96\x16\x9d\x67\x1e\x17\x96\x71\xb4\x60\x2a\x5f\x56\xde\x81\xab\xf2\x5c\x72\x89\xda\x83\xd4\xb9\xb1\xeb\x50\x29\xe0\x0d\x70\x8b\xcc\x23\x30\x50\x86\x33\x05\xf8\x88\xbc\x0a\x77\x75\xa4\x43\xb9\x5a\xa6\x1d\xe3\xe1\x34\xb7\x66\x4d\xbe\x56\xce\xd3\x0f\xe7\x70\x9d\x29\x14\x50\xa0\x46\x27\x1d\x64\xca\xf0\x55\x12\x7f\x8b\xa3\x03\x63\xa8\x4e\x82\x87\x8d\x50\xa8\x8d\x2d\xf6\x2c\x42\x56\x49\x25\xa4\x2e\x92\x38\x6a\xa5\xc7\xa0\x2b\xa5\x86\x71\x80\x50\xc6\xac\xaa\x32\xe5\xdc\x54\xc1\xf6\x3f\x91\xfb\x1a\xcc\x95\xc8\x65\x4e\xc5\xc1\xba\x5b\x6f\xc2\x55\xa7\xd7\x64\x24\x9f\xc4\xd1\x11\xcc\x18\xf2\x4a\x07\x77\xfa\x4c\x08\x3b\x04\x91\x0d\xbe\xc5\x51\xb4\x61\x96\xb0\xe0\x12\xbc\xf9\x82\x8f\xe1\x72\x70\x11\x47\x91\xcc\xa1\xef\x97\xd2\x25\x2d\xf0\xef\x8c\xf3\x3f\xe0\xf2\xf2\x32\x34\x75\x2e\x35\x8a\x01\x10\x44\xf4\x9c\x58\x7d\x13\x65\x4c\x31\xcd\x71\x0c\xbd\xf7\x8f\x3d\x78\x0b\x22\x4b\x0a\xf4\x1f\xea\xd3\x5a\x59\xe2\xcd\xdc\x5b\xa9\x8b\xfe\xd9\xaf\x83\x61\x78\xa5\x4d\x78\x03\x8d\xf8\x8d\xe9\x84\xeb\x7b\x6e\x44\xb8\x6e\x6c\xae\xa5\x26\x46\x34\x42\x8d\x94\xf3\xc6\xb2\x02\xc7\xf0\xed\x89\xfe\x3f\x91\x57\x4f\x71\xf4\x74\x14\xe5\x79\x2d\xf4\x42\x94\x1b\x08\x40\xed\x6d\x57\xe7\x85\xa4\x4e\x3d\x4c\x40\xc0\xfb\x51\x12\xe6\xad\x29\x27\x49\x58\xe1\xee\xf5\x4c\xd0\x85\x14\x8f\xdd\xc5\x0a\x77\x83\x8b\xf8\xc5\x14\x25\x8d\xd1\xbf\x4b\xf1\xf8\xb3\xf9\x3a\x79\x73\x14\xd7\x39\x49\xed\xed\x1d\x0c\x4e\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\
xe2\x3e\xe0\xf5\x36\x4c\x55\xd8\xab\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\xc7\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x0f\x4c\xc1\x25\x64\xb2\xb8\xd2\xfe\x24\x79\x75\xd0\xdb\xa7\x83\x3f\x92\xa6\x79\x12\x47\x84\xd7\x3f\x1f\x0c\xe1\xec\xd7\xae\x22\xbc\x21\x28\x78\x1d\xcc\x9b\x97\xa1\xe2\xd3\x62\x78\xfe\x59\x50\x43\x1d\xfc\x36\x68\x4d\x5c\x95\x51\x3a\x6a\x3f\x43\x1c\x8f\xbb\xf8\xe2\x07\xb8\xc7\xbe\xb5\xb8\x4d\x68\x12\x26\xc4\xcb\xa0\x75\x8a\x3e\x22\xb7\xb8\x26\x56\xa7\x2c\x70\xa6\x14\xda\x9e\x83\xc0\x19\xc3\xa6\x9c\x42\xbe\x70\x5d\xfa\x5d\xcb\xf5\x9e\xd9\x02\xbd\x7b\xdd\xb0\x80\xf3\xee\x5d\x4b\x81\x21\x14\xbb\x12\xe1\xf2\x12\x7a\x93\xd9\x34\x5d\x4c\x7b\x4d\x1b\x8d\x46\x70\x8f\x61\x13\xca\x94\xcc\x84\xda\x81\x40\x85\x1e\x6b\xbb\x8c\x0e\x21\xea\x28\x61\x48\x2b\x0d\x2d\x1b\xf8\x28\x9d\x97\xba\x80\x9a\x29\xb6\x34\x57\x1b\xb8\xd0\x23\x9c\x55\x8e\xaa\xf5\x64\x08\x79\x43\x1b\x85\x45\xe2\x15\xe2\xff\xd0\x6e\x4c\xc9\x6e\x03\xc9\xa5\x75\x1e\x4a\xc5\x38\x26\x84\xd7\x19\xf3\x72\x7e\x9b\x4e\x26\xd5\xb3\xd0\x82\x01\x68\x3f\xe0\x98\xa2\x01\x49\xea\x1d\xf4\x5b\x8c\x41\x1c\x45\xb6\x95\x3e\xc0\xbe\xd8\x53\x82\xf3\x58\x1e\x12\x02\x2d\x16\xb8\x41\xa2\xd0\xc0\x06\xf5\x30\x24\x5d\xff\xfe\xad\x99\xbe\xe8\x92\x38\xa2\x77\x07\x7d\xad\x4c\x71\xdc\xd7\xa2\x0e\x0b\xaf\xac\xa5\xfc\x77\x14\x9c\x53\x8f\xff\x59\x39\x4f\x31\xb5\x14\x9e\x86\x2d\x9e\x23\xc9\x40\x89\x34\x6d\x07\xdf\x93\x21\xcd\xad\x30\x27\x48\x5d\x33\xa5\xea\x6d\xae\x34\x1e\xb5\x97\x4c\xa9\x1d\xe5\x61\x6b\x69\x8d\xa1\xc5\x65\x08\x4e\x92\x54\x60\x9c\x20\x2a\x35\x57\x95\xa8\xcb\x20\xd4\x71\x83\xe7\x82\xcd\xc7\xfb\xcf\x1a\x9d\x63\x05\x26\x54\x49\xb9\x7c\x6c\x36\x48\x0d\xbd\x9a\xe4\xfa\x83\x5e\xd2\x19\x79\x4c\x31\xca\x14\x49\x5b\x64\x44\xd3\xa9\x10\x16\x9d\xeb\x0f\x1a\xce\xe9\x32\x7b\xbf\x44\x4d\xc1\x07\x8d\x5b\xe8\x56\x13\xc6\x39\xad\x6a\x62\x08\x4c\x08\xa2\xb6\x93\x35\x22\x8e\x22\xb7\x95\x9e\x2f\x21\x68\x32\xe5\xbe\x17\x07\x4d\xfd\x73\xe6\x10\
xde\x4c\xff\xb3\x98\xdc\x7e\x9c\x4e\x6e\xef\x1e\xde\x8c\xe1\xe8\x6c\x7e\xf5\xdf\x69\x77\xf6\x21\xbd\x4e\x6f\x26\xd3\x37\xe3\x30\x9b\x9f\x71\xc8\x9b\xd6\x05\x52\xe8\x3c\xe3\xab\xa4\x44\x5c\xf5\xdf\x1f\xf3\xc0\xde\xc1\x28\xca\x2c\xb2\xd5\xc5\xde\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x25\xbc\x18\xac\x8b\x97\xad\x99\x34\xf2\xfd\x96\xc8\xf7\xab\x48\xa0\x8a\xd7\xed\x38\xff\xcb\x86\x84\xde\x61\x7c\x35\x06\xc7\x14\x6d\xc0\xf2\x7f\xf4\xe5\x92\xe7\x0e\xfd\x10\x50\x0b\xb3\x25\xe6\xeb\x50\xeb\x9b\x06\xf7\x20\x64\x67\x83\x9a\x41\x6f\xf3\xfe\xa0\x13\x26\xb0\xef\x45\xcf\x9f\x13\x45\x2d\xe0\xb2\x45\x7f\x1b\x5e\xbe\x1e\xa8\xf3\x26\x52\x27\x0a\x7e\x39\xd9\xf0\xc2\xfd\x1a\xd7\xc6\xee\x9a\x71\x74\xe0\xdf\x8f\xa3\x9a\x5e\x5f\x77\xf5\x44\x7f\xa8\xc8\xba\x83\x8f\xd3\xeb\xe9\xe7\x74\x31\x3d\x92\x9a\x2f\xd2\xc5\xd5\xa4\x3e\xfa\xcb\x85\x77\xf6\xd3\x85\xd7\x9b\xcf\x17\xb7\xb3\x69\x6f\xdc\xfc\xbb\xbe\x4d\x3f\xf6\xbe\x53\xd8\x6c\x81\x3f\x6a\x5d\x6f\xee\x8d\x15\x7f\xa7\x03\x0e\x36\xb2\x9c\x3d\xb7\x90\x05\x6a\xe7\xbe\x3a\xf9\xe0\x01\xa6\x5b\x56\xce\xeb\x8f\xbe\x28\xbc\x7f\x96\x87\x9f\xe2\xa7\xf8\xff\x01\x00\x00\xff\xff\xb1\x28\x85\x2a\x8a\x10\x00\x00") +var _prestate_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\xb6\xfe\x8a\x41\x5f\x6c\x5d\x5d\xb9\xcd\x02\x7b\x80\x73\x39\x40\x75\xdd\x36\x40\x36\x09\x6c\xe7\x72\xb9\xc5\x3e\x50\xe4\x48\xe6\x9a\x26\x05\x92\xb2\xe3\x2b\xf2\xbf\x1f\x86\xfa\xf0\x47\x93\xa6\x7b\x6f\x16\x39\xfc\xcd\xf7\x6f\xc6\xa3\x11\x4c\x4c\xb9\xb3\xb2\x58\x7a\x38\x7b\xff\xe1\xef\xb0\x58\x22\x14\xe6\x1d\xfa\x25\x5a\xac\xd6\x90\x56\x7e\x69\xac\x8b\x46\x23\x58\x2c\xa5\x83\x5c\x2a\x04\xe9\xa0\x64\xd6\x83\xc9\xc1\x9f\xc8\x2b\x99\x59\x66\x77\x49\x34\x1a\xd5\x6f\x9e\xbd\x26\x84\xdc\x22\x82\x33\xb9\xdf\x32\x8b\x63\xd8\x99\x0a\x38\xd3\x60\x51\x48\xe7\xad\xcc\x2a\x8f\x20\x3d\x30\x2d\x46\xc6\xc2\xda\x08\x99\xef\x08\x52\x7a\xa8\xb4\x40\x1b\x54\x7b\xb4\x6b\xd7\xda\xf1\xe5\xfa\x0e\xae\xd0\x39\xb4\xf0\x05\x35\x5a\xa6\xe0\xb6\xca\x94\xe4\x70\x25\x39\x6a\x87\xc0\x1c\x94\x74\xe2\x96\x28\x20\x0b\x70\xf4\xf0\x33\x99\x32\x6f\x4c\x81\xcf\xa6\xd2\x82\x79\x69\xf4\x10\x50\x92\xe5\xb0\x41\xeb\xa4\xd1\xf0\x4b\xab\xaa\x01\x1c\x82\xb1\x04\x32\x60\x9e\x1c\xb0\x60\x4a\x7a\x17\x03\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\x2f\x17\x5f\x6f\xee\x16\x90\x5e\x3f\xc0\x7d\x3a\x9b\xa5\xd7\x8b\x87\x73\xd8\x4a\xbf\x34\x95\x07\xdc\x60\x0d\x25\xd7\xa5\x92\x28\x60\xcb\xac\x65\xda\xef\xc0\xe4\x84\xf0\xdb\x74\x36\xf9\x9a\x5e\x2f\xd2\x8f\x97\x57\x97\x8b\x07\x30\x16\x3e\x5f\x2e\xae\xa7\xf3\x39\x7c\xbe\x99\x41\x0a\xb7\xe9\x6c\x71\x39\xb9\xbb\x4a\x67\x70\x7b\x37\xbb\xbd\x99\x4f\x13\x98\x23\x59\x85\xf4\xfe\xf5\x98\xe7\x21\x7b\x16\x41\xa0\x67\x52\xb9\x36\x12\x0f\xa6\x02\xb7\x34\x95\x12\xb0\x64\x1b\x04\x8b\x1c\xe5\x06\x05\x30\xe0\xa6\xdc\xfd\x74\x52\x09\x8b\x29\xa3\x8b\xe0\xf3\x8b\x05\x09\x97\x39\x68\xe3\x87\xe0\x10\xe1\x1f\x4b\xef\xcb\xf1\x68\xb4\xdd\x6e\x93\x42\x57\x89\xb1\xc5\x48\xd5\x70\x6e\xf4\xcf\x24\x22\xcc\xd2\xa2\xf3\xcc\xe3\xc2\x32\x8e\x16\x4c\xe5\xcb\xca\x3b\x70\x55\x9e\x4b\x2e\x51\x7b\x90\x3a\x37\x76\x1d\x2a\x05\xbc
\x01\x6e\x91\x79\x04\x06\xca\x70\xa6\x00\x1f\x91\x57\xe1\xae\x8e\x74\x28\x57\xcb\xb4\x63\x3c\x9c\xe6\xd6\xac\xc9\xd7\xca\x79\xfa\xe1\x1c\xae\x33\x85\x02\x0a\xd4\xe8\xa4\x83\x4c\x19\xbe\x4a\xa2\x6f\x51\xef\xc0\x18\xaa\x93\xe0\x61\x23\x14\x6a\x63\x8b\x7d\x8b\x90\x55\x52\x09\xa9\x8b\x24\xea\xb5\xd2\x63\xd0\x95\x52\xc3\x28\x40\x28\x63\x56\x55\x99\x72\x6e\xaa\x60\xfb\x9f\xc8\x7d\x0d\xe6\x4a\xe4\x32\xa7\xe2\x60\xdd\xad\x37\xe1\xaa\xd3\x6b\x32\x92\x4f\xa2\xde\x11\xcc\x18\xf2\x4a\x07\x77\x06\x4c\x08\x3b\x04\x91\xc5\xdf\xa2\x5e\x6f\xc3\x2c\x61\xc1\x05\x78\xf3\x15\x1f\xc3\x65\x7c\x1e\xf5\x7a\x32\x87\x81\x5f\x4a\x97\xb4\xc0\xbf\x33\xce\xff\x80\x8b\x8b\x8b\xd0\xd4\xb9\xd4\x28\x62\x20\x88\xde\x73\x62\xf5\x4d\x2f\x63\x8a\x69\x8e\x63\xe8\xbf\x7f\xec\xc3\x5b\x10\x59\x52\xa0\xff\x58\x9f\xd6\xca\x12\x6f\xe6\xde\x4a\x5d\x0c\x3e\xfc\x1a\x0f\xc3\x2b\x6d\xc2\x1b\x68\xc4\xaf\x4d\x27\x5c\xdf\x73\x23\xc2\x75\x63\x73\x2d\x35\x31\xa2\x11\x6a\xa4\x9c\x37\x96\x15\x38\x86\x6f\x4f\xf4\xfd\x44\x5e\x3d\x45\xbd\xa7\xa3\x28\xcf\x6b\xa1\x17\xa2\xdc\x40\x00\x6a\x6f\xbb\x3a\x2f\x24\x75\xea\x61\x02\x02\xde\x8f\x92\x30\x6f\x4d\x39\x49\xc2\x0a\x77\xaf\x67\x82\x2e\xa4\x78\xec\x2e\x56\xb8\x8b\xcf\xa3\x17\x53\x94\x34\x46\xff\x2e\xc5\xe3\xcf\xe6\xeb\xe4\xcd\x51\x5c\xe7\x24\xb5\xb7\x37\x8e\x4f\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x37\x4c\x55\xd8\xaf\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\x47\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x8f\x4c\xc1\x05\x64\xb2\xb8\xd4\xfe\x24\x79\x75\xd0\xdb\xa7\xf1\x1f\x49\xd3\x3c\x89\x23\xc2\x1b\x9c\xc5\x43\xf8\xf0\x6b\x57\x11\xde\x10\x14\xbc\x0e\xe6\xcd\xcb\x50\xd1\x69
\x31\x3c\xff\x2c\xa8\xa1\x0e\x7e\x1b\xb4\x26\xae\xca\x28\x1d\xb5\x9f\x21\x8e\xc7\x5d\x7c\xfe\x03\xdc\x63\xdf\x5a\xdc\x26\x34\x09\x13\xe2\x10\x94\x3e\xc3\x77\xc1\xdc\x9d\x43\x01\x6f\x81\xbe\xa4\x26\x55\x4e\xf2\x2f\xcc\xc5\xf0\x37\x68\x24\x6e\xad\xe4\xdf\x59\x52\xe7\xf5\x13\x72\x8b\x6b\x1a\x05\x94\x3a\xce\x94\x42\xdb\x77\x10\x88\x66\xd8\xd4\x60\x48\x32\xae\x4b\xbf\x6b\x07\x84\x67\xb6\x40\xef\x5e\xf7\x26\xe0\xbc\x7b\xd7\xf2\x66\x88\xdf\xae\x44\xb8\xb8\x80\xfe\x64\x36\x4d\x17\xd3\x7e\xd3\x7b\xa3\x11\xdc\x63\x58\x9f\x32\x25\x33\xa1\x76\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x5c\x3b\x1e\x19\xd2\x1e\x44\x1b\x0a\x3e\x4a\xe7\xa5\x2e\xa0\xa6\x97\x2d\x0d\xe3\x06\x2e\x34\x16\x67\x15\x85\xe7\x74\x72\x79\x43\x6b\x88\x45\x22\x23\x1a\x1a\xa1\x47\x99\x92\xdd\xda\x92\x4b\xeb\x3c\x94\x8a\x71\x4c\x08\xaf\x33\xe6\xe5\xa2\x68\xda\x9f\x54\xcf\x42\xdf\x06\xa0\xfd\x54\x64\x8a\xa6\x2a\xa9\x77\x30\x68\x31\xe2\xa8\xd7\xb3\xad\xf4\x01\xf6\xf9\x9e\x47\x9c\xc7\xf2\x90\x45\x68\x1b\xc1\x0d\x12\xef\x06\x0a\xa9\x27\x28\xe9\xfa\xd7\x6f\xcd\xc8\x46\x97\x44\x3d\x7a\x77\x40\x06\xca\x14\xc7\x64\x20\xea\xb0\xf0\xca\x5a\xca\x7f\xc7\xdb\x39\x11\xc3\x9f\x95\xf3\x14\x53\x4b\xe1\x69\x28\xe6\x39\x66\x0d\x3c\x4a\x23\x3a\xfe\x9e\x41\x69\xd8\x85\xe1\x42\xea\x9a\xd1\x56\xaf\x80\xa5\xf1\xa8\xbd\x64\x4a\xed\x28\x0f\x5b\x4b\xbb\x0f\x6d\x3b\x43\x70\x92\xa4\x02\x4d\x05\x51\xa9\xb9\xaa\x44\x5d\x06\xa1\xf8\x1b\x3c\x17\x6c\x3e\x5e\x9a\xd6\xe8\x1c\x2b\x30\xa1\x4a\xca\xe5\x63\xb3\x76\x6a\xe8\xd7\xcc\x38\x88\xfb\x49\x67\xe4\x31\x2f\x29\x53\x24\x6d\x91\x11\xb7\xa7\x42\x58\x74\x6e\x10\x37\x44\xd5\x65\xf6\x7e\x89\x9a\x82\x0f\x1a\xb7\xd0\xed\x33\x8c\x73\xda\xef\xc4\x10\x98\x10\xc4\x87\x27\xbb\x47\xd4\xeb\xb9\xad\xf4\x7c\x09\x41\x93\x29\xf7\xbd\x18\x37\xf5\xcf\x99\x43\x78\x33\xfd\xf7\x62\x72\xf3\x69\x3a\xb9\xb9\x7d\x78\x33\x86\xa3\xb3\xf9\xe5\x7f\xa6\xdd\xd9\xc7\xf4\x2a\xbd\x9e\x4c\xdf\x8c\xc3\x40\x7f\xc6\x21\x6f\x5a\x17\x48\xa1\xf3\x8c\xaf\x92\x12\x71\x35\x78\x7f\xcc\x03\x7b\x07\x7b\xbd\xcc\x22\x5b\x9d\xef\x8d\xa9\x1b\xb4\xd1\xd1\xf2\x34
\x5c\xc0\x8b\xc1\x3a\x7f\xd9\x9a\x49\x23\x3f\x68\xd9\x7f\xbf\xbf\x04\xaa\x78\xdd\x8e\xb3\xbf\x6c\x48\xe8\x1d\xc6\x57\x63\x70\x4c\xd1\xda\x2c\xff\x4b\x7f\x77\xf2\xdc\xa1\x1f\x02\x6a\x61\xb6\xc4\x7c\x1d\x6a\x7d\xd3\xe0\x1e\x84\xec\x43\x5c\xd3\xee\x4d\x3e\x88\x3b\x61\x02\xfb\x5e\xf4\xec\x39\x51\xd4\x02\x2e\x5a\xf4\xb7\xe1\xe5\xeb\x81\x3a\x6b\x22\x75\xa2\xe0\x97\x93\xb5\x30\xdc\xaf\x71\x6d\xec\xae\x99\x61\x07\xfe\xfd\x38\xaa\xe9\xd5\x55\x57\x4f\xf4\x41\x45\xd6\x1d\x7c\x9a\x5e\x4d\xbf\xa4\x8b\xe9\x91\xd4\x7c\x91\x2e\x2e\x27\xf5\xd1\x5f\x2e\xbc\x0f\x3f\x5d\x78\xfd\xf9\x7c\x71\x33\x9b\xf6\xc7\xcd\xd7\xd5\x4d\xfa\xa9\xff\x9d\xc2\x66\x75\xfc\x51\xeb\x7a\x73\x6f\xac\xf8\x7f\x3a\xe0\x60\x8d\xcb\xd9\x73\x5b\x5c\xa0\x76\xee\xab\x93\x7f\x49\xc0\x74\xcb\xca\x79\xfd\x4f\xb1\x17\xde\x3f\xcb\xc3\x4f\xd1\x53\xf4\xbf\x00\x00\x00\xff\xff\x3a\xb7\x37\x41\xbf\x10\x00\x00") func prestate_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -212,7 +255,7 @@ func prestate_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0x79, 0x70, 0x4f, 0xc5, 0x78, 0x57, 0x63, 0x6f, 0x5, 0x31, 0xce, 0x3e, 0x5d, 0xbd, 0x71, 0x4, 0x46, 0x78, 0xcd, 0x1d, 0xcd, 0xb9, 0xd8, 0x10, 0xff, 0xe6, 0xc5, 0x59, 0xb9, 0x25, 0x6e}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x9, 0xf9, 0x44, 0x13, 0x31, 0x89, 0xf7, 0x35, 0x9a, 0xc6, 0xf0, 0x86, 0x9d, 0xb2, 0xe3, 0x57, 0xe2, 0xc0, 0xde, 0xc9, 0x3a, 0x4c, 0x4a, 0x94, 0x90, 0xa5, 0x92, 0x2f, 0xbf, 0xc0, 0xb8}} return a, nil } @@ -236,7 +279,7 @@ func trigram_tracerJs() (*asset, error) { return a, nil } -var _unigram_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x4d\x6f\xdb\x46\x10\xbd\xeb\x57\xbc\xa3\x8c\xa8\xa4\xd3\x5e\x0a\xa5\x09\xc0\x1a\x76\x22\xc0\x91\x0d\x89\x6e\x60\x14\x3d\x2c\xc9\x21\xb9\xe8\x6a\x87\xd8\x9d\x95\x42\x04\xfa\xef\xc5\x92\xa2\xe5\x1a\x6e\x13\x9e\x04\xcd\xbc\x8f\x79\x33\x64\x9a\xe2\x8a\xbb\xde\xe9\xa6\x15\xfc\x7c\xf9\xf6\x57\xe4\x2d\xa1\xe1\x9f\x48\x5a\x72\x14\x76\xc8\x82\xb4\xec\xfc\x2c\x4d\x91\xb7\xda\xa3\xd6\x86\xa0\x3d\x3a\xe5\x04\x5c\x43\x5e\xf4\x1b\x5d\x38\xe5\xfa\x64\x96\xa6\x23\xe6\xd5\x72\x64\xa8\x1d\x11\x3c\xd7\x72\x50\x8e\x96\xe8\x39\xa0\x54\x16\x8e\x2a\xed\xc5\xe9\x22\x08\x41\x0b\x94\xad\x52\x76\xd8\x71\xa5\xeb\x3e\x52\x6a\x41\xb0\x15\xb9\x41\x5a\xc8\xed\xfc\xe4\xe3\xe3\xfa\x01\xb7\xe4\x3d\x39\x7c\x24\x4b\x4e\x19\xdc\x87\xc2\xe8\x12\xb7\xba\x24\xeb\x09\xca\xa3\x8b\xff\xf8\x96\x2a\x14\x03\x5d\x04\xde\x44\x2b\xdb\x93\x15\xdc\x70\xb0\x95\x12\xcd\x76\x01\xd2\xd1\x39\xf6\xe4\xbc\x66\x8b\x5f\x26\xa9\x13\xe1\x02\xec\x22\xc9\x5c\x49\x1c\xc0\x81\xbb\x88\xbb\x80\xb2\x3d\x8c\x92\x33\xf4\x07\x02\x39\xcf\x5d\x41\xdb\x41\xa6\xe5\x8e\x20\xad\x92\x38\xf5\x41\x1b\x83\x82\x10\x3c\xd5\xc1\x2c\x22\x5b\x11\x04\x5f\x56\xf9\xa7\xbb\x87\x1c\xd9\xfa\x11\x5f\xb2\xcd\x26\x5b\xe7\x8f\xef\x70\xd0\xd2\x72\x10\xd0\x9e\x46\x2a\xbd\xeb\x8c\xa6\x0a\x07\xe5\x9c\xb2\xd2\x83\xeb\xc8\xf0\xf9\x7a\x73\xf5\x29\x5b\xe7\xd9\xef\xab\xdb\x55\xfe\x08\x76\xb8\x59\xe5\xeb\xeb\xed\x16\x37\x77\x1b\x64\xb8\xcf\x36\xf9\xea\xea\xe1\x36\xdb\xe0\xfe\x61\x73\x7f\xb7\xbd\x4e\xb0\xa5\xe8\x8a\x22\xfe\xfb\x99\xd7\xc3\xf6\x1c\xa1\x22\x51\xda\xf8\x29\x89\x47\x0e\xf0\x2d\x07\x53\xa1\x55\x7b\x82\xa3\x92\xf4\x9e\x2a\x28\x94\xdc\xf5\x3f\xbc\xd4\xc8\xa5\x0c\xdb\x66\x98\xf9\x3f\x0f\x12\xab\x1a\x96\x65\x01\x4f\x84\xdf\x5a\x91\x6e\x99\xa6\x87\xc3\x21\x69\x6c\x48\xd8\x35\xa9\x19\xe9\x7c\xfa\x21\x99\xcd\xbe\xcd\x00\x20\x4d\xd1\x6a\x2f\x71\x39\x91\x76\xa7\xba\xe8\x8a\xbb\x92\x2b\xf2\x10\x46\xc9\xc1\x0a\x39\x3f\x74\xc7\xd6\x25\xbe\x1d\x17\x13\xd6\x72\xe7\xc7\x16\x0f\x1b\x76\x05\xb9\x11\x3e\xb6\xc7\xea\x12\x97
\x4f\xdd\x5e\xa8\x8b\x4a\xda\xee\xf9\x6f\xaa\x86\xdc\x68\x4f\xae\x3f\x09\x8e\x77\x10\x7d\xfc\xf1\x19\xf4\x95\xca\x20\xe4\x93\x01\x1d\xa1\x4b\xd4\xc1\x96\xf1\xfa\xe6\x86\x9b\x05\xaa\xe2\x02\xe3\x14\xf1\xd9\xab\x78\x9b\x78\x0f\xc3\x4d\xc2\x5d\x22\xbc\x15\xa7\x6d\x33\xbf\x78\xf7\xd4\xa3\x6b\xcc\xa5\xd5\x3e\x89\x83\xfc\xc9\xdd\x5f\x17\x67\x7c\x7c\xfe\x55\x7b\xf3\xe6\x0c\x3c\x3e\xfd\x22\xe3\x09\xff\x83\xc2\x7b\xbc\x7d\x0d\x37\x34\xc5\x40\x26\xda\x73\x88\xb5\x0a\x46\x9e\xe7\x72\x68\x4f\x17\xad\x4a\x09\xca\x9c\xa2\x88\x6f\x27\xd7\x50\x76\x4a\xab\x1e\x6f\x2d\xb2\x0c\x14\xaf\xe6\x73\x5c\xcc\x26\x1d\x47\xfe\x35\x21\x65\xcc\x20\x36\x2d\x7d\x38\xd5\x82\xc8\x42\x0b\x39\x15\xdf\x55\xde\x93\x8b\x9f\x29\x38\x92\xe0\xac\x9f\x18\x23\xac\xd6\x56\x99\x89\xfb\x74\xd1\xe2\x54\xa9\x6d\x33\x7a\x1b\x4b\xcf\xcc\x95\xf2\xf5\xf9\xe2\x74\x3d\x7f\x0a\x07\x1f\x70\xf9\x62\x27\xa3\xe4\x39\xe4\x97\xe1\x1e\x17\xb3\xe3\xec\x9f\x00\x00\x00\xff\xff\x8d\xba\x8d\xa8\xe6\x05\x00\x00") +var _unigram_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x94\x41\x6f\xdb\xc6\x13\xc5\xef\xfa\x14\xef\x68\x23\xfa\x8b\xc9\xbf\x97\x42\x69\x0a\xb0\x86\x9d\x08\x70\x64\x43\xa2\x1b\x18\x45\x0f\x4b\x72\x48\x2e\xba\xda\x21\x76\x67\xa5\x08\x81\xbf\x7b\x31\xa4\x68\xb9\x85\xdb\x86\x27\x41\x3b\xef\x37\x6f\xde\x0e\x99\x65\xb8\xe2\xfe\x18\x6c\xdb\x09\xfe\xff\xf6\xdd\x8f\x28\x3a\x42\xcb\xff\x23\xe9\x28\x50\xda\x21\x4f\xd2\x71\x88\xb3\x2c\x43\xd1\xd9\x88\xc6\x3a\x82\x8d\xe8\x4d\x10\x70\x03\xf9\x5b\xbd\xb3\x65\x30\xe1\xb8\x98\x65\xd9\xa8\x79\xf5\x58\x09\x4d\x20\x42\xe4\x46\x0e\x26\xd0\x12\x47\x4e\xa8\x8c\x47\xa0\xda\x46\x09\xb6\x4c\x42\xb0\x02\xe3\xeb\x8c\x03\x76\x5c\xdb\xe6\xa8\x48\x2b\x48\xbe\xa6\x30\xb4\x16\x0a\xbb\x38\xf9\xf8\xb8\x7e\xc0\x2d\xc5\x48\x01\x1f\xc9\x53\x30\x0e\xf7\xa9\x74\xb6\xc2\xad\xad\xc8\x47\x82\x89\xe8\xf5\x9f\xd8\x51\x8d\x72\xc0\xa9\xf0\x46\xad\x6c\x4f\x56\x70\xc3\xc9\xd7\x46\x2c\xfb\x39\xc8\xaa\x73\xec\x29\x44\xcb\x1e\x3f\x4c\xad\x4e\xc0\x39\x38\x28\xe4\xc2\x88\x0e\x10\xc0\xbd\xea\x2e\x61\xfc\x11
\xce\xc8\x59\xfa\x1d\x81\x9c\xe7\xae\x61\xfd\xd0\xa6\xe3\x9e\x20\x9d\x11\x9d\xfa\x60\x9d\x43\x49\x48\x91\x9a\xe4\xe6\x4a\x2b\x93\xe0\xcb\xaa\xf8\x74\xf7\x50\x20\x5f\x3f\xe2\x4b\xbe\xd9\xe4\xeb\xe2\xf1\x3d\x0e\x56\x3a\x4e\x02\xda\xd3\x88\xb2\xbb\xde\x59\xaa\x71\x30\x21\x18\x2f\x47\x70\xa3\x84\xcf\xd7\x9b\xab\x4f\xf9\xba\xc8\x7f\x59\xdd\xae\x8a\x47\x70\xc0\xcd\xaa\x58\x5f\x6f\xb7\xb8\xb9\xdb\x20\xc7\x7d\xbe\x29\x56\x57\x0f\xb7\xf9\x06\xf7\x0f\x9b\xfb\xbb\xed\xf5\x02\x5b\x52\x57\xa4\xfa\xff\xce\xbc\x19\x6e\x2f\x10\x6a\x12\x63\x5d\x9c\x92\x78\xe4\x84\xd8\x71\x72\x35\x3a\xb3\x27\x04\xaa\xc8\xee\xa9\x86\x41\xc5\xfd\xf1\xbb\x2f\x55\x59\xc6\xb1\x6f\x87\x99\xff\x71\x21\xb1\x6a\xe0\x59\xe6\x88\x44\xf8\xa9\x13\xe9\x97\x59\x76\x38\x1c\x16\xad\x4f\x0b\x0e\x6d\xe6\x46\x5c\xcc\x7e\x5e\xcc\x66\xdf\x66\x00\x90\x65\xe8\x6c\x14\xbd\x1c\xc5\xee\x4c\xaf\xae\xb8\xaf\xb8\xa6\x08\x61\x54\x9c\xbc\x50\x88\x43\xb5\x96\x2e\xf1\xed\x69\x3e\x69\x3d\xf7\x71\x2c\x89\xf0\x69\x57\x52\x18\xe5\x63\xb9\x9e\x2e\xf1\xf6\xb9\x3a\x0a\xf5\xda\xc9\xfa\x3d\xff\x41\xf5\x90\x1b\xed\x29\x1c\x4f\x0d\xc7\x3d\x50\x1f\xbf\x7e\x06\x7d\xa5\x2a\x09\xc5\xc5\xa0\x56\xe9\x12\x4d\xf2\x95\x6e\xdf\x85\xe3\x76\x8e\xba\xbc\xc4\x38\x85\x3e\x7b\xa3\xbb\x89\x0f\x70\xdc\x2e\xb8\x5f\x08\x6f\x25\x58\xdf\x5e\x5c\xbe\x7f\xae\xb1\x0d\x2e\xa4\xb3\x71\xa1\x83\xfc\xc6\xfd\xef\x97\x67\xbd\x3e\x7f\x39\x7b\xf3\xe6\x2c\x7c\x7a\xfe\x45\x2e\x12\xfe\x45\x85\x0f\x78\xf7\x9a\x6e\x28\xd2\x40\x26\xec\x39\xc4\xc6\x24\x27\x2f\x73\x39\x74\xa7\x8d\x36\x95\x24\xe3\x4e\x51\xe8\xdb\xc9\x0d\x8c\x9f\xd2\x6a\xc6\x5d\x53\xca\x80\x78\x35\x9f\xa7\xf9\x6c\xea\x13\x28\xbe\xd6\xc8\x38\x37\x34\x9b\x2e\x7d\x58\xd5\x92\xc8\xc3\x0a\x05\xa3\xef\x2a\xef\x29\xe8\x67\x0a\x81\x24\x05\x1f\x27\xa2\xca\x1a\xeb\x8d\x9b\xd8\xa7\x8d\x96\x60\x2a\xeb\xdb\xd1\xdb\x78\xf4\xc2\x5c\x25\x5f\x5f\x5e\xdc\xc8\x3c\xa7\xf8\x1c\xcf\xd3\xec\xcf\x00\x00\x00\xff\xff\xf1\x91\x30\xae\xbd\x05\x00\x00") func unigram_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -252,7 +295,7 @@ func 
unigram_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "unigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2f, 0x36, 0x14, 0xc2, 0xf6, 0xc3, 0x80, 0x2b, 0x4a, 0x11, 0x7d, 0xd5, 0x3e, 0xef, 0x23, 0xb5, 0xd6, 0xe6, 0xe6, 0x5, 0x41, 0xf6, 0x14, 0x7a, 0x39, 0xf7, 0xf8, 0xac, 0x89, 0x8e, 0x43, 0xe6}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0xe6, 0x5c, 0x88, 0x18, 0xa7, 0x85, 0x61, 0x18, 0xc6, 0xec, 0x17, 0xfc, 0xdf, 0x9d, 0xc0, 0x1b, 0x49, 0xf8, 0x8d, 0xf1, 0xeb, 0x35, 0xf3, 0xd, 0x3e, 0xf6, 0xa3, 0xac, 0x8c, 0xba, 0x74}} return a, nil } @@ -347,15 +390,17 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "4byte_tracer.js": _4byte_tracerJs, - "bigram_tracer.js": bigram_tracerJs, - "call_tracer.js": call_tracerJs, - "evmdis_tracer.js": evmdis_tracerJs, - "noop_tracer.js": noop_tracerJs, - "opcount_tracer.js": opcount_tracerJs, - "prestate_tracer.js": prestate_tracerJs, - "trigram_tracer.js": trigram_tracerJs, - "unigram_tracer.js": unigram_tracerJs, + "4byte_tracer.js": _4byte_tracerJs, + "4byte_tracer_legacy.js": _4byte_tracer_legacyJs, + "bigram_tracer.js": bigram_tracerJs, + "call_tracer.js": call_tracerJs, + "call_tracer_legacy.js": call_tracer_legacyJs, + "evmdis_tracer.js": evmdis_tracerJs, + "noop_tracer.js": noop_tracerJs, + "opcount_tracer.js": opcount_tracerJs, + "prestate_tracer.js": prestate_tracerJs, + "trigram_tracer.js": trigram_tracerJs, + "unigram_tracer.js": unigram_tracerJs, } // AssetDebug is true if the assets were built with the debug flag enabled. @@ -365,13 +410,11 @@ const AssetDebug = false // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... 
and data contains the // following hierarchy: -// -// data/ -// foo.txt -// img/ -// a.png -// b.png -// +// data/ +// foo.txt +// img/ +// a.png +// b.png // then AssetDir("data") would return []string{"foo.txt", "img"}, // AssetDir("data/img") would return []string{"a.png", "b.png"}, // AssetDir("foo.txt") and AssetDir("notexist") would return an error, and @@ -404,15 +447,17 @@ type bintree struct { } var _bintree = &bintree{nil, map[string]*bintree{ - "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}}, - "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}}, - "call_tracer.js": {call_tracerJs, map[string]*bintree{}}, - "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}}, - "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}}, - "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}}, - "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}}, - "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}}, - "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}}, + "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}}, + "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}}, + "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}}, + "call_tracer.js": {call_tracerJs, map[string]*bintree{}}, + "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}}, + "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}}, + "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}}, + "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}}, + "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}}, + "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}}, + "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}}, }} // RestoreAsset restores an asset under the given directory. 
@@ -429,7 +474,7 @@ func RestoreAsset(dir, name string) error { if err != nil { return err } - err = os.WriteFile(_filePath(dir, name), data, info.Mode()) + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) if err != nil { return err } diff --git a/eth/tracers/internal/tracers/call_tracer.js b/eth/tracers/internal/tracers/call_tracer_legacy.js similarity index 92% rename from eth/tracers/internal/tracers/call_tracer.js rename to eth/tracers/internal/tracers/call_tracer_legacy.js index f8b383cd96e4..3ca7377738b7 100644 --- a/eth/tracers/internal/tracers/call_tracer.js +++ b/eth/tracers/internal/tracers/call_tracer_legacy.js @@ -61,7 +61,14 @@ if (this.callstack[left-1].calls === undefined) { this.callstack[left-1].calls = []; } - this.callstack[left-1].calls.push({type: op}); + this.callstack[left-1].calls.push({ + type: op, + from: toHex(log.contract.getAddress()), + to: toHex(toAddress(log.stack.peek(0).toString(16))), + gasIn: log.getGas(), + gasCost: log.getCost(), + value: '0x' + db.getBalance(log.contract.getAddress()).toString(16) + }); return } // If a new method invocation is being done, add to the call stack @@ -132,13 +139,12 @@ // If the call was a contract call, retrieve the gas usage and output if (call.gas !== undefined) { call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16); - - var ret = log.stack.peek(0); - if (!ret.equals(0)) { - call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen)); - } else if (call.error === undefined) { - call.error = "internal failure"; // TODO(karalabe): surface these faults somehow - } + } + var ret = log.stack.peek(0); + if (!ret.equals(0)) { + call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen)); + } else if (call.error === undefined) { + call.error = "internal failure"; // TODO(karalabe): surface these faults somehow } delete call.gasIn; delete call.gasCost; delete call.outOff; delete call.outLen; @@ -208,7 +214,7 @@ 
} else if (ctx.error !== undefined) { result.error = ctx.error; } - if (result.error !== undefined) { + if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) { delete result.output; } return this.finalize(result); diff --git a/eth/tracers/internal/tracers/noop_tracer.js b/eth/tracers/internal/tracers/noop_tracer.js deleted file mode 100644 index fe7ddc85ab41..000000000000 --- a/eth/tracers/internal/tracers/noop_tracer.js +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// noopTracer is just the barebone boilerplate code required from a JavaScript -// object to be usable as a transaction tracer. -{ - // step is invoked for every opcode that the VM executes. - step: function(log, db) { }, - - // fault is invoked when the actual execution of an opcode fails. - fault: function(log, db) { }, - - // result is invoked when all the opcodes have been iterated over and returns - // the final result of the tracing. 
- result: function(ctx, db) { return {}; } -} diff --git a/eth/tracers/internal/tracers/prestate_tracer.js b/eth/tracers/internal/tracers/prestate_tracer.js index e0a22bf157d3..084c04ec46b8 100644 --- a/eth/tracers/internal/tracers/prestate_tracer.js +++ b/eth/tracers/internal/tracers/prestate_tracer.js @@ -55,7 +55,7 @@ var toBal = bigInt(this.prestate[toHex(ctx.to)].balance.slice(2), 16); this.prestate[toHex(ctx.to)].balance = '0x'+toBal.subtract(ctx.value).toString(16); - this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).toString(16); + this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).add((ctx.gasUsed + ctx.intrinsicGas) * ctx.gasPrice).toString(16); // Decrement the caller's nonce, and remove empty create targets this.prestate[toHex(ctx.from)].nonce--; diff --git a/eth/tracers/internal/tracers/unigram_tracer.js b/eth/tracers/internal/tracers/unigram_tracer.js index 000fb13b1e9a..51107d8f3d6c 100644 --- a/eth/tracers/internal/tracers/unigram_tracer.js +++ b/eth/tracers/internal/tracers/unigram_tracer.js @@ -36,8 +36,6 @@ // result is invoked when all the opcodes have been iterated over and returns // the final result of the tracing. result: function(ctx) { - if(this.nops > 0){ - return this.hist; - } + return this.hist; }, } diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go new file mode 100644 index 000000000000..532068e6ae94 --- /dev/null +++ b/eth/tracers/native/call.go @@ -0,0 +1,170 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package native + +import ( + "encoding/json" + "errors" + "math/big" + "strconv" + "sync/atomic" + "time" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/vm" + "github.com/XinFinOrg/XDPoSChain/eth/tracers" +) + +func init() { + tracers.RegisterNativeTracer("callTracer", NewCallTracer) +} + +type callFrame struct { + Type string `json:"type"` + From string `json:"from"` + To string `json:"to,omitempty"` + Value string `json:"value,omitempty"` + Gas string `json:"gas"` + GasUsed string `json:"gasUsed"` + Input string `json:"input"` + Output string `json:"output,omitempty"` + Error string `json:"error,omitempty"` + Calls []callFrame `json:"calls,omitempty"` +} + +type callTracer struct { + callstack []callFrame + interrupt uint32 // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption +} + +// NewCallTracer returns a native go tracer which tracks +// call frames of a tx, and implements vm.EVMLogger. +func NewCallTracer() tracers.Tracer { + // First callframe contains tx context info + // and is populated on start and end. 
+ t := &callTracer{callstack: make([]callFrame, 1)} + return t +} + +func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { + t.callstack[0] = callFrame{ + Type: "CALL", + From: addrToHex(from), + To: addrToHex(to), + Input: bytesToHex(input), + Gas: uintToHex(gas), + Value: bigToHex(value), + } + if create { + t.callstack[0].Type = "CREATE" + } +} + +func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { + t.callstack[0].GasUsed = uintToHex(gasUsed) + if err != nil { + t.callstack[0].Error = err.Error() + if err.Error() == "execution reverted" && len(output) > 0 { + t.callstack[0].Output = bytesToHex(output) + } + } else { + t.callstack[0].Output = bytesToHex(output) + } +} + +func (t *callTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +} + +func (t *callTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { +} + +func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + // Skip if tracing was interrupted + if atomic.LoadUint32(&t.interrupt) > 0 { + // TODO: env.Cancel() + return + } + + call := callFrame{ + Type: typ.String(), + From: addrToHex(from), + To: addrToHex(to), + Input: bytesToHex(input), + Gas: uintToHex(gas), + Value: bigToHex(value), + } + t.callstack = append(t.callstack, call) +} + +func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + size := len(t.callstack) + if size <= 1 { + return + } + // pop call + call := t.callstack[size-1] + t.callstack = t.callstack[:size-1] + size -= 1 + + call.GasUsed = uintToHex(gasUsed) + if err == nil { + call.Output = bytesToHex(output) + } else { + call.Error = err.Error() + if call.Type == "CREATE" || call.Type == "CREATE2" { + 
call.To = "" + } + } + t.callstack[size-1].Calls = append(t.callstack[size-1].Calls, call) +} + +func (t *callTracer) GetResult() (json.RawMessage, error) { + if len(t.callstack) != 1 { + return nil, errors.New("incorrect number of top-level calls") + } + res, err := json.Marshal(t.callstack[0]) + if err != nil { + return nil, err + } + return json.RawMessage(res), t.reason +} + +func (t *callTracer) Stop(err error) { + t.reason = err + atomic.StoreUint32(&t.interrupt, 1) +} + +func bytesToHex(s []byte) string { + return "0x" + common.Bytes2Hex(s) +} + +func bigToHex(n *big.Int) string { + if n == nil { + return "" + } + return "0x" + n.Text(16) +} + +func uintToHex(n uint64) string { + return "0x" + strconv.FormatUint(n, 16) +} + +func addrToHex(a common.Address) string { + s, _ := a.MarshalText() + return string(s) +} diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go new file mode 100644 index 000000000000..8dd2405bc626 --- /dev/null +++ b/eth/tracers/native/noop.go @@ -0,0 +1,46 @@ +package native + +import ( + "encoding/json" + "math/big" + "time" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/core/vm" + "github.com/XinFinOrg/XDPoSChain/eth/tracers" +) + +func init() { + tracers.RegisterNativeTracer("noopTracer", NewNoopTracer) +} + +type noopTracer struct{} + +func NewNoopTracer() tracers.Tracer { + return &noopTracer{} +} + +func (t *noopTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { +} + +func (t *noopTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { +} + +func (t *noopTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +} + +func (t *noopTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { +} + +func (t *noopTracer) CaptureEnter(typ 
vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} + +func (t *noopTracer) CaptureExit(output []byte, gasUsed uint64, err error) { +} + +func (t *noopTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil +} + +func (t *noopTracer) Stop(err error) { +} diff --git a/eth/tracers/testdata/call_tracer_create.json b/eth/tracers/testdata/call_tracer/create.json similarity index 100% rename from eth/tracers/testdata/call_tracer_create.json rename to eth/tracers/testdata/call_tracer/create.json diff --git a/eth/tracers/testdata/call_tracer_deep_calls.json b/eth/tracers/testdata/call_tracer/deep_calls.json similarity index 100% rename from eth/tracers/testdata/call_tracer_deep_calls.json rename to eth/tracers/testdata/call_tracer/deep_calls.json diff --git a/eth/tracers/testdata/call_tracer_delegatecall.json b/eth/tracers/testdata/call_tracer/delegatecall.json similarity index 100% rename from eth/tracers/testdata/call_tracer_delegatecall.json rename to eth/tracers/testdata/call_tracer/delegatecall.json diff --git a/eth/tracers/testdata/call_tracer/inner_create_oog_outer_throw.json b/eth/tracers/testdata/call_tracer/inner_create_oog_outer_throw.json new file mode 100644 index 000000000000..9395eb401c2a --- /dev/null +++ b/eth/tracers/testdata/call_tracer/inner_create_oog_outer_throw.json @@ -0,0 +1,77 @@ +{ + "context": { + "difficulty": "3451177886", + "gasLimit": "4709286", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2290744", + "timestamp": "1513616439" + }, + "genesis": { + "alloc": { + "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a": { + "balance": "0x0", + "code": 
"0x606060405263ffffffff60e060020a6000350416633b91f50681146100505780635bb47808146100715780635f51fca01461008c578063bc7647a9146100ad578063f1bd0d7a146100c8575b610000565b346100005761006f600160a060020a03600435811690602435166100e9565b005b346100005761006f600160a060020a0360043516610152565b005b346100005761006f600160a060020a036004358116906024351661019c565b005b346100005761006f600160a060020a03600435166101fa565b005b346100005761006f600160a060020a0360043581169060243516610db8565b005b600160a060020a038083166000908152602081905260408120549091908116903316811461011657610000565b839150600160a060020a038316151561012d573392505b6101378284610e2e565b6101418284610db8565b61014a826101fa565b5b5b50505050565b600154600160a060020a03908116903316811461016e57610000565b6002805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0384161790555b5b5050565b600254600160a060020a0390811690331681146101b857610000565b600160a060020a038381166000908152602081905260409020805473ffffffffffffffffffffffffffffffffffffffff19169184169190911790555b5b505050565b6040805160e260020a631a481fc102815260016024820181905260026044830152606482015262093a8060848201819052600060a4830181905260c06004840152601e60c48401527f736574456e7469747953746174757328616464726573732c75696e743829000060e484015292519091600160a060020a038516916369207f049161010480820192879290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526000602482018190526001604483015260606004830152602360648301527f626567696e506f6c6c28616464726573732c75696e7436342c626f6f6c2c626f60848301527f6f6c29000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f61646453746f636b28616464726573732c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009
290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f697373756553746f636b2875696e74382c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602160648301527f6772616e7453746f636b2875696e74382c75696e743235362c61646472657373608483015260f860020a60290260a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f115610000575050604080517f010555b8000000000000000000000000000000000000000000000000000000008152600160a060020a03338116602483015260006044830181905260606004840152603c60648401527f6772616e7456657374656453746f636b2875696e74382c75696e743235362c6160848401527f6464726573732c75696e7436342c75696e7436342c75696e743634290000000060a48401529251908716935063010555b89260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601260c48201527f626567696e53616c65286164647265737329000000000000000000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601a60648301527f7472616e7366657253616c6546756e64732875696e743235362900000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152602d60c48201527f7365744163636f756e74696e6753657474696e67732875696e743235362c756960e48201527f6e7436342c75696e7432353
629000000000000000000000000000000000000006101048201529051600160a060020a03861692506369207f04916101248082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152603460648301527f637265617465526563757272696e6752657761726428616464726573732c756960848301527f6e743235362c75696e7436342c737472696e672900000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601b60648301527f72656d6f7665526563757272696e675265776172642875696e7429000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602360648301527f697373756552657761726428616464726573732c75696e743235362c7374726960848301527f6e6729000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f61737369676e53746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f72656d6f766553746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260026024808301919091526003604483015260006064830181905267ffffffffffffffff8616608484015260ff871660a484015260c0600484015260c48301919091527f7365744164647265737342796c617728737472696e672c6
16464726573732c6260e48301527f6f6f6c29000000000000000000000000000000000000000000000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152602160c48301527f73657453746174757342796c617728737472696e672c75696e74382c626f6f6c60e483015260f860020a6029026101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152603860c48301527f736574566f74696e6742796c617728737472696e672c75696e743235362c756960e48301527f6e743235362c626f6f6c2c75696e7436342c75696e74382900000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f115610000575050505b505050565b604080517f225553a4000000000000000000000000000000000000000000000000000000008152600160a060020a0383811660048301526002602483015291519184169163225553a49160448082019260009290919082900301818387803b156100005760325a03f115610000575050505b5050565b600082604051611fd280610f488339600160a060020a03909216910190815260405190819003602001906000f0801561000057905082600160a060020a03166308b027418260016040518363ffffffff1660e060020a0281526004018083600160a060020a0316600160a060020a0316815260200182815260200192505050600060405180830381600087803b156100005760325a03f115610000575050604080517fa14e3ee300000000000000000000000000000000000000000000000000000000815260006004820181905260016024830152600160a060020a0386811660448401529251928716935063a14e3ee39260648084019382900301818387803b156100005760325a03f115610000575050505b5050505600606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c808
2527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565
b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405
180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a031916600160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff000000000000000
01916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b803
0600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408
120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a0333811660009081526001602052604080822093909355908616815220546
1159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b9
2505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929
18290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029a165627a7a723058200e78a5f7e0f91739035d0fbf5eca02f79377210b722f63431f29a22e2880b3bd0029", + "nonce": "789", + "storage": { + "0xfe9ec0542a1c009be8b1f3acf43af97100ffff42eb736850fb038fa1151ad4d9": "0x000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8" + } + }, + "0x5cb4a6b902fcb21588c86c3517e797b07cdaadb9": { + "balance": "0x0", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0xe4a13bc304682a903e9472f469c33801dd18d9e8": { + "balance": "0x33c763c929f62c4f", + "code": "0x", + "nonce": "14", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3451177886", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4713874", + "hash": "0x5d52a672417cd1269bf4f7095e25dcbf837747bba908cd5ef809dc1bd06144b5", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x01a12845ed546b94a038a7a03e8df8d7952024ed41ccb3db7a7ade4abc290ce1", + "nonce": "0x28c446f1cb9748c1", + "number": "2290743", + "stateRoot": "0x4898aceede76739daef76448a367d10015a2c022c9e7909b99a10fbf6fb16708", + "timestamp": "1513616414", + "totalDifficulty": "7146523769022564" + }, + "input": "0xf8aa0e8509502f9000830493e0941d3ddf7caf024f253487e18bc4a15b1a360c170a80b8443b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e829a0524564944fa419f5c189b5074044f89210c6d6b2d77ee8f7f12a927d59b636dfa0015b28986807a424b18b186ee6642d76739df36cad802d20e8c00e79a61d7281", + "result": { + "calls": [ + { + "error": "contract creation code storage out of gas", + "from": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", + 
"gas": "0x39ff0", + "gasUsed": "0x39ff0", + "input": "0x606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b61
0000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b60408051602080
8252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020
600020906003020160005b5082518154600160a060020a031916600160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff80841692860183
9052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c5761000056
5b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a0319168155
60006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057
906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a03338116600090
8152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182", + "type": "CREATE", + "value": "0x0" + } + ], + "error": "invalid jump destination", + "from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8", + "gas": "0x435c8", + "gasUsed": "0x435c8", + "input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8", + "to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer/inner_instafail.json b/eth/tracers/testdata/call_tracer/inner_instafail.json new file mode 100644 index 000000000000..6e221b3c079b --- /dev/null +++ b/eth/tracers/testdata/call_tracer/inner_instafail.json @@ -0,0 +1,63 @@ +{ + "genesis": { + "difficulty": "117067574", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712380", + "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486", + "miner": "0x0c062b329265c965deef1eede55183b3acb8f611", + "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d", + "nonce": "0x2b469722b8e28c45", + "number": "24973", + "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369", + "timestamp": "1479891145", + "totalDifficulty": "1892250259406", + "alloc": { + "0x6c06b16512b332e6cd8293a2974872674716ce18": { + "balance": "0x0", + "nonce": "1", + "code": 
"0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056", + "storage": {} + }, + "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": { + "balance": "0x229ebbb36c3e0f20", + "nonce": "3", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 3, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "byzantiumBlock": 1700000, + "constantinopleBlock": 4230000, + "petersburgBlock": 4939394, + "istanbulBlock": 6485846, + "muirGlacierBlock": 7117117, + "ethash": {} + } + }, + "context": { + "number": "24974", + "difficulty": "117067574", + "timestamp": "1479891162", + "gasLimit": "4712388", + "miner": "0xc822ef32e6d26e170b70cf761e204c1806265914" + }, + "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745", + "result": { + "type": "CALL", + "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "value": "0x0", + "gas": "0x1a466", + "gasUsed": "0x1dc6", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", + "output": "0x", + "calls": [] + } +} diff --git a/eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json b/eth/tracers/testdata/call_tracer/inner_throw_outer_revert.json similarity index 100% rename from eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json rename to eth/tracers/testdata/call_tracer/inner_throw_outer_revert.json diff --git 
a/eth/tracers/testdata/call_tracer_oog.json b/eth/tracers/testdata/call_tracer/oog.json similarity index 100% rename from eth/tracers/testdata/call_tracer_oog.json rename to eth/tracers/testdata/call_tracer/oog.json diff --git a/eth/tracers/testdata/call_tracer_revert.json b/eth/tracers/testdata/call_tracer/revert.json similarity index 100% rename from eth/tracers/testdata/call_tracer_revert.json rename to eth/tracers/testdata/call_tracer/revert.json diff --git a/eth/tracers/testdata/call_tracer/revert_reason.json b/eth/tracers/testdata/call_tracer/revert_reason.json new file mode 100644 index 000000000000..b4f29898c5b3 --- /dev/null +++ b/eth/tracers/testdata/call_tracer/revert_reason.json @@ -0,0 +1,64 @@ +{ + "context": { + "difficulty": "2", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "3212651", + "timestamp": "1597246515" + }, + "genesis": { + "alloc": { + "0xf58833cf0c791881b494eb79d461e08a1f043f52": { + "balance": "0x0", + "code": 
"0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b6102b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c7265616479207
66f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff02191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b600
18160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015414610b0957600080fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610
bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033", + "nonce": "1", + "storage": { + "0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1": { + "balance": "0x57af9d6b3df812900", + "code": "0x", + "nonce": "6", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "IstanbulBlock":1561651, + "chainId": 5, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294", + "result": { + "error": "execution reverted", + "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "gas": "0x2d6e28", + "gasUsed": "0x588", + "input": 
"0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", + "type": "CALL", + "value": "0x0", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + } +} diff --git a/eth/tracers/testdata/call_tracer/selfdestruct.json b/eth/tracers/testdata/call_tracer/selfdestruct.json new file mode 100644 index 000000000000..dd717906bc03 --- /dev/null +++ b/eth/tracers/testdata/call_tracer/selfdestruct.json @@ -0,0 +1,75 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": "0x61deadff", + "nonce": "1", + "storage": {} + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": 
"0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "calls": [ + { + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "gas": "0x0", + "gasUsed": "0x0", + "input": "0x", + "to": "0x000000000000000000000000000000000000dEaD", + "type": "SELFDESTRUCT", + "value": "0x4d87094125a369d9bd5" + } + ], + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x10738", + "gasUsed": "0x7533", + "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "output": "0x", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer/simple.json b/eth/tracers/testdata/call_tracer/simple.json new file mode 100644 index 000000000000..08cb7b2d00c0 --- /dev/null +++ b/eth/tracers/testdata/call_tracer/simple.json @@ -0,0 +1,80 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "calls": [ + { + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "gas": "0x6d05", + "gasUsed": "0x0", + "input": "0x", + "to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "type": "CALL", + "value": "0x6f05b59d3b20000" + } + ], + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x10738", + "gasUsed": "0x3ef9", + "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer_throw.json 
b/eth/tracers/testdata/call_tracer/throw.json similarity index 100% rename from eth/tracers/testdata/call_tracer_throw.json rename to eth/tracers/testdata/call_tracer/throw.json diff --git a/eth/tracers/testdata/call_tracer_legacy/create.json b/eth/tracers/testdata/call_tracer_legacy/create.json new file mode 100644 index 000000000000..8699bf3e7e9c --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/create.json @@ -0,0 +1,58 @@ +{ + "context": { + "difficulty": "3755480783", + "gasLimit": "5401723", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294702", + "timestamp": "1513676146" + }, + "genesis": { + "alloc": { + "0x13e4acefe6a6700604929946e70e6443e4e73447": { + "balance": "0xcf3e0938579f000", + "code": "0x", + "nonce": "9", + "storage": {} + }, + "0x7dc9c9730689ff0b0fd506c67db815f12d90a448": { + "balance": "0x0", + "code": "0x", + "nonce": "0", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3757315409", + "extraData": "0x566961425443", + "gasLimit": "5406414", + "hash": "0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1", + "nonce": "0x93363bbd2c95f410", + "number": "2294701", + "stateRoot": "0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c", + "timestamp": "1513676127", + "totalDifficulty": "7160808139332585" + }, + "input": 
"0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac5760008
0fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d
3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f", + "result": { + "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", + "gas": "0x5e106", + "gasUsed": "0x5e106", + "input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a1575060016000905490610100
0a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69
234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11", + "output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffff
ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029", + "to": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448", + "type": "CREATE", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/deep_calls.json b/eth/tracers/testdata/call_tracer_legacy/deep_calls.json new file mode 100644 index 000000000000..0353d4cfa9ac --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/deep_calls.json @@ -0,0 +1,415 @@ +{ + "context": { + "difficulty": "117066904", + "gasLimit": "4712384", + "miner": "0x1977c248e1014cc103929dd7f154199c916e39ec", + "number": "25001", + "timestamp": "1479891545" + }, + "genesis": { + "alloc": { + "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38": { + 
"balance": "0x0", + "code": "0x606060405236156100825760e060020a600035046302d05d3f811461008a5780630accce061461009c5780631ab9075a146100c757806331ed274614610102578063645a3b7214610133578063772fdae314610155578063a7f4377914610180578063ae5f80801461019e578063c9bded21146101ea578063f905c15a14610231575b61023a610002565b61023c600054600160a060020a031681565b61023a600435602435604435606435608435600254600160a060020a03166000141561024657610002565b61023a600435600254600160a060020a03166000148015906100f8575060025433600160a060020a03908116911614155b156102f457610002565b61023a60043560243560443560643560843560a43560c435600254600160a060020a03166000141561031657610002565b61023a600435602435600254600160a060020a0316600014156103d057610002565b61023a600435602435604435606435608435600254600160a060020a03166000141561046157610002565b61023a60025433600160a060020a0390811691161461051657610002565b61023a6004356024356044356060828152600160a060020a0382169060ff8516907fa6c2f0913db6f79ff0a4365762c61718973b3413d6e40382e704782a9a5099f690602090a3505050565b61023a600435602435600160a060020a038116606090815260ff8316907fee6348a7ec70f74e3d6cba55a53e9f9110d180d7698e9117fc466ae29a43e34790602090a25050565b61023c60035481565b005b6060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061029d57610002565b60408051858152602081018390528151600160a060020a03858116939087169260ff8a16927f5a690ecd0cb15c1c1fd6b6f8a32df0d4f56cb41a54fea7e94020f013595de796929181900390910190a45050505050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061036d57610002565b6040805186815260208101869052808201859052606081018490529051600160a060020a03831691889160ff8b16917fd65d9ddafbad8824e2bbd6f56cc9f4ac27ba60737035c10a321ea2f681c94d47919081900360800190a450505050505050565b60025460e060020
a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061042757610002565b60408051828152905183917fa9c6cbc4bd352a6940479f6d802a1001550581858b310d7f68f7bea51218cda6919081900360200190a25050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104b857610002565b80600160a060020a031684600160a060020a03168660ff167f69bdaf789251e1d3a0151259c0c715315496a7404bce9fd0b714674685c2cab78686604051808381526020018281526020019250505060405180910390a45050505050565b600254600160a060020a0316ff", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0x2cccf5e0538493c235d1c5ef6580f77d99e91396": { + "balance": "0x0", + "code": "0x606060405236156100775760e060020a600035046302d05d3f811461007f57806313bc6d4b146100915780633688a877146100b95780635188f9961461012f5780637eadc976146101545780638ad79680146101d3578063a43e04d814610238578063a7f437791461025e578063e16c7d981461027c575b61029f610002565b6102a1600054600160a060020a031681565b6102be600435600160a060020a03811660009081526002602052604090205460ff165b919050565b6102d26004356040805160208181018352600080835284815260038252835190849020805460026001821615610100026000190190911604601f8101849004840283018401909552848252929390929183018282801561037d5780601f106103525761010080835404028352916020019161037d565b61029f6004356024356000805433600160a060020a039081169116146104a957610002565b61034060043560008181526001602090815260408083205481517ff905c15a0000000000000000000000000000000000000000000000000000000081529151600160a060020a03909116928392839263f905c15a92600483810193919291829003018189876161da5a03f1156100025750506040515195945050505050565b60408051602060248035600481810135601f810185900485028601850190965285855261029f9581359591946044949293909201918190840183828082843750949650
505050505050600054600160a060020a0390811633909116146104f657610002565b61029f6004355b600080548190600160a060020a0390811633909116146105a457610002565b61029f60005433600160a060020a0390811691161461072957610002565b6102a1600435600081815260016020526040902054600160a060020a03166100b4565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156103325780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051918252519081900360200190f35b820191906000526020600020905b81548152906001019060200180831161036057829003601f168201915b505050505090506100b4565b506000828152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a038581168086526002909352818520805460ff191690941790935580517f1ab9075a0000000000000000000000000000000000000000000000000000000081523090931660048401525184939192631ab9075a926024828101939192829003018183876161da5a03f11561000257505060408051602081018690528082019290925243606083015260808083526003908301527f414444000000000000000000000000000000000000000000000000000000000060a0830152517f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d39181900360c00190a15b505050565b600083815260016020526040902054600160a060020a03838116911614156104d0576104a4565b600083815260016020526040812054600160a060020a031614610389576103898361023f565b600082815260036020908152604082208054845182855293839020919360026001831615610100026000190190921691909104601f90810184900483019391929186019083901061056a57805160ff19168380011785555b5061059a9291505b808211156105a05760008155600101610556565b8280016001018555821561054e579182015b8281111561054e57825182600050559160200191906001019061057c565b50505050565b5090565b600083815260016020526040812054600160a060020a031614156105c757610002565b50506000818152600160205260408082205481517fa7f43779000000000000000000000000000000000000000000000000000000008152
9151600160a060020a0391909116928392839263a7f4377992600483810193919291829003018183876161da5a03f11561000257505050600160005060008460001916815260200190815260200160002060006101000a815490600160a060020a0302191690556002600050600083600160a060020a0316815260200190815260200160002060006101000a81549060ff02191690557f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d383834360405180806020018560001916815260200184600160a060020a03168152602001838152602001828103825260038152602001807f44454c000000000000000000000000000000000000000000000000000000000081526020015060200194505050505060405180910390a1505050565b600054600160a060020a0316ff", + "nonce": "1", + "storage": { + "0x0684ac65a9fa32414dda56996f4183597d695987fdb82b145d722743891a6fe8": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "0x1cd76f78169a420d99346e3501dd3e541622c38a226f9b63e01cfebc69879dc7": "0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "0x8e54a4494fe5da016bfc01363f4f6cdc91013bb5434bd2a4a3359f13a23afa2f": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "0x94edf7f600ba56655fd65fca1f1424334ce369326c1dc3e53151dcd1ad06bc13": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xbbee47108b275f55f98482c6800f6372165e88b0330d3f5dae6419df4734366c": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "0xd38c0c4e84de118cfdcc775130155d83b8bbaaf23dc7f3c83a626b10473213bd": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xfb3aa5c655c2ec9d40609401f88d505d1da61afaa550e36ef5da0509ada257ba": "0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113" + } + }, + "0x3e9286eafa2db8101246c2131c09b49080d00690": { + "balance": "0x0", + "code": 
"0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063056d4470146100e957806316c66cc61461010c5780631ab9075a146101935780633ae1005c146101ce57806358541662146101fe5780635ed61af014610231578063644e3b791461025457806384dbac3b146102db578063949ae479146102fd5780639859387b14610321578063a7f4377914610340578063ab03fc261461035e578063e8161b7814610385578063e964d4e114610395578063f905c15a146103a5578063f92eb774146103ae575b6103be610002565b6103c0600054600160a060020a031681565b6103be6004356002546000908190600160a060020a031681141561040357610002565b6103dd60043560006108365b6040805160025460e360020a631c2d8fb30282527f636f6e747261637464620000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435600254600160a060020a03166000148015906101c4575060025433600160a060020a03908116911614155b1561088d57610002565b6103be600435602435604435606435600254600090819081908190600160a060020a03168114156108af57610002565b6103c0600435602435604435606435608435600254600090819081908190600160a060020a03168114156110e857610002565b6103be6004356002546000908190600160a060020a03168114156115ec57610002565b6103c06004356000611b635b6040805160025460e360020a631c2d8fb30282527f6d61726b6574646200000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435602435600254600160a060020a031660001415611bb557610002565b6103be600435602435600254600090600160a060020a0316811415611d2e57610002565b6103be600435600254600160a060020a031660001415611fc657610002565b6103be60025433600160a060020a0390811691161461207e57610002565b6103be600435602435604435600254600090600160a060020a031681141561208c57610002565b6103dd60043560006124b8610260565b6103c0600435600061250a610118565b6103f160035481565b6103f16004356000612561610260565b005b60408051600160a060020a03929092168252519081900360200190f35b60408051911515825
2519081900360200190f35b60408051918252519081900360200190f35b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061046557610002565b8291506104e55b6040805160025460e360020a631c2d8fb30282527f63706f6f6c00000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f115610002575050604051519150505b90565b600160a060020a031663b2206e6d83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fb2206e6d0000000000000000000000000000000000000000000000000000000082526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f11561000257505060405151915061059b90506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019350600092829003018183876161da5a03f115610002575050506107355b6040805160025460e360020a631c2d8fb30282527f6c6f676d6772000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b50826120ee5b6040805160025460e360020a631c2d8fb30282527f6163636f756e7463746c0000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316630accce06600684600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d02825291519192899290916336da446891600482810192602092919082900301816000876161da5a03f115610002575050506040518051906020015086604
0518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050505050565b600160a060020a03166316c66cc6836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150505b919050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061091157610002565b87935061091c610260565b600160a060020a031663bdbdb08685600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fbdbdb0860000000000000000000000000000000000000000000000000000000082526004820152602481018a905290516044808301935060209282900301816000876161da5a03f1156100025750506040515193506109ca90506106ba565b600160a060020a03166381982a7a8885876040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610a3661046c565b600160a060020a03166308636bdb85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517f08636bdb000000000000000000000000000000000000000000000000000000008252600482015260248101889052604481019290925251606482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919450600160a060020a03871692506314baa1b6916024828101926000929190829003018183876161da5a03f11561000257505050610b3561046c565b600160a060020a0316630a3b6ede85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038d16602482015290516044808301935060209282900301816
000876161da5a03f115610002575050604051519150610bd590506106ba565b600160a060020a031663d5b205ce87838b6040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610c41610118565b600160a060020a031663988db79c888a6040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050610ca5610260565b600160a060020a031663f4f2821b896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610d6f5b6040805160025460e360020a631c2d8fb30282527f747261646564620000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316635f539d69896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610dc2610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928e9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610ec5610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928d9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602
001955050505050506000604051808303816000876161da5a03f11561000257505050610fc8610639565b600160a060020a031663645a3b7285600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151905061101e610260565b600160a060020a031663f92eb77488600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f115610002575050505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061114a57610002565b604051600254600160a060020a0316908a908a908a908a908a90611579806125b38339018087600160a060020a0316815260200186600160a060020a03168152602001856000191681526020018481526020018381526020018281526020019650505050505050604051809103906000f092506111c5610118565b600160a060020a031663b9858a288a856040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611229610260565b600160a060020a0316635188f99689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611288610260565b600160a060020a031663bdbdb08689896040518360e060020a0281526004018083600019168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750506040515192506112e590506106ba565b600160a060020a03166346d88e7d8a858a6040518460e060020a0281526004018084600160a060020a0316815260200183600160a060020a0316815260200182815260200193505050506000604051808303816000876161da5a03f115610002575050506113516106ba565b600160a060020a03166381982a7a8a84866040518460e060020a0281526004018084600160a060020a03168152602001838152602
00182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050506113bd61046c565b600160a060020a0316632b58469689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f1156100025750505061141c61046c565b600160a060020a03166308636bdb8984866040518460e060020a028152600401808460001916815260200183815260200182600160a060020a0316815260200193505050506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919350600160a060020a03861692506314baa1b6916024828101926000929190829003018183876161da5a03f115610002575050506114d3610639565b6040805160e160020a630566670302815260016004820152602481018b9052600160a060020a0386811660448301528c811660648301526000608483018190529251931692630accce069260a480840193919291829003018183876161da5a03f11561000257505050611544610639565b600160a060020a031663645a3b728961155b610260565b600160a060020a031663f92eb7748c6040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448084019360009350829003018183876161da5a03f1156100025750939a9950505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061164e57610002565b82915061165961046c565b600160a060020a0316630a3b6ede83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f1156100025750506040515191506116f990506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166
044820152905160648281019350600092829003018183876161da5a03f1156100025750505061179b6106ba565b600160a060020a031663d653078983600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517ff1ff78a0000000000000000000000000000000000000000000000000000000008252915191929163f1ff78a09160048181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f1156100025750505061189f610260565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506118f2610118565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050611945610639565b600160a060020a0316630accce06600484600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d02825291519192899290916336da44689181870191602091908190038801816000876161da5a03f115610002575050506040518051906020015060006040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050611a48610639565b600160a060020a031663645a3b7283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611a9e610260565b600160a060020a031663f92eb77486600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f
11561000257505050505050565b600160a060020a03166381738c59836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611c1757610002565b611c1f610260565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060405151159050611c7457610002565b611c7c610260565b600160a060020a0316632243118a836040518260e060020a02815260040180826000191681526020019150506000604051808303816000876161da5a03f11561000257505050611cca610639565b600160a060020a031663ae5f8080600184846040518460e060020a028152600401808481526020018360001916815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611d9057610002565b5081611d9a610260565b600160a060020a031663581d5d6084846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506000604051808303816000876161da5a03f11561000257505050611df5610639565b600160a060020a0316630accce06600283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630566670302825260048201949094526024810193909352600160a060020a038816604484015260006064840181905260848401819052905160a4808501949293509091829003018183876161da5a03f11561000257505050611eab610639565b600160a060020a031663645a3b7282600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611f01610260565b600160a060020a031663f92eb77485600160a060020a0316632e94420f6040518160e060020a028152600401809050602060405180830381600087616
1da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061202857610002565b612030610118565b600160a060020a0316639859387b826040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505050565b600254600160a060020a0316ff5b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f11561000257505060405151151590506106b457610002565b600160a060020a031663d65307898383600160a060020a031663f1ff78a06040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fd6530789000000000000000000000000000000000000000000000000000000008252600160a060020a039485166004830152602482015292891660448401525160648381019360009350829003018183876161da5a03f115610002575050506121a5610118565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506121f8610cf4565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505061224b610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d028252915191928a9290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a0316815260200182815260200
1955050505050506000604051808303816000876161da5a03f1156100025750505080600160a060020a031663ea71b02d6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a031660001490506124b25761239f610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fea71b02d000000000000000000000000000000000000000000000000000000008252915191928a92909163ea71b02d91600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f115610002575050505b50505050565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b600160a060020a031663213fe2b7836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b600160a060020a031663f92eb774836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610888905056606060405260405160c08061157983396101206040819052825160805160a051935160e0516101005160008054600160a060020a03199081163317909155600180546005805484168817905560048a90556006869055600b8590556008849055909116861760a060020a60ff02191690554360038190556002558686526101408390526101608190529396929594919390929091600160a060020a033016917f76885d242fb71c6f74a7e717416e42eff4d96faf54f6de75c6a0a6bbd8890c6b91a230600160a060020a03167fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff600b600050546040518082815260200191505060405180910390a250505050505061145e8061011b6000396000f3606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b414610212578
06314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d9891602481810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e0600
20a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039
290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd02845290830152915191945060016
0a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd0284529083015
29151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a03918216600485015260248401529251909450918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e0600
20a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "16", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0x70c9217d814985faef62b124420f8dfbddd96433": { + "balance": "0x4ef436dcbda6cd4a", + "code": "0x", + "nonce": "1634", + "storage": {} + }, + "0x7986bad81f4cbd9317f5a46861437dae58d69113": { + "balance": "0x0", + "code": "0x6060604052361561008d5760e060020a600035046302d05d3f811461009557806316c66cc6146100a75780631ab9075a146100d7578063213fe2b7146101125780639859387b1461013f578063988db79c1461015e578063a7f4377914610180578063b9858a281461019e578063c8e40fbf146101c0578063f4f2821b146101e8578063f905c15a14610209575b610212610002565b610214600054600160a060020a031681565b600160a060020a0360043581811660009081526005602052604081205461023193168114610257575060016101e3565b610212600435600254600160a060020a0316600014801590610108575060025433600160a060020a03908116911614155b1561025f57610002565b610214600435600160a060020a03811660009081526004602052604081205460ff16151561027557610002565b610212600435600254600160a060020a03166000141561029b57610002565b610212600435602435600254600160a060020a03166000141561050457610002565b61021260025433600160a060020a0390811691161461056757610002565b610212600435602435600254600160a060020a03166000141561057557610002565b610231600435600160a060020a03811660009081526004602052604090205460ff165b919050565b610212600435600254600090600160a060020a031681141561072057610002565b61024560035481565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060006101e3565b60028054600160a060020a031916821790555b50565b50600160a060020a0381811660009081
52600460205260409020546101009004166101e3565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506102fe57610002565b600160a060020a03811660009081526004602052604090205460ff161515610272576040516104028061092e833901809050604051809103906000f06004600050600083600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600083600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555050565b600160a060020a03821660009081526004602052604090205460ff1615156104725760405161040280610d30833901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a03811660009081526006602052604090208054600160a060020a031916831790555b5050565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506103b957610002565b600254600160a060020a0316ff5b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506105d857610002565b600160a060020a03821660009081526004602052604090205460ff1615156106915760405161040280611132833901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190
815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a031660009081526005602052604090208054600160a060020a0319169091179055565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f115610002575050604051511515905061078357610002565b50600160a060020a0381811660009081526005602090815260408083205490931680835260049091529190205460ff161561080f576040600081812054825160e260020a632e72bafd028152600160a060020a03868116600483015293516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260056020526040812054909116146108545760406000908120600160a060020a0384169091528054600160a060020a03191690555b50600160a060020a0381811660009081526006602090815260408083205490931680835260049091529190205460ff16156108e657600160a060020a038181166000908152604080518183205460e260020a632e72bafd028252868516600483015291516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260066020526040812054909116146105005760406000908120600160a060020a0384169091528054600160a060020a0319169055505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a
03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a03191692166101
0002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316
939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056", + "nonce": "7", + "storage": { + "0xffc4df2d4f3d2cffad590bed6296406ab7926ca9e74784f74a95191fa069a174": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433" + } + }, + "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f": { + "balance": "0x0", + "code": 
"0x606060405236156100ae5760e060020a600035046302d05d3f81146100b65780631ab9075a146100c85780632b68bb2d146101035780634cc927d7146101c557806351a34eb81461028e57806356ccb6f0146103545780635928d37f1461041d578063599efa6b146104e9578063759297bb146105b2578063771d50e11461067e578063a7f4377914610740578063f905c15a1461075e578063f92eb77414610767578063febf661214610836575b610902610002565b610904600054600160a060020a031681565b610902600435600254600160a060020a03166000148015906100f9575060025433600160a060020a03908116911614155b1561092057610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061094257610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610a0d57610002565b61090260043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ae957610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a
060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610bbc57610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610c9657610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610de057610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ebb57610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f9e57610002565b61090260025433600160a060020a0390811691161461106957610002565b61090e60035481565b61090e60043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750506040805180517ff92eb
774000000000000000000000000000000000000000000000000000000008252600482018790529151919350600160a060020a038416925063f92eb774916024828101926020929190829003018188876161da5a03f11561000257505060405151949350505050565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061107757610002565b005b6060908152602090f35b60408051918252519081900360200190f35b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5ed61af000000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152925190959286169350635ed61af092602483810193919291829003018183876161da5a03f115610002575050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fab03fc2600000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015260248301899052808816604484015292519095928616935063ab03fc2692606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f949ae47900000000000000000000000000000000000000000000000000000000825233600160a060020a0390811660048401526024830188905292519095928616935063949ae47992604483810193919291829003018183876161da5a03f11561000257505
050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f46d88e7d000000000000000000000000000000000000000000000000000000008252600160a060020a0380891660048401523381166024840152604483018890529251909592861693506346d88e7d92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5315cdde00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a16602484015260448301889052925190959286169350635315cdde92606483810193919291829003018183876161da5a03f115610002575050604080517f5928d37f00000000000000000000000000000000000000000000000000000000815233600160a060020a03908116600483015287166024820152604481018690529051635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fe68e401c00000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015280891660248401526044830188905292519095928616935063e68e401c92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5152f381000000000000000000000000000000000000000000000000000000008252600160a060020a03808a1660048401528089166024840152604483018890523381166064840152925190959286169350635152f38192608483810193919291829003018183876161da5a03f115610002575050505050505050565
b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f056d447000000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015292519095928616935063056d447092602483810193919291829003018183876161da5a03f115610002575050505050565b600254600160a060020a0316ff5b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f3ae1005c00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a166024840152808916604484015260648301889052925190959286169350633ae1005c92608483810193919291829003018183876161da5a03f11561000257505050505050505056", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0xc212e03b9e060e36facad5fd8f4435412ca22e6b": { + "balance": "0x0", + "code": 
"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d989160248
1810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546
011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d9891602
48181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169
163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a0391821660048501526024840152925190945
0918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000006195", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x5842545553440000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000283c7b9181eca20000" + } + }, + "0xcf00ffd997ad14939736f026006498e3f099baaf": { + "balance": "0x0", + "code": 
"0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063031e7f5d146100e95780631ab9075a1461010b5780632243118a1461014657806327aad68a1461016557806338a699a4146101da5780635188f996146101f8578063581d5d601461021e57806381738c5914610246578063977da54014610269578063a07421ce14610288578063a7f43779146102be578063bdbdb086146102dc578063e1c7111914610303578063f4f2821b14610325578063f905c15a1461034a578063f92eb77414610353575b610387610002565b610389600054600160a060020a031681565b610387600435602435600254600160a060020a0316600014156103a857610002565b610387600435600254600160a060020a031660001480159061013c575060025433600160a060020a03908116911614155b1561042957610002565b610387600435600254600160a060020a03166000141561044b57610002565b6102ac60043560008181526004602081815260408320547f524d81d3000000000000000000000000000000000000000000000000000000006060908152610100909104600160a060020a031692839263524d81d3926064928188876161da5a03f1156100025750506040515192506103819050565b61039c60043560008181526004602052604090205460ff165b919050565b6103876004356024356002546000908190600160a060020a031681141561079457610002565b61038760043560243560025460009081908190600160a060020a031681141561080457610002565b61038960043560008181526004602052604081205460ff1615156109e357610002565b610387600435600254600160a060020a0316600014156109fb57610002565b600435600090815260096020526040902054670de0b6b3a764000090810360243502045b60408051918252519081900360200190f35b61038760025433600160a060020a03908116911614610a9257610002565b600435600090815260086020526040902054670de0b6b3a7640000602435909102046102ac565b610387600435602435600254600160a060020a031660001415610aa057610002565b61038760043560025460009081908190600160a060020a0316811415610b3657610002565b6102ac60035481565b6102ac600435600081815260076020908152604080832054600690925290912054670de0b6b3a76400000204805b50919050565b005b600160a060020a03166060908152602090f35b15156060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f
11561000257505060405151151590506103fe57610002565b60008281526004602052604090205460ff16151561041b57610002565b600860205260406000205550565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104a157610002565b604080516000838152600460205291909120805460ff1916600117905561040280610de2833901809050604051809103906000f0600460005060008360001916815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555066470de4df8200006008600050600083600019168152602001908152602001600020600050819055506703782dace9d9000060096000506000836000191681526020019081526020016000206000508190555050565b600460005060008560001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151821415905061060057838152600660209081526040808320839055600790915281208190555b81600160a060020a0316630a3b0a4f846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050600160a060020a038316808252600560209081526040808420879055805160e160020a6364a81ff102815290518694670de0b6b3a7640000949363c9503fe29360048181019492939183900301908290876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008660001916815260200190815260200160002060008282825054019250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000866000191681526020019081526020016000206000828282505401925050819055505b50505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161d
a5a03f11561000257505060405151151590506107e957610002565b8381526004602052604081205460ff16151561056657610002565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f115610002575050604051511515905061085957610002565b849250670de0b6b3a764000083600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575060408051805160e160020a6364a81ff102825291519189028590049650600481810192602092909190829003018188876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b9391600481810192602092909190829003018189876161da5a03f115610002575050506040518051906020015002049050806006600050600085600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750604080518051855260208681528286208054989098039097557f2e94420f00000000000000000000000000000000000000000000000000000000815290518896600483810193919291829003018187876161da5a03f115610002575050604080515183526020939093525020805490910190555050505050565b60409020546101009004600160a060020a03166101f3565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610a5157610002565b60008181526004602052604090205460ff161515610a6e57610002565b6040600020805474ffffffffffffffffffffffffffffffffffffffffff1916905550565b600254600160a060020a0316ff5b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610af657610002565b60008281526004602052604090205460ff161515610b1357610002565b670de0b6b3a7640000811115610b2857610002565b600960205260406000205550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f1156100025750506040515115159050610b8b57610002565b600160a060020a038416815260056020908152604080832054808452600490925282205490935060ff161
515610bc057610002565b600460005060008460001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663b9caebf4856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506005600050600085600160a060020a0316815260200190815260200160002060005060009055839050600082600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519190911115905061078e57670de0b6b3a764000081600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008560001916815260200190815260200160002060008282825054039250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000856000191681526020019081526020016000206000828282505403925050819055505050505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a
060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056", + "nonce": "3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x3571d73f14f31a1463bd0a2f92f7fde1653d4e1ead7aedf4b0a5df02f16092ab": "0x0000000000000000000000000000000000000000000007d634e4c55188be0000", + "0x4e64fe2d1b72d95a0a31945cc6e4f4e524ac5ad56d6bd44a85ec7bc9cc0462c0": "0x000000000000000000000000000000000000000000000002b5e3af16b1880000" + } + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "117124093", + "extraData": 
"0xd5830105008650617269747986312e31322e31826d61", + "gasLimit": "4707788", + "hash": "0xad325e4c49145fb7a4058a68ac741cc8607a71114e23fc88083c7e881dd653e7", + "miner": "0x00714b9ac97fd6bd9325a059a70c9b9fa94ce050", + "mixHash": "0x0af918f65cb4af04b608fc1f14a849707696986a0e7049e97ef3981808bcc65f", + "nonce": "0x38dee147326a8d40", + "number": "25000", + "stateRoot": "0xc5d6bbcd46236fcdcc80b332ffaaa5476b980b01608f9708408cfef01b58bd5b", + "timestamp": "1479891517", + "totalDifficulty": "1895410389427" + }, + "input": "0xf88b8206628504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb80000000000000000000000000000000000000000000000280faf689c35ac00002aa0a7ee5b7877811bf671d121b40569462e722657044808dc1d6c4f1e4233ec145ba0417e7543d52b65738d9df419cbe40a708424f4d54b0fc145c0a64545a2bb1065", + "result": { + "calls": [ + { + "from": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "gas": "0x31217", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e7472616374617069000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x2a68d", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x23ac9", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e7472616374646200000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": 
"0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x23366", + "gasUsed": "0x273", + "input": "0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x7986bad81f4cbd9317f5a46861437dae58d69113", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x29f35", + "gasUsed": "0xf8d", + "input": "0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x28a9e", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x21d79", + "gasUsed": "0x24d", + "input": "0x13bc6d4b000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x2165b", + "gasUsed": "0x334", + "input": "0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1a8e8", + "gasUsed": "0x24d", + "input": 
"0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1a2c6", + "gasUsed": "0x3cb", + "input": "0xc9503fe2", + "output": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x19b72", + "gasUsed": "0x3cb", + "input": "0xc9503fe2", + "output": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x19428", + "gasUsed": "0x305", + "input": "0x6f265b93", + "output": "0x0000000000000000000000000000000000000000000000283c7b9181eca20000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x18d45", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1734e", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x20ee1", + "gasUsed": "0x5374", + "input": 
"0x581d5d60000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1b6c1", + "gasUsed": "0x334", + "input": "0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1af69", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "gas": "0x143a5", + "gasUsed": "0x24d", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1a91d", + "gasUsed": "0x12fa", + "input": "0x0accce0600000000000000000000000000000000000000000000000000000000000000025842545553440000000000000000000000000000000000000000000000000000000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x", + "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x19177", + "gasUsed": "0x334", + "input": 
"0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x18a22", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x18341", + "gasUsed": "0x334", + "input": "0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x17bec", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1764e", + "gasUsed": "0x45c", + "input": "0xf92eb7745842545553440000000000000000000000000000000000000000000000000000", + "output": "0x00000000000000000000000000000000000000000000002816d180e30c390000", + "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "gas": "0x108ba", + "gasUsed": "0x24d", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + } + ], + "from": 
"0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x16e62", + "gasUsed": "0xebb", + "input": "0x645a3b72584254555344000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002816d180e30c390000", + "output": "0x", + "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x283b9", + "gasUsed": "0xc51c", + "input": "0x949ae479000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "gas": "0x30b4a", + "gasUsed": "0xedb7", + "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0x70c9217d814985faef62b124420f8dfbddd96433", + "gas": "0x37b38", + "gasUsed": "0x12bb3", + "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/delegatecall.json b/eth/tracers/testdata/call_tracer_legacy/delegatecall.json new file mode 100644 index 000000000000..f7ad6df5f526 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/delegatecall.json @@ -0,0 +1,97 @@ +{ + "context": { + "difficulty": "31927752", + "gasLimit": "4707788", + "miner": "0x5659922ce141eedbc2733678f9806c77b4eebee8", + "number": "11495", + "timestamp": "1479735917" + }, + "genesis": { + "alloc": { + "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff": { + "balance": "0x0", + "code": 
"0x606060405236156100825760e060020a60003504630a0313a981146100875780630a3b0a4f146101095780630cd40fea1461021257806329092d0e1461021f5780634cd06a5f146103295780635dbe47e8146103395780637a9e5410146103d9578063825db5f7146103e6578063a820b44d146103f3578063efa52fb31461047a575b610002565b34610002576104fc600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a26333556e849091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f415610002575050604051519150505b919050565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f21ce24d4000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926321ce24d49260448082019391829003018186803b156100025760325a03f415610002575050505b50565b3461000257610512600181565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f89489a87000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926389489a879260448082019391829003018186803b156100025760325a03f4156100025750505061020f565b3461000257610528600435610403565b34610002576104fc600435604080516000602091820181905282517f7d65837a00000000000000000000000000000000000000000000000000000000815260048101829052600160a060020a0385166024820152925190927342b02b5deeb78f34cd5ac896473b63e6c99a71a292637d65837a92604480840193829003018186803b156100025760325a03f4156100025750506040515191506101049050565b3461000257610512600c81565
b3461000257610512600081565b3461000257610528600061055660005b600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263685a1f3c9091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b346100025761053a600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263f775b6b59091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b604080519115158252519081900360200190f35b005b6040805160ff9092168252519081900360200190f35b60408051918252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b90509056", + "nonce": "1", + "storage": { + "0x4d140b25abf3c71052885c66f73ce07cff141c1afabffdaf5cba04d625b7ebcc": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + }, + "0x269296dddce321a6bcbaa2f0181127593d732cba": { + "balance": "0x0", + "code": 
"0x606060405236156101275760e060020a60003504630cd40fea811461012c578063173825d9146101395780631849cb5a146101c7578063285791371461030f5780632a58b3301461033f5780632cb0d48a146103565780632f54bf6e1461036a578063332b9f061461039d5780633ca8b002146103c55780633df4ddf4146103d557806341c0e1b5146103f457806347799da81461040557806362a51eee1461042457806366907d13146104575780637065cb48146104825780637a9e541014610496578063825db5f7146104a3578063949d225d146104b0578063a51687df146104c7578063b4da4e37146104e6578063b4e6850b146104ff578063bd7474ca14610541578063e75623d814610541578063e9938e1114610555578063f5d241d314610643575b610002565b3461000257610682600181565b34610002576106986004356106ff335b60006001600a9054906101000a9004600160a060020a0316600160a060020a0316635dbe47e8836000604051602001526040518260e060020a0281526004018082600160a060020a03168152602001915050602060405180830381600087803b156100025760325a03f1156100025750506040515191506103989050565b3461000257604080516101008082018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a0360043581168752600586529589902089519788018a528054808816808a52605060020a91829004600160a060020a0316978a01889052600183015463ffffffff8082169d8c018e905264010000000082048116988c01899052604060020a90910416958a018690526002830154948a01859052600390920154808916938a01849052049096169690970186905293969495949293604080516001605060020a03998a16815297891660208901529590971686860152600160a060020a03909316606086015263ffffffff9182166080860152811660a08501521660c083015260e08201929092529051908190036101000190f35b346100025761069a60043560018054600091829160ff60f060020a909104161515141561063d5761072833610376565b34610002576106ae6004546001605060020a031681565b34610002576106986004356108b333610149565b346100025761069a6004355b600160a060020a03811660009081526002602052604090205460ff1615156001145b919050565b34610002576106986001805460ff60f060020a9091041615151415610913576108ed33610376565b346100025761069a600435610149565b34610002576106ae60035460016050600
20a03605060020a9091041681565b346100025761069861091533610149565b34610002576106ae6003546001605060020a0360a060020a9091041681565b346100025761069a60043560243560018054600091829160ff60f060020a909104161515141561095e5761092633610376565b34610002576106986004356001805460ff60f060020a909104161515141561072557610a8b33610376565b3461000257610698600435610aa533610149565b3461000257610682600c81565b3461000257610682600081565b34610002576106ae6003546001605060020a031681565b34610002576106ca600154600160a060020a03605060020a9091041681565b346100025761069a60015460ff60f060020a9091041681565b346100025761069a60043560243560443560643560843560a43560c43560018054600091829160ff60f060020a9091041615151415610b5857610ad233610376565b3461000257610698600435610bd633610149565b34610002576106e6600435604080516101008181018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a03808b168752600586529589902089519788018a5280548088168952600160a060020a03605060020a918290041696890196909652600181015463ffffffff8082169b8a019b909b5264010000000081048b1695890195909552604060020a90940490981691860182905260028301549086015260039091015480841696850196909652940416918101919091525b50919050565b346100025761069a60043560243560443560643560843560a43560018054600091829160ff60f060020a9091041615151415610c8e57610bfb33610376565b6040805160ff9092168252519081900360200190f35b005b604080519115158252519081900360200190f35b604080516001605060020a039092168252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b6040805163ffffffff9092168252519081900360200190f35b1561012757600160a060020a0381166000908152600260205260409020805460ff191690555b50565b1561063d57506001605060020a0380831660009081526005602052604090208054909116151561075b576000915061063d565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a083015260038401548
0841660c08401520490911660e0820152610817905b8051600354600090819060016001605060020a0390911611610c995760038054605060020a60f060020a0319169055610ddf565b600380546001605060020a031981166000196001605060020a03928316011782558416600090815260056020526040812080547fffff000000000000000000000000000000000000000000000000000000000000168155600181810180546bffffffffffffffffffffffff191690556002820192909255909101805473ffffffffffffffffffffffffffffffffffffffff19169055915061063d565b1561012757600180547fff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1660f060020a8302179055610725565b1561091357600480546001605060020a031981166001605060020a039091166001011790555b565b156101275733600160a060020a0316ff5b1561095e57506001605060020a03808416600090815260056020526040902080549091161515610965576000915061095e565b600191505b5092915050565b60038101546001605060020a0384811691161415610986576001915061095e565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a0830152600384015480841660c08401520490911660e0820152610a12906107e3565b61095983825b80546003546001605060020a0391821691600091161515610de55760038054605060020a60a060020a031916605060020a84021760a060020a69ffffffffffffffffffff02191660a060020a84021781558301805473ffffffffffffffffffffffffffffffffffffffff19169055610ddf565b1561072557600480546001605060020a0319168217905550565b1561012757600160a060020a0381166000908152600260205260409020805460ff19166001179055610725565b15610b5857506001605060020a038088166000908152600560205260409020805490911615610b645760009150610b58565b6004546001605060020a0390811690891610610b3057600480546001605060020a03191660018a011790555b6003805460016001605060020a03821681016001605060020a03199092169190911790915591505b50979650505050505050565b80546001605060020a0319168817605060020a60f060020a031916605060020a880217815560018101805463ffffffff1916871767ffffffff0000000019166401000000008702176bffffffff000000000
00000001916604060020a860217905560028101839055610b048982610a18565b156101275760018054605060020a60f060020a031916605060020a8302179055610725565b15610c8e57506001605060020a03808816600090815260056020526040902080549091161515610c2e5760009150610c8e565b8054605060020a60f060020a031916605060020a88021781556001808201805463ffffffff1916881767ffffffff0000000019166401000000008802176bffffffff00000000000000001916604060020a87021790556002820184905591505b509695505050505050565b6003546001605060020a03848116605060020a909204161415610d095760e084015160038054605060020a928302605060020a60a060020a031990911617808255919091046001605060020a031660009081526005602052604090200180546001605060020a0319169055610ddf565b6003546001605060020a0384811660a060020a909204161415610d825760c08401516003805460a060020a92830260a060020a69ffffffffffffffffffff021990911617808255919091046001605060020a03166000908152600560205260409020018054605060020a60a060020a0319169055610ddf565b505060c082015160e08301516001605060020a0380831660009081526005602052604080822060039081018054605060020a60a060020a031916605060020a8702179055928416825290200180546001605060020a031916831790555b50505050565b6001605060020a0384161515610e6457600380546001605060020a03605060020a9182900481166000908152600560205260409020830180546001605060020a0319908116871790915583548785018054918590049093168402605060020a60a060020a03199182161790911690915582549185029116179055610ddf565b506001605060020a038381166000908152600560205260409020600390810180549185018054605060020a60a060020a0319908116605060020a94859004909516808502959095176001605060020a0319168817909155815416918402919091179055801515610ef4576003805460a060020a69ffffffffffffffffffff02191660a060020a8402179055610ddf565b6003808401546001605060020a03605060020a9091041660009081526005602052604090200180546001605060020a031916831790555050505056", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000113204f5d64c28326fd7bd05fd4ea855302d7f2ff00000000000000000000" + } + }, + 
"0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2": { + "balance": "0x0", + "code": "0x6504032353da7150606060405236156100695760e060020a60003504631bf7509d811461006e57806321ce24d41461008157806333556e84146100ec578063685a1f3c146101035780637d65837a1461011757806389489a8714610140578063f775b6b5146101fc575b610007565b61023460043560006100fd82600061010d565b610246600435602435600160a060020a03811660009081526020839052604081205415156102cb57826001016000508054806001018281815481835581811511610278576000838152602090206102789181019083015b808211156102d057600081556001016100d8565b610248600435602435600182015481105b92915050565b6102346004356024355b60018101906100fd565b610248600435602435600160a060020a03811660009081526020839052604090205415156100fd565b61024660043560243580600160a060020a031632600160a060020a03161415156101f857600160a060020a038116600090815260208390526040902054156101f857600160a060020a038116600090815260208390526040902054600183018054909160001901908110156100075760009182526020808320909101805473ffffffffffffffffffffffffffffffffffffffff19169055600160a060020a038316825283905260408120556002820180546000190190555b5050565b61025c60043560243560008260010160005082815481101561000757600091825260209091200154600160a060020a03169392505050565b60408051918252519081900360200190f35b005b604080519115158252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b50505060009283526020808420909201805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a0385168352908590526040909120819055600284018054600101905590505b505050565b509056", + "nonce": "1", + "storage": {} + }, + "0xa529806c67cc6486d4d62024471772f47f6fd672": { + "balance": "0x67820e39ac8fe9800", + "code": "0x", + "nonce": "68", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + 
"difficulty": "31912170", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712388", + "hash": "0x0855914bdc581bccdc62591fd438498386ffb59ea4d5361ed5c3702e26e2c72f", + "miner": "0x334391aa808257952a462d1475562ee2106a6c90", + "mixHash": "0x64bb70b8ca883cadb8fbbda2c70a861612407864089ed87b98e5de20acceada6", + "nonce": "0x684129f283aaef18", + "number": "11494", + "stateRoot": "0x7057f31fe3dab1d620771adad35224aae43eb70e94861208bc84c557ff5b9d10", + "timestamp": "1479735912", + "totalDifficulty": "90744064339" + }, + "input": "0xf889448504a817c800832dc6c094269296dddce321a6bcbaa2f0181127593d732cba80a47065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e29a080ed81e4c5e9971a730efab4885566e2c868cd80bd4166d0ed8c287fdf181650a069d7c49215e3d4416ad239cd09dbb71b9f04c16b33b385d14f40b618a7a65115", + "result": { + "calls": [ + { + "calls": [ + { + "from": "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff", + "gas": "0x2bf459", + "gasUsed": "0x2aa", + "input": "0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2", + "type": "DELEGATECALL" + } + ], + "from": "0x269296dddce321a6bcbaa2f0181127593d732cba", + "gas": "0x2cae73", + "gasUsed": "0xa9d", + "input": "0x5dbe47e8000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xa529806c67cc6486d4d62024471772f47f6fd672", + "gas": "0x2d6e28", + "gasUsed": "0x64bd", + "input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e", + "output": "0x", + "to": "0x269296dddce321a6bcbaa2f0181127593d732cba", + "type": "CALL", + "value": "0x0" + } +} diff --git 
a/eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json b/eth/tracers/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json similarity index 100% rename from eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json rename to eth/tracers/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json diff --git a/eth/tracers/testdata/call_tracer_legacy/inner_instafail.json b/eth/tracers/testdata/call_tracer_legacy/inner_instafail.json new file mode 100644 index 000000000000..86070d130857 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/inner_instafail.json @@ -0,0 +1,72 @@ +{ + "genesis": { + "difficulty": "117067574", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712380", + "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486", + "miner": "0x0c062b329265c965deef1eede55183b3acb8f611", + "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d", + "nonce": "0x2b469722b8e28c45", + "number": "24973", + "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369", + "timestamp": "1479891145", + "totalDifficulty": "1892250259406", + "alloc": { + "0x6c06b16512b332e6cd8293a2974872674716ce18": { + "balance": "0x0", + "nonce": "1", + "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056", + "storage": {} + }, + "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": { + "balance": "0x229ebbb36c3e0f20", + "nonce": "3", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 3, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "byzantiumBlock": 
1700000, + "constantinopleBlock": 4230000, + "petersburgBlock": 4939394, + "istanbulBlock": 6485846, + "muirGlacierBlock": 7117117, + "ethash": {} + } + }, + "context": { + "number": "24974", + "difficulty": "117067574", + "timestamp": "1479891162", + "gasLimit": "4712388", + "miner": "0xc822ef32e6d26e170b70cf761e204c1806265914" + }, + "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745", + "result": { + "type": "CALL", + "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "value": "0x0", + "gas": "0x1a466", + "gasUsed": "0x1dc6", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", + "output": "0x", + "calls": [ + { + "type": "CALL", + "from": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "to": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "value": "0x14d1120d7b160000", + "error":"internal failure", + "input": "0x" + } + ] + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/inner_throw_outer_revert.json b/eth/tracers/testdata/call_tracer_legacy/inner_throw_outer_revert.json new file mode 100644 index 000000000000..ec2ceb426fda --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/inner_throw_outer_revert.json @@ -0,0 +1,81 @@ +{ + "context": { + "difficulty": "3956606365", + "gasLimit": "5413248", + "miner": "0x00d8ae40d9a06d0e7a2877b62e32eb959afbe16d", + "number": "2295104", + "timestamp": "1513681256" + }, + "genesis": { + "alloc": { + "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76": { + "balance": "0x0", + "code": 
"0x60606040526004361061015e576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680625b4487146101a257806311df9995146101cb578063278ecde11461022057806330adce0e146102435780633197cbb61461026c5780634bb278f3146102955780636103d70b146102aa57806363a599a4146102bf5780636a2d1cb8146102d457806375f12b21146102fd57806378e979251461032a578063801db9cc1461035357806386d1a69f1461037c5780638da5cb5b146103915780638ef26a71146103e65780639890220b1461040f5780639b39caef14610424578063b85dfb801461044d578063be9a6555146104a1578063ccb07cef146104b6578063d06c91e4146104e3578063d669e1d414610538578063df40503c14610561578063e2982c2114610576578063f02e030d146105c3578063f2fde38b146105d8578063f3283fba14610611575b600060149054906101000a900460ff1615151561017a57600080fd5b60075442108061018b575060085442115b15151561019757600080fd5b6101a03361064a565b005b34156101ad57600080fd5b6101b5610925565b6040518082815260200191505060405180910390f35b34156101d657600080fd5b6101de61092b565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561022b57600080fd5b6102416004808035906020019091905050610951565b005b341561024e57600080fd5b610256610c48565b6040518082815260200191505060405180910390f35b341561027757600080fd5b61027f610c4e565b6040518082815260200191505060405180910390f35b34156102a057600080fd5b6102a8610c54565b005b34156102b557600080fd5b6102bd610f3e565b005b34156102ca57600080fd5b6102d261105d565b005b34156102df57600080fd5b6102e76110d5565b6040518082815260200191505060405180910390f35b341561030857600080fd5b6103106110e1565b604051808215151515815260200191505060405180910390f35b341561033557600080fd5b61033d6110f4565b6040518082815260200191505060405180910390f35b341561035e57600080fd5b6103666110fa565b6040518082815260200191505060405180910390f35b341561038757600080fd5b61038f611104565b005b341561039c57600080fd5b6103a4611196565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35
b34156103f157600080fd5b6103f96111bb565b6040518082815260200191505060405180910390f35b341561041a57600080fd5b6104226111c1565b005b341561042f57600080fd5b610437611296565b6040518082815260200191505060405180910390f35b341561045857600080fd5b610484600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061129c565b604051808381526020018281526020019250505060405180910390f35b34156104ac57600080fd5b6104b46112c0565b005b34156104c157600080fd5b6104c9611341565b604051808215151515815260200191505060405180910390f35b34156104ee57600080fd5b6104f6611354565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561054357600080fd5b61054b61137a565b6040518082815260200191505060405180910390f35b341561056c57600080fd5b610574611385565b005b341561058157600080fd5b6105ad600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506116c3565b6040518082815260200191505060405180910390f35b34156105ce57600080fd5b6105d66116db565b005b34156105e357600080fd5b61060f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611829565b005b341561061c57600080fd5b610648600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506118fe565b005b600080670de0b6b3a7640000341015151561066457600080fd5b61069b610696670de0b6b3a7640000610688610258346119d990919063ffffffff16565b611a0c90919063ffffffff16565b611a27565b9150660221b262dd80006106ba60065484611a7e90919063ffffffff16565b111515156106c757600080fd5b600a60008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84846000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b1515610
7d557600080fd5b6102c65a03f115156107e657600080fd5b5050506040518051905050610808828260010154611a7e90919063ffffffff16565b8160010181905550610827348260000154611a7e90919063ffffffff16565b816000018190555061084434600554611a7e90919063ffffffff16565b60058190555061085f82600654611a7e90919063ffffffff16565b6006819055503373ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c836040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e8583600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60025481565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060085442108061096b5750651b48eb57e00060065410155b15151561097757600080fd5b600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010154821415156109c757600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166323b872dd3330856000604051602001526040518463ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019350505050602060405180830381600087803b1515610ac857600080fd5b6102c65a03f11515610ad957600080fd5b5050506040518051905050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68836000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610b7d57600080fd5b6102c65a03f11515610b8e57600080fd5b505050604051805190501515610ba357600080fd5b600a60003373fffffffffffffffffffffffff
fffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015490506000600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600001819055506000811115610c4457610c433382611a9c565b5b5050565b60055481565b60085481565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610cb157600080fd5b600854421015610cd357660221b262dd8000600654141515610cd257600080fd5b5b651b48eb57e000600654108015610cf057506213c6806008540142105b151515610cfc57600080fd5b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f193505050501515610d7557600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166370a08231306000604051602001526040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050602060405180830381600087803b1515610e3a57600080fd5b6102c65a03f11515610e4b57600080fd5b5050506040518051905090506000811115610f2057600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68826000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610ef957600080fd5b6102c65a03f11515610f0a57600080fd5b505050604051805190501515610f1f57600080fd5b5b6001600960006101000a81548160ff02191690831515021790555050565b600080339150600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905060008114151515610f9657600080fd5b803073fffffffff
fffffffffffffffffffffffffffffff163110151515610fbc57600080fd5b610fd181600254611b5090919063ffffffff16565b6002819055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561105957fe5b5050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156110b857600080fd5b6001600060146101000a81548160ff021916908315150217905550565b670de0b6b3a764000081565b600060149054906101000a900460ff1681565b60075481565b651b48eb57e00081565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561115f57600080fd5b600060149054906101000a900460ff16151561117a57600080fd5b60008060146101000a81548160ff021916908315150217905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60065481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561121c57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f19350505050151561129457600080fd5b565b61025881565b600a6020528060005260406000206000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561131b57600080fd5b600060075414151561132c57600080fd5b4260078190555062278d004201600881905550565b600960009054906101000a900460ff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b660221b262dd800081565b6
0008060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156113e557600080fd5b600654660221b262dd800003925061142b670de0b6b3a764000061141c610258670de0b6b3a76400006119d990919063ffffffff16565b81151561142557fe5b04611a27565b915081831115151561143c57600080fd5b600a60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16856000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b151561158c57600080fd5b6102c65a03f1151561159d57600080fd5b50505060405180519050506115bf838260010154611a7e90919063ffffffff16565b81600101819055506115dc83600654611a7e90919063ffffffff16565b6006819055503073ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c846040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e856000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60016020528060005260406000206000915090505481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561173657600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffffffffffffffffffff
f1663f2fde38b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff166040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050600060405180830381600087803b151561181357600080fd5b6102c65a03f1151561182457600080fd5b505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561188457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415156118fb57806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561195957600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415151561199557600080fd5b80600460006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b600080828402905060008414806119fa57508284828115156119f757fe5b04145b1515611a0257fe5b8091505092915050565b6000808284811515611a1a57fe5b0490508091505092915050565b6000611a416202a300600754611a7e90919063ffffffff16565b421015611a7557611a6e611a5f600584611a0c90919063ffffffff16565b83611a7e90919063ffffffff16565b9050611a79565b8190505b919050565b6000808284019050838110151515611a9257fe5b8091505092915050565b611aee81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054611a7e90919063ffffffff16565b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550611b4681600254611a7e90919063ffffffff16565b6002819055505050565b60008282111
51515611b5e57fe5b8183039050929150505600a165627a7a72305820ec0d82a406896ccf20989b3d6e650abe4dc104e400837f1f58e67ef499493ae90029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000008d69d00910d0b2afb2a99ed6c16c8129fa8e1751", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000e819f024b41358d2c08e3a868a5c5dd0566078d4", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x000000000000000000000000000000000000000000000000000000005a388981", + "0x0000000000000000000000000000000000000000000000000000000000000008": "0x000000000000000000000000000000000000000000000000000000005a3b38e6" + } + }, + "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826": { + "balance": "0x2a2dd979a35cf000", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0xe819f024b41358d2c08e3a868a5c5dd0566078d4": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c681461027257806370a08231146102ad5780638da5cb5b146102fa57806395d89b411461034f578063a9059cbb146103dd578063dd62ed3e14610437578063f2fde38b146104a3575b600080fd5b34156100ca57600080fd5b6100d26104dc565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610515565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba61069c565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffff
ffffffffffffffffffffffffffff169060200190919080359060200190919050506106a2565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610952565b6040518082815260200191505060405180910390f35b341561027d57600080fd5b6102936004808035906020019091905050610957565b604051808215151515815260200191505060405180910390f35b34156102b857600080fd5b6102e4600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610abe565b6040518082815260200191505060405180910390f35b341561030557600080fd5b61030d610b07565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561035a57600080fd5b610362610b2d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103a2578082015181840152602081019050610387565b50505050905090810190601f1680156103cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156103e857600080fd5b61041d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610b66565b604051808215151515815260200191505060405180910390f35b341561044257600080fd5b61048d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d01565b6040518082815260200191505060405180910390f35b34156104ae57600080fd5b6104da600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d88565b005b6040805190810160405280600b81526020017f416c6c436f6465436f696e00000000000000000000000000000000000000000081525081565b6000808214806105a157506000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054145b15156105ac57600080fd5b81600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ff
ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60005481565b600080600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905061077683600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555061080b83600160008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506108618382610e7d90919063ffffffff16565b600260008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a360019150509392505050565b600681565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156109b557600080fd5b610a0782600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffff
ffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610a5f82600054610e7d90919063ffffffff16565b60008190555060003373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a360019050919050565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6040805190810160405280600481526020017f414c4c430000000000000000000000000000000000000000000000000000000081525081565b6000610bba82600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610c4f82600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a36001905092915050565b6000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16
3373ffffffffffffffffffffffffffffffffffffffff16141515610de457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141515610e5c5780600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000808284019050838110151515610e7357fe5b8091505092915050565b6000828211151515610e8b57fe5b8183039050929150505600a165627a7a7230582059f3ea3df0b054e9ab711f37969684ba83fe38f255ffe2c8d850d951121c51100029", + "nonce": "1", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3956606365", + "extraData": "0x566961425443", + "gasLimit": "5418523", + "hash": "0x6f37eb930a25da673ea1bb80fd9e32ddac19cdf7cd4bb2eac62cc13598624077", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0x10971cde68c587c750c23b8589ae868ce82c2c646636b97e7d9856470c5297c7", + "nonce": "0x810f923ff4b450a1", + "number": "2295103", + "stateRoot": "0xff403612573d76dfdaf4fea2429b77dbe9764021ae0e38dc8ac79a3cf551179e", + "timestamp": "1513681246", + "totalDifficulty": "7162347056825919" + }, + "input": "0xf86d808504e3b292008307dfa69433056b5dcac09a9b4becad0e1dcf92c19bd0af76880e92596fd62900008029a0e5f27bb66431f7081bb7f1f242003056d7f3f35414c352cd3d1848b52716dac2a07d0be78980edb0bd2a0678fc53aa90ea9558ce346b0d947967216918ac74ccea", + "result": { + "calls": [ + { + "error": "invalid opcode: INVALID", + "from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", + "gas": "0x75fe3", + "gasUsed": "0x75fe3", + "input": "0xa9059cbb000000000000000000000000d4fcab9f0a6dc0493af47c864f6f17a8a5e2e82600000000000000000000000000000000000000000000000000000000000002f4", + "to": "0xe819f024b41358d2c08e3a868a5c5dd0566078d4", + "type": "CALL", + "value": 
"0x0" + } + ], + "error": "execution reverted", + "from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826", + "gas": "0x78d9e", + "gasUsed": "0x76fc0", + "input": "0x", + "to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", + "type": "CALL", + "value": "0xe92596fd6290000" + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/oog.json b/eth/tracers/testdata/call_tracer_legacy/oog.json new file mode 100644 index 000000000000..de4fed6ab1fb --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/oog.json @@ -0,0 +1,60 @@ +{ + "context": { + "difficulty": "3699098917", + "gasLimit": "5258985", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294631", + "timestamp": "1513675366" + }, + "genesis": { + "alloc": { + "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c68146102785780635a3b7e42146102b357806370a082311461034157806379cc67901461038e57806395d89b41146103e8578063a9059cbb14610476578063dd62ed3e146104b8575b600080fd5b34156100ca57600080fd5b6100d2610524565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061055d565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba6105ea565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506105f0565b604051808215151515815260200191505060405180910390f35b341
561025457600080fd5b61025c610910565b604051808260ff1660ff16815260200191505060405180910390f35b341561028357600080fd5b6102996004808035906020019091905050610915565b604051808215151515815260200191505060405180910390f35b34156102be57600080fd5b6102c6610a18565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103065780820151818401526020810190506102eb565b50505050905090810190601f1680156103335780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561034c57600080fd5b610378600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610a51565b6040518082815260200191505060405180910390f35b341561039957600080fd5b6103ce600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610a69565b604051808215151515815260200191505060405180910390f35b34156103f357600080fd5b6103fb610bf8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561043b578082015181840152602081019050610420565b50505050905090810190601f1680156104685780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561048157600080fd5b6104b6600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610c31565b005b34156104c357600080fd5b61050e600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610e34565b6040518082815260200191505060405180910390f35b6040805190810160405280600881526020017f446f70616d696e6500000000000000000000000000000000000000000000000081525081565b600081600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506001905092915050565b60005481565b6000808373ffffffffffffffffffffffffffffffffffffffff161415151561061757600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1
673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561066557600080fd5b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401101515156106f157fe5b600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054821115151561077c57600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254019250508190555081600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b601281565b600081600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561096557600080fd5b81600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a260019050919050565b60408
05190810160405280600981526020017f446f706d6e20302e32000000000000000000000000000000000000000000000081525081565b60016020528060005260406000206000915090505481565b600081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ab957600080fd5b600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020548211151515610b4457600080fd5b81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a26001905092915050565b6040805190810160405280600581526020017f444f504d4e00000000000000000000000000000000000000000000000000000081525081565b60008273ffffffffffffffffffffffffffffffffffffffff1614151515610c5757600080fd5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ca557600080fd5b600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205481600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020540110151515610d3157fe5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b6
9c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b60026020528160005260406000206020528060005260406000206000915091505054815600a165627a7a723058206d93424f4e7b11929b8276a269038402c10c0ddf21800e999916ddd9dff4a7630029", + "nonce": "1", + "storage": { + "0x296b66049cc4f9c8bf3d4f14752add261d1a980b39bdd194a7897baf39ac7579": "0x0000000000000000000000000000000000000000033b2e3c9fc9653f9e72b1e0" + } + }, + "0x94194bc2aaf494501d7880b61274a169f6502a54": { + "balance": "0xea8c39a876d19888d", + "code": "0x", + "nonce": "265", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3699098917", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5263953", + "hash": "0x03a0f62a8106793dafcfae7b75fd2654322062d585a19cea568314d7205790dc", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x15482cc64b7c00a947f5bf015dfc010db1a6a668c74df61974d6a7848c174408", + "nonce": "0xd1bdb150f6fd170e", + "number": "2294630", + "stateRoot": "0x1ab1a534e84cc787cda1db21e0d5920ab06017948075b759166cfea7274657a1", + "timestamp": "1513675347", + "totalDifficulty": "7160543502214733" + }, + "input": "0xf8ab820109855d21dba00082ca1d9443064693d3d38ad6a7cb579e0d6d9718c8aa6b6280b844a9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f90001ba0ce3ad83f5530136467b7c2bb225f406bd170f4ad59c254e5103c34eeabb5bd69a0455154527224a42ab405cacf0fe92918a75641ce4152f8db292019a5527aa956", + "result": { + "error": "out of gas", + "from": "0x94194bc2aaf494501d7880b61274a169f6502a54", + "gas": "0x7045", + "gasUsed": "0x7045", + "input": 
"0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000", + "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/revert.json b/eth/tracers/testdata/call_tracer_legacy/revert.json new file mode 100644 index 000000000000..059040a1c811 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/revert.json @@ -0,0 +1,58 @@ +{ + "context": { + "difficulty": "3665057456", + "gasLimit": "5232723", + "miner": "0xf4d8e706cfb25c0decbbdd4d2e2cc10c66376a3f", + "number": "2294501", + "timestamp": "1513673601" + }, + "genesis": { + "alloc": { + "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9": { + "balance": "0x2a3fc32bcc019283", + "code": "0x", + "nonce": "10", + "storage": {} + }, + "0xabbcd5b340c80b5f1c0545c04c987b87310296ae": { + "balance": "0x0", + "code": "0x606060405236156100755763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632d0335ab811461007a578063548db174146100ab5780637f649783146100fc578063b092145e1461014d578063c3f44c0a14610186578063c47cf5de14610203575b600080fd5b341561008557600080fd5b610099600160a060020a0360043516610270565b60405190815260200160405180910390f35b34156100b657600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061028f95505050505050565b005b341561010757600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061029e95505050505050565b005b341561015857600080fd5b610172600160a060020a03600435811690602435166102ad565b604051901515815260200160405180910390f35b341561019157600080fd5b6100fa6004803560ff1690602480359160443591606435600160a060020a0316919060a49060843590810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965050509235600160a060020a031692506102cd915050565b005b341561020e57600080fd5b6102546004
6024813581810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965061056a95505050505050565b604051600160a060020a03909116815260200160405180910390f35b600160a060020a0381166000908152602081905260409020545b919050565b61029a816000610594565b5b50565b61029a816001610594565b5b50565b600160209081526000928352604080842090915290825290205460ff1681565b60008080600160a060020a038416158061030d5750600160a060020a038085166000908152600160209081526040808320339094168352929052205460ff165b151561031857600080fd5b6103218561056a565b600160a060020a038116600090815260208190526040808220549295507f19000000000000000000000000000000000000000000000000000000000000009230918891908b908b90517fff000000000000000000000000000000000000000000000000000000000000008089168252871660018201526c01000000000000000000000000600160a060020a038088168202600284015286811682026016840152602a8301869052841602604a820152605e810182805190602001908083835b6020831061040057805182525b601f1990920191602091820191016103e0565b6001836020036101000a0380198251168184511617909252505050919091019850604097505050505050505051809103902091506001828a8a8a6040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f1151561049957600080fd5b5050602060405103519050600160a060020a03838116908216146104bc57600080fd5b600160a060020a0380841660009081526020819052604090819020805460010190559087169086905180828051906020019080838360005b8381101561050d5780820151818401525b6020016104f4565b50505050905090810190601f16801561053a5780820380516001836020036101000a031916815260200191505b5091505060006040518083038160008661646e5a03f1915050151561055e57600080fd5b5b505050505050505050565b600060248251101561057e5750600061028a565b600160a060020a0360248301511690505b919050565b60005b825181101561060157600160a060020a033316600090815260016020526040812083918584815181106105c657fe5b90602001906020020151600160a060020a031681526020810191909152604001600020805460ff19169115159190911790555b6001
01610597565b5b5050505600a165627a7a723058200027e8b695e9d2dea9f3629519022a69f3a1d23055ce86406e686ea54f31ee9c0029", + "nonce": "1", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3672229776", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5227619", + "hash": "0xa07b3d6c6bf63f5f981016db9f2d1d93033833f2c17e8bf7209e85f1faf08076", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x806e151ce2817be922e93e8d5921fa0f0d0fd213d6b2b9a3fa17458e74a163d0", + "nonce": "0xbc5d43adc2c30c7d", + "number": "2294500", + "stateRoot": "0xca645b335888352ef9d8b1ef083e9019648180b259026572e3139717270de97d", + "timestamp": "1513673552", + "totalDifficulty": "7160066586979149" + }, + "input": "0xf9018b0a8505d21dba00832dc6c094abbcd5b340c80b5f1c0545c04c987b87310296ae80b9012473b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988000000000000000000000000000000000000000000000000000000000000000000000000000000001ba0fd659d76a4edbd2a823e324c93f78ad6803b30ff4a9c8bce71ba82798975c70ca06571eecc0b765688ec6c78942c5ee8b585e00988c0141b518287e9be919bc48a", + "result": { + "error": "execution reverted", + "from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9", + "gas": "0x2d55e8", + "gasUsed": "0xc3", + "input": 
"0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000", + "to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/revert_reason.json b/eth/tracers/testdata/call_tracer_legacy/revert_reason.json new file mode 100644 index 000000000000..b4f29898c5b3 --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/revert_reason.json @@ -0,0 +1,64 @@ +{ + "context": { + "difficulty": "2", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "3212651", + "timestamp": "1597246515" + }, + "genesis": { + "alloc": { + "0xf58833cf0c791881b494eb79d461e08a1f043f52": { + "balance": "0x0", + "code": 
"0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b6102b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c7265616479207
66f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff02191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b600
18160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015414610b0957600080fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610
bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033", + "nonce": "1", + "storage": { + "0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1": { + "balance": "0x57af9d6b3df812900", + "code": "0x", + "nonce": "6", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "IstanbulBlock":1561651, + "chainId": 5, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294", + "result": { + "error": "execution reverted", + "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "gas": "0x2d6e28", + "gasUsed": "0x588", + "input": 
"0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", + "type": "CALL", + "value": "0x0", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + } +} diff --git a/eth/tracers/testdata/call_tracer_legacy/selfdestruct.json b/eth/tracers/testdata/call_tracer_legacy/selfdestruct.json new file mode 100644 index 000000000000..132cefa1681a --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/selfdestruct.json @@ -0,0 +1,73 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": "0x61deadff", + "nonce": "1", + "storage": {} + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": 
"0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "calls": [ + { + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "input": "0x", + "to": "0x000000000000000000000000000000000000dEaD", + "type": "SELFDESTRUCT", + "value": "0x4d87094125a369d9bd5" + } + ], + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x10738", + "gasUsed": "0x7533", + "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "output": "0x", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testdata/call_tracer_simple.json b/eth/tracers/testdata/call_tracer_legacy/simple.json similarity index 100% rename from eth/tracers/testdata/call_tracer_simple.json rename to eth/tracers/testdata/call_tracer_legacy/simple.json diff --git a/eth/tracers/testdata/call_tracer_legacy/throw.json b/eth/tracers/testdata/call_tracer_legacy/throw.json new file mode 100644 index 000000000000..09cf449776fb --- /dev/null +++ b/eth/tracers/testdata/call_tracer_legacy/throw.json @@ -0,0 +1,62 @@ +{ + "context": { + "difficulty": "117009631", + "gasLimit": "4712388", + "miner": "0x294e5d6c39a36ce38af1dca70c1060f78dee8070", + "number": "25009", + "timestamp": "1479891666" + }, + "genesis": { + "alloc": { + "0x70c9217d814985faef62b124420f8dfbddd96433": { + "balance": "0x4ecd70668f5d854a", + "code": "0x", + "nonce": "1638", + "storage": {} + }, + "0xc212e03b9e060e36facad5fd8f4435412ca22e6b": { + "balance": "0x0", + "code": 
"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d989160248
1810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546
011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d9891602
48181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169
163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a0391821660048501526024840152925190945
0918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x00000000000000000000000000000000000000000000000000000000000061a9", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433" + } + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "117066792", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712388", + "hash": "0xe23e8d4562a1045b70cbc99fefb20c101a8f0fc8559a80d65fea8896e2f1d46e", + "miner": "0x71842f946b98800fe6feb49f0ae4e253259031c9", + "mixHash": "0x0aada9d6e93dd4db0d09c0488dc0a048fca2ccdc1f3fc7b83ba2a8d393a3a4ff", + "nonce": "0x70849d5838dee2e9", + "number": "25008", + "stateRoot": "0x1e01d2161794768c5b917069e73d86e8dca80cd7f3168c0597de420ab93a3b7b", + "timestamp": "1479891641", + "totalDifficulty": "1896347038589" + }, + "input": 
"0xf88b8206668504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb8000000000000000000000000000000000000000000000027fad02094277c000029a0692a3b4e7b2842f8dd7832e712c21e09f451f416c8976d5b8d02e8c0c2b4bea9a07645e90fc421b63dd755767fd93d3c03b4ec0c4d8fafa059558d08cf11d59750", + "result": { + "error": "invalid jump destination", + "from": "0x70c9217d814985faef62b124420f8dfbddd96433", + "gas": "0x37b38", + "gasUsed": "0x37b38", + "input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + } +} diff --git a/eth/tracers/testing/calltrace_test.go b/eth/tracers/testing/calltrace_test.go new file mode 100644 index 000000000000..9db470251927 --- /dev/null +++ b/eth/tracers/testing/calltrace_test.go @@ -0,0 +1,238 @@ +package testing + +import ( + "encoding/json" + "io/ioutil" + "math/big" + "path/filepath" + "reflect" + "strings" + "testing" + "unicode" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/hexutil" + "github.com/XinFinOrg/XDPoSChain/common/math" + "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" + "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/core/vm" + "github.com/XinFinOrg/XDPoSChain/eth/tracers" + "github.com/XinFinOrg/XDPoSChain/rlp" + "github.com/XinFinOrg/XDPoSChain/tests" + + // Force-load the native, to trigger registration + _ "github.com/XinFinOrg/XDPoSChain/eth/tracers/native" +) + +type callContext struct { + Number math.HexOrDecimal64 `json:"number"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Time math.HexOrDecimal64 `json:"timestamp"` + GasLimit math.HexOrDecimal64 `json:"gasLimit"` + Miner common.Address `json:"miner"` +} + +// callTrace is the result of a callTracer run. 
+type callTrace struct { + Type string `json:"type"` + From common.Address `json:"from"` + To common.Address `json:"to"` + Input hexutil.Bytes `json:"input"` + Output hexutil.Bytes `json:"output"` + Gas *hexutil.Uint64 `json:"gas,omitempty"` + GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"` + Value *hexutil.Big `json:"value,omitempty"` + Error string `json:"error,omitempty"` + Calls []callTrace `json:"calls,omitempty"` +} + +// callTracerTest defines a single test to check the call tracer against. +type callTracerTest struct { + Genesis *core.Genesis `json:"genesis"` + Context *callContext `json:"context"` + Input string `json:"input"` + Result *callTrace `json:"result"` +} + +// Iterates over all the input-output datasets in the tracer test harness and +// runs the JavaScript tracers against them. +func TestCallTracerLegacy(t *testing.T) { + testCallTracer("callTracerLegacy", "call_tracer_legacy", t) +} + +func TestCallTracer(t *testing.T) { + testCallTracer("callTracer", "call_tracer", t) +} + +func testCallTracer(tracerName string, dirPath string, t *testing.T) { + files, err := ioutil.ReadDir(filepath.Join("..", "testdata", dirPath)) + if err != nil { + t.Fatalf("failed to retrieve tracer test suite: %v", err) + } + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".json") { + continue + } + file := file // capture range variable + t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) { + t.Parallel() + + var ( + test = new(callTracerTest) + tx = new(types.Transaction) + ) + // Call tracer test found, read if from disk + if blob, err := ioutil.ReadFile(filepath.Join("..", "testdata", dirPath, file.Name())); err != nil { + t.Fatalf("failed to read testcase: %v", err) + } else if err := json.Unmarshal(blob, test); err != nil { + t.Fatalf("failed to parse testcase: %v", err) + } + if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + t.Fatalf("failed to parse testcase input: %v", err) + } + // Configure a 
blockchain with the given prestate + var ( + signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) + origin, _ = signer.Sender(tx) + context = vm.Context{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: test.Context.Miner, + BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), + Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), + Origin: origin, + GasPrice: tx.GasPrice(), + } + statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc) + ) + tracer, err := tracers.New(tracerName, new(tracers.Context)) + if err != nil { + t.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, statedb, nil, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) + msg, err := tx.AsMessage(signer, nil, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, _, _, err, _ = st.TransitionDb(common.Address{}); err != nil { + t.Fatalf("failed to execute transaction: %v", err) + } + // Retrieve the trace result and compare against the etalon + res, err := tracer.GetResult() + if err != nil { + t.Fatalf("failed to retrieve trace result: %v", err) + } + ret := new(callTrace) + if err := json.Unmarshal(res, ret); err != nil { + t.Fatalf("failed to unmarshal trace result: %v", err) + } + + if !jsonEqual(ret, test.Result) { + // uncomment this for easier debugging + //have, _ := json.MarshalIndent(ret, "", " ") + //want, _ := json.MarshalIndent(test.Result, "", " ") + //t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) + t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) + } + }) + } +} + +// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to +// comparison +func jsonEqual(x, 
y interface{}) bool { + xTrace := new(callTrace) + yTrace := new(callTrace) + if xj, err := json.Marshal(x); err == nil { + json.Unmarshal(xj, xTrace) + } else { + return false + } + if yj, err := json.Marshal(y); err == nil { + json.Unmarshal(yj, yTrace) + } else { + return false + } + return reflect.DeepEqual(xTrace, yTrace) +} + +// camel converts a snake cased input string into a camel cased output. +func camel(str string) string { + pieces := strings.Split(str, "_") + for i := 1; i < len(pieces); i++ { + pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] + } + return strings.Join(pieces, "") +} +func BenchmarkTracers(b *testing.B) { + files, err := ioutil.ReadDir(filepath.Join("..", "testdata", "call_tracer")) + if err != nil { + b.Fatalf("failed to retrieve tracer test suite: %v", err) + } + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".json") { + continue + } + file := file // capture range variable + b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) { + blob, err := ioutil.ReadFile(filepath.Join("..", "testdata", "call_tracer", file.Name())) + if err != nil { + b.Fatalf("failed to read testcase: %v", err) + } + test := new(callTracerTest) + if err := json.Unmarshal(blob, test); err != nil { + b.Fatalf("failed to parse testcase: %v", err) + } + benchTracer("callTracer", test, b) + }) + } +} + +func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { + // Configure a blockchain with the given prestate + tx := new(types.Transaction) + if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + b.Fatalf("failed to parse testcase input: %v", err) + } + signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) + msg, err := tx.AsMessage(signer, nil, nil) + if err != nil { + b.Fatalf("failed to prepare transaction for tracing: %v", err) + } + origin, _ := signer.Sender(tx) + context := vm.Context{ + CanTransfer: 
core.CanTransfer, + Transfer: core.Transfer, + Coinbase: test.Context.Miner, + BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), + Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), + Origin: origin, + GasPrice: tx.GasPrice(), + } + statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + tracer, err := tracers.New(tracerName, new(tracers.Context)) + if err != nil { + b.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, statedb, nil, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) + snap := statedb.Snapshot() + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, _, _, err, _ = st.TransitionDb(common.Address{}); err != nil { + b.Fatalf("failed to execute transaction: %v", err) + } + if _, err = tracer.GetResult(); err != nil { + b.Fatal(err) + } + statedb.RevertToSnapshot(snap) + } +} diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index 6216bc48a8a7..bbec5ad8a952 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -284,9 +284,89 @@ func (cw *contractWrapper) pushObject(vm *duktape.Context) { vm.PutPropString(obj, "getInput") } -// Tracer provides an implementation of Tracer that evaluates a Javascript +type frame struct { + typ *string + from *common.Address + to *common.Address + input []byte + gas *uint + value *big.Int +} + +func newFrame() *frame { + return &frame{ + typ: new(string), + from: new(common.Address), + to: new(common.Address), + gas: new(uint), + } +} + +func (f *frame) pushObject(vm *duktape.Context) { + obj := vm.PushObject() + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, *f.typ); return 1 }) + vm.PutPropString(obj, "getType") + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, *f.from); return 1 }) + 
vm.PutPropString(obj, "getFrom") + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, *f.to); return 1 }) + vm.PutPropString(obj, "getTo") + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, f.input); return 1 }) + vm.PutPropString(obj, "getInput") + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, *f.gas); return 1 }) + vm.PutPropString(obj, "getGas") + + vm.PushGoFunction(func(ctx *duktape.Context) int { + if f.value != nil { + pushValue(ctx, f.value) + } else { + ctx.PushUndefined() + } + return 1 + }) + vm.PutPropString(obj, "getValue") +} + +type frameResult struct { + gasUsed *uint + output []byte + errorValue *string +} + +func newFrameResult() *frameResult { + return &frameResult{ + gasUsed: new(uint), + } +} + +func (r *frameResult) pushObject(vm *duktape.Context) { + obj := vm.PushObject() + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, *r.gasUsed); return 1 }) + vm.PutPropString(obj, "getGasUsed") + + vm.PushGoFunction(func(ctx *duktape.Context) int { pushValue(ctx, r.output); return 1 }) + vm.PutPropString(obj, "getOutput") + + vm.PushGoFunction(func(ctx *duktape.Context) int { + if r.errorValue != nil { + pushValue(ctx, *r.errorValue) + } else { + ctx.PushUndefined() + } + return 1 + }) + vm.PutPropString(obj, "getError") +} + +// JsTracer provides an implementation of JsTracer that evaluates a Javascript // function for each VM execution step. -type Tracer struct { +// TODO gerui rename to private func +type JsTracer struct { vm *duktape.Context // Javascript VM instance tracerObject int // Stack index of the tracer JavaScript object @@ -305,22 +385,34 @@ type Tracer struct { errorValue *string // Swappable error value wrapped by a log accessor refundValue *uint // Swappable refund value wrapped by a log accessor + frame *frame // Represents entry into call frame. Fields are swappable + frameResult *frameResult // Represents exit from a call frame. 
Fields are swappable + ctx map[string]interface{} // Transaction context gathered throughout execution err error // Error, if one has occurred interrupt uint32 // Atomic flag to signal execution interruption reason error // Textual reason for the interruption + + activePrecompiles []common.Address // Updated on CaptureStart based on given rules + traceSteps bool // When true, will invoke step() on each opcode + traceCallFrames bool // When true, will invoke enter() and exit() js funcs +} + +// Context contains some contextual infos for a transaction execution that is not +// available from within the EVM object. +type Context struct { + BlockHash common.Hash // Hash of the block the tx is contained within (zero if dangling tx or call) + TxIndex int // Index of the transaction within a block (zero if dangling tx or call) + TxHash common.Hash // Hash of the transaction being traced (zero if dangling call) } // New instantiates a new tracer instance. code specifies a Javascript snippet, // which must evaluate to an expression returning an object with 'step', 'fault' // and 'result' functions. 
-func New(code string) (*Tracer, error) { - // Resolve any tracers by name and assemble the tracer object - if tracer, ok := tracer(code); ok { - code = tracer - } - tracer := &Tracer{ +// TODO gerui rename to private func +func NewJsTracer(code string, ctx *Context) (*JsTracer, error) { + tracer := &JsTracer{ vm: duktape.New(), ctx: make(map[string]interface{}), opWrapper: new(opWrapper), @@ -333,6 +425,16 @@ func New(code string) (*Tracer, error) { costValue: new(uint), depthValue: new(uint), refundValue: new(uint), + frame: newFrame(), + frameResult: newFrameResult(), + } + if ctx.BlockHash != (common.Hash{}) { + tracer.ctx["blockHash"] = ctx.BlockHash + + if ctx.TxHash != (common.Hash{}) { + tracer.ctx["txIndex"] = ctx.TxIndex + tracer.ctx["txHash"] = ctx.TxHash + } } // Set up builtins for this environment tracer.vm.PushGlobalGoFunction("toHex", func(ctx *duktape.Context) int { @@ -398,8 +500,14 @@ func New(code string) (*Tracer, error) { return 1 }) tracer.vm.PushGlobalGoFunction("isPrecompiled", func(ctx *duktape.Context) int { - _, ok := vm.PrecompiledContractsIstanbul[common.BytesToAddress(popSlice(ctx))] - ctx.PushBoolean(ok) + addr := common.BytesToAddress(popSlice(ctx)) + for _, p := range tracer.activePrecompiles { + if p == addr { + ctx.PushBoolean(true) + return 1 + } + } + ctx.PushBoolean(false) return 1 }) tracer.vm.PushGlobalGoFunction("slice", func(ctx *duktape.Context) int { @@ -426,21 +534,29 @@ func New(code string) (*Tracer, error) { } tracer.tracerObject = 0 // yeah, nice, eval can't return the index itself - if !tracer.vm.GetPropString(tracer.tracerObject, "step") { - return nil, fmt.Errorf("trace object must expose a function step()") - } + hasStep := tracer.vm.GetPropString(tracer.tracerObject, "step") tracer.vm.Pop() if !tracer.vm.GetPropString(tracer.tracerObject, "fault") { - return nil, fmt.Errorf("trace object must expose a function fault()") + return nil, errors.New("trace object must expose a function fault()") } tracer.vm.Pop() if 
!tracer.vm.GetPropString(tracer.tracerObject, "result") { - return nil, fmt.Errorf("trace object must expose a function result()") + return nil, errors.New("trace object must expose a function result()") } tracer.vm.Pop() + hasEnter := tracer.vm.GetPropString(tracer.tracerObject, "enter") + tracer.vm.Pop() + hasExit := tracer.vm.GetPropString(tracer.tracerObject, "exit") + tracer.vm.Pop() + if hasEnter != hasExit { + return nil, fmt.Errorf("trace object must expose either both or none of enter() and exit()") + } + tracer.traceCallFrames = hasEnter && hasExit + tracer.traceSteps = hasStep + // Tracer is valid, inject the big int library to access large numbers tracer.vm.EvalString(bigIntegerJS) tracer.vm.PutGlobalString("bigInt") @@ -489,6 +605,12 @@ func New(code string) (*Tracer, error) { tracer.vm.PutPropString(tracer.stateObject, "log") + tracer.frame.pushObject(tracer.vm) + tracer.vm.PutPropString(tracer.stateObject, "frame") + + tracer.frameResult.pushObject(tracer.vm) + tracer.vm.PutPropString(tracer.stateObject, "frameResult") + tracer.dbWrapper.pushObject(tracer.vm) tracer.vm.PutPropString(tracer.stateObject, "db") @@ -496,14 +618,14 @@ func New(code string) (*Tracer, error) { } // Stop terminates execution of the tracer at the first opportune moment. -func (jst *Tracer) Stop(err error) { +func (jst *JsTracer) Stop(err error) { jst.reason = err atomic.StoreUint32(&jst.interrupt, 1) } // call executes a method on a JS object, catching any errors, formatting and // returning them as error objects. 
-func (jst *Tracer) call(method string, args ...string) (json.RawMessage, error) { +func (jst *JsTracer) call(noret bool, method string, args ...string) (json.RawMessage, error) { // Execute the JavaScript call and return any error jst.vm.PushString(method) for _, arg := range args { @@ -517,7 +639,21 @@ func (jst *Tracer) call(method string, args ...string) (json.RawMessage, error) return nil, errors.New(err) } // No error occurred, extract return value and return - return json.RawMessage(jst.vm.JsonEncode(-1)), nil + if noret { + return nil, nil + } + // Push a JSON marshaller onto the stack. We can't marshal from the out- + // side because duktape can crash on large nestings and we can't catch + // C++ exceptions ourselves from Go. TODO(karalabe): Yuck, why wrap?! + jst.vm.PushString("(JSON.stringify)") + jst.vm.Eval() + + jst.vm.Swap(-1, -2) + if code = jst.vm.Pcall(1); code != 0 { + err := jst.vm.SafeToString(-1) + return nil, errors.New(err) + } + return json.RawMessage(jst.vm.SafeToString(-1)), nil } func wrapError(context string, err error) error { @@ -525,7 +661,7 @@ func wrapError(context string, err error) error { } // CaptureStart implements the Tracer interface to initialize the tracing operation. 
-func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { +func (jst *JsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { jst.ctx["type"] = "CALL" if create { jst.ctx["type"] = "CREATE" @@ -534,13 +670,20 @@ func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr jst.ctx["to"] = to jst.ctx["input"] = input jst.ctx["gas"] = gas + jst.ctx["gasPrice"] = env.Context.GasPrice jst.ctx["value"] = value // Initialize the context jst.ctx["block"] = env.Context.BlockNumber.Uint64() jst.dbWrapper.db = env.StateDB + // Update list of precompiles based on current block + rules := env.ChainConfig().Rules(env.Context.BlockNumber) + jst.activePrecompiles = vm.ActivePrecompiles(rules) + // Compute intrinsic gas isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber) + // after update core.IntrinsicGas, use isIstanbul in it + // isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber) intrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx["type"] == "CREATE", isHomestead) if err != nil { return @@ -548,15 +691,18 @@ func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr jst.ctx["intrinsicGas"] = intrinsicGas } - // CaptureState implements the Tracer interface to trace a single step of VM execution. 
-func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +func (jst *JsTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + if !jst.traceSteps { + return + } if jst.err != nil { return } // If tracing was interrupted, set the error and stop if atomic.LoadUint32(&jst.interrupt) > 0 { jst.err = jst.reason + env.Cancel() return } jst.opWrapper.op = op @@ -576,14 +722,13 @@ func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost *jst.errorValue = err.Error() } - if _, err := jst.call("step", "log", "db"); err != nil { + if _, err := jst.call(true, "step", "log", "db"); err != nil { jst.err = wrapError("step", err) } } - // CaptureFault implements the Tracer interface to trace an execution fault -func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +func (jst *JsTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { if jst.err != nil { return } @@ -591,13 +736,13 @@ func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost jst.errorValue = new(string) *jst.errorValue = err.Error() - if _, err := jst.call("fault", "log", "db"); err != nil { + if _, err := jst.call(true, "fault", "log", "db"); err != nil { jst.err = wrapError("fault", err) } } // CaptureEnd is called after the call finishes to finalize the tracing. 
-func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { +func (jst *JsTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { jst.ctx["output"] = output jst.ctx["time"] = t.String() jst.ctx["gasUsed"] = gasUsed @@ -607,39 +752,72 @@ func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, er } } -// GetResult calls the Javascript 'result' function and returns its value, or any accumulated error -func (jst *Tracer) GetResult() (json.RawMessage, error) { - // Transform the context into a JavaScript object and inject into the state - obj := jst.vm.PushObject() +// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). +func (jst *JsTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if !jst.traceCallFrames { + return + } + if jst.err != nil { + return + } + // If tracing was interrupted, set the error and stop + if atomic.LoadUint32(&jst.interrupt) > 0 { + jst.err = jst.reason + return + } - for key, val := range jst.ctx { - switch val := val.(type) { - case uint64: - jst.vm.PushUint(uint(val)) + *jst.frame.typ = typ.String() + *jst.frame.from = from + *jst.frame.to = to + jst.frame.input = common.CopyBytes(input) + *jst.frame.gas = uint(gas) + jst.frame.value = nil + if value != nil { + jst.frame.value = new(big.Int).SetBytes(value.Bytes()) + } - case string: - jst.vm.PushString(val) + if _, err := jst.call(true, "enter", "frame"); err != nil { + jst.err = wrapError("enter", err) + } +} - case []byte: - ptr := jst.vm.PushFixedBuffer(len(val)) - copy(makeSlice(ptr, uint(len(val))), val) +// CaptureExit is called when EVM exits a scope, even if the scope didn't +// execute any code. 
+func (jst *JsTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + if !jst.traceCallFrames { + return + } + // If tracing was interrupted, set the error and stop + if atomic.LoadUint32(&jst.interrupt) > 0 { + jst.err = jst.reason + return + } + + jst.frameResult.output = common.CopyBytes(output) + *jst.frameResult.gasUsed = uint(gasUsed) + jst.frameResult.errorValue = nil + if err != nil { + jst.frameResult.errorValue = new(string) + *jst.frameResult.errorValue = err.Error() + } - case common.Address: - ptr := jst.vm.PushFixedBuffer(20) - copy(makeSlice(ptr, 20), val[:]) + if _, err := jst.call(true, "exit", "frameResult"); err != nil { + jst.err = wrapError("exit", err) + } +} - case *big.Int: - pushBigInt(val, jst.vm) +// GetResult calls the Javascript 'result' function and returns its value, or any accumulated error +func (jst *JsTracer) GetResult() (json.RawMessage, error) { + // Transform the context into a JavaScript object and inject into the state + obj := jst.vm.PushObject() - default: - panic(fmt.Sprintf("unsupported type: %T", val)) - } - jst.vm.PutPropString(obj, key) + for key, val := range jst.ctx { + jst.addToObj(obj, key, val) } jst.vm.PutPropString(jst.stateObject, "ctx") // Finalize the trace and return the results - result, err := jst.call("result", "ctx", "db") + result, err := jst.call(false, "result", "ctx", "db") if err != nil { jst.err = wrapError("result", err) } @@ -649,3 +827,35 @@ func (jst *Tracer) GetResult() (json.RawMessage, error) { return result, jst.err } + +// addToObj pushes a field to a JS object. 
+func (jst *JsTracer) addToObj(obj int, key string, val interface{}) { + pushValue(jst.vm, val) + jst.vm.PutPropString(obj, key) +} + +func pushValue(ctx *duktape.Context, val interface{}) { + switch val := val.(type) { + case uint64: + ctx.PushUint(uint(val)) + case string: + ctx.PushString(val) + case []byte: + ptr := ctx.PushFixedBuffer(len(val)) + copy(makeSlice(ptr, uint(len(val))), val) + case common.Address: + ptr := ctx.PushFixedBuffer(20) + copy(makeSlice(ptr, 20), val[:]) + case *big.Int: + pushBigInt(val, ctx) + case int: + ctx.PushInt(val) + case uint: + ctx.PushUint(val) + case common.Hash: + ptr := ctx.PushFixedBuffer(32) + copy(makeSlice(ptr, 32), val[:]) + default: + panic(fmt.Sprintf("unsupported type: %T", val)) + } +} diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go index e648973481e2..d1b81cfbc6e1 100644 --- a/eth/tracers/tracer_test.go +++ b/eth/tracers/tracer_test.go @@ -50,13 +50,21 @@ type dummyStatedb struct { func (*dummyStatedb) GetRefund() uint64 { return 1337 } func (*dummyStatedb) GetBalance(addr common.Address) *big.Int { return new(big.Int) } -func runTrace(tracer *Tracer) (json.RawMessage, error) { - env := vm.NewEVM(vm.Context{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) +type vmContext struct { + ctx vm.Context // future pr should distinguish blockContext and txContext +} + +func testCtx() *vmContext { + return &vmContext{ctx: vm.Context{BlockNumber: big.NewInt(1), GasPrice: big.NewInt(100000)}} +} + +func runTrace(tracer Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) { + env := vm.NewEVM(vmctx.ctx, &dummyStatedb{}, nil, chaincfg, vm.Config{Debug: true, Tracer: tracer}) var ( startGas uint64 = 10000 value = big.NewInt(0) ) - contract := vm.NewContract(account{}, account{}, big.NewInt(0), 10000) + contract := vm.NewContract(account{}, account{}, value, startGas) contract.Code = []byte{byte(vm.PUSH1), 0x1, 
byte(vm.PUSH1), 0x1, 0x0} tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value) @@ -69,21 +77,22 @@ func runTrace(tracer *Tracer) (json.RawMessage, error) { } func TestTracer(t *testing.T) { - execTracer := func(code string) []byte { + execTracer := func(code string) ([]byte, string) { t.Helper() - tracer, err := New(code) + tracer, err := New(code, new(Context)) if err != nil { t.Fatal(err) } - ret, err := runTrace(tracer) + ret, err := runTrace(tracer, testCtx(), params.TestChainConfig) if err != nil { - t.Fatal(err) + return nil, err.Error() // Stringify to allow comparison without nil checks } - return ret + return ret, "" } for i, tt := range []struct { code string want string + fail string }{ { // tests that we don't panic on bad arguments to memory access code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", @@ -104,12 +113,15 @@ func TestTracer(t *testing.T) { code: "{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}", want: `["PUSH1","PUSH1","STOP"]`, }, { // tests intrinsic gas - code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasUsed+'.'+ctx.intrinsicGas; }}", - want: `"6.21000"`, + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}", + want: `"100000.6.21000"`, + }, { // tests too deep object / serialization crash + code: "{step: function() {}, fault: function() {}, result: function() { var o={}; var x=o; for (var i=0; i<1000; i++){ o.foo={}; o=o.foo; } return x; }}", + fail: "RangeError: json encode recursion limit in server-side tracer function 'result'", }, } { - if have := execTracer(tt.code); tt.want != string(have) { - t.Errorf("testcase %d: expected return value to be 
%s got %s\n\tcode: %v", i, tt.want, string(have), tt.code) + if have, err := execTracer(tt.code); tt.want != string(have) || tt.fail != err { + t.Errorf("testcase %d: expected return value to be '%s' got '%s', error to be '%s' got '%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code) } } } @@ -118,23 +130,21 @@ func TestHalt(t *testing.T) { t.Skip("duktape doesn't support abortion") timeout := errors.New("stahp") - tracer, err := New("{step: function() { while(1); }, result: function() { return null; }}") + tracer, err := New("{step: function() { while(1); }, result: function() { return null; }}", new(Context)) if err != nil { t.Fatal(err) } - go func() { time.Sleep(1 * time.Second) tracer.Stop(timeout) }() - - if _, err = runTrace(tracer); err.Error() != "stahp in server-side tracer function 'step'" { + if _, err = runTrace(tracer, testCtx(), params.TestChainConfig); err.Error() != "stahp in server-side tracer function 'step'" { t.Errorf("Expected timeout error, got %v", err) } } func TestHaltBetweenSteps(t *testing.T) { - tracer, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }}") + tracer, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }}", new(Context)) if err != nil { t.Fatal(err) } @@ -142,7 +152,6 @@ func TestHaltBetweenSteps(t *testing.T) { scope := &vm.ScopeContext{ Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), } - tracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil) timeout := errors.New("stahp") tracer.Stop(timeout) @@ -156,8 +165,8 @@ func TestHaltBetweenSteps(t *testing.T) { // TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb // in 'result' func TestNoStepExec(t *testing.T) { - runEmptyTrace := func(tracer *Tracer) (json.RawMessage, error) { - env := vm.NewEVM(vm.Context{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + 
runEmptyTrace := func(tracer Tracer, vmctx *vmContext) (json.RawMessage, error) { + env := vm.NewEVM(vmctx.ctx, &dummyStatedb{}, nil, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) startGas := uint64(10000) contract := vm.NewContract(account{}, account{}, big.NewInt(0), startGas) tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, big.NewInt(0)) @@ -166,11 +175,11 @@ func TestNoStepExec(t *testing.T) { } execTracer := func(code string) []byte { t.Helper() - tracer, err := New(code) + tracer, err := New(code, new(Context)) if err != nil { t.Fatal(err) } - ret, err := runEmptyTrace(tracer) + ret, err := runEmptyTrace(tracer, testCtx()) if err != nil { t.Fatal(err) } @@ -191,46 +200,107 @@ func TestNoStepExec(t *testing.T) { } } +func TestIsPrecompile(t *testing.T) { + chaincfg := ¶ms.ChainConfig{ChainId: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), Ethash: new(params.EthashConfig), Clique: nil} + chaincfg.ByzantiumBlock = big.NewInt(100) + chaincfg.IstanbulBlock = big.NewInt(200) + chaincfg.BerlinBlock = big.NewInt(300) + ctx := vm.Context{BlockNumber: big.NewInt(150), GasPrice: big.NewInt(100000)} + tracer, err := New("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", new(Context)) + if err != nil { + t.Fatal(err) + } + res, err := runTrace(tracer, &vmContext{ctx}, chaincfg) + if err != nil { + t.Error(err) + } + if string(res) != "false" { + t.Errorf("Tracer should not consider blake2f as precompile in byzantium") + } + + 
tracer, _ = New("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", new(Context)) + ctx = vm.Context{BlockNumber: big.NewInt(250), GasPrice: big.NewInt(100000)} + res, err = runTrace(tracer, &vmContext{ctx}, chaincfg) + if err != nil { + t.Error(err) + } + if string(res) != "true" { + t.Errorf("Tracer should consider blake2f as precompile in istanbul") + } +} + +func TestEnterExit(t *testing.T) { + // test that either both or none of enter() and exit() are defined + if _, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(Context)); err == nil { + t.Fatal("tracer creation should've failed without exit() definition") + } + if _, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(Context)); err != nil { + t.Fatal(err) + } + + // test that the enter and exit method are correctly invoked and the values passed + tracer, err := New("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(Context)) + if err != nil { + t.Fatal(err) + } + + scope := &vm.ScopeContext{ + Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), + } + + tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int)) + tracer.CaptureExit([]byte{}, 400, nil) + + have, err := tracer.GetResult() + if err != nil { + t.Fatal(err) + } + want := `{"enters":1,"exits":1,"enterGas":1000,"gasUsed":400}` + if string(have) != want { + 
t.Errorf("Number of invocations of enter() and exit() is wrong. Have %s, want %s\n", have, want) + } +} + // TestRegressionPanicSlice tests that we don't panic on bad arguments to memory access func TestRegressionPanicSlice(t *testing.T) { - tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}") + tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", new(Context)) if err != nil { t.Fatal(err) } - if _, err = runTrace(tracer); err != nil { + if _, err = runTrace(tracer, testCtx(), params.TestChainConfig); err != nil { t.Fatal(err) } } // TestRegressionPanicSlice tests that we don't panic on bad arguments to stack peeks func TestRegressionPanicPeek(t *testing.T) { - tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}") + tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}", new(Context)) if err != nil { t.Fatal(err) } - if _, err = runTrace(tracer); err != nil { + if _, err = runTrace(tracer, testCtx(), params.TestChainConfig); err != nil { t.Fatal(err) } } // TestRegressionPanicSlice tests that we don't panic on bad arguments to memory getUint func TestRegressionPanicGetUint(t *testing.T) { - tracer, err := New("{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}") + tracer, err := New("{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}", new(Context)) if err != nil { t.Fatal(err) } - if _, err = runTrace(tracer); err != nil { + 
if _, err = runTrace(tracer, testCtx(), params.TestChainConfig); err != nil { t.Fatal(err) } } func TestTracing(t *testing.T) { - tracer, err := New("{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}") + tracer, err := New("{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}", new(Context)) if err != nil { t.Fatal(err) } - ret, err := runTrace(tracer) + ret, err := runTrace(tracer, testCtx(), params.TestChainConfig) if err != nil { t.Fatal(err) } @@ -240,12 +310,12 @@ func TestTracing(t *testing.T) { } func TestStack(t *testing.T) { - tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}") + tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}", new(Context)) if err != nil { t.Fatal(err) } - ret, err := runTrace(tracer) + ret, err := runTrace(tracer, testCtx(), params.TestChainConfig) if err != nil { t.Fatal(err) } @@ -255,12 +325,12 @@ func TestStack(t *testing.T) { } func TestOpcodes(t *testing.T) { - tracer, err := New("{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}") + tracer, err := New("{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}", new(Context)) if err != nil { t.Fatal(err) } - ret, err := runTrace(tracer) + ret, err := runTrace(tracer, testCtx(), params.TestChainConfig) if err != nil { t.Fatal(err) } diff --git a/eth/tracers/tracers.go b/eth/tracers/tracers.go index 5fd8ff49be7c..65469d46df91 100644 --- a/eth/tracers/tracers.go +++ b/eth/tracers/tracers.go @@ -18,14 +18,53 @@ package tracers import ( + "encoding/json" 
"strings" "unicode" + "github.com/XinFinOrg/XDPoSChain/core/vm" "github.com/XinFinOrg/XDPoSChain/eth/tracers/internal/tracers" ) -// all contains all the built in JavaScript tracers by name. -var all = make(map[string]string) +// Tracer interface extends vm.EVMLogger and additionally +// allows collecting the tracing result. +type Tracer interface { + vm.EVMLogger + GetResult() (json.RawMessage, error) + // Stop terminates execution of the tracer at the first opportune moment. + Stop(err error) +} + +var ( + nativeTracers map[string]func() Tracer = make(map[string]func() Tracer) + jsTracers = make(map[string]string) +) + +// RegisterNativeTracer makes native tracers which adhere +// to the `Tracer` interface available to the rest of the codebase. +// It is typically invoked in the `init()` function, e.g. see the `native/call.go`. +func RegisterNativeTracer(name string, ctor func() Tracer) { + nativeTracers[name] = ctor +} + +// New returns a new instance of a tracer, +// 1. If 'code' is the name of a registered native tracer, then that tracer +// is instantiated and returned +// 2. If 'code' is the name of a registered js-tracer, then that tracer is +// instantiated and returned +// 3. Otherwise, the code is interpreted as the js code of a js-tracer, and +// is evaluated and returned. +func New(code string, ctx *Context) (Tracer, error) { + // Resolve native tracer + if fn, ok := nativeTracers[code]; ok { + return fn(), nil + } + // Resolve js-tracers by name and assemble the tracer object + if tracer, ok := jsTracers[code]; ok { + code = tracer + } + return NewJsTracer(code, ctx) +} // camel converts a snake cased input string into a camel cased output. 
func camel(str string) string { @@ -40,14 +79,7 @@ func camel(str string) string { func init() { for _, file := range tracers.AssetNames() { name := camel(strings.TrimSuffix(file, ".js")) - all[name] = string(tracers.MustAsset(file)) - } -} + jsTracers[name] = string(tracers.MustAsset(file)) -// tracer retrieves a specific JavaScript tracer by name. -func tracer(name string) (string, bool) { - if tracer, ok := all[name]; ok { - return tracer, true } - return "", false } diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 98b8c8e9abf8..74c3ff284539 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -21,22 +21,17 @@ import ( "crypto/rand" "encoding/json" "math/big" - "os" - "path/filepath" "reflect" - "strings" "testing" "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/common/hexutil" - "github.com/XinFinOrg/XDPoSChain/common/math" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/core/vm" "github.com/XinFinOrg/XDPoSChain/crypto" "github.com/XinFinOrg/XDPoSChain/params" - "github.com/XinFinOrg/XDPoSChain/rlp" "github.com/XinFinOrg/XDPoSChain/tests" ) @@ -104,20 +99,81 @@ type callTrace struct { Calls []callTrace `json:"calls,omitempty"` } -type callContext struct { - Number math.HexOrDecimal64 `json:"number"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Time math.HexOrDecimal64 `json:"timestamp"` - GasLimit math.HexOrDecimal64 `json:"gasLimit"` - Miner common.Address `json:"miner"` -} - -// callTracerTest defines a single test to check the call tracer against. -type callTracerTest struct { - Genesis *core.Genesis `json:"genesis"` - Context *callContext `json:"context"` - Input string `json:"input"` - Result *callTrace `json:"result"` +// TestZeroValueToNotExitCall tests the calltracer(s) on the following: +// Tx to A, A calls B with zero value. B does not already exist. 
+// Expected: that enter/exit is invoked and the inner call is shown in the result +func TestZeroValueToNotExitCall(t *testing.T) { + var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef") + if err != nil { + t.Fatalf("err %v", err) + } + signer := types.NewEIP155Signer(big.NewInt(1)) + tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{ + GasPrice: big.NewInt(0), + Gas: 50000, + To: &to, + }) + if err != nil { + t.Fatalf("err %v", err) + } + origin, _ := signer.Sender(tx) + context := vm.Context{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: common.Address{}, + BlockNumber: new(big.Int).SetUint64(8000000), + Time: new(big.Int).SetUint64(5), + Difficulty: big.NewInt(0x30000), + GasLimit: uint64(6000000), + Origin: origin, + GasPrice: big.NewInt(1), + } + var code = []byte{ + byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero + byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS + byte(vm.CALL), + } + var alloc = core.GenesisAlloc{ + to: core.GenesisAccount{ + Nonce: 1, + Code: code, + }, + origin: core.GenesisAccount{ + Nonce: 0, + Balance: big.NewInt(500000000000000), + }, + } + statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc) + // Create the tracer, the EVM environment and run it + tracer, err := New("callTracer", new(Context)) + if err != nil { + t.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, statedb, nil, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer}) + msg, err := tx.AsMessage(signer, nil, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, _, _, err, _ = st.TransitionDb(common.Address{}); err != nil { + t.Fatalf("failed to execute transaction: 
%v", err) + } + // Retrieve the trace result and compare against the etalon + res, err := tracer.GetResult() + if err != nil { + t.Fatalf("failed to retrieve trace result: %v", err) + } + have := new(callTrace) + if err := json.Unmarshal(res, have); err != nil { + t.Fatalf("failed to unmarshal trace result: %v", err) + } + wantStr := `{"type":"CALL","from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","to":"0x00000000000000000000000000000000deadbeef","value":"0x0","gas":"0x7148","gasUsed":"0x2d0","input":"0x","output":"0x","calls":[{"type":"CALL","from":"0x00000000000000000000000000000000deadbeef","to":"0x00000000000000000000000000000000000000ff","value":"0x0","gas":"0x6cbf","gasUsed":"0x0","input":"0x","output":"0x"}]}` + want := new(callTrace) + json.Unmarshal([]byte(wantStr), want) + if !jsonEqual(have, want) { + t.Error("have != want") + } } func TestPrestateTracerCreate2(t *testing.T) { @@ -173,7 +229,7 @@ func TestPrestateTracerCreate2(t *testing.T) { statedb := tests.MakePreState(db, alloc) // Create the tracer, the EVM environment and run it - tracer, err := New("prestateTracer") + tracer, err := New("prestateTracer", new(Context)) if err != nil { t.Fatalf("failed to create call tracer: %v", err) } @@ -201,80 +257,97 @@ func TestPrestateTracerCreate2(t *testing.T) { } } -// Iterates over all the input-output datasets in the tracer test harness and -// runs the JavaScript tracers against them. 
-func TestCallTracer(t *testing.T) { - files, err := os.ReadDir("testdata") - if err != nil { - t.Fatalf("failed to retrieve tracer test suite: %v", err) +// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to +// comparison +func jsonEqual(x, y interface{}) bool { + xTrace := new(callTrace) + yTrace := new(callTrace) + if xj, err := json.Marshal(x); err == nil { + json.Unmarshal(xj, xTrace) + } else { + return false } - for _, file := range files { - if !strings.HasPrefix(file.Name(), "call_tracer_") { - continue - } - file := file // capture range variable - t.Run(camel(strings.TrimSuffix(strings.TrimPrefix(file.Name(), "call_tracer_"), ".json")), func(t *testing.T) { - t.Parallel() - - // Call tracer test found, read if from disk - blob, err := os.ReadFile(filepath.Join("testdata", file.Name())) - if err != nil { - t.Fatalf("failed to read testcase: %v", err) - } - test := new(callTracerTest) - if err := json.Unmarshal(blob, test); err != nil { - t.Fatalf("failed to parse testcase: %v", err) - } - // Configure a blockchain with the given prestate - tx := new(types.Transaction) - if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { - t.Fatalf("failed to parse testcase input: %v", err) - } - signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) - origin, _ := signer.Sender(tx) - - context := vm.Context{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Origin: origin, - Coinbase: test.Context.Miner, - BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), - Difficulty: (*big.Int)(test.Context.Difficulty), - GasLimit: uint64(test.Context.GasLimit), - GasPrice: tx.GasPrice(), - } - db := rawdb.NewMemoryDatabase() - statedb := tests.MakePreState(db, test.Genesis.Alloc) - - // Create the tracer, the EVM environment and run it - tracer, err := New("callTracer") - if err != nil { - 
t.Fatalf("failed to create call tracer: %v", err) - } - evm := vm.NewEVM(context, statedb, nil, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) - - msg, err := tx.AsMessage(signer, nil, common.Big0) - if err != nil { - t.Fatalf("failed to prepare transaction for tracing: %v", err) - } - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) - if _, _, _, err, _ = st.TransitionDb(common.Address{}); err != nil { - t.Fatalf("failed to execute transaction: %v", err) - } - // Retrieve the trace result and compare against the etalon - res, err := tracer.GetResult() - if err != nil { - t.Fatalf("failed to retrieve trace result: %v", err) - } - ret := new(callTrace) - if err := json.Unmarshal(res, ret); err != nil { - t.Fatalf("failed to unmarshal trace result: %v", err) - } + if yj, err := json.Marshal(y); err == nil { + json.Unmarshal(yj, yTrace) + } else { + return false + } + return reflect.DeepEqual(xTrace, yTrace) +} - if !reflect.DeepEqual(ret, test.Result) { - t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) - } +func BenchmarkTransactionTrace(b *testing.B) { + key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + from := crypto.PubkeyToAddress(key.PublicKey) + gas := uint64(1000000) // 1M gas + to := common.HexToAddress("0x00000000000000000000000000000000deadbeef") + signer := types.LatestSignerForChainID(big.NewInt(1337)) + tx, err := types.SignNewTx(key, signer, + &types.LegacyTx{ + Nonce: 1, + GasPrice: big.NewInt(500), + Gas: gas, + To: &to, }) + if err != nil { + b.Fatal(err) + } + context := vm.Context{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: common.Address{}, + BlockNumber: new(big.Int).SetUint64(uint64(5)), + Time: new(big.Int).SetUint64(uint64(5)), + Difficulty: big.NewInt(0xffffffff), + GasLimit: gas, + // BaseFee: big.NewInt(8), + Origin: from, + GasPrice: tx.GasPrice(), + } + alloc := core.GenesisAlloc{} + // The code pushes 
'deadbeef' into memory, then the other params, and calls CREATE2, then returns + // the address + loop := []byte{ + byte(vm.JUMPDEST), // [ count ] + byte(vm.PUSH1), 0, // jumpdestination + byte(vm.JUMP), + } + alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = core.GenesisAccount{ + Nonce: 1, + Code: loop, + Balance: big.NewInt(1), + } + alloc[from] = core.GenesisAccount{ + Nonce: 1, + Code: []byte{}, + Balance: big.NewInt(500000000000000), + } + statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc) + // Create the tracer, the EVM environment and run it + tracer := vm.NewStructLogger(&vm.LogConfig{ + Debug: false, + //DisableStorage: true, + //EnableMemory: false, + //EnableReturnData: false, + }) + evm := vm.NewEVM(context, statedb, nil, params.AllEthashProtocolChanges, vm.Config{Debug: true, Tracer: tracer}) + msg, err := tx.AsMessage(signer, nil, nil) + if err != nil { + b.Fatalf("failed to prepare transaction for tracing: %v", err) + } + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + snap := statedb.Snapshot() + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + _, _, _, err, _ = st.TransitionDb(common.Address{}) + if err != nil { + b.Fatal(err) + } + statedb.RevertToSnapshot(snap) + if have, want := len(tracer.StructLogs()), 244752; have != want { + b.Fatalf("trace wrong, want %d steps, have %d", want, have) + } + tracer.Reset() } } diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 22dc3a0fd852..7e3a43932752 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -105,16 +105,16 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface } // Quick-verify transaction and uncle lists. This mostly helps with debugging the server. 
if head.UncleHash == types.EmptyUncleHash && len(body.UncleHashes) > 0 { - return nil, fmt.Errorf("server returned non-empty uncle list but block header indicates no uncles") + return nil, errors.New("server returned non-empty uncle list but block header indicates no uncles") } if head.UncleHash != types.EmptyUncleHash && len(body.UncleHashes) == 0 { - return nil, fmt.Errorf("server returned empty uncle list but block header indicates uncles") + return nil, errors.New("server returned empty uncle list but block header indicates uncles") } if head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 { - return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions") + return nil, errors.New("server returned non-empty transaction list but block header indicates no transactions") } if head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 { - return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions") + return nil, errors.New("server returned empty transaction list but block header indicates transactions") } // Load uncles because they are not included in the block response. 
var uncles []*types.Header @@ -197,7 +197,7 @@ func (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx * } else if json == nil { return nil, false, ethereum.NotFound } else if _, r, _ := json.tx.RawSignatureValues(); r == nil { - return nil, false, fmt.Errorf("server returned transaction without signature") + return nil, false, errors.New("server returned transaction without signature") } setSenderFromServer(json.tx, json.From, json.BlockHash) return json.tx, json.BlockNumber == nil, nil @@ -243,7 +243,7 @@ func (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash, if json == nil { return nil, ethereum.NotFound } else if _, r, _ := json.tx.RawSignatureValues(); r == nil { - return nil, fmt.Errorf("server returned transaction without signature") + return nil, errors.New("server returned transaction without signature") } } setSenderFromServer(json.tx, json.From, json.BlockHash) @@ -273,10 +273,15 @@ func (ec *Client) GetTransactionReceiptResult(ctx context.Context, txHash common } return r, result, err } + func toBlockNumArg(number *big.Int) string { if number == nil { return "latest" } + pending := big.NewInt(-1) + if number.Cmp(pending) == 0 { + return "pending" + } return hexutil.EncodeBig(number) } diff --git a/event/feed.go b/event/feed.go index 78fa3d98d8eb..d94bd820f0c5 100644 --- a/event/feed.go +++ b/event/feed.go @@ -39,10 +39,9 @@ type Feed struct { sendCases caseList // the active set of select cases used by Send // The inbox holds newly subscribed channels until they are added to sendCases. - mu sync.Mutex - inbox caseList - etype reflect.Type - closed bool + mu sync.Mutex + inbox caseList + etype reflect.Type } // This is the index of the first actual subscription channel in sendCases. 
@@ -58,7 +57,8 @@ func (e feedTypeError) Error() string { return "event: wrong type in " + e.op + " got " + e.got.String() + ", want " + e.want.String() } -func (f *Feed) init() { +func (f *Feed) init(etype reflect.Type) { + f.etype = etype f.removeSub = make(chan interface{}) f.sendLock = make(chan struct{}, 1) f.sendLock <- struct{}{} @@ -71,8 +71,6 @@ func (f *Feed) init() { // The channel should have ample buffer space to avoid blocking other subscribers. // Slow subscribers are not dropped. func (f *Feed) Subscribe(channel interface{}) Subscription { - f.once.Do(f.init) - chanval := reflect.ValueOf(channel) chantyp := chanval.Type() if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.SendDir == 0 { @@ -80,11 +78,13 @@ func (f *Feed) Subscribe(channel interface{}) Subscription { } sub := &feedSub{feed: f, channel: chanval, err: make(chan error, 1)} - f.mu.Lock() - defer f.mu.Unlock() - if !f.typecheck(chantyp.Elem()) { + f.once.Do(func() { f.init(chantyp.Elem()) }) + if f.etype != chantyp.Elem() { panic(feedTypeError{op: "Subscribe", got: chantyp, want: reflect.ChanOf(reflect.SendDir, f.etype)}) } + + f.mu.Lock() + defer f.mu.Unlock() // Add the select case to the inbox. // The next Send will add it to f.sendCases. cas := reflect.SelectCase{Dir: reflect.SelectSend, Chan: chanval} @@ -92,15 +92,6 @@ func (f *Feed) Subscribe(channel interface{}) Subscription { return sub } -// note: callers must hold f.mu -func (f *Feed) typecheck(typ reflect.Type) bool { - if f.etype == nil { - f.etype = typ - return true - } - return f.etype == typ -} - func (f *Feed) remove(sub *feedSub) { // Delete from inbox first, which covers channels // that have not been added to f.sendCases yet. 
@@ -129,18 +120,17 @@ func (f *Feed) remove(sub *feedSub) { func (f *Feed) Send(value interface{}) (nsent int) { rvalue := reflect.ValueOf(value) - f.once.Do(f.init) + f.once.Do(func() { f.init(rvalue.Type()) }) + if f.etype != rvalue.Type() { + panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype}) + } + <-f.sendLock // Add new cases from the inbox after taking the send lock. f.mu.Lock() f.sendCases = append(f.sendCases, f.inbox...) f.inbox = nil - - if !f.typecheck(rvalue.Type()) { - f.sendLock <- struct{}{} - panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype}) - } f.mu.Unlock() // Set the sent value on all channels. @@ -148,7 +138,9 @@ func (f *Feed) Send(value interface{}) (nsent int) { f.sendCases[i].Send = rvalue } - // Send until all channels except removeSub have been chosen. + // Send until all channels except removeSub have been chosen. 'cases' tracks a prefix + // of sendCases. When a send succeeds, the corresponding case moves to the end of + // 'cases' and it shrinks by one element. cases := f.sendCases for { // Fast path: try sending without blocking before adding to the select set. @@ -170,6 +162,7 @@ func (f *Feed) Send(value interface{}) (nsent int) { index := f.sendCases.find(recv.Interface()) f.sendCases = f.sendCases.delete(index) if index >= 0 && index < len(cases) { + // Shrink 'cases' too because the removed case was still active. 
cases = f.sendCases[:len(cases)-1] } } else { diff --git a/event/feed_test.go b/event/feed_test.go index a82c10303362..8713e79bd181 100644 --- a/event/feed_test.go +++ b/event/feed_test.go @@ -17,6 +17,7 @@ package event import ( + "errors" "fmt" "reflect" "sync" @@ -68,7 +69,7 @@ func checkPanic(want error, fn func()) (err error) { defer func() { panic := recover() if panic == nil { - err = fmt.Errorf("didn't panic") + err = errors.New("didn't panic") } else if !reflect.DeepEqual(panic, want) { err = fmt.Errorf("panicked with wrong error: got %q, want %q", panic, want) } @@ -235,6 +236,45 @@ func TestFeedUnsubscribeBlockedPost(t *testing.T) { wg.Wait() } +// Checks that unsubscribing a channel during Send works even if that +// channel has already been sent on. +func TestFeedUnsubscribeSentChan(t *testing.T) { + var ( + feed Feed + ch1 = make(chan int) + ch2 = make(chan int) + sub1 = feed.Subscribe(ch1) + sub2 = feed.Subscribe(ch2) + wg sync.WaitGroup + ) + defer sub2.Unsubscribe() + + wg.Add(1) + go func() { + feed.Send(0) + wg.Done() + }() + + // Wait for the value on ch1. + <-ch1 + // Unsubscribe ch1, removing it from the send cases. + sub1.Unsubscribe() + + // Receive ch2, finishing Send. + <-ch2 + wg.Wait() + + // Send again. This should send to ch2 only, so the wait group will unblock + // as soon as a value is received on ch2. 
+ wg.Add(1) + go func() { + feed.Send(0) + wg.Done() + }() + <-ch2 + wg.Wait() +} + func TestFeedUnsubscribeFromInbox(t *testing.T) { var ( feed Feed diff --git a/go.mod b/go.mod index cbea7de42b43..b7baee7256b0 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aristanetworks/goarista v0.0.0-20231019142648-8c6f0862ab98 github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 github.com/cespare/cp v1.1.1 @@ -40,7 +40,7 @@ require ( golang.org/x/crypto v0.15.0 golang.org/x/net v0.17.0 golang.org/x/sync v0.4.0 - golang.org/x/sys v0.14.0 + golang.org/x/sys v0.24.0 golang.org/x/tools v0.14.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce @@ -52,7 +52,7 @@ require github.com/deckarep/golang-set v1.8.0 require ( github.com/StackExchange/wmi v1.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/dlclark/regexp2 v1.10.0 // indirect github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29 // indirect github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa // indirect diff --git a/go.sum b/go.sum index 81851047d213..98d3a63aed14 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,8 @@ github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9 github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= 
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= @@ -41,6 +43,8 @@ github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -124,6 +128,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -330,6 +335,8 @@ golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 50c614a98a6e..62e0f101ece4 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -358,9 +358,9 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool { // signTransactions sets defaults and signs the given transaction // NOTE: the caller needs to ensure that the nonceLock is held, if applicable, // and release it after the transaction has been submitted to the tx pool -func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args SendTxArgs, passwd string) (*types.Transaction, error) { +func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *TransactionArgs, passwd string) (*types.Transaction, error) { // Look up the wallet containing the requested signer - account := accounts.Account{Address: args.From} + account := accounts.Account{Address: args.from()} wallet, err := s.am.Find(account) if err != nil { return nil, err @@ -380,17 +380,18 @@ func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args SendTxArgs } // SendTransaction will create a transaction from the given arguments and -// tries to sign it with the key associated with args.To. If the given passwd isn't -// able to decrypt the key it fails. 
-func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) { +// tries to sign it with the key associated with args.From. If the given +// passwd isn't able to decrypt the key it fails. +func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) { if args.Nonce == nil { // Hold the addresse's mutex around signing to prevent concurrent assignment of // the same nonce to multiple accounts. - s.nonceLock.LockAddr(args.From) - defer s.nonceLock.UnlockAddr(args.From) + s.nonceLock.LockAddr(args.from()) + defer s.nonceLock.UnlockAddr(args.from()) } - signed, err := s.signTransaction(ctx, args, passwd) + signed, err := s.signTransaction(ctx, &args, passwd) if err != nil { + log.Warn("Failed transaction send attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err) return common.Hash{}, err } return SubmitTransaction(ctx, s.b, signed) @@ -400,21 +401,28 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs // tries to sign it with the key associated with args.To. If the given passwd isn't // able to decrypt the key it fails. 
The transaction is returned in RLP-form, not broadcast // to other nodes -func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs, passwd string) (*SignTransactionResult, error) { +func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args TransactionArgs, passwd string) (*SignTransactionResult, error) { // No need to obtain the noncelock mutex, since we won't be sending this // tx into the transaction pool, but right back to the user + if args.From == nil { + return nil, fmt.Errorf("sender not specified") + } if args.Gas == nil { - return nil, fmt.Errorf("gas not specified") + return nil, errors.New("gas not specified") } if args.GasPrice == nil { - return nil, fmt.Errorf("gasPrice not specified") + return nil, errors.New("gasPrice not specified") } if args.Nonce == nil { - return nil, fmt.Errorf("nonce not specified") + return nil, errors.New("nonce not specified") + } + // Before actually sign the transaction, ensure the transaction fee is reasonable. 
+ if err := checkTxFee(args.GasPrice.ToInt(), uint64(*args.Gas), s.b.RPCTxFeeCap()); err != nil { + return nil, err } - signed, err := s.signTransaction(ctx, args, passwd) + signed, err := s.signTransaction(ctx, &args, passwd) if err != nil { - log.Warn("Failed transaction sign attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err) + log.Warn("Failed transaction sign attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err) return nil, err } data, err := signed.MarshalBinary() @@ -478,7 +486,7 @@ func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Byt return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength) } if sig[crypto.RecoveryIDOffset] != 27 && sig[crypto.RecoveryIDOffset] != 28 { - return common.Address{}, fmt.Errorf("invalid Ethereum signature (V is not 27 or 28)") + return common.Address{}, errors.New("invalid Ethereum signature (V is not 27 or 28)") } sig[crypto.RecoveryIDOffset] -= 27 // Transform yellow paper V from 27/28 to 0/1 @@ -491,7 +499,7 @@ func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Byt // SignAndSendTransaction was renamed to SendTransaction. This method is deprecated // and will be removed in the future. It primary goal is to give clients time to update. 
-func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) { +func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) { return s.SendTransaction(ctx, args, passwd) } @@ -893,6 +901,10 @@ func (s *PublicBlockChainAPI) GetCandidateStatus(ctx context.Context, coinbaseAd result[fieldSuccess] = false return result, err } + if statedb == nil { + result[fieldSuccess] = false + return result, errors.New("nil statedb in GetCandidateStatus") + } candidatesAddresses := state.GetCandidates(statedb) candidates = make([]utils.Masternode, 0, len(candidatesAddresses)) for _, address := range candidatesAddresses { @@ -915,7 +927,7 @@ func (s *PublicBlockChainAPI) GetCandidateStatus(ctx context.Context, coinbaseAd } maxMasternodes = s.b.ChainConfig().XDPoS.V2.Config(uint64(round)).MaxMasternodes } else { - return result, fmt.Errorf("undefined XDPoS consensus engine") + return result, errors.New("undefined XDPoS consensus engine") } } else if s.b.ChainConfig().IsTIPIncreaseMasternodes(block.Number()) { maxMasternodes = common.MaxMasternodesV2 @@ -1048,6 +1060,10 @@ func (s *PublicBlockChainAPI) GetCandidates(ctx context.Context, epoch rpc.Epoch result[fieldSuccess] = false return result, err } + if statedb == nil { + result[fieldSuccess] = false + return result, errors.New("nil statedb in GetCandidates") + } candidatesAddresses := state.GetCandidates(statedb) candidates = make([]utils.Masternode, 0, len(candidatesAddresses)) for _, address := range candidatesAddresses { @@ -1106,7 +1122,7 @@ func (s *PublicBlockChainAPI) GetCandidates(ctx context.Context, epoch rpc.Epoch } maxMasternodes = s.b.ChainConfig().XDPoS.V2.Config(uint64(round)).MaxMasternodes } else { - return result, fmt.Errorf("undefined XDPoS consensus engine") + return result, errors.New("undefined XDPoS consensus engine") } } else if 
s.b.ChainConfig().IsTIPIncreaseMasternodes(block.Number()) { maxMasternodes = common.MaxMasternodesV2 @@ -1197,7 +1213,7 @@ func (s *PublicBlockChainAPI) getCandidatesFromSmartContract() ([]utils.Masterno return []utils.Masternode{}, err } - addr := common.HexToAddress(common.MasternodeVotingSMC) + addr := common.MasternodeVotingSMCBinary validator, err := contractValidator.NewXDCValidator(addr, client) if err != nil { return []utils.Masternode{}, err @@ -1225,87 +1241,22 @@ func (s *PublicBlockChainAPI) getCandidatesFromSmartContract() ([]utils.Masterno return candidatesWithStakeInfo, nil } -// CallArgs represents the arguments for a call. -type CallArgs struct { - From *common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Value *hexutil.Big `json:"value"` - Data *hexutil.Bytes `json:"data"` - AccessList *types.AccessList `json:"accessList"` -} - -// ToMessage converts CallArgs to the Message type used by the core evm -// TODO: set balanceTokenFee -func (args *CallArgs) ToMessage(b Backend, number *big.Int, globalGasCap uint64) types.Message { - // Set sender address or use a default if none specified - var addr common.Address - if args.From == nil || *args.From == (common.Address{}) { - if wallets := b.AccountManager().Wallets(); len(wallets) > 0 { - if accounts := wallets[0].Accounts(); len(accounts) > 0 { - addr = accounts[0].Address - } - } - } else { - addr = *args.From - } - - // Set default gas & gas price if none were set - gas := globalGasCap - if gas == 0 { - gas = uint64(math.MaxUint64 / 2) - } - if args.Gas != nil { - gas = uint64(*args.Gas) - } - if globalGasCap != 0 && globalGasCap < gas { - log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) - gas = globalGasCap - } - gasPrice := new(big.Int) - if args.GasPrice != nil { - gasPrice = args.GasPrice.ToInt() - } - if gasPrice.Sign() <= 0 { - gasPrice = 
new(big.Int).SetUint64(defaultGasPrice) - } - - value := new(big.Int) - if args.Value != nil { - value = args.Value.ToInt() - } - - var data []byte - if args.Data != nil { - data = *args.Data - } - - var accessList types.AccessList - if args.AccessList != nil { - accessList = *args.AccessList - } - - balanceTokenFee := big.NewInt(0).SetUint64(gas) - balanceTokenFee = balanceTokenFee.Mul(balanceTokenFee, gasPrice) - - // Create new call message - msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, accessList, false, balanceTokenFee, number) - return msg -} - -func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, vmCfg vm.Config, timeout time.Duration, globalGasCap uint64) ([]byte, uint64, bool, error, error) { +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, vmCfg vm.Config, timeout time.Duration, globalGasCap uint64) ([]byte, uint64, bool, error, error) { defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) statedb, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if statedb == nil || err != nil { return nil, 0, false, err, nil } + if header == nil { + return nil, 0, false, errors.New("nil header in DoCall"), nil + } if err := overrides.Apply(statedb); err != nil { return nil, 0, false, err, nil } msg := args.ToMessage(b, header.Number, globalGasCap) + msg.SetBalanceTokenFeeForCall() // Setup context so it may be cancelled the call has completed // or, in case of unmetered gas, setup a context with a timeout. 
@@ -1323,6 +1274,9 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo if err != nil { return nil, 0, false, err, nil } + if block == nil { + return nil, 0, false, fmt.Errorf("nil block in DoCall: number=%d, hash=%s", header.Number.Uint64(), header.Hash().Hex()), nil + } author, err := b.GetEngine().Author(block.Header()) if err != nil { return nil, 0, false, err, nil @@ -1394,14 +1348,13 @@ func (e *revertError) ErrorData() interface{} { // Call executes the given transaction on the state for the given block number. // It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values. -func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { +func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { if blockNrOrHash == nil { latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) blockNrOrHash = &latest } timeout := 5 * time.Second - MasternodeVotingSMCBinary := common.Address{19: 0x88} // xdc0000000000000000000000000000000000000088 - if args.To != nil && *args.To == MasternodeVotingSMCBinary { + if args.To != nil && *args.To == common.MasternodeVotingSMCBinary { timeout = 0 } result, _, failed, err, vmErr := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, vm.Config{}, timeout, s.b.RPCGasCap()) @@ -1416,7 +1369,7 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNrOr return (hexutil.Bytes)(result), vmErr } -func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) { +func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) { // 
Retrieve the base state and mutate it with any overrides state, _, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { @@ -1530,7 +1483,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNrOrHash // EstimateGas returns an estimate of the amount of gas needed to execute the // given transaction against the current pending block. -func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) { +func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) { bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash @@ -1577,7 +1530,7 @@ func FormatLogs(logs []vm.StructLog) []StructLogRes { if trace.Stack != nil { stack := make([]string, len(trace.Stack)) for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + stack[i] = stackValue.Hex() } formatted[index].Stack = &stack } @@ -1916,7 +1869,7 @@ type accessListResult struct { // CreateAccessList creates a EIP-2930 type AccessList for the given transaction. // Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state. 
-func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args SendTxArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) { +func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) { bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash @@ -1935,7 +1888,7 @@ func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args SendTxA // AccessList creates an access list for the given transaction. // If the accesslist creation fails an error is returned. // If the transaction itself fails, an vmErr is returned. -func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash, args SendTxArgs) (acl types.AccessList, gasUsed uint64, vmErr error, err error) { +func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrHash, args TransactionArgs) (acl types.AccessList, gasUsed uint64, vmErr error, err error) { // Retrieve the execution context db, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if db == nil || err != nil { @@ -1945,6 +1898,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH if err != nil { return nil, 0, nil, err } + if block == nil { + return nil, 0, nil, fmt.Errorf("nil block in AccessList: number=%d, hash=%s", header.Number.Uint64(), header.Hash().Hex()) + } author, err := b.GetEngine().Author(block.Header()) if err != nil { return nil, 0, nil, err @@ -1967,21 +1923,15 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH if args.To != nil { to = *args.To } else { - to = crypto.CreateAddress(args.From, uint64(*args.Nonce)) - } - var input []byte - if args.Input != nil { - input = *args.Input - } else if args.Data != nil { - input = *args.Data + to = crypto.CreateAddress(args.from(), uint64(*args.Nonce)) } // Retrieve the precompiles 
since they don't need to be added to the access list precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number)) // Create an initial tracer - prevTracer := vm.NewAccessListTracer(nil, args.From, to, precompiles) + prevTracer := vm.NewAccessListTracer(nil, args.from(), to, precompiles) if args.AccessList != nil { - prevTracer = vm.NewAccessListTracer(*args.AccessList, args.From, to, precompiles) + prevTracer = vm.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles) } for { // Retrieve the current access list to expand @@ -2004,10 +1954,10 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH if value, ok := feeCapacity[to]; ok { balanceTokenFee = value } - msg := types.NewMessage(args.From, args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), input, accessList, false, balanceTokenFee, header.Number) + msg := types.NewMessage(args.from(), args.To, uint64(*args.Nonce), args.Value.ToInt(), uint64(*args.Gas), args.GasPrice.ToInt(), args.data(), accessList, false, balanceTokenFee, header.Number) // Apply the transaction with the access list tracer - tracer := vm.NewAccessListTracer(accessList, args.From, to, precompiles) + tracer := vm.NewAccessListTracer(accessList, args.from(), to, precompiles) config := vm.Config{Tracer: tracer, Debug: true} vmenv, _, err := b.GetEVM(ctx, msg, statedb, XDCxState, header, &config) if err != nil { @@ -2254,129 +2204,17 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti return wallet.SignTx(account, tx, chainID) } -// SendTxArgs represents the arguments to sumbit a new transaction into the transaction pool. 
-type SendTxArgs struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Value *hexutil.Big `json:"value"` - Nonce *hexutil.Uint64 `json:"nonce"` - // We accept "data" and "input" for backwards-compatibility reasons. "input" is the - // newer name and should be preferred by clients. - Data *hexutil.Bytes `json:"data"` - Input *hexutil.Bytes `json:"input"` - - // For non-legacy transactions - AccessList *types.AccessList `json:"accessList,omitempty"` - ChainID *hexutil.Big `json:"chainId,omitempty"` -} - -// setDefaults fills in default values for unspecified tx fields. -func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { - if args.GasPrice == nil { - price, err := b.SuggestPrice(ctx) - if err != nil { - return err - } - args.GasPrice = (*hexutil.Big)(price) - } - if args.Value == nil { - args.Value = new(hexutil.Big) - } - if args.Nonce == nil { - nonce, err := b.GetPoolNonce(ctx, args.From) - if err != nil { - return err - } - args.Nonce = (*hexutil.Uint64)(&nonce) - } - if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { - return errors.New(`Both "data" and "input" are set and not equal. Please use "input" to pass transaction call data.`) - } - if args.To == nil { - // Contract creation - var input []byte - if args.Data != nil { - input = *args.Data - } else if args.Input != nil { - input = *args.Input - } - if len(input) == 0 { - return errors.New(`contract creation without any data provided`) - } - } - // Estimate the gas usage if necessary. - if args.Gas == nil { - // For backwards-compatibility reason, we try both input and data - // but input is preferred. 
- input := args.Input - if input == nil { - input = args.Data - } - callArgs := CallArgs{ - From: &args.From, // From shouldn't be nil - To: args.To, - GasPrice: args.GasPrice, - Value: args.Value, - Data: input, - AccessList: args.AccessList, - } - pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap()) - if err != nil { - return err - } - args.Gas = &estimated - log.Trace("Estimate gas usage automatically", "gas", args.Gas) - - } - if args.ChainID == nil { - id := (*hexutil.Big)(b.ChainConfig().ChainId) - args.ChainID = id - } - return nil -} - -// toTransaction converts the arguments to a transaction. -// This assumes that setDefaults has been called. -func (args *SendTxArgs) toTransaction() *types.Transaction { - var input []byte - if args.Input != nil { - input = *args.Input - } else if args.Data != nil { - input = *args.Data - } - var data types.TxData - if args.AccessList == nil { - data = &types.LegacyTx{ - To: args.To, - Nonce: uint64(*args.Nonce), - Gas: uint64(*args.Gas), - GasPrice: (*big.Int)(args.GasPrice), - Value: (*big.Int)(args.Value), - Data: input, - } - } else { - data = &types.AccessListTx{ - To: args.To, - ChainID: (*big.Int)(args.ChainID), - Nonce: uint64(*args.Nonce), - Gas: uint64(*args.Gas), - GasPrice: (*big.Int)(args.GasPrice), - Value: (*big.Int)(args.Value), - Data: input, - AccessList: *args.AccessList, - } - } - return types.NewTx(data) -} - // SubmitTransaction is a helper function that submits tx to txPool and logs a message. func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) { if tx.To() != nil && tx.IsSpecialTransaction() { return common.Hash{}, errors.New("Dont allow transaction sent to BlockSigners & RandomizeSMC smart contract via API") } + + // If the transaction fee cap is already specified, ensure the + // fee of the given transaction is _reasonable_. 
+ if err := checkTxFee(tx.GasPrice(), tx.Gas(), b.RPCTxFeeCap()); err != nil { + return common.Hash{}, err + } if err := b.SendTx(ctx, tx); err != nil { return common.Hash{}, err } @@ -2417,10 +2255,10 @@ func submitLendingTransaction(ctx context.Context, b Backend, tx *types.LendingT // SendTransaction creates a transaction for the given argument, sign it and submit it to the // transaction pool. -func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) { +func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) { // Look up the wallet containing the requested signer - account := accounts.Account{Address: args.From} + account := accounts.Account{Address: args.from()} wallet, err := s.b.AccountManager().Find(account) if err != nil { @@ -2430,8 +2268,8 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen if args.Nonce == nil { // Hold the addresse's mutex around signing to prevent concurrent assignment of // the same nonce to multiple accounts. 
- s.nonceLock.LockAddr(args.From) - defer s.nonceLock.UnlockAddr(args.From) + s.nonceLock.LockAddr(args.from()) + defer s.nonceLock.UnlockAddr(args.from()) } // Set some sanity defaults and terminate on failure @@ -2454,7 +2292,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen // FillTransaction fills the defaults (nonce, gas, gasPrice) on a given unsigned transaction, // and returns it to the caller for further processing (signing + broadcast) -func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) { +func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { // Set some sanity defaults and terminate on failure if err := args.setDefaults(ctx, s.b); err != nil { return nil, err @@ -3351,20 +3189,24 @@ type SignTransactionResult struct { // SignTransaction will sign the given transaction with the from account. // The node needs to have the private key of the account corresponding with // the given from address and it needs to be unlocked. -func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) { +func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { if args.Gas == nil { - return nil, fmt.Errorf("gas not specified") + return nil, errors.New("gas not specified") } if args.GasPrice == nil { - return nil, fmt.Errorf("gasPrice not specified") + return nil, errors.New("gasPrice not specified") } if args.Nonce == nil { - return nil, fmt.Errorf("nonce not specified") + return nil, errors.New("nonce not specified") } if err := args.setDefaults(ctx, s.b); err != nil { return nil, err } - tx, err := s.sign(args.From, args.toTransaction()) + // Before actually sign the transaction, ensure the transaction fee is reasonable. 
+ if err := checkTxFee(args.GasPrice.ToInt(), uint64(*args.Gas), s.b.RPCTxFeeCap()); err != nil { + return nil, err + } + tx, err := s.sign(args.from(), args.toTransaction()) if err != nil { return nil, err } @@ -3400,15 +3242,28 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, err // Resend accepts an existing transaction and a new gas price and limit. It will remove // the given transaction from the pool and reinsert it with the new gas price and limit. -func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) { +func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) { if sendArgs.Nonce == nil { - return common.Hash{}, fmt.Errorf("missing transaction nonce in transaction spec") + return common.Hash{}, errors.New("missing transaction nonce in transaction spec") } if err := sendArgs.setDefaults(ctx, s.b); err != nil { return common.Hash{}, err } matchTx := sendArgs.toTransaction() + // Before replacing the old transaction, ensure the _new_ transaction fee is reasonable. + var price = matchTx.GasPrice() + if gasPrice != nil { + price = gasPrice.ToInt() + } + var gas = matchTx.Gas() + if gasLimit != nil { + gas = uint64(*gasLimit) + } + if err := checkTxFee(price, gas, s.b.RPCTxFeeCap()); err != nil { + return common.Hash{}, err + } + // Iterate the pending list for replacement pending, err := s.b.GetPoolTransactions() if err != nil { @@ -3417,7 +3272,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr for _, p := range pending { wantSigHash := s.signer.Hash(matchTx) pFrom, err := types.Sender(s.signer, p) - if err == nil && pFrom == sendArgs.From && s.signer.Hash(p) == wantSigHash { + if err == nil && pFrom == sendArgs.from() && s.signer.Hash(p) == wantSigHash { // Match. Re-sign and send the transaction. 
if gasPrice != nil && (*big.Int)(gasPrice).Sign() != 0 { sendArgs.GasPrice = gasPrice @@ -3425,7 +3280,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr if gasLimit != nil && *gasLimit != 0 { sendArgs.Gas = gasLimit } - signedTx, err := s.sign(sendArgs.From, sendArgs.toTransaction()) + signedTx, err := s.sign(sendArgs.from(), sendArgs.toTransaction()) if err != nil { return common.Hash{}, err } @@ -3499,7 +3354,7 @@ func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) { LDB() *leveldb.DB }) if !ok { - return "", fmt.Errorf("chaindbProperty does not work for memory databases") + return "", errors.New("chaindbProperty does not work for memory databases") } if property == "" { property = "leveldb.stats" @@ -3514,7 +3369,7 @@ func (api *PrivateDebugAPI) ChaindbCompact() error { LDB() *leveldb.DB }) if !ok { - return fmt.Errorf("chaindbCompact does not work for memory databases") + return errors.New("chaindbCompact does not work for memory databases") } for b := byte(0); b < 255; b++ { log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1)) @@ -3558,6 +3413,21 @@ func (s *PublicNetAPI) Version() string { return fmt.Sprintf("%d", s.networkVersion) } +// checkTxFee is an internal function used to check whether the fee of +// the given transaction is _reasonable_(under the cap). +func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error { + // Short circuit if there is no cap for transaction fee at all. 
+ if cap == 0 { + return nil + } + feeEth := new(big.Float).Quo(new(big.Float).SetInt(new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))), new(big.Float).SetInt(big.NewInt(params.Ether))) + feeFloat, _ := feeEth.Float64() + if feeFloat > cap { + return fmt.Errorf("tx fee (%.2f ether) exceeds the configured cap (%.2f ether)", feeFloat, cap) + } + return nil +} + func GetSignersFromBlocks(b Backend, blockNumber uint64, blockHash common.Hash, masternodes []common.Address) ([]common.Address, error) { var addrs []common.Address mapMN := map[common.Address]bool{} @@ -3576,10 +3446,16 @@ func GetSignersFromBlocks(b Backend, blockNumber uint64, blockHash common.Hash, if err != nil { return addrs, err } + if header == nil { + return addrs, errors.New("nil header in GetSignersFromBlocks") + } blockData, err := b.BlockByNumber(nil, rpc.BlockNumber(i)) if err != nil { return addrs, err } + if blockData == nil { + return addrs, errors.New("nil blockData in GetSignersFromBlocks") + } signTxs := engine.CacheSigningTxs(header.Hash(), blockData.Transactions()) for _, signtx := range signTxs { blkHash := common.BytesToHash(signtx.Data()[len(signtx.Data())-32:]) @@ -3658,18 +3534,3 @@ func (s *PublicBlockChainAPI) GetStakerROIMasternode(masternode common.Address) return 100.0 / float64(totalCap.Div(totalCap, voterRewardAYear).Uint64()) } - -// checkTxFee is an internal function used to check whether the fee of -// the given transaction is _reasonable_(under the cap). -func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error { - // Short circuit if there is no cap for transaction fee at all. 
- if cap == 0 { - return nil - } - feeEth := new(big.Float).Quo(new(big.Float).SetInt(new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))), new(big.Float).SetInt(big.NewInt(params.Ether))) - feeFloat, _ := feeEth.Float64() - if feeFloat > cap { - return fmt.Errorf("tx fee (%.2f ether) exceeds the configured cap (%.2f ether)", feeFloat, cap) - } - return nil -} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 00a9aea8acf9..a365e5278020 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -35,6 +35,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/core/vm" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/filters" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/event" "github.com/XinFinOrg/XDPoSChain/params" @@ -49,9 +50,9 @@ type Backend interface { ProtocolVersion() int SuggestPrice(ctx context.Context) (*big.Int, error) ChainDb() ethdb.Database - EventMux() *event.TypeMux AccountManager() *accounts.Manager - RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs XDCxService() *XDCx.XDCX LendingService() *XDCxlending.Lending @@ -66,6 +67,7 @@ type Backend interface { StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) + PendingBlockAndReceipts() (*types.Block, types.Receipts) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetTd(blockHash common.Hash) *big.Int GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, XDCxState 
*tradingstate.TradingStateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) @@ -87,6 +89,7 @@ type Backend interface { OrderTxPoolContent() (map[common.Address]types.OrderTransactions, map[common.Address]types.OrderTransactions) OrderStats() (pending int, queued int) SendLendingTx(ctx context.Context, signedTx *types.LendingTransaction) error + SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription ChainConfig() *params.ChainConfig CurrentBlock() *types.Block @@ -101,6 +104,11 @@ type Backend interface { GetBlocksHashCache(blockNr uint64) []common.Hash AreTwoBlockSamePath(newBlock common.Hash, oldBlock common.Hash) bool GetOrderNonce(address common.Hash) (uint64, error) + + // eth/filters needs to be initialized from this backend type, so methods needed by + // it must also be included here. + GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) + filters.Backend } func GetAPIs(apiBackend Backend, chainReader consensus.ChainReader) []rpc.API { diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go new file mode 100644 index 000000000000..48374f8fa9d8 --- /dev/null +++ b/internal/ethapi/transaction_args.go @@ -0,0 +1,198 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethapi + +import ( + "bytes" + "context" + "errors" + "math/big" + + "github.com/XinFinOrg/XDPoSChain/common" + "github.com/XinFinOrg/XDPoSChain/common/hexutil" + "github.com/XinFinOrg/XDPoSChain/common/math" + "github.com/XinFinOrg/XDPoSChain/core/types" + "github.com/XinFinOrg/XDPoSChain/log" + "github.com/XinFinOrg/XDPoSChain/rpc" +) + +// TransactionArgs represents the arguments to construct a new transaction +// or a message call. +type TransactionArgs struct { + From *common.Address `json:"from"` + To *common.Address `json:"to"` + Gas *hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + Value *hexutil.Big `json:"value"` + Nonce *hexutil.Uint64 `json:"nonce"` + + // We accept "data" and "input" for backwards-compatibility reasons. + // "input" is the newer name and should be preferred by clients. + // Issue detail: https://github.com/ethereum/go-ethereum/issues/15628 + Data *hexutil.Bytes `json:"data"` + Input *hexutil.Bytes `json:"input"` + + // For non-legacy transactions + AccessList *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` +} + +// from retrieves the transaction sender address. +func (arg *TransactionArgs) from() common.Address { + if arg.From == nil { + return common.Address{} + } + return *arg.From +} + +// data retrieves the transaction calldata. Input field is preferred. +func (arg *TransactionArgs) data() []byte { + if arg.Input != nil { + return *arg.Input + } + if arg.Data != nil { + return *arg.Data + } + return nil +} + +// setDefaults fills in default values for unspecified tx fields. 
+func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { + if args.GasPrice == nil { + price, err := b.SuggestPrice(ctx) + if err != nil { + return err + } + args.GasPrice = (*hexutil.Big)(price) + } + if args.Value == nil { + args.Value = new(hexutil.Big) + } + if args.Nonce == nil { + nonce, err := b.GetPoolNonce(ctx, args.from()) + if err != nil { + return err + } + args.Nonce = (*hexutil.Uint64)(&nonce) + } + if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { + return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`) + } + if args.To == nil && len(args.data()) == 0 { + return errors.New(`contract creation without any data provided`) + } + // Estimate the gas usage if necessary. + if args.Gas == nil { + // These fields are immutable during the estimation, safe to + // pass the pointer directly. + data := args.data() + callArgs := TransactionArgs{ + From: args.From, + To: args.To, + GasPrice: args.GasPrice, + Value: args.Value, + Data: (*hexutil.Bytes)(&data), + AccessList: args.AccessList, + } + pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap()) + if err != nil { + return err + } + args.Gas = &estimated + log.Trace("Estimate gas usage automatically", "gas", args.Gas) + } + if args.ChainID == nil { + id := (*hexutil.Big)(b.ChainConfig().ChainId) + args.ChainID = id + } + return nil +} + +// ToMessage converts TransactionArgs to the Message type used by the core evm +func (args *TransactionArgs) ToMessage(b Backend, number *big.Int, globalGasCap uint64) types.Message { + // Set sender address or use zero address if none specified. 
+ addr := args.from() + if addr == (common.Address{}) { + if wallets := b.AccountManager().Wallets(); len(wallets) > 0 { + if accounts := wallets[0].Accounts(); len(accounts) > 0 { + addr = accounts[0].Address + } + } + } + + // Set default gas & gas price if none were set + gas := globalGasCap + if args.Gas != nil { + gas = uint64(*args.Gas) + } + if gas == 0 { + gas = math.MaxUint64 / 2 + } + if globalGasCap != 0 && globalGasCap < gas { + log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) + gas = globalGasCap + } + gasPrice := new(big.Int) + if args.GasPrice != nil { + gasPrice = args.GasPrice.ToInt() + } + if gasPrice.Sign() <= 0 { + gasPrice = new(big.Int).SetUint64(defaultGasPrice) + } + value := new(big.Int) + if args.Value != nil { + value = args.Value.ToInt() + } + data := args.data() + var accessList types.AccessList + if args.AccessList != nil { + accessList = *args.AccessList + } + + // Create new call message + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, accessList, false, nil, number) + return msg +} + +// toTransaction converts the arguments to a transaction. +// This assumes that setDefaults has been called. 
+func (args *TransactionArgs) toTransaction() *types.Transaction { + var data types.TxData + if args.AccessList == nil { + data = &types.LegacyTx{ + To: args.To, + Nonce: uint64(*args.Nonce), + Gas: uint64(*args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(args.Value), + Data: args.data(), + } + } else { + data = &types.AccessListTx{ + To: args.To, + ChainID: (*big.Int)(args.ChainID), + Nonce: uint64(*args.Nonce), + Gas: uint64(*args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(args.Value), + Data: args.data(), + AccessList: *args.AccessList, + } + } + return types.NewTx(data) +} diff --git a/internal/ethapi/trie_proof_test.go b/internal/ethapi/trie_proof_test.go index 10dd9988dbc5..34922b9c60ff 100644 --- a/internal/ethapi/trie_proof_test.go +++ b/internal/ethapi/trie_proof_test.go @@ -2,7 +2,7 @@ package ethapi import ( "bytes" - "fmt" + "errors" "math/big" "reflect" "testing" @@ -36,7 +36,7 @@ func (n *proofPairList) Get(key []byte) ([]byte, error) { return b, nil } } - return nil, fmt.Errorf("key not found") + return nil, errors.New("key not found") } func TestTransactionProof(t *testing.T) { diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index dbb3f41df4ad..688f8df33af1 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -162,6 +162,12 @@ web3._extend({ params: 1, inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter] }), + new web3._extend.Method({ + name: 'getEpochNumbersBetween', + call: 'XDPoS_getEpochNumbersBetween', + params: 2, + inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter] + }), ], properties: [ new web3._extend.Property({ diff --git a/les/api_backend.go b/les/api_backend.go index abdd952a6ebf..876a1dcbe9f8 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -130,6 +130,14 @@ func (b *LesApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r return nil, 
errors.New("invalid arguments; neither block nor hash specified") } +func (b *LesApiBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + return light.GetBody(ctx, b.eth.odr, hash, uint64(number)) +} + +func (b *LesApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + return nil, nil +} + func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error) { header, err := b.HeaderByNumber(ctx, blockNr) if header == nil || err != nil { @@ -163,8 +171,8 @@ func (b *LesApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash) return light.GetBlockReceipts(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)) } -func (b *LesApiBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) { - return light.GetBlockLogs(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)) +func (b *LesApiBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + return light.GetBlockLogs(ctx, b.eth.odr, hash, number) } func (b *LesApiBackend) GetTd(blockHash common.Hash) *big.Int { @@ -241,6 +249,13 @@ func (b *LesApiBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri return b.eth.blockchain.SubscribeLogsEvent(ch) } +func (b *LesApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + return event.NewSubscription(func(quit <-chan struct{}) error { + <-quit + return nil + }) +} + func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { return b.eth.blockchain.SubscribeRemovedLogsEvent(ch) } @@ -261,10 +276,6 @@ func (b *LesApiBackend) ChainDb() ethdb.Database { return b.eth.chainDb } -func (b *LesApiBackend) EventMux() *event.TypeMux { - return b.eth.eventMux -} - func (b *LesApiBackend) AccountManager() *accounts.Manager { return b.eth.accountManager } @@ 
-273,6 +284,10 @@ func (b *LesApiBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } +func (b *LesApiBackend) RPCTxFeeCap() float64 { + return b.eth.config.RPCTxFeeCap +} + func (b *LesApiBackend) BloomStatus() (uint64, uint64) { if b.eth.bloomIndexer == nil { return 0, 0 diff --git a/les/backend.go b/les/backend.go index b172e8fc0022..cc6fdf962df4 100644 --- a/les/backend.go +++ b/les/backend.go @@ -18,7 +18,7 @@ package les import ( - "fmt" + "errors" "sync" "time" @@ -31,6 +31,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/eth" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/eth/filters" "github.com/XinFinOrg/XDPoSChain/eth/gasprice" "github.com/XinFinOrg/XDPoSChain/ethdb" @@ -46,7 +47,7 @@ import ( ) type LightEthereum struct { - config *eth.Config + config *ethconfig.Config odr *LesOdr relay *LesTxRelay @@ -79,7 +80,7 @@ type LightEthereum struct { wg sync.WaitGroup } -func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { +func New(ctx *node.ServiceContext, config *ethconfig.Config) (*LightEthereum, error) { chainDb, err := eth.CreateDB(ctx, config, "lightchaindata") if err != nil { return nil, err @@ -155,12 +156,12 @@ type LightDummyAPI struct{} // Etherbase is the address that mining rewards will be send to func (s *LightDummyAPI) Etherbase() (common.Address, error) { - return common.Address{}, fmt.Errorf("not supported") + return common.Address{}, errors.New("not supported") } // Coinbase is the address that mining rewards will be send to (alias for Etherbase) func (s *LightDummyAPI) Coinbase() (common.Address, error) { - return common.Address{}, fmt.Errorf("not supported") + return common.Address{}, errors.New("not supported") } // Hashrate returns the POW hashrate @@ -190,7 +191,7 @@ func (s *LightEthereum) APIs() []rpc.API { }, { Namespace: "eth", Version: "1.0", - Service: 
filters.NewPublicFilterAPI(s.ApiBackend, true), + Service: filters.NewFilterAPI(filters.NewFilterSystem(s.ApiBackend, filters.Config{LogCacheSize: s.config.FilterLogCacheSize}), true), Public: true, }, { Namespace: "net", diff --git a/les/peer.go b/les/peer.go index ef635795bfcb..cbc7b9957020 100644 --- a/les/peer.go +++ b/les/peer.go @@ -291,7 +291,7 @@ func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) reqsV1 := make([]ChtReq, len(reqs)) for i, req := range reqs { if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 { - return fmt.Errorf("Request invalid in LES/1 mode") + return errors.New("Request invalid in LES/1 mode") } blockNum := binary.BigEndian.Uint64(req.Key) // convert HelperTrie request to old CHT request diff --git a/les/retrieve.go b/les/retrieve.go index 91014f1964ab..509b8a3e356c 100644 --- a/les/retrieve.go +++ b/les/retrieve.go @@ -22,7 +22,7 @@ import ( "context" "crypto/rand" "encoding/binary" - "fmt" + "errors" "sync" "time" @@ -119,7 +119,7 @@ func (rm *retrieveManager) retrieve(ctx context.Context, reqID uint64, req *dist case <-ctx.Done(): sentReq.stop(ctx.Err()) case <-shutdown: - sentReq.stop(fmt.Errorf("Client is shutting down")) + sentReq.stop(errors.New("Client is shutting down")) } return sentReq.getError() } diff --git a/les/server.go b/les/server.go index 68762c441430..db494726e332 100644 --- a/les/server.go +++ b/les/server.go @@ -27,6 +27,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/eth" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/ethdb" "github.com/XinFinOrg/XDPoSChain/les/flowcontrol" "github.com/XinFinOrg/XDPoSChain/light" @@ -37,7 +38,7 @@ import ( ) type LesServer struct { - config *eth.Config + config *ethconfig.Config protocolManager *ProtocolManager fcManager *flowcontrol.ClientManager // nil if our node is client only fcCostStats *requestCostStats 
@@ -49,7 +50,7 @@ type LesServer struct { chtIndexer, bloomTrieIndexer *core.ChainIndexer } -func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { +func NewLesServer(eth *eth.Ethereum, config *ethconfig.Config) (*LesServer, error) { quitSync := make(chan struct{}) pm, err := NewProtocolManager(eth.BlockChain().Config(), false, ServerProtocolVersions, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, quitSync, new(sync.WaitGroup)) if err != nil { diff --git a/light/lightchain.go b/light/lightchain.go index 72873a57c2fd..dd3e57f624dd 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -27,6 +27,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/consensus" "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/state" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/ethdb" @@ -34,7 +35,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/log" "github.com/XinFinOrg/XDPoSChain/params" "github.com/XinFinOrg/XDPoSChain/rlp" - "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru" ) var ( @@ -192,9 +193,7 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) { if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { log.Crit("Failed to write genesis block TD", "err", err) } - if err := core.WriteBlock(bc.chainDb, genesis); err != nil { - log.Crit("Failed to write genesis block", "err", err) - } + rawdb.WriteBlock(bc.chainDb, genesis) bc.genesisBlock = genesis bc.hc.SetGenesis(bc.genesisBlock.Header()) bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) diff --git a/light/lightchain_test.go b/light/lightchain_test.go index 98e10904949c..0de612e3b9e6 100644 --- a/light/lightchain_test.go +++ b/light/lightchain_test.go @@ -124,7 +124,7 @@ func 
testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error // Manually insert the header into the database, but don't reorganize (allows subsequent testing) lightchain.mu.Lock() core.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, lightchain.GetTdByHash(header.ParentHash))) - core.WriteHeader(lightchain.chainDb, header) + rawdb.WriteHeader(lightchain.chainDb, header) lightchain.mu.Unlock() } return nil diff --git a/light/odr.go b/light/odr.go index eb039cfe3030..ee9aa9b352ab 100644 --- a/light/odr.go +++ b/light/odr.go @@ -24,6 +24,7 @@ import ( "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/core" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" "github.com/XinFinOrg/XDPoSChain/core/types" "github.com/XinFinOrg/XDPoSChain/ethdb" ) @@ -112,7 +113,7 @@ type BlockRequest struct { // StoreResult stores the retrieved data in local database func (req *BlockRequest) StoreResult(db ethdb.Database) { - core.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp) + rawdb.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp) } // ReceiptsRequest is the ODR request type for retrieving block bodies @@ -141,10 +142,10 @@ type ChtRequest struct { // StoreResult stores the retrieved data in local database func (req *ChtRequest) StoreResult(db ethdb.Database) { // if there is a canonical hash, there is a header too - core.WriteHeader(db, req.Header) + rawdb.WriteHeader(db, req.Header) hash, num := req.Header.Hash(), req.Header.Number.Uint64() core.WriteTd(db, hash, num, req.Td) - core.WriteCanonicalHash(db, hash, num) + rawdb.WriteCanonicalHash(db, hash, num) } // BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure diff --git a/light/odr_util.go b/light/odr_util.go index 236f5c238220..d7ebd6739f75 100644 --- a/light/odr_util.go +++ b/light/odr_util.go @@ -124,7 +124,7 @@ func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint } // 
GetBlockReceipts retrieves the receipts generated by the transactions included -// in a block given by its hash. +// in a block given by its hash. Receipts will be filled in with context data. func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) { // Retrieve the potentially incomplete receipts from disk or network receipts := core.GetBlockReceipts(odr.Database(), hash, number) @@ -153,9 +153,8 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num } // GetBlockLogs retrieves the logs generated by the transactions included in a -// block given by its hash. +// block given by its hash. Logs will be filled in with context data. func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) ([][]*types.Log, error) { - // Retrieve the potentially incomplete receipts from disk or network receipts := core.GetBlockReceipts(odr.Database(), hash, number) if receipts == nil { r := &ReceiptsRequest{Hash: hash, Number: number} diff --git a/light/trie_test.go b/light/trie_test.go index fb030af871a5..2332043d2611 100644 --- a/light/trie_test.go +++ b/light/trie_test.go @@ -19,10 +19,12 @@ package light import ( "bytes" "context" + "errors" "fmt" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" "testing" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" + "github.com/XinFinOrg/XDPoSChain/consensus/ethash" "github.com/XinFinOrg/XDPoSChain/core" "github.com/XinFinOrg/XDPoSChain/core/state" @@ -74,9 +76,9 @@ func diffTries(t1, t2 state.Trie) error { case i2.Err != nil: return fmt.Errorf("light trie iterator error: %v", i1.Err) case i1.Next(): - return fmt.Errorf("full trie iterator has more k/v pairs") + return errors.New("full trie iterator has more k/v pairs") case i2.Next(): - return fmt.Errorf("light trie iterator has more k/v pairs") + return errors.New("light trie iterator has more k/v pairs") } return nil } diff --git a/miner/miner.go b/miner/miner.go index 
4a9d34b9f7fa..835f0f014b74 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -178,7 +178,18 @@ func (self *Miner) PendingBlock() *types.Block { return self.worker.pendingBlock() } +// PendingBlockAndReceipts returns the currently pending block and corresponding receipts. +func (miner *Miner) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + return miner.worker.pendingBlockAndReceipts() +} + func (self *Miner) SetEtherbase(addr common.Address) { self.coinbase = addr self.worker.setEtherbase(addr) } + +// SubscribePendingLogs starts delivering logs from pending transactions +// to the given channel. +func (self *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { + return self.worker.pendingLogsFeed.Subscribe(ch) +} diff --git a/miner/worker.go b/miner/worker.go index 3de0328bf8d8..0e341636d243 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -104,6 +104,9 @@ type worker struct { mu sync.Mutex + // Feeds + pendingLogsFeed event.Feed + // update loop mux *event.TypeMux txsCh chan core.NewTxsEvent @@ -125,6 +128,11 @@ type worker struct { coinbase common.Address extra []byte + snapshotMu sync.RWMutex // The lock used to protect the block snapshot and state snapshot + snapshotBlock *types.Block + snapshotReceipts types.Receipts + snapshotState *state.StateDB + currentMu sync.Mutex current *Work @@ -186,34 +194,31 @@ func (self *worker) setExtra(extra []byte) { self.extra = extra } -func (self *worker) pending() (*types.Block, *state.StateDB) { - self.currentMu.Lock() - defer self.currentMu.Unlock() - - if atomic.LoadInt32(&self.mining) == 0 { - return types.NewBlock( - self.current.header, - self.current.txs, - nil, - self.current.receipts, - ), self.current.state.Copy() +// pending returns the pending state and corresponding block. The returned +// values can be nil in case the pending block is not initialized. 
+func (w *worker) pending() (*types.Block, *state.StateDB) { + w.snapshotMu.RLock() + defer w.snapshotMu.RUnlock() + if w.snapshotState == nil { + return nil, nil } - return self.current.Block, self.current.state.Copy() + return w.snapshotBlock, w.snapshotState.Copy() } -func (self *worker) pendingBlock() *types.Block { - self.currentMu.Lock() - defer self.currentMu.Unlock() +// pendingBlock returns pending block. The returned block can be nil in case the +// pending block is not initialized. +func (w *worker) pendingBlock() *types.Block { + w.snapshotMu.RLock() + defer w.snapshotMu.RUnlock() + return w.snapshotBlock +} - if atomic.LoadInt32(&self.mining) == 0 { - return types.NewBlock( - self.current.header, - self.current.txs, - nil, - self.current.receipts, - ) - } - return self.current.Block +// pendingBlockAndReceipts returns pending block and corresponding receipts. +func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { + // return a snapshot to avoid contention on currentMu mutex + w.snapshotMu.RLock() + defer w.snapshotMu.RUnlock() + return w.snapshotBlock, w.snapshotReceipts } func (self *worker) start() { @@ -322,7 +327,15 @@ func (self *worker) update() { } feeCapacity := state.GetTRC21FeeCapacityFromState(self.current.state) txset, specialTxs := types.NewTransactionsByPriceAndNonce(self.current.signer, txs, nil, feeCapacity) - self.current.commitTransactions(self.mux, feeCapacity, txset, specialTxs, self.chain, self.coinbase) + + tcount := self.current.tcount + self.current.commitTransactions(self.mux, feeCapacity, txset, specialTxs, self.chain, self.coinbase, &self.pendingLogsFeed) + + // Only update the snapshot if any new transactions were added + // to the pending block + if tcount != self.current.tcount { + self.updateSnapshot() + } self.currentMu.Unlock() } else { // If we're mining, but nothing is being processed, wake on new transactions @@ -462,6 +475,32 @@ func (self *worker) push(work *Work) { } } +// copyReceipts makes 
a deep copy of the given receipts. +func copyReceipts(receipts []*types.Receipt) []*types.Receipt { + result := make([]*types.Receipt, len(receipts)) + for i, l := range receipts { + cpy := *l + result[i] = &cpy + } + return result +} + +// updateSnapshot updates pending snapshot block and state. +// Note this function assumes the current variable is thread safe. +func (w *worker) updateSnapshot() { + w.snapshotMu.Lock() + defer w.snapshotMu.Unlock() + + w.snapshotBlock = types.NewBlock( + w.current.header, + w.current.txs, + nil, + w.current.receipts, + ) + w.snapshotReceipts = copyReceipts(w.current.receipts) + w.snapshotState = w.current.state.Copy() +} + // makeCurrent creates a new environment for the current cycle. func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error { // Retrieve the parent state to execute on top and start a prefetcher for @@ -614,7 +653,7 @@ func (self *worker) commitNewWork() { misc.ApplyDAOHardFork(work.state) } if common.TIPSigning.Cmp(header.Number) == 0 { - work.state.DeleteAddress(common.HexToAddress(common.BlockSigners)) + work.state.DeleteAddress(common.BlockSignersBinary) } // won't grasp txs at checkpoint var ( @@ -699,7 +738,7 @@ func (self *worker) commitNewWork() { return } nonce := work.state.GetNonce(self.coinbase) - tx := types.NewTransaction(nonce, common.HexToAddress(common.XDCXAddr), big.NewInt(0), txMatchGasLimit, big.NewInt(0), txMatchBytes) + tx := types.NewTransaction(nonce, common.XDCXAddrBinary, big.NewInt(0), txMatchGasLimit, big.NewInt(0), txMatchBytes) txM, err := wallet.SignTx(accounts.Account{Address: self.coinbase}, tx, self.config.ChainId) if err != nil { log.Error("Fail to create tx matches", "error", err) @@ -729,7 +768,7 @@ func (self *worker) commitNewWork() { return } nonce := work.state.GetNonce(self.coinbase) - lendingTx := types.NewTransaction(nonce, common.HexToAddress(common.XDCXLendingAddress), big.NewInt(0), txMatchGasLimit, big.NewInt(0), lendingDataBytes) + lendingTx 
:= types.NewTransaction(nonce, common.XDCXLendingAddressBinary, big.NewInt(0), txMatchGasLimit, big.NewInt(0), lendingDataBytes) signedLendingTx, err := wallet.SignTx(accounts.Account{Address: self.coinbase}, lendingTx, self.config.ChainId) if err != nil { log.Error("Fail to create lending tx", "error", err) @@ -753,7 +792,7 @@ func (self *worker) commitNewWork() { return } nonce := work.state.GetNonce(self.coinbase) - finalizedTx := types.NewTransaction(nonce, common.HexToAddress(common.XDCXLendingFinalizedTradeAddress), big.NewInt(0), txMatchGasLimit, big.NewInt(0), finalizedTradeData) + finalizedTx := types.NewTransaction(nonce, common.XDCXLendingFinalizedTradeAddressBinary, big.NewInt(0), txMatchGasLimit, big.NewInt(0), finalizedTradeData) signedFinalizedTx, err := wallet.SignTx(accounts.Account{Address: self.coinbase}, finalizedTx, self.config.ChainId) if err != nil { log.Error("Fail to create lending tx", "error", err) @@ -772,7 +811,7 @@ func (self *worker) commitNewWork() { XDCxStateRoot := work.tradingState.IntermediateRoot() LendingStateRoot := work.lendingState.IntermediateRoot() txData := append(XDCxStateRoot.Bytes(), LendingStateRoot.Bytes()...) 
- tx := types.NewTransaction(work.state.GetNonce(self.coinbase), common.HexToAddress(common.TradingStateAddr), big.NewInt(0), txMatchGasLimit, big.NewInt(0), txData) + tx := types.NewTransaction(work.state.GetNonce(self.coinbase), common.TradingStateAddrBinary, big.NewInt(0), txMatchGasLimit, big.NewInt(0), txData) txStateRoot, err := wallet.SignTx(accounts.Account{Address: self.coinbase}, tx, self.config.ChainId) if err != nil { log.Error("Fail to create tx state root", "error", err) @@ -781,7 +820,7 @@ func (self *worker) commitNewWork() { specialTxs = append(specialTxs, txStateRoot) } } - work.commitTransactions(self.mux, feeCapacity, txs, specialTxs, self.chain, self.coinbase) + work.commitTransactions(self.mux, feeCapacity, txs, specialTxs, self.chain, self.coinbase, &self.pendingLogsFeed) // compute uncles for the new block. var ( uncles []*types.Header @@ -799,35 +838,37 @@ func (self *worker) commitNewWork() { self.lastParentBlockCommit = parent.Hash().Hex() } self.push(work) + self.updateSnapshot() } -func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Address]*big.Int, txs *types.TransactionsByPriceAndNonce, specialTxs types.Transactions, bc *core.BlockChain, coinbase common.Address) { +func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Address]*big.Int, txs *types.TransactionsByPriceAndNonce, specialTxs types.Transactions, bc *core.BlockChain, coinbase common.Address, pendingLogsFeed *event.Feed) { gp := new(core.GasPool).AddGas(env.header.GasLimit) balanceUpdated := map[common.Address]*big.Int{} totalFeeUsed := big.NewInt(0) var coalescedLogs []*types.Log // first priority for special Txs for _, tx := range specialTxs { - + to := tx.To() //HF number for black-list if (env.header.Number.Uint64() >= common.BlackListHFNumber) && !common.IsTestnet { + from := tx.From() // check if sender is in black list - if tx.From() != nil && common.Blacklist[*tx.From()] { - log.Debug("Skipping transaction with sender 
in black-list", "sender", tx.From().Hex()) + if from != nil && common.Blacklist[*from] { + log.Debug("Skipping transaction with sender in black-list", "sender", from.Hex()) continue } // check if receiver is in black list - if tx.To() != nil && common.Blacklist[*tx.To()] { - log.Debug("Skipping transaction with receiver in black-list", "receiver", tx.To().Hex()) + if to != nil && common.Blacklist[*to] { + log.Debug("Skipping transaction with receiver in black-list", "receiver", to.Hex()) continue } } - + data := tx.Data() // validate minFee slot for XDCZ if tx.IsXDCZApplyTransaction() { copyState, _ := bc.State() - if err := core.ValidateXDCZApplyTransaction(bc, nil, copyState, common.BytesToAddress(tx.Data()[4:])); err != nil { - log.Debug("XDCZApply: invalid token", "token", common.BytesToAddress(tx.Data()[4:]).Hex()) + if err := core.ValidateXDCZApplyTransaction(bc, nil, copyState, common.BytesToAddress(data[4:])); err != nil { + log.Debug("XDCZApply: invalid token", "token", common.BytesToAddress(data[4:]).Hex()) txs.Pop() continue } @@ -835,8 +876,8 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad // validate balance slot, token decimal for XDCX if tx.IsXDCXApplyTransaction() { copyState, _ := bc.State() - if err := core.ValidateXDCXApplyTransaction(bc, nil, copyState, common.BytesToAddress(tx.Data()[4:])); err != nil { - log.Debug("XDCXApply: invalid token", "token", common.BytesToAddress(tx.Data()[4:]).Hex()) + if err := core.ValidateXDCXApplyTransaction(bc, nil, copyState, common.BytesToAddress(data[4:])); err != nil { + log.Debug("XDCXApply: invalid token", "token", common.BytesToAddress(data[4:]).Hex()) txs.Pop() continue } @@ -853,38 +894,39 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad from, _ := types.Sender(env.signer, tx) // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. 
+ hash := tx.Hash() if tx.Protected() && !env.config.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected special transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block) + log.Trace("Ignoring reply protected special transaction", "hash", hash, "eip155", env.config.EIP155Block) continue } - if tx.To().Hex() == common.BlockSigners { - if len(tx.Data()) < 68 { - log.Trace("Data special transaction invalid length", "hash", tx.Hash(), "data", len(tx.Data())) + if *to == common.BlockSignersBinary { + if len(data) < 68 { + log.Trace("Data special transaction invalid length", "hash", hash, "data", len(data)) continue } - blkNumber := binary.BigEndian.Uint64(tx.Data()[8:40]) + blkNumber := binary.BigEndian.Uint64(data[8:40]) if blkNumber >= env.header.Number.Uint64() || blkNumber <= env.header.Number.Uint64()-env.config.XDPoS.Epoch*2 { - log.Trace("Data special transaction invalid number", "hash", tx.Hash(), "blkNumber", blkNumber, "miner", env.header.Number) + log.Trace("Data special transaction invalid number", "hash", hash, "blkNumber", blkNumber, "miner", env.header.Number) continue } } // Start executing the transaction - env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount) + env.state.Prepare(hash, common.Hash{}, env.tcount) nonce := env.state.GetNonce(from) if nonce != tx.Nonce() && !tx.IsSkipNonceTransaction() { - log.Trace("Skipping account with special transaction invalid nonce", "sender", from, "nonce", nonce, "tx nonce ", tx.Nonce(), "to", tx.To()) + log.Trace("Skipping account with special transaction invalid nonce", "sender", from, "nonce", nonce, "tx nonce ", tx.Nonce(), "to", to) continue } err, logs, tokenFeeUsed, gas := env.commitTransaction(balanceFee, tx, bc, coinbase, gp) switch err { case core.ErrNonceTooLow: // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping special transaction with low nonce", "sender", from, "nonce", tx.Nonce(), "to", tx.To()) + log.Trace("Skipping 
special transaction with low nonce", "sender", from, "nonce", tx.Nonce(), "to", to) case core.ErrNonceTooHigh: // Reorg notification data race between the transaction pool and miner, skip account = - log.Trace("Skipping account with special transaction hight nonce", "sender", from, "nonce", tx.Nonce(), "to", tx.To()) + log.Trace("Skipping account with special transaction hight nonce", "sender", from, "nonce", tx.Nonce(), "to", to) case nil: // Everything ok, collect the logs and shift in the next transaction from the same account coalescedLogs = append(coalescedLogs, logs...) @@ -893,12 +935,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad default: // Strange error, discard the transaction and get the next in line (note, the // nonce-too-high clause will prevent us from executing in vain). - log.Debug("Add Special Transaction failed, account skipped", "hash", tx.Hash(), "sender", from, "nonce", tx.Nonce(), "to", tx.To(), "err", err) + log.Debug("Add Special Transaction failed, account skipped", "hash", hash, "sender", from, "nonce", tx.Nonce(), "to", to, "err", err) } if tokenFeeUsed { fee := common.GetGasFee(env.header.Number.Uint64(), gas) - balanceFee[*tx.To()] = new(big.Int).Sub(balanceFee[*tx.To()], fee) - balanceUpdated[*tx.To()] = balanceFee[*tx.To()] + balanceFee[*to] = new(big.Int).Sub(balanceFee[*to], fee) + balanceUpdated[*to] = balanceFee[*to] totalFeeUsed = totalFeeUsed.Add(totalFeeUsed, fee) } } @@ -920,26 +962,28 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad } //HF number for black-list + to := tx.To() if (env.header.Number.Uint64() >= common.BlackListHFNumber) && !common.IsTestnet { + from := tx.From() // check if sender is in black list - if tx.From() != nil && common.Blacklist[*tx.From()] { - log.Debug("Skipping transaction with sender in black-list", "sender", tx.From().Hex()) + if from != nil && common.Blacklist[*from] { + log.Debug("Skipping transaction with sender in 
black-list", "sender", from.Hex()) txs.Pop() continue } // check if receiver is in black list - if tx.To() != nil && common.Blacklist[*tx.To()] { - log.Debug("Skipping transaction with receiver in black-list", "receiver", tx.To().Hex()) + if to != nil && common.Blacklist[*to] { + log.Debug("Skipping transaction with receiver in black-list", "receiver", to.Hex()) txs.Shift() continue } } - + data := tx.Data() // validate minFee slot for XDCZ if tx.IsXDCZApplyTransaction() { copyState, _ := bc.State() - if err := core.ValidateXDCZApplyTransaction(bc, nil, copyState, common.BytesToAddress(tx.Data()[4:])); err != nil { - log.Debug("XDCZApply: invalid token", "token", common.BytesToAddress(tx.Data()[4:]).Hex()) + if err := core.ValidateXDCZApplyTransaction(bc, nil, copyState, common.BytesToAddress(data[4:])); err != nil { + log.Debug("XDCZApply: invalid token", "token", common.BytesToAddress(data[4:]).Hex()) txs.Pop() continue } @@ -947,8 +991,8 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad // validate balance slot, token decimal for XDCX if tx.IsXDCXApplyTransaction() { copyState, _ := bc.State() - if err := core.ValidateXDCXApplyTransaction(bc, nil, copyState, common.BytesToAddress(tx.Data()[4:])); err != nil { - log.Debug("XDCXApply: invalid token", "token", common.BytesToAddress(tx.Data()[4:]).Hex()) + if err := core.ValidateXDCXApplyTransaction(bc, nil, copyState, common.BytesToAddress(data[4:])); err != nil { + log.Debug("XDCXApply: invalid token", "token", common.BytesToAddress(data[4:]).Hex()) txs.Pop() continue } @@ -959,15 +1003,16 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad // // We use the eip155 signer regardless of the current hf. from, _ := types.Sender(env.signer, tx) + hash := tx.Hash() // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. 
if tx.Protected() && !env.config.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block) + log.Trace("Ignoring reply protected transaction", "hash", hash, "eip155", env.config.EIP155Block) txs.Pop() continue } // Start executing the transaction - env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount) + env.state.Prepare(hash, common.Hash{}, env.tcount) nonce := env.state.GetNonce(from) if nonce > tx.Nonce() { // New head notification data race between the transaction pool and miner, shift @@ -1012,40 +1057,36 @@ func (env *Work) commitTransactions(mux *event.TypeMux, balanceFee map[common.Ad default: // Strange error, discard the transaction and get the next in line (note, the // nonce-too-high clause will prevent us from executing in vain). - log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + log.Debug("Transaction failed, account skipped", "hash", hash, "err", err) txs.Shift() } if tokenFeeUsed { fee := common.GetGasFee(env.header.Number.Uint64(), gas) - balanceFee[*tx.To()] = new(big.Int).Sub(balanceFee[*tx.To()], fee) - balanceUpdated[*tx.To()] = balanceFee[*tx.To()] + balanceFee[*to] = new(big.Int).Sub(balanceFee[*to], fee) + balanceUpdated[*to] = balanceFee[*to] totalFeeUsed = totalFeeUsed.Add(totalFeeUsed, fee) } } state.UpdateTRC21Fee(env.state, balanceUpdated, totalFeeUsed) - if len(coalescedLogs) > 0 || env.tcount > 0 { - // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined - // logs by filling in the block hash when the block was mined by the local miner. This can - // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. + // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined + // logs by filling in the block hash when the block was mined by the local miner. 
This can + // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. + if len(coalescedLogs) > 0 { cpy := make([]*types.Log, len(coalescedLogs)) for i, l := range coalescedLogs { cpy[i] = new(types.Log) *cpy[i] = *l } - go func(logs []*types.Log, tcount int) { - if len(logs) > 0 { - err := mux.Post(core.PendingLogsEvent{Logs: logs}) - if err != nil { - log.Warn("[commitTransactions] Error when sending PendingLogsEvent", "LogLength", len(logs)) - } - } - if tcount > 0 { - err := mux.Post(core.PendingStateEvent{}) - if err != nil { - log.Warn("[commitTransactions] Error when sending PendingStateEvent", "tcount", tcount) - } + pendingLogsFeed.Send(cpy) + } + if env.tcount > 0 { + go func(tcount int) { + err := mux.Post(core.PendingStateEvent{}) + if err != nil { + log.Warn("[commitTransactions] Error when sending PendingStateEvent", "tcount", tcount) } - }(cpy, env.tcount) + }(env.tcount) + } } diff --git a/mobile/bind.go b/mobile/bind.go index 0038347f6ada..23893e6d9b6b 100644 --- a/mobile/bind.go +++ b/mobile/bind.go @@ -39,7 +39,7 @@ type signer struct { } func (s *signer) Sign(addr *Address, unsignedTx *Transaction) (signedTx *Transaction, _ error) { - sig, err := s.sign(types.HomesteadSigner{}, addr.address, unsignedTx.tx) + sig, err := s.sign(addr.address, unsignedTx.tx) if err != nil { return nil, err } @@ -89,7 +89,7 @@ func (opts *TransactOpts) GetGasLimit() int64 { return int64(opts.opts.GasLimi func (opts *TransactOpts) SetFrom(from *Address) { opts.opts.From = from.address } func (opts *TransactOpts) SetNonce(nonce int64) { opts.opts.Nonce = big.NewInt(nonce) } func (opts *TransactOpts) SetSigner(s Signer) { - opts.opts.Signer = func(signer types.Signer, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { + opts.opts.Signer = func(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { sig, err := s.Sign(&Address{addr}, &Transaction{tx}) if err != nil { return nil, err diff --git 
a/mobile/geth.go b/mobile/geth.go index 02564b8e9ceb..fd62ea004971 100644 --- a/mobile/geth.go +++ b/mobile/geth.go @@ -25,8 +25,8 @@ import ( "path/filepath" "github.com/XinFinOrg/XDPoSChain/core" - "github.com/XinFinOrg/XDPoSChain/eth" "github.com/XinFinOrg/XDPoSChain/eth/downloader" + "github.com/XinFinOrg/XDPoSChain/eth/ethconfig" "github.com/XinFinOrg/XDPoSChain/ethclient" "github.com/XinFinOrg/XDPoSChain/ethstats" "github.com/XinFinOrg/XDPoSChain/les" @@ -144,7 +144,7 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) { } // Register the Ethereum protocol if requested if config.EthereumEnabled { - ethConf := eth.DefaultConfig + ethConf := ethconfig.Defaults ethConf.Genesis = genesis ethConf.SyncMode = downloader.LightSync ethConf.NetworkId = uint64(config.EthereumNetworkID) diff --git a/node/api.go b/node/api.go index e03f668b172c..8a3611524418 100644 --- a/node/api.go +++ b/node/api.go @@ -18,6 +18,7 @@ package node import ( "context" + "errors" "fmt" "strings" "time" @@ -169,7 +170,7 @@ func (api *PrivateAdminAPI) StopRPC() (bool, error) { defer api.node.lock.Unlock() if api.node.httpHandler == nil { - return false, fmt.Errorf("HTTP RPC not running") + return false, errors.New("HTTP RPC not running") } api.node.stopHTTP() return true, nil @@ -223,7 +224,7 @@ func (api *PrivateAdminAPI) StopWS() (bool, error) { defer api.node.lock.Unlock() if api.node.wsHandler == nil { - return false, fmt.Errorf("WebSocket RPC not running") + return false, errors.New("WebSocket RPC not running") } api.node.stopWS() return true, nil diff --git a/node/node.go b/node/node.go index c54328913742..a9f767bf28f1 100644 --- a/node/node.go +++ b/node/node.go @@ -48,6 +48,7 @@ type Node struct { serverConfig p2p.Config server *p2p.Server // Currently running P2P networking layer + state int // Tracks state of node lifecycle serviceFuncs []ServiceConstructor // Service constructors (in dependency order) services map[reflect.Type]Service // Currently running 
services @@ -74,6 +75,10 @@ type Node struct { log log.Logger } +const ( + initializingState = iota +) + // New creates a new P2P node, ready for protocol registration. func New(conf *Config) (*Node, error) { // Copy config and resolve the datadir so future changes to the current @@ -302,6 +307,17 @@ func (n *Node) stopInProc() { } } +// RegisterAPIs registers the APIs a service provides on the node. +func (n *Node) RegisterAPIs(apis []rpc.API) { + n.lock.Lock() + defer n.lock.Unlock() + + if n.state != initializingState { + panic("can't register APIs on running/stopped node") + } + n.rpcAPIs = append(n.rpcAPIs, apis...) +} + // startIPC initializes and starts the IPC RPC endpoint. func (n *Node) startIPC(apis []rpc.API) error { // Short circuit if the IPC endpoint isn't being exposed diff --git a/node/service_test.go b/node/service_test.go index 86fb1a6fbdd5..23dc80708411 100644 --- a/node/service_test.go +++ b/node/service_test.go @@ -17,6 +17,7 @@ package node import ( + "errors" "fmt" "os" "path/filepath" @@ -70,7 +71,7 @@ func TestContextServices(t *testing.T) { verifier := func(ctx *ServiceContext) (Service, error) { var objA *NoopServiceA if ctx.Service(&objA) != nil { - return nil, fmt.Errorf("former service not found") + return nil, errors.New("former service not found") } var objB *NoopServiceB if err := ctx.Service(&objB); err != ErrServiceUnknown { diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go index c24823967c6a..097771553d28 100644 --- a/p2p/discv5/net.go +++ b/p2p/discv5/net.go @@ -1063,7 +1063,7 @@ func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error case pongPacket: if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) { // fmt.Println("pong reply token mismatch") - return fmt.Errorf("pong reply token mismatch") + return errors.New("pong reply token mismatch") } n.pingEcho = nil } diff --git a/p2p/discv5/ticket.go b/p2p/discv5/ticket.go index 7c82d2db975b..2a6d40e244e1 100644 --- a/p2p/discv5/ticket.go +++ 
b/p2p/discv5/ticket.go @@ -19,6 +19,7 @@ package discv5 import ( "bytes" "encoding/binary" + "errors" "fmt" "math" "math/rand" @@ -95,7 +96,7 @@ func pongToTicket(localTime mclock.AbsTime, topics []Topic, node *Node, p *ingre return nil, fmt.Errorf("bad wait period list: got %d values, want %d", len(topics), len(wps)) } if rlpHash(topics) != p.data.(*pong).TopicHash { - return nil, fmt.Errorf("bad topic hash") + return nil, errors.New("bad topic hash") } t := &ticket{ issueTime: localTime, diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go index 28c8e26da5b3..5aca3ab25a6c 100644 --- a/p2p/enr/enr.go +++ b/p2p/enr/enr.go @@ -275,7 +275,7 @@ func (r *Record) verifySignature() error { if err := r.Load(&entry); err != nil { return err } else if len(entry) != 33 { - return fmt.Errorf("invalid public key") + return errors.New("invalid public key") } // Verify the signature. diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go index 577a424fbec1..df4764cc1074 100644 --- a/p2p/nat/natpmp.go +++ b/p2p/nat/natpmp.go @@ -17,12 +17,13 @@ package nat import ( + "errors" "fmt" "net" "strings" "time" - "github.com/jackpal/go-nat-pmp" + natpmp "github.com/jackpal/go-nat-pmp" ) // natPMPClient adapts the NAT-PMP protocol implementation so it conforms to @@ -46,7 +47,7 @@ func (n *pmp) ExternalIP() (net.IP, error) { func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error { if lifetime <= 0 { - return fmt.Errorf("lifetime must not be <= 0") + return errors.New("lifetime must not be <= 0") } // Note order of port arguments is switched between our // AddMapping and the client's AddPortMapping. diff --git a/p2p/peer.go b/p2p/peer.go index a7eb05621f13..1d1cfc8906f6 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -17,6 +17,7 @@ package p2p import ( + "errors" "fmt" "io" "net" @@ -409,7 +410,7 @@ func (rw *protoRW) WriteMsg(msg Msg) (err error) { // as well but we don't want to rely on that. 
rw.werr <- err case <-rw.closed: - err = fmt.Errorf("shutting down") + err = errors.New("shutting down") } return err } diff --git a/p2p/protocols/protocol_test.go b/p2p/protocols/protocol_test.go index a4247917c76c..bcd3186f6ce3 100644 --- a/p2p/protocols/protocol_test.go +++ b/p2p/protocols/protocol_test.go @@ -43,7 +43,7 @@ type kill struct { type drop struct { } -/// protoHandshake represents module-independent aspects of the protocol and is +// / protoHandshake represents module-independent aspects of the protocol and is // the first message peers send and receive as part the initial exchange type protoHandshake struct { Version uint // local and remote peer should have identical version @@ -241,7 +241,7 @@ func runModuleHandshake(t *testing.T, resp uint, errs ...error) { } func TestModuleHandshakeError(t *testing.T) { - runModuleHandshake(t, 43, fmt.Errorf("handshake mismatch remote 43 > local 42")) + runModuleHandshake(t, 43, errors.New("handshake mismatch remote 43 > local 42")) } func TestModuleHandshakeSuccess(t *testing.T) { @@ -376,14 +376,14 @@ WAIT: func TestMultiplePeersDropSelf(t *testing.T) { runMultiplePeers(t, 0, - fmt.Errorf("subprotocol error"), - fmt.Errorf("Message handler error: (msg code 3): dropped"), + errors.New("subprotocol error"), + errors.New("Message handler error: (msg code 3): dropped"), ) } func TestMultiplePeersDropOther(t *testing.T) { runMultiplePeers(t, 1, - fmt.Errorf("Message handler error: (msg code 3): dropped"), - fmt.Errorf("subprotocol error"), + errors.New("Message handler error: (msg code 3): dropped"), + errors.New("subprotocol error"), ) } diff --git a/p2p/rlpx.go b/p2p/rlpx.go index ea26b2f2ec8c..5ceb897eae09 100644 --- a/p2p/rlpx.go +++ b/p2p/rlpx.go @@ -147,7 +147,7 @@ func readProtocolHandshake(rw MsgReader, our *protoHandshake) (*protoHandshake, return nil, err } if msg.Size > baseProtocolMaxMsgSize { - return nil, fmt.Errorf("message too big") + return nil, errors.New("message too big") } if msg.Code == 
discMsg { // Disconnect before protocol handshake is valid according to the diff --git a/p2p/server.go b/p2p/server.go index f20cba6ddea2..2ccb4cf17a84 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -20,7 +20,6 @@ package p2p import ( "crypto/ecdsa" "errors" - "fmt" "net" "sync" "time" @@ -365,7 +364,7 @@ type sharedUDPConn struct { func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { packet, ok := <-s.unhandled if !ok { - return 0, nil, fmt.Errorf("Connection was closed") + return 0, nil, errors.New("Connection was closed") } l := len(packet.Data) if l > len(b) { @@ -397,7 +396,7 @@ func (srv *Server) Start() (err error) { // static fields if srv.PrivateKey == nil { - return fmt.Errorf("Server.PrivateKey must be set to a non-nil key") + return errors.New("Server.PrivateKey must be set to a non-nil key") } if srv.newTransport == nil { srv.newTransport = newRLPX diff --git a/p2p/testing/protocolsession.go b/p2p/testing/protocolsession.go index da221189b912..2c0133b111b1 100644 --- a/p2p/testing/protocolsession.go +++ b/p2p/testing/protocolsession.go @@ -273,7 +273,7 @@ func (self *ProtocolSession) TestDisconnected(disconnects ...*Disconnect) error } delete(expects, event.Peer) case <-timeout: - return fmt.Errorf("timed out waiting for peers to disconnect") + return errors.New("timed out waiting for peers to disconnect") } } return nil diff --git a/params/config.go b/params/config.go index 650722c4af6f..6335310a8a7e 100644 --- a/params/config.go +++ b/params/config.go @@ -94,6 +94,14 @@ var ( TimeoutPeriod: 30, MinePeriod: 2, }, + 13625855: { // 2024.07.29 RPC call and reorg sync issue + MaxMasternodes: 108, + SwitchRound: 13625855, + CertThreshold: 0.4, + TimeoutSyncThreshold: 3, + TimeoutPeriod: 30, + MinePeriod: 2, + }, } UnitTestV2Configs = map[uint64]*V2Config{ @@ -627,7 +635,7 @@ func (c *ChainConfig) IsTIPXDCXReceiver(num *big.Int) bool { } func (c *ChainConfig) IsXDCxDisable(num *big.Int) bool { - return 
isForked(common.TIPXDCXReceiverDisable, num) + return isForked(common.TIPXDCXMinerDisable, num) } func (c *ChainConfig) IsTIPXDCXLending(num *big.Int) bool { diff --git a/params/denomination.go b/params/denomination.go index 9e1b52506f47..471a045007e1 100644 --- a/params/denomination.go +++ b/params/denomination.go @@ -25,6 +25,7 @@ const ( Wei = 1 Ada = 1e3 Babbage = 1e6 + GWei = 1e9 Shannon = 1e9 Szabo = 1e12 Finney = 1e15 diff --git a/params/version.go b/params/version.go index 1b5dea34b2f6..c60b07776e4f 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 2 // Major version component of the current release - VersionMinor = 0 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 2 // Major version component of the current release + VersionMinor = 3 // Minor version component of the current release + VersionPatch = 1 // Patch version component of the current release + VersionMeta = "beta1" // Version metadata to append to the version string ) // Version holds the textual version string. 
diff --git a/rpc/types.go b/rpc/types.go index a48dd7a1f3d0..f3b50a259be6 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -19,7 +19,7 @@ package rpc import ( "context" "encoding/json" - "fmt" + "errors" "math" "strings" @@ -109,7 +109,7 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { return err } if blckNum > math.MaxInt64 { - return fmt.Errorf("block number larger than int64") + return errors.New("block number larger than int64") } *bn = BlockNumber(blckNum) return nil @@ -131,7 +131,7 @@ func (e *EpochNumber) UnmarshalJSON(data []byte) error { return err } if eNum > math.MaxInt64 { - return fmt.Errorf("EpochNumber too high") + return errors.New("EpochNumber too high") } *e = EpochNumber(eNum) @@ -154,7 +154,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { err := json.Unmarshal(data, &e) if err == nil { if e.BlockNumber != nil && e.BlockHash != nil { - return fmt.Errorf("cannot specify both BlockHash and BlockNumber, choose one or the other") + return errors.New("cannot specify both BlockHash and BlockNumber, choose one or the other") } bnh.BlockNumber = e.BlockNumber bnh.BlockHash = e.BlockHash @@ -198,7 +198,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { return err } if blckNum > math.MaxInt64 { - return fmt.Errorf("blocknumber too high") + return errors.New("blocknumber too high") } bn := BlockNumber(blckNum) bnh.BlockNumber = &bn diff --git a/swarm/README.md b/swarm/README.md new file mode 100644 index 000000000000..f33af5095e9d --- /dev/null +++ b/swarm/README.md @@ -0,0 +1,7 @@ +# Swarm + +https://www.ethswarm.org/ + +Swarm is a distributed storage platform and content distribution service, a native base layer service of the ethereum web3 stack. The primary objective of Swarm is to provide a decentralized and redundant store for dapp code and data as well as block chain and state data. 
Swarm is also set out to provide various base layer services for web3, including node-to-node messaging, media streaming, decentralised database services and scalable state-channel infrastructure for decentralised service economies. + +**Note**: The codebase has been moved to [ethersphere/bee](https://github.com/ethersphere/bee) \ No newline at end of file diff --git a/swarm/api/api.go b/swarm/api/api.go deleted file mode 100644 index 5b61c1970edb..000000000000 --- a/swarm/api/api.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package api - -import ( - "fmt" - "io" - "net/http" - "path" - "regexp" - "strings" - "sync" - - "bytes" - "mime" - "path/filepath" - "time" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}") - -//setup metrics -var ( - apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil) - apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil) - apiPutCount = metrics.NewRegisteredCounter("api.put.count", nil) - apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil) - apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil) - apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil) - apiGetHttp300 = metrics.NewRegisteredCounter("api.get.http.300", nil) - apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil) - apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil) - apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil) - apiAddFileFail = metrics.NewRegisteredCounter("api.addfile.fail", nil) - apiRmFileCount = metrics.NewRegisteredCounter("api.removefile.count", nil) - apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil) - apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil) - apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil) -) - -type Resolver interface { - Resolve(string) (common.Hash, error) -} - -// NoResolverError is returned by MultiResolver.Resolve if no resolver -// can be found for the address. 
-type NoResolverError struct { - TLD string -} - -func NewNoResolverError(tld string) *NoResolverError { - return &NoResolverError{TLD: tld} -} - -func (e *NoResolverError) Error() string { - if e.TLD == "" { - return "no ENS resolver" - } - return fmt.Sprintf("no ENS endpoint configured to resolve .%s TLD names", e.TLD) -} - -// MultiResolver is used to resolve URL addresses based on their TLDs. -// Each TLD can have multiple resolvers, and the resoluton from the -// first one in the sequence will be returned. -type MultiResolver struct { - resolvers map[string][]Resolver -} - -// MultiResolverOption sets options for MultiResolver and is used as -// arguments for its constructor. -type MultiResolverOption func(*MultiResolver) - -// MultiResolverOptionWithResolver adds a Resolver to a list of resolvers -// for a specific TLD. If TLD is an empty string, the resolver will be added -// to the list of default resolver, the ones that will be used for resolution -// of addresses which do not have their TLD resolver specified. -func MultiResolverOptionWithResolver(r Resolver, tld string) MultiResolverOption { - return func(m *MultiResolver) { - m.resolvers[tld] = append(m.resolvers[tld], r) - } -} - -// NewMultiResolver creates a new instance of MultiResolver. -func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) { - m = &MultiResolver{ - resolvers: make(map[string][]Resolver), - } - for _, o := range opts { - o(m) - } - return m -} - -// Resolve resolves address by choosing a Resolver by TLD. -// If there are more default Resolvers, or for a specific TLD, -// the Hash from the the first one which does not return error -// will be returned. 
-func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) { - rs := m.resolvers[""] - tld := path.Ext(addr) - if tld != "" { - tld = tld[1:] - rstld, ok := m.resolvers[tld] - if ok { - rs = rstld - } - } - if rs == nil { - return h, NewNoResolverError(tld) - } - for _, r := range rs { - h, err = r.Resolve(addr) - if err == nil { - return - } - } - return -} - -/* -Api implements webserver/file system related content storage and retrieval -on top of the dpa -it is the public interface of the dpa which is included in the ethereum stack -*/ -type Api struct { - dpa *storage.DPA - dns Resolver -} - -//the api constructor initialises -func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) { - self = &Api{ - dpa: dpa, - dns: dns, - } - return -} - -// to be used only in TEST -func (self *Api) Upload(uploadDir, index string) (hash string, err error) { - fs := NewFileSystem(self) - hash, err = fs.Upload(uploadDir, index) - return hash, err -} - -// DPA reader API -func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader { - return self.dpa.Retrieve(key) -} - -func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) { - return self.dpa.Store(data, size, wg, nil) -} - -type ErrResolve error - -// DNS Resolver -func (self *Api) Resolve(uri *URI) (storage.Key, error) { - apiResolveCount.Inc(1) - log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr)) - - // if the URI is immutable, check if the address is a hash - isHash := hashMatcher.MatchString(uri.Addr) - if uri.Immutable() || uri.DeprecatedImmutable() { - if !isHash { - return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr) - } - return common.Hex2Bytes(uri.Addr), nil - } - - // if DNS is not configured, check if the address is a hash - if self.dns == nil { - if !isHash { - apiResolveFail.Inc(1) - return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr) - } - return common.Hex2Bytes(uri.Addr), nil - } - - // try and resolve the 
address - resolved, err := self.dns.Resolve(uri.Addr) - if err == nil { - return resolved[:], nil - } else if !isHash { - apiResolveFail.Inc(1) - return nil, err - } - return common.Hex2Bytes(uri.Addr), nil -} - -// Put provides singleton manifest creation on top of dpa store -func (self *Api) Put(content, contentType string) (storage.Key, error) { - apiPutCount.Inc(1) - r := strings.NewReader(content) - wg := &sync.WaitGroup{} - key, err := self.dpa.Store(r, int64(len(content)), wg, nil) - if err != nil { - apiPutFail.Inc(1) - return nil, err - } - manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) - r = strings.NewReader(manifest) - key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil) - if err != nil { - apiPutFail.Inc(1) - return nil, err - } - wg.Wait() - return key, nil -} - -// Get uses iterative manifest retrieval and prefix matching -// to resolve basePath to content using dpa retrieve -// it returns a section reader, mimeType, status and an error -func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) { - apiGetCount.Inc(1) - trie, err := loadManifest(self.dpa, key, nil) - if err != nil { - apiGetNotFound.Inc(1) - status = http.StatusNotFound - log.Warn(fmt.Sprintf("loadManifestTrie error: %v", err)) - return - } - - log.Trace(fmt.Sprintf("getEntry(%s)", path)) - - entry, _ := trie.getEntry(path) - - if entry != nil { - key = common.Hex2Bytes(entry.Hash) - status = entry.Status - if status == http.StatusMultipleChoices { - apiGetHttp300.Inc(1) - return - } else { - mimeType = entry.ContentType - log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType)) - reader = self.dpa.Retrieve(key) - } - } else { - status = http.StatusNotFound - apiGetNotFound.Inc(1) - err = fmt.Errorf("manifest entry for '%s' not found", path) - log.Warn(fmt.Sprintf("%v", err)) - } - return -} - -func (self *Api) Modify(key storage.Key, path, contentHash, 
contentType string) (storage.Key, error) { - apiModifyCount.Inc(1) - quitC := make(chan bool) - trie, err := loadManifest(self.dpa, key, quitC) - if err != nil { - apiModifyFail.Inc(1) - return nil, err - } - if contentHash != "" { - entry := newManifestTrieEntry(&ManifestEntry{ - Path: path, - ContentType: contentType, - }, nil) - entry.Hash = contentHash - trie.addEntry(entry, quitC) - } else { - trie.deleteEntry(path, quitC) - } - - if err := trie.recalcAndStore(); err != nil { - apiModifyFail.Inc(1) - return nil, err - } - return trie.hash, nil -} - -func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Key, string, error) { - apiAddFileCount.Inc(1) - - uri, err := Parse("bzz:/" + mhash) - if err != nil { - apiAddFileFail.Inc(1) - return nil, "", err - } - mkey, err := self.Resolve(uri) - if err != nil { - apiAddFileFail.Inc(1) - return nil, "", err - } - - // trim the root dir we added - if path[:1] == "/" { - path = path[1:] - } - - entry := &ManifestEntry{ - Path: filepath.Join(path, fname), - ContentType: mime.TypeByExtension(filepath.Ext(fname)), - Mode: 0700, - Size: int64(len(content)), - ModTime: time.Now(), - } - - mw, err := self.NewManifestWriter(mkey, nil) - if err != nil { - apiAddFileFail.Inc(1) - return nil, "", err - } - - fkey, err := mw.AddEntry(bytes.NewReader(content), entry) - if err != nil { - apiAddFileFail.Inc(1) - return nil, "", err - } - - newMkey, err := mw.Store() - if err != nil { - apiAddFileFail.Inc(1) - return nil, "", err - - } - - return fkey, newMkey.String(), nil - -} - -func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) { - apiRmFileCount.Inc(1) - - uri, err := Parse("bzz:/" + mhash) - if err != nil { - apiRmFileFail.Inc(1) - return "", err - } - mkey, err := self.Resolve(uri) - if err != nil { - apiRmFileFail.Inc(1) - return "", err - } - - // trim the root dir we added - if path[:1] == "/" { - path = path[1:] - } - - mw, err := 
self.NewManifestWriter(mkey, nil) - if err != nil { - apiRmFileFail.Inc(1) - return "", err - } - - err = mw.RemoveEntry(filepath.Join(path, fname)) - if err != nil { - apiRmFileFail.Inc(1) - return "", err - } - - newMkey, err := mw.Store() - if err != nil { - apiRmFileFail.Inc(1) - return "", err - - } - - return newMkey.String(), nil -} - -func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldKey storage.Key, offset int64, addSize int64, nameresolver bool) (storage.Key, string, error) { - apiAppendFileCount.Inc(1) - - buffSize := offset + addSize - if buffSize < existingSize { - buffSize = existingSize - } - - buf := make([]byte, buffSize) - - oldReader := self.Retrieve(oldKey) - io.ReadAtLeast(oldReader, buf, int(offset)) - - newReader := bytes.NewReader(content) - io.ReadAtLeast(newReader, buf[offset:], int(addSize)) - - if buffSize < existingSize { - io.ReadAtLeast(oldReader, buf[addSize:], int(buffSize)) - } - - combinedReader := bytes.NewReader(buf) - totalSize := int64(len(buf)) - - // TODO(jmozah): to append using pyramid chunker when it is ready - //oldReader := self.Retrieve(oldKey) - //newReader := bytes.NewReader(content) - //combinedReader := io.MultiReader(oldReader, newReader) - - uri, err := Parse("bzz:/" + mhash) - if err != nil { - apiAppendFileFail.Inc(1) - return nil, "", err - } - mkey, err := self.Resolve(uri) - if err != nil { - apiAppendFileFail.Inc(1) - return nil, "", err - } - - // trim the root dir we added - if path[:1] == "/" { - path = path[1:] - } - - mw, err := self.NewManifestWriter(mkey, nil) - if err != nil { - apiAppendFileFail.Inc(1) - return nil, "", err - } - - err = mw.RemoveEntry(filepath.Join(path, fname)) - if err != nil { - apiAppendFileFail.Inc(1) - return nil, "", err - } - - entry := &ManifestEntry{ - Path: filepath.Join(path, fname), - ContentType: mime.TypeByExtension(filepath.Ext(fname)), - Mode: 0700, - Size: totalSize, - ModTime: time.Now(), - } - - fkey, err := 
mw.AddEntry(io.Reader(combinedReader), entry) - if err != nil { - apiAppendFileFail.Inc(1) - return nil, "", err - } - - newMkey, err := mw.Store() - if err != nil { - apiAppendFileFail.Inc(1) - return nil, "", err - - } - - return fkey, newMkey.String(), nil - -} - -func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) { - - uri, err := Parse("bzz:/" + mhash) - if err != nil { - return nil, nil, err - } - key, err = self.Resolve(uri) - if err != nil { - return nil, nil, err - } - - quitC := make(chan bool) - rootTrie, err := loadManifest(self.dpa, key, quitC) - if err != nil { - return nil, nil, fmt.Errorf("can't load manifest %v: %v", key.String(), err) - } - - manifestEntryMap = map[string]*manifestTrieEntry{} - err = rootTrie.listWithPrefix(uri.Path, quitC, func(entry *manifestTrieEntry, suffix string) { - manifestEntryMap[suffix] = entry - }) - - if err != nil { - return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err) - } - return key, manifestEntryMap, nil -} diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go deleted file mode 100644 index 1a9345f9656b..000000000000 --- a/swarm/api/api_test.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import ( - "errors" - "fmt" - "io" - "os" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -func testApi(t *testing.T, f func(*Api)) { - datadir, err := os.MkdirTemp("", "bzz-test") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } - os.RemoveAll(datadir) - defer os.RemoveAll(datadir) - dpa, err := storage.NewLocalDPA(datadir) - if err != nil { - return - } - api := NewApi(dpa, nil) - dpa.Start() - f(api) - dpa.Stop() -} - -type testResponse struct { - reader storage.LazySectionReader - *Response -} - -func checkResponse(t *testing.T, resp *testResponse, exp *Response) { - - if resp.MimeType != exp.MimeType { - t.Errorf("incorrect mimeType. expected '%s', got '%s'", exp.MimeType, resp.MimeType) - } - if resp.Status != exp.Status { - t.Errorf("incorrect status. expected '%d', got '%d'", exp.Status, resp.Status) - } - if resp.Size != exp.Size { - t.Errorf("incorrect size. expected '%d', got '%d'", exp.Size, resp.Size) - } - if resp.reader != nil { - content := make([]byte, resp.Size) - read, _ := resp.reader.Read(content) - if int64(read) != exp.Size { - t.Errorf("incorrect content length. expected '%d...', got '%d...'", read, exp.Size) - } - resp.Content = string(content) - } - if resp.Content != exp.Content { - // if !bytes.Equal(resp.Content, exp.Content) - t.Errorf("incorrect content. 
expected '%s...', got '%s...'", string(exp.Content), string(resp.Content)) - } -} - -// func expResponse(content []byte, mimeType string, status int) *Response { -func expResponse(content string, mimeType string, status int) *Response { - log.Trace(fmt.Sprintf("expected content (%v): %v ", len(content), content)) - return &Response{mimeType, status, int64(len(content)), content} -} - -// func testGet(t *testing.T, api *Api, bzzhash string) *testResponse { -func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse { - key := storage.Key(common.Hex2Bytes(bzzhash)) - reader, mimeType, status, err := api.Get(key, path) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - quitC := make(chan bool) - size, err := reader.Size(quitC) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - log.Trace(fmt.Sprintf("reader size: %v ", size)) - s := make([]byte, size) - _, err = reader.Read(s) - if err != io.EOF { - t.Fatalf("unexpected error: %v", err) - } - reader.Seek(0, 0) - return &testResponse{reader, &Response{mimeType, status, size, string(s)}} - // return &testResponse{reader, &Response{mimeType, status, reader.Size(), nil}} -} - -func TestApiPut(t *testing.T) { - testApi(t, func(api *Api) { - content := "hello" - exp := expResponse(content, "text/plain", 0) - // exp := expResponse([]byte(content), "text/plain", 0) - key, err := api.Put(content, exp.MimeType) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - resp := testGet(t, api, key.String(), "") - checkResponse(t, resp, exp) - }) -} - -// testResolver implements the Resolver interface and either returns the given -// hash if it is set, or returns a "name not found" error -type testResolver struct { - hash *common.Hash -} - -func newTestResolver(addr string) *testResolver { - r := &testResolver{} - if addr != "" { - hash := common.HexToHash(addr) - r.hash = &hash - } - return r -} - -func (t *testResolver) Resolve(addr string) (common.Hash, error) { - if t.hash == nil { 
- return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr) - } - return *t.hash, nil -} - -// TestAPIResolve tests resolving URIs which can either contain content hashes -// or ENS names -func TestAPIResolve(t *testing.T) { - ensAddr := "swarm.eth" - hashAddr := "1111111111111111111111111111111111111111111111111111111111111111" - resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222" - doesResolve := newTestResolver(resolvedAddr) - doesntResolve := newTestResolver("") - - type test struct { - desc string - dns Resolver - addr string - immutable bool - result string - expectErr error - } - - tests := []*test{ - { - desc: "DNS not configured, hash address, returns hash address", - dns: nil, - addr: hashAddr, - result: hashAddr, - }, - { - desc: "DNS not configured, ENS address, returns error", - dns: nil, - addr: ensAddr, - expectErr: errors.New(`no DNS to resolve name: "swarm.eth"`), - }, - { - desc: "DNS configured, hash address, hash resolves, returns resolved address", - dns: doesResolve, - addr: hashAddr, - result: resolvedAddr, - }, - { - desc: "DNS configured, immutable hash address, hash resolves, returns hash address", - dns: doesResolve, - addr: hashAddr, - immutable: true, - result: hashAddr, - }, - { - desc: "DNS configured, hash address, hash doesn't resolve, returns hash address", - dns: doesntResolve, - addr: hashAddr, - result: hashAddr, - }, - { - desc: "DNS configured, ENS address, name resolves, returns resolved address", - dns: doesResolve, - addr: ensAddr, - result: resolvedAddr, - }, - { - desc: "DNS configured, immutable ENS address, name resolves, returns error", - dns: doesResolve, - addr: ensAddr, - immutable: true, - expectErr: errors.New(`immutable address not a content hash: "swarm.eth"`), - }, - { - desc: "DNS configured, ENS address, name doesn't resolve, returns error", - dns: doesntResolve, - addr: ensAddr, - expectErr: errors.New(`DNS name not found: "swarm.eth"`), - }, - } - for _, x := range 
tests { - t.Run(x.desc, func(t *testing.T) { - api := &Api{dns: x.dns} - uri := &URI{Addr: x.addr, Scheme: "bzz"} - if x.immutable { - uri.Scheme = "bzz-immutable" - } - res, err := api.Resolve(uri) - if err == nil { - if x.expectErr != nil { - t.Fatalf("expected error %q, got result %q", x.expectErr, res) - } - if res.String() != x.result { - t.Fatalf("expected result %q, got %q", x.result, res) - } - } else { - if x.expectErr == nil { - t.Fatalf("expected no error, got %q", err) - } - if err.Error() != x.expectErr.Error() { - t.Fatalf("expected error %q, got %q", x.expectErr, err) - } - } - }) - } -} - -func TestMultiResolver(t *testing.T) { - doesntResolve := newTestResolver("") - - ethAddr := "swarm.eth" - ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222" - ethResolve := newTestResolver(ethHash) - - testAddr := "swarm.test" - testHash := "0x1111111111111111111111111111111111111111111111111111111111111111" - testResolve := newTestResolver(testHash) - - tests := []struct { - desc string - r Resolver - addr string - result string - err error - }{ - { - desc: "No resolvers, returns error", - r: NewMultiResolver(), - err: NewNoResolverError(""), - }, - { - desc: "One default resolver, returns resolved address", - r: NewMultiResolver(MultiResolverOptionWithResolver(ethResolve, "")), - addr: ethAddr, - result: ethHash, - }, - { - desc: "Two default resolvers, returns resolved address", - r: NewMultiResolver( - MultiResolverOptionWithResolver(ethResolve, ""), - MultiResolverOptionWithResolver(ethResolve, ""), - ), - addr: ethAddr, - result: ethHash, - }, - { - desc: "Two default resolvers, first doesn't resolve, returns resolved address", - r: NewMultiResolver( - MultiResolverOptionWithResolver(doesntResolve, ""), - MultiResolverOptionWithResolver(ethResolve, ""), - ), - addr: ethAddr, - result: ethHash, - }, - { - desc: "Default resolver doesn't resolve, tld resolver resolve, returns resolved address", - r: NewMultiResolver( - 
MultiResolverOptionWithResolver(doesntResolve, ""), - MultiResolverOptionWithResolver(ethResolve, "eth"), - ), - addr: ethAddr, - result: ethHash, - }, - { - desc: "Three TLD resolvers, third resolves, returns resolved address", - r: NewMultiResolver( - MultiResolverOptionWithResolver(doesntResolve, "eth"), - MultiResolverOptionWithResolver(doesntResolve, "eth"), - MultiResolverOptionWithResolver(ethResolve, "eth"), - ), - addr: ethAddr, - result: ethHash, - }, - { - desc: "One TLD resolver doesn't resolve, returns error", - r: NewMultiResolver( - MultiResolverOptionWithResolver(doesntResolve, ""), - MultiResolverOptionWithResolver(ethResolve, "eth"), - ), - addr: ethAddr, - result: ethHash, - }, - { - desc: "One defautl and one TLD resolver, all doesn't resolve, returns error", - r: NewMultiResolver( - MultiResolverOptionWithResolver(doesntResolve, ""), - MultiResolverOptionWithResolver(doesntResolve, "eth"), - ), - addr: ethAddr, - result: ethHash, - err: errors.New(`DNS name not found: "swarm.eth"`), - }, - { - desc: "Two TLD resolvers, both resolve, returns resolved address", - r: NewMultiResolver( - MultiResolverOptionWithResolver(ethResolve, "eth"), - MultiResolverOptionWithResolver(testResolve, "test"), - ), - addr: testAddr, - result: testHash, - }, - { - desc: "One TLD resolver, no default resolver, returns error for different TLD", - r: NewMultiResolver( - MultiResolverOptionWithResolver(ethResolve, "eth"), - ), - addr: testAddr, - err: NewNoResolverError("test"), - }, - } - for _, x := range tests { - t.Run(x.desc, func(t *testing.T) { - res, err := x.r.Resolve(x.addr) - if err == nil { - if x.err != nil { - t.Fatalf("expected error %q, got result %q", x.err, res.Hex()) - } - if res.Hex() != x.result { - t.Fatalf("expected result %q, got %q", x.result, res.Hex()) - } - } else { - if x.err == nil { - t.Fatalf("expected no error, got %q", err) - } - if err.Error() != x.err.Error() { - t.Fatalf("expected error %q, got %q", x.err, err) - } - } - }) - } -} 
diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go deleted file mode 100644 index 2ab56a79c66e..000000000000 --- a/swarm/api/client/client.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package client - -import ( - "archive/tar" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "mime" - "mime/multipart" - "net/http" - "net/textproto" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/XinFinOrg/XDPoSChain/swarm/api" -) - -var ( - DefaultGateway = "http://localhost:8500" - DefaultClient = NewClient(DefaultGateway) -) - -func NewClient(gateway string) *Client { - return &Client{ - Gateway: gateway, - } -} - -// Client wraps interaction with a swarm HTTP gateway. 
-type Client struct { - Gateway string -} - -// UploadRaw uploads raw data to swarm and returns the resulting hash -func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) { - if size <= 0 { - return "", errors.New("data size must be greater than zero") - } - req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/", r) - if err != nil { - return "", err - } - req.ContentLength = size - res, err := http.DefaultClient.Do(req) - if err != nil { - return "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - data, err := io.ReadAll(res.Body) - if err != nil { - return "", err - } - return string(data), nil -} - -// DownloadRaw downloads raw data from swarm -func (c *Client) DownloadRaw(hash string) (io.ReadCloser, error) { - uri := c.Gateway + "/bzz-raw:/" + hash - res, err := http.DefaultClient.Get(uri) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusOK { - res.Body.Close() - return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - return res.Body, nil -} - -// File represents a file in a swarm manifest and is used for uploading and -// downloading content to and from swarm -type File struct { - io.ReadCloser - api.ManifestEntry -} - -// Open opens a local file which can then be passed to client.Upload to upload -// it to swarm -func Open(path string) (*File, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - stat, err := f.Stat() - if err != nil { - f.Close() - return nil, err - } - return &File{ - ReadCloser: f, - ManifestEntry: api.ManifestEntry{ - ContentType: mime.TypeByExtension(filepath.Ext(path)), - Mode: int64(stat.Mode()), - Size: stat.Size(), - ModTime: stat.ModTime(), - }, - }, nil -} - -// Upload uploads a file to swarm and either adds it to an existing manifest -// (if the manifest argument is non-empty) or creates a new manifest containing -// the file, returning the resulting 
manifest hash (the file will then be -// available at bzz://) -func (c *Client) Upload(file *File, manifest string) (string, error) { - if file.Size <= 0 { - return "", errors.New("file size must be greater than zero") - } - return c.TarUpload(manifest, &FileUploader{file}) -} - -// Download downloads a file with the given path from the swarm manifest with -// the given hash (i.e. it gets bzz://) -func (c *Client) Download(hash, path string) (*File, error) { - uri := c.Gateway + "/bzz:/" + hash + "/" + path - res, err := http.DefaultClient.Get(uri) - if err != nil { - return nil, err - } - if res.StatusCode != http.StatusOK { - res.Body.Close() - return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - return &File{ - ReadCloser: res.Body, - ManifestEntry: api.ManifestEntry{ - ContentType: res.Header.Get("Content-Type"), - Size: res.ContentLength, - }, - }, nil -} - -// UploadDirectory uploads a directory tree to swarm and either adds the files -// to an existing manifest (if the manifest argument is non-empty) or creates a -// new manifest, returning the resulting manifest hash (files from the -// directory will then be available at bzz://path/to/file), with -// the file specified in defaultPath being uploaded to the root of the manifest -// (i.e. 
bzz://) -func (c *Client) UploadDirectory(dir, defaultPath, manifest string) (string, error) { - stat, err := os.Stat(dir) - if err != nil { - return "", err - } else if !stat.IsDir() { - return "", fmt.Errorf("not a directory: %s", dir) - } - return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}) -} - -// DownloadDirectory downloads the files contained in a swarm manifest under -// the given path into a local directory (existing files will be overwritten) -func (c *Client) DownloadDirectory(hash, path, destDir string) error { - stat, err := os.Stat(destDir) - if err != nil { - return err - } else if !stat.IsDir() { - return fmt.Errorf("not a directory: %s", destDir) - } - - uri := c.Gateway + "/bzz:/" + hash + "/" + path - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return err - } - req.Header.Set("Accept", "application/x-tar") - res, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - tr := tar.NewReader(res.Body) - for { - hdr, err := tr.Next() - if err == io.EOF { - return nil - } else if err != nil { - return err - } - // ignore the default path file - if hdr.Name == "" { - continue - } - - dstPath := filepath.Join(destDir, filepath.Clean(strings.TrimPrefix(hdr.Name, path))) - if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil { - return err - } - var mode os.FileMode = 0644 - if hdr.Mode > 0 { - mode = os.FileMode(hdr.Mode) - } - dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - n, err := io.Copy(dst, tr) - dst.Close() - if err != nil { - return err - } else if n != hdr.Size { - return fmt.Errorf("expected %s to be %d bytes but got %d", hdr.Name, hdr.Size, n) - } - } -} - -// UploadManifest uploads the given manifest to swarm -func (c *Client) UploadManifest(m *api.Manifest) (string, error) { - data, err := 
json.Marshal(m) - if err != nil { - return "", err - } - return c.UploadRaw(bytes.NewReader(data), int64(len(data))) -} - -// DownloadManifest downloads a swarm manifest -func (c *Client) DownloadManifest(hash string) (*api.Manifest, error) { - res, err := c.DownloadRaw(hash) - if err != nil { - return nil, err - } - defer res.Close() - var manifest api.Manifest - if err := json.NewDecoder(res).Decode(&manifest); err != nil { - return nil, err - } - return &manifest, nil -} - -// List list files in a swarm manifest which have the given prefix, grouping -// common prefixes using "/" as a delimiter. -// -// For example, if the manifest represents the following directory structure: -// -// file1.txt -// file2.txt -// dir1/file3.txt -// dir1/dir2/file4.txt -// -// Then: -// -// - a prefix of "" would return [dir1/, file1.txt, file2.txt] -// - a prefix of "file" would return [file1.txt, file2.txt] -// - a prefix of "dir1/" would return [dir1/dir2/, dir1/file3.txt] -// -// where entries ending with "/" are common prefixes. 
-func (c *Client) List(hash, prefix string) (*api.ManifestList, error) { - res, err := http.DefaultClient.Get(c.Gateway + "/bzz-list:/" + hash + "/" + prefix) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - var list api.ManifestList - if err := json.NewDecoder(res.Body).Decode(&list); err != nil { - return nil, err - } - return &list, nil -} - -// Uploader uploads files to swarm using a provided UploadFn -type Uploader interface { - Upload(UploadFn) error -} - -type UploaderFunc func(UploadFn) error - -func (u UploaderFunc) Upload(upload UploadFn) error { - return u(upload) -} - -// DirectoryUploader uploads all files in a directory, optionally uploading -// a file to the default path -type DirectoryUploader struct { - Dir string - DefaultPath string -} - -// Upload performs the upload of the directory and default path -func (d *DirectoryUploader) Upload(upload UploadFn) error { - if d.DefaultPath != "" { - file, err := Open(d.DefaultPath) - if err != nil { - return err - } - if err := upload(file); err != nil { - return err - } - } - return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - if f.IsDir() { - return nil - } - file, err := Open(path) - if err != nil { - return err - } - relPath, err := filepath.Rel(d.Dir, path) - if err != nil { - return err - } - file.Path = filepath.ToSlash(relPath) - return upload(file) - }) -} - -// FileUploader uploads a single file -type FileUploader struct { - File *File -} - -// Upload performs the upload of the file -func (f *FileUploader) Upload(upload UploadFn) error { - return upload(f.File) -} - -// UploadFn is the type of function passed to an Uploader to perform the upload -// of a single file (for example, a directory uploader would call a provided -// UploadFn for each file in the directory tree) -type UploadFn func(file *File) 
error - -// TarUpload uses the given Uploader to upload files to swarm as a tar stream, -// returning the resulting manifest hash -func (c *Client) TarUpload(hash string, uploader Uploader) (string, error) { - reqR, reqW := io.Pipe() - defer reqR.Close() - req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/x-tar") - - // use 'Expect: 100-continue' so we don't send the request body if - // the server refuses the request - req.Header.Set("Expect", "100-continue") - - tw := tar.NewWriter(reqW) - - // define an UploadFn which adds files to the tar stream - uploadFn := func(file *File) error { - hdr := &tar.Header{ - Name: file.Path, - Mode: file.Mode, - Size: file.Size, - ModTime: file.ModTime, - Xattrs: map[string]string{ - "user.swarm.content-type": file.ContentType, - }, - } - if err := tw.WriteHeader(hdr); err != nil { - return err - } - _, err = io.Copy(tw, file) - return err - } - - // run the upload in a goroutine so we can send the request headers and - // wait for a '100 Continue' response before sending the tar stream - go func() { - err := uploader.Upload(uploadFn) - if err == nil { - err = tw.Close() - } - reqW.CloseWithError(err) - }() - - res, err := http.DefaultClient.Do(req) - if err != nil { - return "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - data, err := io.ReadAll(res.Body) - if err != nil { - return "", err - } - return string(data), nil -} - -// MultipartUpload uses the given Uploader to upload files to swarm as a -// multipart form, returning the resulting manifest hash -func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) { - reqR, reqW := io.Pipe() - defer reqR.Close() - req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR) - if err != nil { - return "", err - } - - // use 'Expect: 100-continue' so we 
don't send the request body if - // the server refuses the request - req.Header.Set("Expect", "100-continue") - - mw := multipart.NewWriter(reqW) - req.Header.Set("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mw.Boundary())) - - // define an UploadFn which adds files to the multipart form - uploadFn := func(file *File) error { - hdr := make(textproto.MIMEHeader) - hdr.Set("Content-Disposition", fmt.Sprintf("form-data; name=%q", file.Path)) - hdr.Set("Content-Type", file.ContentType) - hdr.Set("Content-Length", strconv.FormatInt(file.Size, 10)) - w, err := mw.CreatePart(hdr) - if err != nil { - return err - } - _, err = io.Copy(w, file) - return err - } - - // run the upload in a goroutine so we can send the request headers and - // wait for a '100 Continue' response before sending the multipart form - go func() { - err := uploader.Upload(uploadFn) - if err == nil { - err = mw.Close() - } - reqW.CloseWithError(err) - }() - - res, err := http.DefaultClient.Do(req) - if err != nil { - return "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("unexpected HTTP status: %s", res.Status) - } - data, err := io.ReadAll(res.Body) - if err != nil { - return "", err - } - return string(data), nil -} diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go deleted file mode 100644 index 37bd21e94516..000000000000 --- a/swarm/api/client/client_test.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package client - -import ( - "bytes" - "io" - "os" - "path/filepath" - "reflect" - "sort" - "testing" - - "github.com/XinFinOrg/XDPoSChain/swarm/api" - "github.com/XinFinOrg/XDPoSChain/swarm/testutil" -) - -// TestClientUploadDownloadRaw test uploading and downloading raw data to swarm -func TestClientUploadDownloadRaw(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - client := NewClient(srv.URL) - - // upload some raw data - data := []byte("foo123") - hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data))) - if err != nil { - t.Fatal(err) - } - - // check we can download the same data - res, err := client.DownloadRaw(hash) - if err != nil { - t.Fatal(err) - } - defer res.Close() - gotData, err := io.ReadAll(res) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(gotData, data) { - t.Fatalf("expected downloaded data to be %q, got %q", data, gotData) - } -} - -// TestClientUploadDownloadFiles test uploading and downloading files to swarm -// manifests -func TestClientUploadDownloadFiles(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - client := NewClient(srv.URL) - upload := func(manifest, path string, data []byte) string { - file := &File{ - ReadCloser: io.NopCloser(bytes.NewReader(data)), - ManifestEntry: api.ManifestEntry{ - Path: path, - ContentType: "text/plain", - Size: int64(len(data)), - }, - } - hash, err := client.Upload(file, manifest) - if err != nil { - t.Fatal(err) - } - return hash - } - checkDownload := func(manifest, path string, expected []byte) { - file, err := 
client.Download(manifest, path) - if err != nil { - t.Fatal(err) - } - defer file.Close() - if file.Size != int64(len(expected)) { - t.Fatalf("expected downloaded file to be %d bytes, got %d", len(expected), file.Size) - } - if file.ContentType != "text/plain" { - t.Fatalf("expected downloaded file to have type %q, got %q", "text/plain", file.ContentType) - } - data, err := io.ReadAll(file) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(data, expected) { - t.Fatalf("expected downloaded data to be %q, got %q", expected, data) - } - } - - // upload a file to the root of a manifest - rootData := []byte("some-data") - rootHash := upload("", "", rootData) - - // check we can download the root file - checkDownload(rootHash, "", rootData) - - // upload another file to the same manifest - otherData := []byte("some-other-data") - newHash := upload(rootHash, "some/other/path", otherData) - - // check we can download both files from the new manifest - checkDownload(newHash, "", rootData) - checkDownload(newHash, "some/other/path", otherData) - - // replace the root file with different data - newHash = upload(newHash, "", otherData) - - // check both files have the other data - checkDownload(newHash, "", otherData) - checkDownload(newHash, "some/other/path", otherData) -} - -var testDirFiles = []string{ - "file1.txt", - "file2.txt", - "dir1/file3.txt", - "dir1/file4.txt", - "dir2/file5.txt", - "dir2/dir3/file6.txt", - "dir2/dir4/file7.txt", - "dir2/dir4/file8.txt", -} - -func newTestDirectory(t *testing.T) string { - dir, err := os.MkdirTemp("", "swarm-client-test") - if err != nil { - t.Fatal(err) - } - - for _, file := range testDirFiles { - path := filepath.Join(dir, file) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - os.RemoveAll(dir) - t.Fatalf("error creating dir for %s: %s", path, err) - } - if err := os.WriteFile(path, []byte(file), 0644); err != nil { - os.RemoveAll(dir) - t.Fatalf("error writing file %s: %s", path, err) - } - } - - return 
dir -} - -// TestClientUploadDownloadDirectory tests uploading and downloading a -// directory of files to a swarm manifest -func TestClientUploadDownloadDirectory(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - dir := newTestDirectory(t) - defer os.RemoveAll(dir) - - // upload the directory - client := NewClient(srv.URL) - defaultPath := filepath.Join(dir, testDirFiles[0]) - hash, err := client.UploadDirectory(dir, defaultPath, "") - if err != nil { - t.Fatalf("error uploading directory: %s", err) - } - - // check we can download the individual files - checkDownloadFile := func(path string, expected []byte) { - file, err := client.Download(hash, path) - if err != nil { - t.Fatal(err) - } - defer file.Close() - data, err := io.ReadAll(file) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(data, expected) { - t.Fatalf("expected data to be %q, got %q", expected, data) - } - } - for _, file := range testDirFiles { - checkDownloadFile(file, []byte(file)) - } - - // check we can download the default path - checkDownloadFile("", []byte(testDirFiles[0])) - - // check we can download the directory - tmp, err := os.MkdirTemp("", "swarm-client-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - if err := client.DownloadDirectory(hash, "", tmp); err != nil { - t.Fatal(err) - } - for _, file := range testDirFiles { - data, err := os.ReadFile(filepath.Join(tmp, file)) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(data, []byte(file)) { - t.Fatalf("expected data to be %q, got %q", file, data) - } - } -} - -// TestClientFileList tests listing files in a swarm manifest -func TestClientFileList(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - dir := newTestDirectory(t) - defer os.RemoveAll(dir) - - client := NewClient(srv.URL) - hash, err := client.UploadDirectory(dir, "", "") - if err != nil { - t.Fatalf("error uploading directory: %s", err) - } - - ls := func(prefix string) []string { 
- list, err := client.List(hash, prefix) - if err != nil { - t.Fatal(err) - } - paths := make([]string, 0, len(list.CommonPrefixes)+len(list.Entries)) - paths = append(paths, list.CommonPrefixes...) - for _, entry := range list.Entries { - paths = append(paths, entry.Path) - } - sort.Strings(paths) - return paths - } - - tests := map[string][]string{ - "": {"dir1/", "dir2/", "file1.txt", "file2.txt"}, - "file": {"file1.txt", "file2.txt"}, - "file1": {"file1.txt"}, - "file2.txt": {"file2.txt"}, - "file12": {}, - "dir": {"dir1/", "dir2/"}, - "dir1": {"dir1/"}, - "dir1/": {"dir1/file3.txt", "dir1/file4.txt"}, - "dir1/file": {"dir1/file3.txt", "dir1/file4.txt"}, - "dir1/file3.txt": {"dir1/file3.txt"}, - "dir1/file34": {}, - "dir2/": {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"}, - "dir2/file": {"dir2/file5.txt"}, - "dir2/dir": {"dir2/dir3/", "dir2/dir4/"}, - "dir2/dir3/": {"dir2/dir3/file6.txt"}, - "dir2/dir4/": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"}, - "dir2/dir4/file": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"}, - "dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"}, - "dir2/dir4/file78": {}, - } - for prefix, expected := range tests { - actual := ls(prefix) - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("expected prefix %q to return %v, got %v", prefix, expected, actual) - } - } -} - -// TestClientMultipartUpload tests uploading files to swarm using a multipart -// upload -func TestClientMultipartUpload(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - // define an uploader which uploads testDirFiles with some data - data := []byte("some-data") - uploader := UploaderFunc(func(upload UploadFn) error { - for _, name := range testDirFiles { - file := &File{ - ReadCloser: io.NopCloser(bytes.NewReader(data)), - ManifestEntry: api.ManifestEntry{ - Path: name, - ContentType: "text/plain", - Size: int64(len(data)), - }, - } - if err := upload(file); err != nil { - return err - } - } - return nil - }) - - // upload the files as a 
multipart upload - client := NewClient(srv.URL) - hash, err := client.MultipartUpload("", uploader) - if err != nil { - t.Fatal(err) - } - - // check we can download the individual files - checkDownloadFile := func(path string) { - file, err := client.Download(hash, path) - if err != nil { - t.Fatal(err) - } - defer file.Close() - gotData, err := io.ReadAll(file) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(gotData, data) { - t.Fatalf("expected data to be %q, got %q", data, gotData) - } - } - for _, file := range testDirFiles { - checkDownloadFile(file) - } -} diff --git a/swarm/api/config.go b/swarm/api/config.go deleted file mode 100644 index 7df6096064c2..000000000000 --- a/swarm/api/config.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package api - -import ( - "crypto/ecdsa" - "fmt" - "os" - "path/filepath" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/contracts/ens" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/node" - "github.com/XinFinOrg/XDPoSChain/swarm/network" - "github.com/XinFinOrg/XDPoSChain/swarm/services/swap" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -const ( - DefaultHTTPListenAddr = "127.0.0.1" - DefaultHTTPPort = "8500" -) - -// separate bzz directories -// allow several bzz nodes running in parallel -type Config struct { - // serialised/persisted fields - *storage.StoreParams - *storage.ChunkerParams - *network.HiveParams - Swap *swap.SwapParams - *network.SyncParams - Contract common.Address - EnsRoot common.Address - EnsAPIs []string - Path string - ListenAddr string - Port string - PublicKey string - BzzKey string - NetworkId uint64 - SwapEnabled bool - SyncEnabled bool - SwapApi string - Cors string - BzzAccount string - BootNodes string -} - -//create a default config with all parameters to set to defaults -func NewDefaultConfig() (self *Config) { - - self = &Config{ - StoreParams: storage.NewDefaultStoreParams(), - ChunkerParams: storage.NewChunkerParams(), - HiveParams: network.NewDefaultHiveParams(), - SyncParams: network.NewDefaultSyncParams(), - Swap: swap.NewDefaultSwapParams(), - ListenAddr: DefaultHTTPListenAddr, - Port: DefaultHTTPPort, - Path: node.DefaultDataDir(), - EnsAPIs: nil, - EnsRoot: ens.TestNetAddress, - NetworkId: network.NetworkId, - SwapEnabled: false, - SyncEnabled: true, - SwapApi: "", - BootNodes: "", - } - - return -} - -//some config params need to be initialized after the complete -//config building phase is completed (e.g. 
due to overriding flags) -func (self *Config) Init(prvKey *ecdsa.PrivateKey) { - - address := crypto.PubkeyToAddress(prvKey.PublicKey) - self.Path = filepath.Join(self.Path, "bzz-"+common.Bytes2Hex(address.Bytes())) - err := os.MkdirAll(self.Path, os.ModePerm) - if err != nil { - log.Error(fmt.Sprintf("Error creating root swarm data directory: %v", err)) - return - } - - pubkey := crypto.FromECDSAPub(&prvKey.PublicKey) - pubkeyhex := common.ToHex(pubkey) - keyhex := crypto.Keccak256Hash(pubkey).Hex() - - self.PublicKey = pubkeyhex - self.BzzKey = keyhex - - self.Swap.Init(self.Contract, prvKey) - self.SyncParams.Init(self.Path) - self.HiveParams.Init(self.Path) - self.StoreParams.Init(self.Path) -} diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go deleted file mode 100644 index 18e739cf1521..000000000000 --- a/swarm/api/config_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package api - -import ( - "reflect" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/crypto" -) - -func TestConfig(t *testing.T) { - - var hexprvkey = "65138b2aa745041b372153550584587da326ab440576b2a1191dd95cee30039c" - - prvkey, err := crypto.HexToECDSA(hexprvkey) - if err != nil { - t.Fatalf("failed to load private key: %v", err) - } - - one := NewDefaultConfig() - two := NewDefaultConfig() - - if equal := reflect.DeepEqual(one, two); !equal { - t.Fatal("Two default configs are not equal") - } - - one.Init(prvkey) - - //the init function should set the following fields - if one.BzzKey == "" { - t.Fatal("Expected BzzKey to be set") - } - if one.PublicKey == "" { - t.Fatal("Expected PublicKey to be set") - } - - //the Init function should append subdirs to the given path - if one.Swap.PayProfile.Beneficiary == (common.Address{}) { - t.Fatal("Failed to correctly initialize SwapParams") - } - - if one.SyncParams.RequestDbPath == one.Path { - t.Fatal("Failed to correctly initialize SyncParams") - } - - if one.HiveParams.KadDbPath == one.Path { - t.Fatal("Failed to correctly initialize HiveParams") - } - - if one.StoreParams.ChunkDbPath == one.Path { - t.Fatal("Failed to correctly initialize StoreParams") - } -} diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go deleted file mode 100644 index ca5d2d46415a..000000000000 --- a/swarm/api/filesystem.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import ( - "bufio" - "fmt" - "io" - "net/http" - "os" - "path" - "path/filepath" - "sync" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -const maxParallelFiles = 5 - -type FileSystem struct { - api *Api -} - -func NewFileSystem(api *Api) *FileSystem { - return &FileSystem{api} -} - -// Upload replicates a local directory as a manifest file and uploads it -// using dpa store -// TODO: localpath should point to a manifest -// -// DEPRECATED: Use the HTTP API instead -func (self *FileSystem) Upload(lpath, index string) (string, error) { - var list []*manifestTrieEntry - localpath, err := filepath.Abs(filepath.Clean(lpath)) - if err != nil { - return "", err - } - - f, err := os.Open(localpath) - if err != nil { - return "", err - } - stat, err := f.Stat() - if err != nil { - return "", err - } - - var start int - if stat.IsDir() { - start = len(localpath) - log.Debug(fmt.Sprintf("uploading '%s'", localpath)) - err = filepath.Walk(localpath, func(path string, info os.FileInfo, err error) error { - if (err == nil) && !info.IsDir() { - if len(path) <= start { - return fmt.Errorf("Path is too short") - } - if path[:start] != localpath { - return fmt.Errorf("Path prefix of '%s' does not match localpath '%s'", path, localpath) - } - entry := newManifestTrieEntry(&ManifestEntry{Path: filepath.ToSlash(path)}, nil) - list = append(list, entry) - } - return err - }) - if err != nil { - return "", err - } - } else { - dir := filepath.Dir(localpath) - start = len(dir) - 
if len(localpath) <= start { - return "", fmt.Errorf("Path is too short") - } - if localpath[:start] != dir { - return "", fmt.Errorf("Path prefix of '%s' does not match dir '%s'", localpath, dir) - } - entry := newManifestTrieEntry(&ManifestEntry{Path: filepath.ToSlash(localpath)}, nil) - list = append(list, entry) - } - - cnt := len(list) - errors := make([]error, cnt) - done := make(chan bool, maxParallelFiles) - dcnt := 0 - awg := &sync.WaitGroup{} - - for i, entry := range list { - if i >= dcnt+maxParallelFiles { - <-done - dcnt++ - } - awg.Add(1) - go func(i int, entry *manifestTrieEntry, done chan bool) { - f, err := os.Open(entry.Path) - if err == nil { - stat, _ := f.Stat() - var hash storage.Key - wg := &sync.WaitGroup{} - hash, err = self.api.dpa.Store(f, stat.Size(), wg, nil) - if hash != nil { - list[i].Hash = hash.String() - } - wg.Wait() - awg.Done() - if err == nil { - first512 := make([]byte, 512) - fread, _ := f.ReadAt(first512, 0) - if fread > 0 { - mimeType := http.DetectContentType(first512[:fread]) - if filepath.Ext(entry.Path) == ".css" { - mimeType = "text/css" - } - list[i].ContentType = mimeType - } - } - f.Close() - } - errors[i] = err - done <- true - }(i, entry, done) - } - for dcnt < cnt { - <-done - dcnt++ - } - - trie := &manifestTrie{ - dpa: self.api.dpa, - } - quitC := make(chan bool) - for i, entry := range list { - if errors[i] != nil { - return "", errors[i] - } - entry.Path = RegularSlashes(entry.Path[start:]) - if entry.Path == index { - ientry := newManifestTrieEntry(&ManifestEntry{ - ContentType: entry.ContentType, - }, nil) - ientry.Hash = entry.Hash - trie.addEntry(ientry, quitC) - } - trie.addEntry(entry, quitC) - } - - err2 := trie.recalcAndStore() - var hs string - if err2 == nil { - hs = trie.hash.String() - } - awg.Wait() - return hs, err2 -} - -// Download replicates the manifest basePath structure on the local filesystem -// under localpath -// -// DEPRECATED: Use the HTTP API instead -func (self *FileSystem) 
Download(bzzpath, localpath string) error { - lpath, err := filepath.Abs(filepath.Clean(localpath)) - if err != nil { - return err - } - err = os.MkdirAll(lpath, os.ModePerm) - if err != nil { - return err - } - - //resolving host and port - uri, err := Parse(path.Join("bzz:/", bzzpath)) - if err != nil { - return err - } - key, err := self.api.Resolve(uri) - if err != nil { - return err - } - path := uri.Path - - if len(path) > 0 { - path += "/" - } - - quitC := make(chan bool) - trie, err := loadManifest(self.api.dpa, key, quitC) - if err != nil { - log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)) - return err - } - - type downloadListEntry struct { - key storage.Key - path string - } - - var list []*downloadListEntry - var mde error - - prevPath := lpath - err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) { - log.Trace(fmt.Sprintf("fs.Download: %#v", entry)) - - key = common.Hex2Bytes(entry.Hash) - path := lpath + "/" + suffix - dir := filepath.Dir(path) - if dir != prevPath { - mde = os.MkdirAll(dir, os.ModePerm) - prevPath = dir - } - if (mde == nil) && (path != dir+"/") { - list = append(list, &downloadListEntry{key: key, path: path}) - } - }) - if err != nil { - return err - } - - wg := sync.WaitGroup{} - errC := make(chan error) - done := make(chan bool, maxParallelFiles) - for i, entry := range list { - select { - case done <- true: - wg.Add(1) - case <-quitC: - return fmt.Errorf("aborted") - } - go func(i int, entry *downloadListEntry) { - defer wg.Done() - err := retrieveToFile(quitC, self.api.dpa, entry.key, entry.path) - if err != nil { - select { - case errC <- err: - case <-quitC: - } - return - } - <-done - }(i, entry) - } - go func() { - wg.Wait() - close(errC) - }() - select { - case err = <-errC: - return err - case <-quitC: - return fmt.Errorf("aborted") - } -} - -func retrieveToFile(quitC chan bool, dpa *storage.DPA, key storage.Key, path string) error { - f, err := os.Create(path) // 
TODO: basePath separators - if err != nil { - return err - } - reader := dpa.Retrieve(key) - writer := bufio.NewWriter(f) - size, err := reader.Size(quitC) - if err != nil { - return err - } - if _, err = io.CopyN(writer, reader, size); err != nil { - return err - } - if err := writer.Flush(); err != nil { - return err - } - return f.Close() -} diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go deleted file mode 100644 index 3e40928e3529..000000000000 --- a/swarm/api/filesystem_test.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import ( - "bytes" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -var testDownloadDir, _ = os.MkdirTemp(os.TempDir(), "bzz-test") - -func testFileSystem(t *testing.T, f func(*FileSystem)) { - testApi(t, func(api *Api) { - f(NewFileSystem(api)) - }) -} - -func readPath(t *testing.T, parts ...string) string { - file := filepath.Join(parts...) 
- content, err := os.ReadFile(file) - - if err != nil { - t.Fatalf("unexpected error reading '%v': %v", file, err) - } - return string(content) -} - -func TestApiDirUpload0(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { - api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - content := readPath(t, "testdata", "test0", "index.html") - resp := testGet(t, api, bzzhash, "index.html") - exp := expResponse(content, "text/html; charset=utf-8", 0) - checkResponse(t, resp, exp) - - content = readPath(t, "testdata", "test0", "index.css") - resp = testGet(t, api, bzzhash, "index.css") - exp = expResponse(content, "text/css", 0) - checkResponse(t, resp, exp) - - key := storage.Key(common.Hex2Bytes(bzzhash)) - _, _, _, err = api.Get(key, "") - if err == nil { - t.Fatalf("expected error: %v", err) - } - - downloadDir := filepath.Join(testDownloadDir, "test0") - defer os.RemoveAll(downloadDir) - err = fs.Download(bzzhash, downloadDir) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - newbzzhash, err := fs.Upload(downloadDir, "") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if bzzhash != newbzzhash { - t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash) - } - }) -} - -func TestApiDirUploadModify(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { - api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - - key := storage.Key(common.Hex2Bytes(bzzhash)) - key, err = api.Modify(key, "index.html", "", "") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - index, err := os.ReadFile(filepath.Join("testdata", "test0", "index.html")) - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - wg := &sync.WaitGroup{} - hash, err := 
api.Store(bytes.NewReader(index), int64(len(index)), wg) - wg.Wait() - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - key, err = api.Modify(key, "index2.html", hash.Hex(), "text/html; charset=utf-8") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - key, err = api.Modify(key, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - bzzhash = key.String() - - content := readPath(t, "testdata", "test0", "index.html") - resp := testGet(t, api, bzzhash, "index2.html") - exp := expResponse(content, "text/html; charset=utf-8", 0) - checkResponse(t, resp, exp) - - resp = testGet(t, api, bzzhash, "img/logo.png") - exp = expResponse(content, "text/html; charset=utf-8", 0) - checkResponse(t, resp, exp) - - content = readPath(t, "testdata", "test0", "index.css") - resp = testGet(t, api, bzzhash, "index.css") - exp = expResponse(content, "text/css", 0) - checkResponse(t, resp, exp) - - _, _, _, err = api.Get(key, "") - if err == nil { - t.Errorf("expected error: %v", err) - } - }) -} - -func TestApiDirUploadWithRootFile(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { - api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - - content := readPath(t, "testdata", "test0", "index.html") - resp := testGet(t, api, bzzhash, "") - exp := expResponse(content, "text/html; charset=utf-8", 0) - checkResponse(t, resp, exp) - }) -} - -func TestApiFileUpload(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { - api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - - content := readPath(t, "testdata", "test0", "index.html") - resp := testGet(t, api, bzzhash, "index.html") - exp := expResponse(content, "text/html; charset=utf-8", 0) - 
checkResponse(t, resp, exp) - }) -} - -func TestApiFileUploadWithRootFile(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { - api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html") - if err != nil { - t.Errorf("unexpected error: %v", err) - return - } - - content := readPath(t, "testdata", "test0", "index.html") - resp := testGet(t, api, bzzhash, "") - exp := expResponse(content, "text/html; charset=utf-8", 0) - checkResponse(t, resp, exp) - }) -} diff --git a/swarm/api/http/error.go b/swarm/api/http/error.go deleted file mode 100644 index 5c12457d0793..000000000000 --- a/swarm/api/http/error.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -Show nicely (but simple) formatted HTML error pages (or respond with JSON -if the appropriate `Accept` header is set)) for the http package. 
-*/ -package http - -import ( - "encoding/json" - "fmt" - "html/template" - "net/http" - "strings" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/swarm/api" -) - -//templateMap holds a mapping of an HTTP error code to a template -var templateMap map[int]*template.Template -var caseErrors []CaseError - -//metrics variables -var ( - htmlCounter = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil) - jsonCounter = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil) -) - -//parameters needed for formatting the correct HTML page -type ErrorParams struct { - Msg string - Code int - Timestamp string - template *template.Template - Details template.HTML -} - -//a custom error case struct that would be used to store validators and -//additional error info to display with client responses. -type CaseError struct { - Validator func(*Request) bool - Msg func(*Request) string -} - -//we init the error handling right on boot time, so lookup and http response is fast -func init() { - initErrHandling() -} - -func initErrHandling() { - //pages are saved as strings - get these strings - genErrPage := GetGenericErrorPage() - notFoundPage := GetNotFoundErrorPage() - multipleChoicesPage := GetMultipleChoicesErrorPage() - //map the codes to the available pages - tnames := map[int]string{ - 0: genErrPage, //default - http.StatusBadRequest: genErrPage, - http.StatusNotFound: notFoundPage, - http.StatusMultipleChoices: multipleChoicesPage, - http.StatusInternalServerError: genErrPage, - } - templateMap = make(map[int]*template.Template) - for code, tname := range tnames { - //assign formatted HTML to the code - templateMap[code] = template.Must(template.New(fmt.Sprintf("%d", code)).Parse(tname)) - } - - caseErrors = []CaseError{ - { - Validator: func(r *Request) bool { return r.uri != nil && r.uri.Addr != "" && strings.HasPrefix(r.uri.Addr, "0x") }, - Msg: func(r *Request) 
string { - uriCopy := r.uri - uriCopy.Addr = strings.TrimPrefix(uriCopy.Addr, "0x") - return fmt.Sprintf(`The requested hash seems to be prefixed with '0x'. You will be redirected to the correct URL within 5 seconds.
- Please click here if your browser does not redirect you.`, "/"+uriCopy.String()) - }, - }} -} - -//ValidateCaseErrors is a method that process the request object through certain validators -//that assert if certain conditions are met for further information to log as an error -func ValidateCaseErrors(r *Request) string { - for _, err := range caseErrors { - if err.Validator(r) { - return err.Msg(r) - } - } - - return "" -} - -//ShowMultipeChoices is used when a user requests a resource in a manifest which results -//in ambiguous results. It returns a HTML page with clickable links of each of the entry -//in the manifest which fits the request URI ambiguity. -//For example, if the user requests bzz://read and that manifest contains entries -//"readme.md" and "readinglist.txt", a HTML page is returned with this two links. -//This only applies if the manifest has no default entry -func ShowMultipleChoices(w http.ResponseWriter, r *Request, list api.ManifestList) { - msg := "" - if list.Entries == nil { - ShowError(w, r, "Could not resolve", http.StatusInternalServerError) - return - } - //make links relative - //requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt" - //to get clickable links, need to remove the ambiguous path, i.e. "read" - idx := strings.LastIndex(r.RequestURI, "/") - if idx == -1 { - ShowError(w, r, "Internal Server Error", http.StatusInternalServerError) - return - } - //remove ambiguous part - base := r.RequestURI[:idx+1] - for _, e := range list.Entries { - //create clickable link for each entry - msg += "" + e.Path + "
" - } - respond(w, &r.Request, &ErrorParams{ - Code: http.StatusMultipleChoices, - Details: template.HTML(msg), - Timestamp: time.Now().Format(time.RFC1123), - template: getTemplate(http.StatusMultipleChoices), - }) -} - -//ShowError is used to show an HTML error page to a client. -//If there is an `Accept` header of `application/json`, JSON will be returned instead -//The function just takes a string message which will be displayed in the error page. -//The code is used to evaluate which template will be displayed -//(and return the correct HTTP status code) -func ShowError(w http.ResponseWriter, r *Request, msg string, code int) { - additionalMessage := ValidateCaseErrors(r) - if code == http.StatusInternalServerError { - log.Error(msg) - } - respond(w, &r.Request, &ErrorParams{ - Code: code, - Msg: msg, - Details: template.HTML(additionalMessage), - Timestamp: time.Now().Format(time.RFC1123), - template: getTemplate(code), - }) -} - -//evaluate if client accepts html or json response -func respond(w http.ResponseWriter, r *http.Request, params *ErrorParams) { - w.WriteHeader(params.Code) - if r.Header.Get("Accept") == "application/json" { - respondJson(w, params) - } else { - respondHtml(w, params) - } -} - -//return a HTML page -func respondHtml(w http.ResponseWriter, params *ErrorParams) { - htmlCounter.Inc(1) - err := params.template.Execute(w, params) - if err != nil { - log.Error(err.Error()) - } -} - -//return JSON -func respondJson(w http.ResponseWriter, params *ErrorParams) { - jsonCounter.Inc(1) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(params) -} - -//get the HTML template for a given code -func getTemplate(code int) *template.Template { - if val, tmpl := templateMap[code]; tmpl { - return val - } else { - return templateMap[0] - } -} diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go deleted file mode 100644 index cc9b996ba466..000000000000 --- a/swarm/api/http/error_templates.go 
+++ /dev/null @@ -1,564 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -We use html templates to handle simple but as informative as possible error pages. - -To eliminate circular dependency in case of an error, we don't store error pages on swarm. -We can't save the error pages as html files on disk, or when deploying compiled binaries -they won't be found. - -For this reason we resort to save the HTML error pages as strings, which then can be -parsed by Go's html/template package -*/ -package http - -//This returns the HTML for generic errors -func GetGenericErrorPage() string { - page := ` - - - - - - - - - - - - Swarm::HTTP Error Page - - - - -
- -
-
- -
-
-

There was a problem serving the requested page

-
-
-
{{.Timestamp}}
-
-
- - -
- - - - - - - - - - - - - - - - - - - - - - - -
- Hmmmmm....Swarm was not able to serve your request! -
- Error message: -
- {{.Msg}} -
- {{.Details}} -
- Error code: -
- {{.Code}} -
-
-
- -
-

- Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
- Swarm -

-
- - -
- - - -` - return page -} - -//This returns the HTML for a 404 Not Found error -func GetNotFoundErrorPage() string { - page := ` - - - - - - - - - - - - Swarm::404 HTTP Not Found - - - - -
- -
-
- -
-
-

Resource Not Found

-
-
-
{{.Timestamp}}
-
-
- - -
- - - - - - - - - - - - - - - - - - - - - - - - -
- Unfortunately, the resource you were trying to access could not be found on swarm. -
-
- {{.Msg}} -
- {{.Details}} -
- Error code: -
- {{.Code}} -
-
-
- -
-

- Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
- Swarm -

-
- - -
- - - -` - return page -} - -//This returns the HTML for a page listing disambiguation options -//i.e. if user requested bzz://read and the manifest contains "readme.md" and "readinglist.txt", -//this page is returned with a clickable list the existing disambiguation links in the manifest -func GetMultipleChoicesErrorPage() string { - page := ` - - - - - - - - - - - - Swarm::HTTP Disambiguation Page - - - - -
- -
-
- -
-
-

Swarm: disambiguation

-
-
-
{{.Timestamp}}
-
-
- - -
- - - - - - - - - - - - - - - - - - - - -
- Your request yields ambiguous results! -
- Your request may refer to: -
- {{ .Details}} -
- Error code: -
- {{.Code}} -
-
-
- -
-

- Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
- Swarm -

-
- - -
- - - -` - return page -} diff --git a/swarm/api/http/error_test.go b/swarm/api/http/error_test.go deleted file mode 100644 index 3bb618673d62..000000000000 --- a/swarm/api/http/error_test.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package http_test - -import ( - "encoding/json" - "io" - "net/http" - "strings" - "testing" - - "golang.org/x/net/html" - - "github.com/XinFinOrg/XDPoSChain/swarm/testutil" -) - -func TestError(t *testing.T) { - - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - var resp *http.Response - var respbody []byte - - url := srv.URL + "/this_should_fail_as_no_bzz_protocol_present" - resp, err := http.Get(url) - - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - - if resp.StatusCode != 400 && !strings.Contains(string(respbody), "Invalid URI "/this_should_fail_as_no_bzz_protocol_present": unknown scheme") { - t.Fatalf("Response body does not match, expected: %v, to contain: %v; received code %d, expected code: %d", string(respbody), "Invalid bzz URI: unknown scheme", 400, resp.StatusCode) - } - - _, err = html.Parse(strings.NewReader(string(respbody))) - if err != nil { - t.Fatalf("HTML validation 
failed for error page returned!") - } -} - -func Test404Page(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - var resp *http.Response - var respbody []byte - - url := srv.URL + "/bzz:/1234567890123456789012345678901234567890123456789012345678901234" - resp, err := http.Get(url) - - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - - if resp.StatusCode != 404 || !strings.Contains(string(respbody), "404") { - t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode) - } - - _, err = html.Parse(strings.NewReader(string(respbody))) - if err != nil { - t.Fatalf("HTML validation failed for error page returned!") - } -} - -func Test500Page(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - var resp *http.Response - var respbody []byte - - url := srv.URL + "/bzz:/thisShouldFailWith500Code" - resp, err := http.Get(url) - - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - - if resp.StatusCode != 404 { - t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode) - } - - _, err = html.Parse(strings.NewReader(string(respbody))) - if err != nil { - t.Fatalf("HTML validation failed for error page returned!") - } -} -func Test500PageWith0xHashPrefix(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - var resp *http.Response - var respbody []byte - - url := srv.URL + "/bzz:/0xthisShouldFailWith500CodeAndAHelpfulMessage" - resp, err := http.Get(url) - - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - - if resp.StatusCode != 404 { - t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode) - } - - if !strings.Contains(string(respbody), "The requested hash seems to be prefixed with") { - t.Fatalf("Did not 
receive the expected error message") - } - - _, err = html.Parse(strings.NewReader(string(respbody))) - if err != nil { - t.Fatalf("HTML validation failed for error page returned!") - } -} - -func TestJsonResponse(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - var resp *http.Response - var respbody []byte - - url := srv.URL + "/bzz:/thisShouldFailWith500Code/" - req, err := http.NewRequest("GET", url, nil) - if err != nil { - t.Fatalf("Request failed: %v", err) - } - req.Header.Set("Accept", "application/json") - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Request failed: %v", err) - } - - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - - if resp.StatusCode != 404 { - t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode) - } - - if !isJSON(string(respbody)) { - t.Fatalf("Expected response to be JSON, received invalid JSON: %s", string(respbody)) - } - -} - -func isJSON(s string) bool { - var js map[string]interface{} - return json.Unmarshal([]byte(s), &js) == nil -} diff --git a/swarm/api/http/roundtripper.go b/swarm/api/http/roundtripper.go deleted file mode 100644 index 200d7eb6ef25..000000000000 --- a/swarm/api/http/roundtripper.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package http - -import ( - "fmt" - "net/http" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -/* -http roundtripper to register for bzz url scheme -see https://github.com/XinFinOrg/XDPoSChain/issues/2040 -Usage: - -import ( - "github.com/XinFinOrg/XDPoSChain/common/httpclient" - "github.com/XinFinOrg/XDPoSChain/swarm/api/http" -) -client := httpclient.New() -// for (private) swarm proxy running locally -client.RegisterScheme("bzz", &http.RoundTripper{Port: port}) -client.RegisterScheme("bzz-immutable", &http.RoundTripper{Port: port}) -client.RegisterScheme("bzz-raw", &http.RoundTripper{Port: port}) - -The port you give the Roundtripper is the port the swarm proxy is listening on. -If Host is left empty, localhost is assumed. - -Using a public gateway, the above few lines gives you the leanest -bzz-scheme aware read-only http client. You really only ever need this -if you need go-native swarm access to bzz addresses. -*/ - -type RoundTripper struct { - Host string - Port string -} - -func (self *RoundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) { - host := self.Host - if len(host) == 0 { - host = "localhost" - } - url := fmt.Sprintf("http://%s:%s/%s:/%s/%s", host, self.Port, req.Proto, req.URL.Host, req.URL.Path) - log.Info(fmt.Sprintf("roundtripper: proxying request '%s' to '%s'", req.RequestURI, url)) - reqProxy, err := http.NewRequest(req.Method, url, req.Body) - if err != nil { - return nil, err - } - return http.DefaultClient.Do(reqProxy) -} diff --git a/swarm/api/http/roundtripper_test.go b/swarm/api/http/roundtripper_test.go deleted file mode 100644 index fec282d86643..000000000000 --- a/swarm/api/http/roundtripper_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package http - -import ( - "io" - "net" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" -) - -func TestRoundTripper(t *testing.T) { - serveMux := http.NewServeMux() - serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" { - w.Header().Set("Content-Type", "text/plain") - http.ServeContent(w, r, "", time.Unix(0, 0), strings.NewReader(r.RequestURI)) - } else { - http.Error(w, "Method "+r.Method+" is not supported.", http.StatusMethodNotAllowed) - } - }) - - srv := httptest.NewServer(serveMux) - defer srv.Close() - - host, port, _ := net.SplitHostPort(srv.Listener.Addr().String()) - rt := &RoundTripper{Host: host, Port: port} - trans := &http.Transport{} - trans.RegisterProtocol("bzz", rt) - client := &http.Client{Transport: trans} - resp, err := client.Get("bzz://test.com/path") - if err != nil { - t.Errorf("expected no error, got %v", err) - return - } - - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - content, err := io.ReadAll(resp.Body) - if err != nil { - t.Errorf("expected no error, got %v", err) - return - } - if string(content) != "/HTTP/1.1:/test.com/path" { - t.Errorf("incorrect response from http server: expected '%v', got '%v'", "/HTTP/1.1:/test.com/path", string(content)) - } - -} diff --git 
a/swarm/api/http/server.go b/swarm/api/http/server.go deleted file mode 100644 index c1d9a36ad541..000000000000 --- a/swarm/api/http/server.go +++ /dev/null @@ -1,768 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -A simple http server interface to Swarm -*/ -package http - -import ( - "archive/tar" - "encoding/json" - "errors" - "fmt" - "io" - "mime" - "mime/multipart" - "net/http" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/swarm/api" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" - "github.com/rs/cors" -) - -// setup metrics -var ( - postRawCount = metrics.NewRegisteredCounter("api.http.post.raw.count", nil) - postRawFail = metrics.NewRegisteredCounter("api.http.post.raw.fail", nil) - postFilesCount = metrics.NewRegisteredCounter("api.http.post.files.count", nil) - postFilesFail = metrics.NewRegisteredCounter("api.http.post.files.fail", nil) - deleteCount = metrics.NewRegisteredCounter("api.http.delete.count", nil) - deleteFail = metrics.NewRegisteredCounter("api.http.delete.fail", nil) - getCount = metrics.NewRegisteredCounter("api.http.get.count", nil) - getFail 
= metrics.NewRegisteredCounter("api.http.get.fail", nil) - getFileCount = metrics.NewRegisteredCounter("api.http.get.file.count", nil) - getFileNotFound = metrics.NewRegisteredCounter("api.http.get.file.notfound", nil) - getFileFail = metrics.NewRegisteredCounter("api.http.get.file.fail", nil) - getFilesCount = metrics.NewRegisteredCounter("api.http.get.files.count", nil) - getFilesFail = metrics.NewRegisteredCounter("api.http.get.files.fail", nil) - getListCount = metrics.NewRegisteredCounter("api.http.get.list.count", nil) - getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil) - requestCount = metrics.NewRegisteredCounter("http.request.count", nil) - htmlRequestCount = metrics.NewRegisteredCounter("http.request.html.count", nil) - jsonRequestCount = metrics.NewRegisteredCounter("http.request.json.count", nil) - requestTimer = metrics.NewRegisteredResettingTimer("http.request.time", nil) -) - -// ServerConfig is the basic configuration needed for the HTTP server and also -// includes CORS settings. 
-type ServerConfig struct { - Addr string - CorsString string -} - -// browser API for registering bzz url scheme handlers: -// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers -// electron (chromium) api for registering bzz url scheme handlers: -// https://github.com/atom/electron/blob/master/docs/api/protocol.md - -// starts up http server -func StartHttpServer(api *api.Api, config *ServerConfig) { - var allowedOrigins []string - for _, domain := range strings.Split(config.CorsString, ",") { - allowedOrigins = append(allowedOrigins, strings.TrimSpace(domain)) - } - c := cors.New(cors.Options{ - AllowedOrigins: allowedOrigins, - AllowedMethods: []string{"POST", "GET", "DELETE", "PATCH", "PUT"}, - MaxAge: 600, - AllowedHeaders: []string{"*"}, - }) - hdlr := c.Handler(NewServer(api)) - - go http.ListenAndServe(config.Addr, hdlr) -} - -func NewServer(api *api.Api) *Server { - return &Server{api} -} - -type Server struct { - api *api.Api -} - -// Request wraps http.Request and also includes the parsed bzz URI -type Request struct { - http.Request - - uri *api.URI -} - -// HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request -// body in swarm and returns the resulting storage key as a text/plain response -func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { - postRawCount.Inc(1) - if r.uri.Path != "" { - postRawFail.Inc(1) - s.BadRequest(w, r, "raw POST request cannot contain a path") - return - } - - if r.Header.Get("Content-Length") == "" { - postRawFail.Inc(1) - s.BadRequest(w, r, "missing Content-Length header in request") - return - } - - key, err := s.api.Store(r.Body, r.ContentLength, nil) - if err != nil { - postRawFail.Inc(1) - s.Error(w, r, err) - return - } - s.logDebug("content for %s stored", key.Log()) - - w.Header().Set("Content-Type", "text/plain") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, key) -} - -// HandlePostFiles handles a POST request (or deprecated PUT request) to -// bzz:// which 
contains either a single file or multiple files -// (either a tar archive or multipart form), adds those files either to an -// existing manifest or to a new manifest under and returns the -// resulting manifest hash as a text/plain response -func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { - postFilesCount.Inc(1) - contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) - if err != nil { - postFilesFail.Inc(1) - s.BadRequest(w, r, err.Error()) - return - } - - var key storage.Key - if r.uri.Addr != "" { - key, err = s.api.Resolve(r.uri) - if err != nil { - postFilesFail.Inc(1) - s.Error(w, r, fmt.Errorf("error resolving %s: %s", r.uri.Addr, err)) - return - } - } else { - key, err = s.api.NewManifest() - if err != nil { - postFilesFail.Inc(1) - s.Error(w, r, err) - return - } - } - - newKey, err := s.updateManifest(key, func(mw *api.ManifestWriter) error { - switch contentType { - - case "application/x-tar": - return s.handleTarUpload(r, mw) - - case "multipart/form-data": - return s.handleMultipartUpload(r, params["boundary"], mw) - - default: - return s.handleDirectUpload(r, mw) - } - }) - if err != nil { - postFilesFail.Inc(1) - s.Error(w, r, fmt.Errorf("error creating manifest: %s", err)) - return - } - - w.Header().Set("Content-Type", "text/plain") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, newKey) -} - -func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { - tr := tar.NewReader(req.Body) - for { - hdr, err := tr.Next() - if err == io.EOF { - return nil - } else if err != nil { - return fmt.Errorf("error reading tar stream: %s", err) - } - - // only store regular files - if !hdr.FileInfo().Mode().IsRegular() { - continue - } - - // add the entry under the path from the request - path := path.Join(req.uri.Path, hdr.Name) - entry := &api.ManifestEntry{ - Path: path, - ContentType: hdr.Xattrs["user.swarm.content-type"], - Mode: hdr.Mode, - Size: hdr.Size, - ModTime: hdr.ModTime, - } - 
s.logDebug("adding %s (%d bytes) to new manifest", entry.Path, entry.Size) - contentKey, err := mw.AddEntry(tr, entry) - if err != nil { - return fmt.Errorf("error adding manifest entry from tar stream: %s", err) - } - s.logDebug("content for %s stored", contentKey.Log()) - } -} - -func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.ManifestWriter) error { - mr := multipart.NewReader(req.Body, boundary) - for { - part, err := mr.NextPart() - if err == io.EOF { - return nil - } else if err != nil { - return fmt.Errorf("error reading multipart form: %s", err) - } - - var size int64 - var reader io.Reader = part - if contentLength := part.Header.Get("Content-Length"); contentLength != "" { - size, err = strconv.ParseInt(contentLength, 10, 64) - if err != nil { - return fmt.Errorf("error parsing multipart content length: %s", err) - } - reader = part - } else { - // copy the part to a tmp file to get its size - tmp, err := os.CreateTemp("", "swarm-multipart") - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - defer tmp.Close() - size, err = io.Copy(tmp, part) - if err != nil { - return fmt.Errorf("error copying multipart content: %s", err) - } - if _, err := tmp.Seek(0, io.SeekStart); err != nil { - return fmt.Errorf("error copying multipart content: %s", err) - } - reader = tmp - } - - // add the entry under the path from the request - name := part.FileName() - if name == "" { - name = part.FormName() - } - path := path.Join(req.uri.Path, name) - entry := &api.ManifestEntry{ - Path: path, - ContentType: part.Header.Get("Content-Type"), - Size: size, - ModTime: time.Now(), - } - s.logDebug("adding %s (%d bytes) to new manifest", entry.Path, entry.Size) - contentKey, err := mw.AddEntry(reader, entry) - if err != nil { - return fmt.Errorf("error adding manifest entry from multipart form: %s", err) - } - s.logDebug("content for %s stored", contentKey.Log()) - } -} - -func (s *Server) handleDirectUpload(req *Request, mw 
*api.ManifestWriter) error { - key, err := mw.AddEntry(req.Body, &api.ManifestEntry{ - Path: req.uri.Path, - ContentType: req.Header.Get("Content-Type"), - Mode: 0644, - Size: req.ContentLength, - ModTime: time.Now(), - }) - if err != nil { - return err - } - s.logDebug("content for %s stored", key.Log()) - return nil -} - -// HandleDelete handles a DELETE request to bzz://, removes -// from and returns the resulting manifest hash as a -// text/plain response -func (s *Server) HandleDelete(w http.ResponseWriter, r *Request) { - deleteCount.Inc(1) - key, err := s.api.Resolve(r.uri) - if err != nil { - deleteFail.Inc(1) - s.Error(w, r, fmt.Errorf("error resolving %s: %s", r.uri.Addr, err)) - return - } - - newKey, err := s.updateManifest(key, func(mw *api.ManifestWriter) error { - s.logDebug("removing %s from manifest %s", r.uri.Path, key.Log()) - return mw.RemoveEntry(r.uri.Path) - }) - if err != nil { - deleteFail.Inc(1) - s.Error(w, r, fmt.Errorf("error updating manifest: %s", err)) - return - } - - w.Header().Set("Content-Type", "text/plain") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, newKey) -} - -// HandleGet handles a GET request to -// - bzz-raw:// and responds with the raw content stored at the -// given storage key -// - bzz-hash:// and responds with the hash of the content stored -// at the given storage key as a text/plain response -func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { - getCount.Inc(1) - key, err := s.api.Resolve(r.uri) - if err != nil { - getFail.Inc(1) - s.NotFound(w, r, fmt.Errorf("error resolving %s: %s", r.uri.Addr, err)) - return - } - - // if path is set, interpret as a manifest and return the - // raw entry at the given path - if r.uri.Path != "" { - walker, err := s.api.NewManifestWalker(key, nil) - if err != nil { - getFail.Inc(1) - s.BadRequest(w, r, fmt.Sprintf("%s is not a manifest", key)) - return - } - var entry *api.ManifestEntry - walker.Walk(func(e *api.ManifestEntry) error { - // if the entry matches the 
path, set entry and stop - // the walk - if e.Path == r.uri.Path { - entry = e - // return an error to cancel the walk - return errors.New("found") - } - - // ignore non-manifest files - if e.ContentType != api.ManifestType { - return nil - } - - // if the manifest's path is a prefix of the - // requested path, recurse into it by returning - // nil and continuing the walk - if strings.HasPrefix(r.uri.Path, e.Path) { - return nil - } - - return api.SkipManifest - }) - if entry == nil { - getFail.Inc(1) - s.NotFound(w, r, fmt.Errorf("Manifest entry could not be loaded")) - return - } - key = storage.Key(common.Hex2Bytes(entry.Hash)) - } - - // check the root chunk exists by retrieving the file's size - reader := s.api.Retrieve(key) - if _, err := reader.Size(nil); err != nil { - getFail.Inc(1) - s.NotFound(w, r, fmt.Errorf("Root chunk not found %s: %s", key, err)) - return - } - - switch { - case r.uri.Raw() || r.uri.DeprecatedRaw(): - // allow the request to overwrite the content type using a query - // parameter - contentType := "application/octet-stream" - if typ := r.URL.Query().Get("content_type"); typ != "" { - contentType = typ - } - w.Header().Set("Content-Type", contentType) - - http.ServeContent(w, &r.Request, "", time.Now(), reader) - case r.uri.Hash(): - w.Header().Set("Content-Type", "text/plain") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, key) - } -} - -// HandleGetFiles handles a GET request to bzz:/ with an Accept -// header of "application/x-tar" and returns a tar stream of all files -// contained in the manifest -func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { - getFilesCount.Inc(1) - if r.uri.Path != "" { - getFilesFail.Inc(1) - s.BadRequest(w, r, "files request cannot contain a path") - return - } - - key, err := s.api.Resolve(r.uri) - if err != nil { - getFilesFail.Inc(1) - s.NotFound(w, r, fmt.Errorf("error resolving %s: %s", r.uri.Addr, err)) - return - } - - walker, err := s.api.NewManifestWalker(key, nil) - if err != 
nil { - getFilesFail.Inc(1) - s.Error(w, r, err) - return - } - - tw := tar.NewWriter(w) - defer tw.Close() - w.Header().Set("Content-Type", "application/x-tar") - w.WriteHeader(http.StatusOK) - - err = walker.Walk(func(entry *api.ManifestEntry) error { - // ignore manifests (walk will recurse into them) - if entry.ContentType == api.ManifestType { - return nil - } - - // retrieve the entry's key and size - reader := s.api.Retrieve(storage.Key(common.Hex2Bytes(entry.Hash))) - size, err := reader.Size(nil) - if err != nil { - return err - } - - // write a tar header for the entry - hdr := &tar.Header{ - Name: entry.Path, - Mode: entry.Mode, - Size: size, - ModTime: entry.ModTime, - Xattrs: map[string]string{ - "user.swarm.content-type": entry.ContentType, - }, - } - if err := tw.WriteHeader(hdr); err != nil { - return err - } - - // copy the file into the tar stream - n, err := io.Copy(tw, io.LimitReader(reader, hdr.Size)) - if err != nil { - return err - } else if n != size { - return fmt.Errorf("error writing %s: expected %d bytes but sent %d", entry.Path, size, n) - } - - return nil - }) - if err != nil { - getFilesFail.Inc(1) - s.logError("error generating tar stream: %s", err) - } -} - -// HandleGetList handles a GET request to bzz-list:// and returns -// a list of all files contained in under grouped into -// common prefixes using "/" as a delimiter -func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { - getListCount.Inc(1) - // ensure the root path has a trailing slash so that relative URLs work - if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) - return - } - - key, err := s.api.Resolve(r.uri) - if err != nil { - getListFail.Inc(1) - s.NotFound(w, r, fmt.Errorf("error resolving %s: %s", r.uri.Addr, err)) - return - } - - list, err := s.getManifestList(key, r.uri.Path) - - if err != nil { - getListFail.Inc(1) - s.Error(w, r, err) - return - } - - // if the 
client wants HTML (e.g. a browser) then render the list as a - // HTML index with relative URLs - if strings.Contains(r.Header.Get("Accept"), "text/html") { - w.Header().Set("Content-Type", "text/html") - err := htmlListTemplate.Execute(w, &htmlListData{ - URI: &api.URI{ - Scheme: "bzz", - Addr: r.uri.Addr, - Path: r.uri.Path, - }, - List: &list, - }) - if err != nil { - getListFail.Inc(1) - s.logError("error rendering list HTML: %s", err) - } - return - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(&list) -} - -func (s *Server) getManifestList(key storage.Key, prefix string) (list api.ManifestList, err error) { - walker, err := s.api.NewManifestWalker(key, nil) - if err != nil { - return - } - - err = walker.Walk(func(entry *api.ManifestEntry) error { - // handle non-manifest files - if entry.ContentType != api.ManifestType { - // ignore the file if it doesn't have the specified prefix - if !strings.HasPrefix(entry.Path, prefix) { - return nil - } - - // if the path after the prefix contains a slash, add a - // common prefix to the list, otherwise add the entry - suffix := strings.TrimPrefix(entry.Path, prefix) - if index := strings.Index(suffix, "/"); index > -1 { - list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1]) - return nil - } - if entry.Path == "" { - entry.Path = "/" - } - list.Entries = append(list.Entries, entry) - return nil - } - - // if the manifest's path is a prefix of the specified prefix - // then just recurse into the manifest by returning nil and - // continuing the walk - if strings.HasPrefix(prefix, entry.Path) { - return nil - } - - // if the manifest's path has the specified prefix, then if the - // path after the prefix contains a slash, add a common prefix - // to the list and skip the manifest, otherwise recurse into - // the manifest by returning nil and continuing the walk - if strings.HasPrefix(entry.Path, prefix) { - suffix := strings.TrimPrefix(entry.Path, prefix) - if 
index := strings.Index(suffix, "/"); index > -1 { - list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1]) - return api.SkipManifest - } - return nil - } - - // the manifest neither has the prefix or needs recursing in to - // so just skip it - return api.SkipManifest - }) - - return list, nil -} - -// HandleGetFile handles a GET request to bzz:/// and responds -// with the content of the file at from the given -func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { - getFileCount.Inc(1) - // ensure the root path has a trailing slash so that relative URLs work - if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) - return - } - - key, err := s.api.Resolve(r.uri) - if err != nil { - getFileFail.Inc(1) - s.NotFound(w, r, fmt.Errorf("error resolving %s: %s", r.uri.Addr, err)) - return - } - - reader, contentType, status, err := s.api.Get(key, r.uri.Path) - if err != nil { - switch status { - case http.StatusNotFound: - getFileNotFound.Inc(1) - s.NotFound(w, r, err) - default: - getFileFail.Inc(1) - s.Error(w, r, err) - } - return - } - - //the request results in ambiguous files - //e.g. /read with readme.md and readinglist.txt available in manifest - if status == http.StatusMultipleChoices { - list, err := s.getManifestList(key, r.uri.Path) - - if err != nil { - getFileFail.Inc(1) - s.Error(w, r, err) - return - } - - s.logDebug(fmt.Sprintf("Multiple choices! 
--> %v", list)) - //show a nice page links to available entries - ShowMultipleChoices(w, r, list) - return - } - - // check the root chunk exists by retrieving the file's size - if _, err := reader.Size(nil); err != nil { - getFileNotFound.Inc(1) - s.NotFound(w, r, fmt.Errorf("File not found %s: %s", r.uri, err)) - return - } - - w.Header().Set("Content-Type", contentType) - - http.ServeContent(w, &r.Request, "", time.Now(), reader) -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if metrics.Enabled { - //The increment for request count and request timer themselves have a flag check - //for metrics.Enabled. Nevertheless, we introduce the if here because we - //are looking into the header just to see what request type it is (json/html). - //So let's take advantage and add all metrics related stuff here - requestCount.Inc(1) - defer requestTimer.UpdateSince(time.Now()) - if r.Header.Get("Accept") == "application/json" { - jsonRequestCount.Inc(1) - } else { - htmlRequestCount.Inc(1) - } - } - s.logDebug("HTTP %s request URL: '%s', Host: '%s', Path: '%s', Referer: '%s', Accept: '%s'", r.Method, r.RequestURI, r.URL.Host, r.URL.Path, r.Referer(), r.Header.Get("Accept")) - - if r.RequestURI == "/" && strings.Contains(r.Header.Get("Accept"), "text/html") { - - err := landingPageTemplate.Execute(w, nil) - if err != nil { - s.logError("error rendering landing page: %s", err) - } - return - } - - uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/")) - req := &Request{Request: *r, uri: uri} - if err != nil { - s.logError("Invalid URI %q: %s", r.URL.Path, err) - s.BadRequest(w, req, fmt.Sprintf("Invalid URI %q: %s", r.URL.Path, err)) - return - } - s.logDebug("%s request received for %s", r.Method, uri) - - switch r.Method { - case "POST": - if uri.Raw() || uri.DeprecatedRaw() { - s.HandlePostRaw(w, req) - } else { - s.HandlePostFiles(w, req) - } - - case "PUT": - // DEPRECATED: - // clients should send a POST request (the request creates a - // 
new manifest leaving the existing one intact, so it isn't - // strictly a traditional PUT request which replaces content - // at a URI, and POST is more ubiquitous) - if uri.Raw() || uri.DeprecatedRaw() { - ShowError(w, req, fmt.Sprintf("No PUT to %s allowed.", uri), http.StatusBadRequest) - return - } else { - s.HandlePostFiles(w, req) - } - - case "DELETE": - if uri.Raw() || uri.DeprecatedRaw() { - ShowError(w, req, fmt.Sprintf("No DELETE to %s allowed.", uri), http.StatusBadRequest) - return - } - s.HandleDelete(w, req) - - case "GET": - if uri.Raw() || uri.Hash() || uri.DeprecatedRaw() { - s.HandleGet(w, req) - return - } - - if uri.List() { - s.HandleGetList(w, req) - return - } - - if r.Header.Get("Accept") == "application/x-tar" { - s.HandleGetFiles(w, req) - return - } - - s.HandleGetFile(w, req) - - default: - ShowError(w, req, fmt.Sprintf("Method "+r.Method+" is not supported.", uri), http.StatusMethodNotAllowed) - - } -} - -func (s *Server) updateManifest(key storage.Key, update func(mw *api.ManifestWriter) error) (storage.Key, error) { - mw, err := s.api.NewManifestWriter(key, nil) - if err != nil { - return nil, err - } - - if err := update(mw); err != nil { - return nil, err - } - - key, err = mw.Store() - if err != nil { - return nil, err - } - s.logDebug("generated manifest %s", key) - return key, nil -} - -func (s *Server) logDebug(format string, v ...interface{}) { - log.Debug(fmt.Sprintf("[BZZ] HTTP: "+format, v...)) -} - -func (s *Server) logError(format string, v ...interface{}) { - log.Error(fmt.Sprintf("[BZZ] HTTP: "+format, v...)) -} - -func (s *Server) BadRequest(w http.ResponseWriter, r *Request, reason string) { - ShowError(w, r, fmt.Sprintf("Bad request %s %s: %s", r.Request.Method, r.uri, reason), http.StatusBadRequest) -} - -func (s *Server) Error(w http.ResponseWriter, r *Request, err error) { - ShowError(w, r, fmt.Sprintf("Error serving %s %s: %s", r.Request.Method, r.uri, err), http.StatusInternalServerError) -} - -func (s *Server) 
NotFound(w http.ResponseWriter, r *Request, err error) { - ShowError(w, r, fmt.Sprintf("NOT FOUND error serving %s %s: %s", r.Request.Method, r.uri, err), http.StatusNotFound) -} diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go deleted file mode 100644 index 9983fe5e033a..000000000000 --- a/swarm/api/http/server_test.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package http_test - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/swarm/api" - swarm "github.com/XinFinOrg/XDPoSChain/swarm/api/client" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" - "github.com/XinFinOrg/XDPoSChain/swarm/testutil" -) - -func TestBzzGetPath(t *testing.T) { - - var err error - - testmanifest := []string{ - `{"entries":[{"path":"a/","hash":"674af7073604ebfc0282a4ab21e5ef1a3c22913866879ebc0816f8a89896b2ed","contentType":"application/bzz-manifest+json","status":0}]}`, - `{"entries":[{"path":"a","hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","contentType":"","status":0},{"path":"b/","hash":"0a87b1c3e4bf013686cdf107ec58590f2004610ee58cc2240f26939f691215f5","contentType":"application/bzz-manifest+json","status":0}]}`, - `{"entries":[{"path":"b","hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","contentType":"","status":0},{"path":"c","hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","contentType":"","status":0}]}`, - } - - testrequests := make(map[string]int) - testrequests["/"] = 0 - testrequests["/a/"] = 1 - testrequests["/a/b/"] = 2 - testrequests["/x"] = 0 - testrequests[""] = 0 - - expectedfailrequests := []string{"", "/x"} - - reader := [3]*bytes.Reader{} - - key := [3]storage.Key{} - - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - wg := &sync.WaitGroup{} - - for i, mf := range testmanifest { - reader[i] = bytes.NewReader([]byte(mf)) - key[i], err = srv.Dpa.Store(reader[i], int64(len(mf)), wg, nil) - if err != nil { - t.Fatal(err) - } - wg.Wait() - } - - _, err = http.Get(srv.URL + "/bzz-raw:/" + common.ToHex(key[0])[2:] + "/a") - if err != nil { - t.Fatalf("Failed to connect to proxy: %v", err) - } - - for k, v := range testrequests { - var resp *http.Response - var respbody []byte - - url := srv.URL + "/bzz-raw:/" - if k[:] != 
"" { - url += common.ToHex(key[0])[2:] + "/" + k[1:] + "?content_type=text/plain" - } - resp, err = http.Get(url) - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - - if string(respbody) != testmanifest[v] { - isexpectedfailrequest := false - - for _, r := range expectedfailrequests { - if k[:] == r { - isexpectedfailrequest = true - } - } - if !isexpectedfailrequest { - t.Fatalf("Response body does not match, expected: %v, got %v", testmanifest[v], string(respbody)) - } - } - } - - for k, v := range testrequests { - var resp *http.Response - var respbody []byte - - url := srv.URL + "/bzz-hash:/" - if k[:] != "" { - url += common.ToHex(key[0])[2:] + "/" + k[1:] - } - resp, err = http.Get(url) - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Read request body: %v", err) - } - - if string(respbody) != key[v].String() { - isexpectedfailrequest := false - - for _, r := range expectedfailrequests { - if k[:] == r { - isexpectedfailrequest = true - } - } - if !isexpectedfailrequest { - t.Fatalf("Response body does not match, expected: %v, got %v", key[v], string(respbody)) - } - } - } - - for _, c := range []struct { - path string - json string - html string - }{ - { - path: "/", - json: `{"common_prefixes":["a/"]}`, - html: "\n\n\n \n \n\t\t\n\tSwarm index of bzz:/262e5c08c03c2789b6daef487dfa14b4d132f5340d781a3ecb1d5122ab65640c/\n\n\n\n

Swarm index of bzz:/262e5c08c03c2789b6daef487dfa14b4d132f5340d781a3ecb1d5122ab65640c/

\n
\n \n \n \n\t\n\t\n\t\n \n \n\n \n \n\t\n\t \n\t \n\t \n\t\n \n\n \n
PathTypeSize
a/DIR-
\n
\n\n", - }, - { - path: "/a/", - json: `{"common_prefixes":["a/b/"],"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/a","mod_time":"0001-01-01T00:00:00Z"}]}`, - html: "\n\n\n \n \n\t\t\n\tSwarm index of bzz:/262e5c08c03c2789b6daef487dfa14b4d132f5340d781a3ecb1d5122ab65640c/a/\n\n\n\n

Swarm index of bzz:/262e5c08c03c2789b6daef487dfa14b4d132f5340d781a3ecb1d5122ab65640c/a/

\n
\n \n \n \n\t\n\t\n\t\n \n \n\n \n \n\t\n\t \n\t \n\t \n\t\n \n\n \n\t\n\t \n\t \n\t \n\t\n \n
PathTypeSize
b/DIR-
a0
\n
\n\n", - }, - { - path: "/a/b/", - json: `{"entries":[{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/b","mod_time":"0001-01-01T00:00:00Z"},{"hash":"011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce","path":"a/b/c","mod_time":"0001-01-01T00:00:00Z"}]}`, - html: "\n\n\n \n \n\t\t\n\tSwarm index of bzz:/262e5c08c03c2789b6daef487dfa14b4d132f5340d781a3ecb1d5122ab65640c/a/b/\n\n\n\n

Swarm index of bzz:/262e5c08c03c2789b6daef487dfa14b4d132f5340d781a3ecb1d5122ab65640c/a/b/

\n
\n \n \n \n\t\n\t\n\t\n \n \n\n \n \n\n \n\t\n\t \n\t \n\t \n\t\n \n\t\n\t \n\t \n\t \n\t\n \n
PathTypeSize
b0
c0
\n
\n\n", - }, - { - path: "/x", - }, - { - path: "", - }, - } { - k := c.path - url := srv.URL + "/bzz-list:/" - if k[:] != "" { - url += common.ToHex(key[0])[2:] + "/" + k[1:] - } - t.Run("json list "+c.path, func(t *testing.T) { - resp, err := http.Get(url) - if err != nil { - t.Fatalf("HTTP request: %v", err) - } - defer resp.Body.Close() - respbody, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Read response body: %v", err) - } - - body := strings.TrimSpace(string(respbody)) - if body != c.json { - isexpectedfailrequest := false - - for _, r := range expectedfailrequests { - if k[:] == r { - isexpectedfailrequest = true - } - } - if !isexpectedfailrequest { - t.Errorf("Response list body %q does not match, expected: %v, got %v", k, c.json, body) - } - } - }) - t.Run("html list "+c.path, func(t *testing.T) { - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - t.Fatalf("New request: %v", err) - } - req.Header.Set("Accept", "text/html") - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("HTTP request: %v", err) - } - defer resp.Body.Close() - respbody, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Read response body: %v", err) - } - - if string(respbody) != c.html { - isexpectedfailrequest := false - - for _, r := range expectedfailrequests { - if k[:] == r { - isexpectedfailrequest = true - } - } - if !isexpectedfailrequest { - t.Errorf("Response list body %q does not match, expected: %q, got %q", k, c.html, string(respbody)) - } - } - }) - } - - nonhashtests := []string{ - srv.URL + "/bzz:/name", - srv.URL + "/bzz-immutable:/nonhash", - srv.URL + "/bzz-raw:/nonhash", - srv.URL + "/bzz-list:/nonhash", - srv.URL + "/bzz-hash:/nonhash", - } - - nonhashresponses := []string{ - "error resolving name: no DNS to resolve name: "name"", - "error resolving nonhash: immutable address not a content hash: "nonhash"", - "error resolving nonhash: no DNS to resolve name: "nonhash"", - "error resolving nonhash: 
no DNS to resolve name: "nonhash"", - "error resolving nonhash: no DNS to resolve name: "nonhash"", - } - - for i, url := range nonhashtests { - var resp *http.Response - var respbody []byte - - resp, err = http.Get(url) - - if err != nil { - t.Fatalf("Request failed: %v", err) - } - defer resp.Body.Close() - respbody, err = io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("ReadAll failed: %v", err) - } - if !strings.Contains(string(respbody), nonhashresponses[i]) { - t.Fatalf("Non-Hash response body does not match, expected: %v, got: %v", nonhashresponses[i], string(respbody)) - } - } - -} - -// TestBzzRootRedirect tests that getting the root path of a manifest without -// a trailing slash gets redirected to include the trailing slash so that -// relative URLs work as expected. -func TestBzzRootRedirect(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) - defer srv.Close() - - // create a manifest with some data at the root path - client := swarm.NewClient(srv.URL) - data := []byte("data") - file := &swarm.File{ - ReadCloser: io.NopCloser(bytes.NewReader(data)), - ManifestEntry: api.ManifestEntry{ - Path: "", - ContentType: "text/plain", - Size: int64(len(data)), - }, - } - hash, err := client.Upload(file, "") - if err != nil { - t.Fatal(err) - } - - // define a CheckRedirect hook which ensures there is only a single - // redirect to the correct URL - redirected := false - httpClient := http.Client{ - CheckRedirect: func(req *http.Request, via []*http.Request) error { - if redirected { - return errors.New("too many redirects") - } - redirected = true - expectedPath := "/bzz:/" + hash + "/" - if req.URL.Path != expectedPath { - return fmt.Errorf("expected redirect to %q, got %q", expectedPath, req.URL.Path) - } - return nil - }, - } - - // perform the GET request and assert the response - res, err := httpClient.Get(srv.URL + "/bzz:/" + hash) - if err != nil { - t.Fatal(err) - } - defer res.Body.Close() - if !redirected { - t.Fatal("expected GET /bzz:/ to 
redirect to /bzz:// but it didn't") - } - gotData, err := io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(gotData, data) { - t.Fatalf("expected response to equal %q, got %q", data, gotData) - } -} diff --git a/swarm/api/http/templates.go b/swarm/api/http/templates.go deleted file mode 100644 index cddcfe4910ea..000000000000 --- a/swarm/api/http/templates.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package http - -import ( - "html/template" - "path" - - "github.com/XinFinOrg/XDPoSChain/swarm/api" -) - -type htmlListData struct { - URI *api.URI - List *api.ManifestList -} - -var htmlListTemplate = template.Must(template.New("html-list").Funcs(template.FuncMap{"basename": path.Base}).Parse(` - - - - - - - Swarm index of {{ .URI }} - - - -

Swarm index of {{ .URI }}

-
- - - - - - - - - - - {{ range .List.CommonPrefixes }} - - - - - - {{ end }} - - {{ range .List.Entries }} - - - - - - {{ end }} -
PathTypeSize
{{ basename . }}/DIR-
{{ basename .Path }}{{ .ContentType }}{{ .Size }}
-
- -`[1:])) - -var landingPageTemplate = template.Must(template.New("landingPage").Parse(` - - - - - - - - - Swarm :: Welcome to Swarm - - - - -
-
- -
-
-

Welcome to Swarm

-
-
- - - - -

Enter the hash or ENS of a Swarm-hosted file below:

- - - -
-
-

- Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
- Swarm -

-
- - - -`[1:])) diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go deleted file mode 100644 index f540de4301f0..000000000000 --- a/swarm/api/manifest.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -const ( - ManifestType = "application/bzz-manifest+json" -) - -// Manifest represents a swarm manifest -type Manifest struct { - Entries []ManifestEntry `json:"entries,omitempty"` -} - -// ManifestEntry represents an entry in a swarm manifest -type ManifestEntry struct { - Hash string `json:"hash,omitempty"` - Path string `json:"path,omitempty"` - ContentType string `json:"contentType,omitempty"` - Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` - Status int `json:"status,omitempty"` -} - -// ManifestList represents the result of listing files in a manifest -type ManifestList struct { - CommonPrefixes []string `json:"common_prefixes,omitempty"` - Entries []*ManifestEntry 
`json:"entries,omitempty"` -} - -// NewManifest creates and stores a new, empty manifest -func (a *Api) NewManifest() (storage.Key, error) { - var manifest Manifest - data, err := json.Marshal(&manifest) - if err != nil { - return nil, err - } - return a.Store(bytes.NewReader(data), int64(len(data)), &sync.WaitGroup{}) -} - -// ManifestWriter is used to add and remove entries from an underlying manifest -type ManifestWriter struct { - api *Api - trie *manifestTrie - quitC chan bool -} - -func (a *Api) NewManifestWriter(key storage.Key, quitC chan bool) (*ManifestWriter, error) { - trie, err := loadManifest(a.dpa, key, quitC) - if err != nil { - return nil, fmt.Errorf("error loading manifest %s: %s", key, err) - } - return &ManifestWriter{a, trie, quitC}, nil -} - -// AddEntry stores the given data and adds the resulting key to the manifest -func (m *ManifestWriter) AddEntry(data io.Reader, e *ManifestEntry) (storage.Key, error) { - key, err := m.api.Store(data, e.Size, nil) - if err != nil { - return nil, err - } - entry := newManifestTrieEntry(e, nil) - entry.Hash = key.String() - m.trie.addEntry(entry, m.quitC) - return key, nil -} - -// RemoveEntry removes the given path from the manifest -func (m *ManifestWriter) RemoveEntry(path string) error { - m.trie.deleteEntry(path, m.quitC) - return nil -} - -// Store stores the manifest, returning the resulting storage key -func (m *ManifestWriter) Store() (storage.Key, error) { - return m.trie.hash, m.trie.recalcAndStore() -} - -// ManifestWalker is used to recursively walk the entries in the manifest and -// all of its submanifests -type ManifestWalker struct { - api *Api - trie *manifestTrie - quitC chan bool -} - -func (a *Api) NewManifestWalker(key storage.Key, quitC chan bool) (*ManifestWalker, error) { - trie, err := loadManifest(a.dpa, key, quitC) - if err != nil { - return nil, fmt.Errorf("error loading manifest %s: %s", key, err) - } - return &ManifestWalker{a, trie, quitC}, nil -} - -// SkipManifest is used 
as a return value from WalkFn to indicate that the -// manifest should be skipped -var SkipManifest = errors.New("skip this manifest") - -// WalkFn is the type of function called for each entry visited by a recursive -// manifest walk -type WalkFn func(entry *ManifestEntry) error - -// Walk recursively walks the manifest calling walkFn for each entry in the -// manifest, including submanifests -func (m *ManifestWalker) Walk(walkFn WalkFn) error { - return m.walk(m.trie, "", walkFn) -} - -func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn) error { - for _, entry := range trie.entries { - if entry == nil { - continue - } - entry.Path = prefix + entry.Path - err := walkFn(&entry.ManifestEntry) - if err != nil { - if entry.ContentType == ManifestType && err == SkipManifest { - continue - } - return err - } - if entry.ContentType != ManifestType { - continue - } - if err := trie.loadSubTrie(entry, nil); err != nil { - return err - } - if err := m.walk(entry.subtrie, entry.Path, walkFn); err != nil { - return err - } - } - return nil -} - -type manifestTrie struct { - dpa *storage.DPA - entries [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry - hash storage.Key // if hash != nil, it is stored -} - -func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry { - return &manifestTrieEntry{ - ManifestEntry: *entry, - subtrie: subtrie, - } -} - -type manifestTrieEntry struct { - ManifestEntry - - subtrie *manifestTrie -} - -func loadManifest(dpa *storage.DPA, hash storage.Key, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand - - log.Trace(fmt.Sprintf("manifest lookup key: '%v'.", hash.Log())) - // retrieve manifest via DPA - manifestReader := dpa.Retrieve(hash) - return readManifest(manifestReader, hash, dpa, quitC) -} - -func readManifest(manifestReader storage.LazySectionReader, hash storage.Key, dpa 
*storage.DPA, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand - - // TODO check size for oversized manifests - size, err := manifestReader.Size(quitC) - if err != nil { // size == 0 - // can't determine size means we don't have the root chunk - err = fmt.Errorf("Manifest not Found") - return - } - manifestData := make([]byte, size) - read, err := manifestReader.Read(manifestData) - if int64(read) < size { - log.Trace(fmt.Sprintf("Manifest %v not found.", hash.Log())) - if err == nil { - err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size) - } - return - } - - log.Trace(fmt.Sprintf("Manifest %v retrieved", hash.Log())) - var man struct { - Entries []*manifestTrieEntry `json:"entries"` - } - err = json.Unmarshal(manifestData, &man) - if err != nil { - err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err) - log.Trace(fmt.Sprintf("%v", err)) - return - } - - log.Trace(fmt.Sprintf("Manifest %v has %d entries.", hash.Log(), len(man.Entries))) - - trie = &manifestTrie{ - dpa: dpa, - } - for _, entry := range man.Entries { - trie.addEntry(entry, quitC) - } - return -} - -func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { - self.hash = nil // trie modified, hash needs to be re-calculated on demand - - if len(entry.Path) == 0 { - self.entries[256] = entry - return - } - - b := entry.Path[0] - oldentry := self.entries[b] - if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) { - self.entries[b] = entry - return - } - - cpl := 0 - for (len(entry.Path) > cpl) && (len(oldentry.Path) > cpl) && (entry.Path[cpl] == oldentry.Path[cpl]) { - cpl++ - } - - if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) { - if self.loadSubTrie(oldentry, quitC) != nil { - return - } - entry.Path = entry.Path[cpl:] - oldentry.subtrie.addEntry(entry, quitC) - oldentry.Hash = "" - return - } - - commonPrefix := 
entry.Path[:cpl] - - subtrie := &manifestTrie{ - dpa: self.dpa, - } - entry.Path = entry.Path[cpl:] - oldentry.Path = oldentry.Path[cpl:] - subtrie.addEntry(entry, quitC) - subtrie.addEntry(oldentry, quitC) - - self.entries[b] = newManifestTrieEntry(&ManifestEntry{ - Path: commonPrefix, - ContentType: ManifestType, - }, subtrie) -} - -func (self *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) { - for _, e := range self.entries { - if e != nil { - cnt++ - entry = e - } - } - return -} - -func (self *manifestTrie) deleteEntry(path string, quitC chan bool) { - self.hash = nil // trie modified, hash needs to be re-calculated on demand - - if len(path) == 0 { - self.entries[256] = nil - return - } - - b := path[0] - entry := self.entries[b] - if entry == nil { - return - } - if entry.Path == path { - self.entries[b] = nil - return - } - - epl := len(entry.Path) - if (entry.ContentType == ManifestType) && (len(path) >= epl) && (path[:epl] == entry.Path) { - if self.loadSubTrie(entry, quitC) != nil { - return - } - entry.subtrie.deleteEntry(path[epl:], quitC) - entry.Hash = "" - // remove subtree if it has less than 2 elements - cnt, lastentry := entry.subtrie.getCountLast() - if cnt < 2 { - if lastentry != nil { - lastentry.Path = entry.Path + lastentry.Path - } - self.entries[b] = lastentry - } - } -} - -func (self *manifestTrie) recalcAndStore() error { - if self.hash != nil { - return nil - } - - var buffer bytes.Buffer - buffer.WriteString(`{"entries":[`) - - list := &Manifest{} - for _, entry := range self.entries { - if entry != nil { - if entry.Hash == "" { // TODO: paralellize - err := entry.subtrie.recalcAndStore() - if err != nil { - return err - } - entry.Hash = entry.subtrie.hash.String() - } - list.Entries = append(list.Entries, entry.ManifestEntry) - } - - } - - manifest, err := json.Marshal(list) - if err != nil { - return err - } - - sr := bytes.NewReader(manifest) - wg := &sync.WaitGroup{} - key, err2 := self.dpa.Store(sr, 
int64(len(manifest)), wg, nil) - wg.Wait() - self.hash = key - return err2 -} - -func (self *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) { - if entry.subtrie == nil { - hash := common.Hex2Bytes(entry.Hash) - entry.subtrie, err = loadManifest(self.dpa, hash, quitC) - entry.Hash = "" // might not match, should be recalculated - } - return -} - -func (self *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) error { - plen := len(prefix) - var start, stop int - if plen == 0 { - start = 0 - stop = 256 - } else { - start = int(prefix[0]) - stop = start - } - - for i := start; i <= stop; i++ { - select { - case <-quitC: - return fmt.Errorf("aborted") - default: - } - entry := self.entries[i] - if entry != nil { - epl := len(entry.Path) - if entry.ContentType == ManifestType { - l := plen - if epl < l { - l = epl - } - if prefix[:l] == entry.Path[:l] { - err := self.loadSubTrie(entry, quitC) - if err != nil { - return err - } - err = entry.subtrie.listWithPrefixInt(prefix[l:], rp+entry.Path[l:], quitC, cb) - if err != nil { - return err - } - } - } else { - if (epl >= plen) && (prefix == entry.Path[:plen]) { - cb(entry, rp+entry.Path[plen:]) - } - } - } - } - return nil -} - -func (self *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) (err error) { - return self.listWithPrefixInt(prefix, "", quitC, cb) -} - -func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) { - - log.Trace(fmt.Sprintf("findPrefixOf(%s)", path)) - - if len(path) == 0 { - return self.entries[256], 0 - } - - //see if first char is in manifest entries - b := path[0] - entry = self.entries[b] - if entry == nil { - return self.entries[256], 0 - } - - epl := len(entry.Path) - log.Trace(fmt.Sprintf("path = %v entry.Path = %v epl = %v", path, entry.Path, epl)) - if len(path) <= epl { - if 
entry.Path[:len(path)] == path { - if entry.ContentType == ManifestType { - err := self.loadSubTrie(entry, quitC) - if err == nil && entry.subtrie != nil { - subentries := entry.subtrie.entries - for i := 0; i < len(subentries); i++ { - sub := subentries[i] - if sub != nil && sub.Path == "" { - return sub, len(path) - } - } - } - entry.Status = http.StatusMultipleChoices - } - pos = len(path) - return - } - return nil, 0 - } - if path[:epl] == entry.Path { - log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType)) - //the subentry is a manifest, load subtrie - if entry.ContentType == ManifestType && (strings.Contains(entry.Path, path) || strings.Contains(path, entry.Path)) { - err := self.loadSubTrie(entry, quitC) - if err != nil { - return nil, 0 - } - sub, pos := entry.subtrie.findPrefixOf(path[epl:], quitC) - if sub != nil { - entry = sub - pos += epl - return sub, pos - } else if path == entry.Path { - entry.Status = http.StatusMultipleChoices - } - - } else { - //entry is not a manifest, return it - if path != entry.Path { - return nil, 0 - } - pos = epl - } - } - return -} - -// file system manifest always contains regularized paths -// no leading or trailing slashes, only single slashes inside -func RegularSlashes(path string) (res string) { - for i := 0; i < len(path); i++ { - if (path[i] != '/') || ((i > 0) && (path[i-1] != '/')) { - res = res + path[i:i+1] - } - } - if (len(res) > 0) && (res[len(res)-1] == '/') { - res = res[:len(res)-1] - } - return -} - -func (self *manifestTrie) getEntry(spath string) (entry *manifestTrieEntry, fullpath string) { - path := RegularSlashes(spath) - var pos int - quitC := make(chan bool) - entry, pos = self.findPrefixOf(path, quitC) - return entry, path[:pos] -} diff --git a/swarm/api/manifest_test.go b/swarm/api/manifest_test.go deleted file mode 100644 index cbe9e4f2aeab..000000000000 --- a/swarm/api/manifest_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is 
part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - "testing" - - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -func manifest(paths ...string) (manifestReader storage.LazySectionReader) { - var entries []string - for _, path := range paths { - entry := fmt.Sprintf(`{"path":"%s"}`, path) - entries = append(entries, entry) - } - manifest := fmt.Sprintf(`{"entries":[%s]}`, strings.Join(entries, ",")) - return &storage.LazyTestSectionReader{ - SectionReader: io.NewSectionReader(strings.NewReader(manifest), 0, int64(len(manifest))), - } -} - -func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie { - quitC := make(chan bool) - trie, err := readManifest(manifest(paths...), nil, nil, quitC) - if err != nil { - t.Errorf("unexpected error making manifest: %v", err) - } - checkEntry(t, path, match, multiple, trie) - return trie -} - -func checkEntry(t *testing.T, path, match string, multiple bool, trie *manifestTrie) { - entry, fullpath := trie.getEntry(path) - if match == "-" && entry != nil { - t.Errorf("expected no match for '%s', got '%s'", path, fullpath) - } else if entry == nil { - if match != "-" { - t.Errorf("expected entry '%s' to match '%s', got no match", match, 
path) - } - } else if fullpath != match { - t.Errorf("incorrect entry retrieved for '%s'. expected path '%v', got '%s'", path, match, fullpath) - } - - if multiple && entry.Status != http.StatusMultipleChoices { - t.Errorf("Expected %d Multiple Choices Status for path %s, match %s, got %d", http.StatusMultipleChoices, path, match, entry.Status) - } else if !multiple && entry != nil && entry.Status == http.StatusMultipleChoices { - t.Errorf("Were not expecting %d Multiple Choices Status for path %s, match %s, but got it", http.StatusMultipleChoices, path, match) - } -} - -func TestGetEntry(t *testing.T) { - // file system manifest always contains regularized paths - testGetEntry(t, "a", "a", false, "a") - testGetEntry(t, "b", "-", false, "a") - testGetEntry(t, "/a//", "a", false, "a") - // fallback - testGetEntry(t, "/a", "", false, "") - testGetEntry(t, "/a/b", "a/b", false, "a/b") - // longest/deepest math - testGetEntry(t, "read", "read", true, "readme.md", "readit.md") - testGetEntry(t, "rf", "-", false, "readme.md", "readit.md") - testGetEntry(t, "readme", "readme", false, "readme.md") - testGetEntry(t, "readme", "-", false, "readit.md") - testGetEntry(t, "readme.md", "readme.md", false, "readme.md") - testGetEntry(t, "readme.md", "-", false, "readit.md") - testGetEntry(t, "readmeAmd", "-", false, "readit.md") - testGetEntry(t, "readme.mdffff", "-", false, "readme.md") - testGetEntry(t, "ab", "ab", true, "ab/cefg", "ab/cedh", "ab/kkkkkk") - testGetEntry(t, "ab/ce", "ab/ce", true, "ab/cefg", "ab/cedh", "ab/ceuuuuuuuuuu") - testGetEntry(t, "abc", "abc", true, "abcd", "abczzzzef", "abc/def", "abc/e/g") - testGetEntry(t, "a/b", "a/b", true, "a", "a/bc", "a/ba", "a/b/c") - testGetEntry(t, "a/b", "a/b", false, "a", "a/b", "a/bb", "a/b/c") - testGetEntry(t, "//a//b//", "a/b", false, "a", "a/b", "a/bb", "a/b/c") -} - -func TestExactMatch(t *testing.T) { - quitC := make(chan bool) - mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map") - trie, err := 
readManifest(mf, nil, nil, quitC) - if err != nil { - t.Errorf("unexpected error making manifest: %v", err) - } - entry, _ := trie.getEntry("shouldBeExactMatch.css") - if entry.Path != "" { - t.Errorf("Expected entry to match %s, got: %s", "shouldBeExactMatch.css", entry.Path) - } - if entry.Status == http.StatusMultipleChoices { - t.Errorf("Got status %d, which is unexepcted", http.StatusMultipleChoices) - } -} - -func TestDeleteEntry(t *testing.T) { - -} - -// TestAddFileWithManifestPath tests that adding an entry at a path which -// already exists as a manifest just adds the entry to the manifest rather -// than replacing the manifest with the entry -func TestAddFileWithManifestPath(t *testing.T) { - // create a manifest containing "ab" and "ac" - manifest, _ := json.Marshal(&Manifest{ - Entries: []ManifestEntry{ - {Path: "ab", Hash: "ab"}, - {Path: "ac", Hash: "ac"}, - }, - }) - reader := &storage.LazyTestSectionReader{ - SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))), - } - trie, err := readManifest(reader, nil, nil, nil) - if err != nil { - t.Fatal(err) - } - checkEntry(t, "ab", "ab", false, trie) - checkEntry(t, "ac", "ac", false, trie) - - // now add path "a" and check we can still get "ab" and "ac" - entry := &manifestTrieEntry{} - entry.Path = "a" - entry.Hash = "a" - trie.addEntry(entry, nil) - checkEntry(t, "ab", "ab", false, trie) - checkEntry(t, "ac", "ac", false, trie) - checkEntry(t, "a", "a", false, trie) -} diff --git a/swarm/api/storage.go b/swarm/api/storage.go deleted file mode 100644 index 0e3abecfe4b3..000000000000 --- a/swarm/api/storage.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import "path" - -type Response struct { - MimeType string - Status int - Size int64 - // Content []byte - Content string -} - -// implements a service -// -// DEPRECATED: Use the HTTP API instead -type Storage struct { - api *Api -} - -func NewStorage(api *Api) *Storage { - return &Storage{api} -} - -// Put uploads the content to the swarm with a simple manifest speficying -// its content type -// -// DEPRECATED: Use the HTTP API instead -func (self *Storage) Put(content, contentType string) (string, error) { - key, err := self.api.Put(content, contentType) - if err != nil { - return "", err - } - return key.String(), err -} - -// Get retrieves the content from bzzpath and reads the response in full -// It returns the Response object, which serialises containing the -// response body as the value of the Content field -// NOTE: if error is non-nil, sResponse may still have partial content -// the actual size of which is given in len(resp.Content), while the expected -// size is resp.Size -// -// DEPRECATED: Use the HTTP API instead -func (self *Storage) Get(bzzpath string) (*Response, error) { - uri, err := Parse(path.Join("bzz:/", bzzpath)) - if err != nil { - return nil, err - } - key, err := self.api.Resolve(uri) - if err != nil { - return nil, err - } - reader, mimeType, 
status, err := self.api.Get(key, uri.Path) - if err != nil { - return nil, err - } - quitC := make(chan bool) - expsize, err := reader.Size(quitC) - if err != nil { - return nil, err - } - body := make([]byte, expsize) - size, err := reader.Read(body) - if int64(size) == expsize { - err = nil - } - return &Response{mimeType, status, expsize, string(body[:size])}, err -} - -// Modify(rootHash, basePath, contentHash, contentType) takes th e manifest trie rooted in rootHash, -// and merge on to it. creating an entry w conentType (mime) -// -// DEPRECATED: Use the HTTP API instead -func (self *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) { - uri, err := Parse("bzz:/" + rootHash) - if err != nil { - return "", err - } - key, err := self.api.Resolve(uri) - if err != nil { - return "", err - } - key, err = self.api.Modify(key, path, contentHash, contentType) - if err != nil { - return "", err - } - return key.String(), nil -} diff --git a/swarm/api/storage_test.go b/swarm/api/storage_test.go deleted file mode 100644 index d260dd61d811..000000000000 --- a/swarm/api/storage_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package api - -import ( - "testing" -) - -func testStorage(t *testing.T, f func(*Storage)) { - testApi(t, func(api *Api) { - f(NewStorage(api)) - }) -} - -func TestStoragePutGet(t *testing.T) { - testStorage(t, func(api *Storage) { - content := "hello" - exp := expResponse(content, "text/plain", 0) - // exp := expResponse([]byte(content), "text/plain", 0) - bzzhash, err := api.Put(content, exp.MimeType) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - // to check put against the Api#Get - resp0 := testGet(t, api.api, bzzhash, "") - checkResponse(t, resp0, exp) - - // check storage#Get - resp, err := api.Get(bzzhash) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - checkResponse(t, &testResponse{nil, resp}, exp) - }) -} diff --git a/swarm/api/testapi.go b/swarm/api/testapi.go deleted file mode 100644 index 5b46417ad571..000000000000 --- a/swarm/api/testapi.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package api - -import ( - "github.com/XinFinOrg/XDPoSChain/swarm/network" -) - -type Control struct { - api *Api - hive *network.Hive -} - -func NewControl(api *Api, hive *network.Hive) *Control { - return &Control{api, hive} -} - -func (self *Control) BlockNetworkRead(on bool) { - self.hive.BlockNetworkRead(on) -} - -func (self *Control) SyncEnabled(on bool) { - self.hive.SyncEnabled(on) -} - -func (self *Control) SwapEnabled(on bool) { - self.hive.SwapEnabled(on) -} - -func (self *Control) Hive() string { - return self.hive.String() -} diff --git a/swarm/api/testdata/test0/img/logo.png b/swarm/api/testdata/test0/img/logo.png deleted file mode 100644 index e0fb15ab33a5..000000000000 Binary files a/swarm/api/testdata/test0/img/logo.png and /dev/null differ diff --git a/swarm/api/testdata/test0/index.css b/swarm/api/testdata/test0/index.css deleted file mode 100644 index 693b13a37c7e..000000000000 --- a/swarm/api/testdata/test0/index.css +++ /dev/null @@ -1,9 +0,0 @@ -h1 { - color: black; - font-size: 12px; - background-color: orange; - border: 4px solid black; -} -body { - background-color: orange -} diff --git a/swarm/api/testdata/test0/index.html b/swarm/api/testdata/test0/index.html deleted file mode 100644 index 321e910d7a28..000000000000 --- a/swarm/api/testdata/test0/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - -

Swarm Test

- Ethereum logo - - \ No newline at end of file diff --git a/swarm/api/uri.go b/swarm/api/uri.go deleted file mode 100644 index d8aafedf4138..000000000000 --- a/swarm/api/uri.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package api - -import ( - "fmt" - "net/url" - "strings" -) - -// URI is a reference to content stored in swarm. 
-type URI struct { - // Scheme has one of the following values: - // - // * bzz - an entry in a swarm manifest - // * bzz-raw - raw swarm content - // * bzz-immutable - immutable URI of an entry in a swarm manifest - // (address is not resolved) - // * bzz-list - list of all files contained in a swarm manifest - // - // Deprecated Schemes: - // * bzzr - raw swarm content - // * bzzi - immutable URI of an entry in a swarm manifest - // (address is not resolved) - // * bzz-hash - hash of swarm content - // - Scheme string - - // Addr is either a hexadecimal storage key or it an address which - // resolves to a storage key - Addr string - - // Path is the path to the content within a swarm manifest - Path string -} - -// Parse parses rawuri into a URI struct, where rawuri is expected to have one -// of the following formats: -// -// * :/ -// * :/ -// * :// -// * :// -// * :// -// * :/// -// -// with scheme one of bzz, bzz-raw, bzz-immutable, bzz-list or bzz-hash -// or deprecated ones bzzr and bzzi -func Parse(rawuri string) (*URI, error) { - u, err := url.Parse(rawuri) - if err != nil { - return nil, err - } - uri := &URI{Scheme: u.Scheme} - - // check the scheme is valid - switch uri.Scheme { - case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzzr", "bzzi": - default: - return nil, fmt.Errorf("unknown scheme %q", u.Scheme) - } - - // handle URIs like bzz:/// where the addr and path - // have already been split by url.Parse - if u.Host != "" { - uri.Addr = u.Host - uri.Path = strings.TrimLeft(u.Path, "/") - return uri, nil - } - - // URI is like bzz:// so split the addr and path from - // the raw path (which will be //) - parts := strings.SplitN(strings.TrimLeft(u.Path, "/"), "/", 2) - uri.Addr = parts[0] - if len(parts) == 2 { - uri.Path = parts[1] - } - return uri, nil -} - -func (u *URI) Raw() bool { - return u.Scheme == "bzz-raw" -} - -func (u *URI) Immutable() bool { - return u.Scheme == "bzz-immutable" -} - -func (u *URI) List() bool { - return 
u.Scheme == "bzz-list" -} - -func (u *URI) DeprecatedRaw() bool { - return u.Scheme == "bzzr" -} - -func (u *URI) DeprecatedImmutable() bool { - return u.Scheme == "bzzi" -} - -func (u *URI) Hash() bool { - return u.Scheme == "bzz-hash" -} - -func (u *URI) String() string { - return u.Scheme + ":/" + u.Addr + "/" + u.Path -} diff --git a/swarm/api/uri_test.go b/swarm/api/uri_test.go deleted file mode 100644 index 137b4505d414..000000000000 --- a/swarm/api/uri_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package api - -import ( - "reflect" - "testing" -) - -func TestParseURI(t *testing.T) { - type test struct { - uri string - expectURI *URI - expectErr bool - expectRaw bool - expectImmutable bool - expectList bool - expectHash bool - expectDeprecatedRaw bool - expectDeprecatedImmutable bool - } - tests := []test{ - { - uri: "", - expectErr: true, - }, - { - uri: "foo", - expectErr: true, - }, - { - uri: "bzz", - expectErr: true, - }, - { - uri: "bzz:", - expectURI: &URI{Scheme: "bzz"}, - }, - { - uri: "bzz-immutable:", - expectURI: &URI{Scheme: "bzz-immutable"}, - expectImmutable: true, - }, - { - uri: "bzz-raw:", - expectURI: &URI{Scheme: "bzz-raw"}, - expectRaw: true, - }, - { - uri: "bzz:/", - expectURI: &URI{Scheme: "bzz"}, - }, - { - uri: "bzz:/abc123", - expectURI: &URI{Scheme: "bzz", Addr: "abc123"}, - }, - { - uri: "bzz:/abc123/path/to/entry", - expectURI: &URI{Scheme: "bzz", Addr: "abc123", Path: "path/to/entry"}, - }, - { - uri: "bzz-raw:/", - expectURI: &URI{Scheme: "bzz-raw"}, - expectRaw: true, - }, - { - uri: "bzz-raw:/abc123", - expectURI: &URI{Scheme: "bzz-raw", Addr: "abc123"}, - expectRaw: true, - }, - { - uri: "bzz-raw:/abc123/path/to/entry", - expectURI: &URI{Scheme: "bzz-raw", Addr: "abc123", Path: "path/to/entry"}, - expectRaw: true, - }, - { - uri: "bzz://", - expectURI: &URI{Scheme: "bzz"}, - }, - { - uri: "bzz://abc123", - expectURI: &URI{Scheme: "bzz", Addr: "abc123"}, - }, - { - uri: "bzz://abc123/path/to/entry", - expectURI: &URI{Scheme: "bzz", Addr: "abc123", Path: "path/to/entry"}, - }, - { - uri: "bzz-hash:", - expectURI: &URI{Scheme: "bzz-hash"}, - expectHash: true, - }, - { - uri: "bzz-hash:/", - expectURI: &URI{Scheme: "bzz-hash"}, - expectHash: true, - }, - { - uri: "bzz-list:", - expectURI: &URI{Scheme: "bzz-list"}, - expectList: true, - }, - { - uri: "bzz-list:/", - expectURI: &URI{Scheme: "bzz-list"}, - expectList: true, - }, - { - uri: "bzzr:", - expectURI: &URI{Scheme: "bzzr"}, - expectDeprecatedRaw: true, - }, - { - uri: 
"bzzr:/", - expectURI: &URI{Scheme: "bzzr"}, - expectDeprecatedRaw: true, - }, - { - uri: "bzzi:", - expectURI: &URI{Scheme: "bzzi"}, - expectDeprecatedImmutable: true, - }, - { - uri: "bzzi:/", - expectURI: &URI{Scheme: "bzzi"}, - expectDeprecatedImmutable: true, - }, - } - for _, x := range tests { - actual, err := Parse(x.uri) - if x.expectErr { - if err == nil { - t.Fatalf("expected %s to error", x.uri) - } - continue - } - if err != nil { - t.Fatalf("error parsing %s: %s", x.uri, err) - } - if !reflect.DeepEqual(actual, x.expectURI) { - t.Fatalf("expected %s to return %#v, got %#v", x.uri, x.expectURI, actual) - } - if actual.Raw() != x.expectRaw { - t.Fatalf("expected %s raw to be %t, got %t", x.uri, x.expectRaw, actual.Raw()) - } - if actual.Immutable() != x.expectImmutable { - t.Fatalf("expected %s immutable to be %t, got %t", x.uri, x.expectImmutable, actual.Immutable()) - } - if actual.List() != x.expectList { - t.Fatalf("expected %s list to be %t, got %t", x.uri, x.expectList, actual.List()) - } - if actual.Hash() != x.expectHash { - t.Fatalf("expected %s hash to be %t, got %t", x.uri, x.expectHash, actual.Hash()) - } - if actual.DeprecatedRaw() != x.expectDeprecatedRaw { - t.Fatalf("expected %s deprecated raw to be %t, got %t", x.uri, x.expectDeprecatedRaw, actual.DeprecatedRaw()) - } - if actual.DeprecatedImmutable() != x.expectDeprecatedImmutable { - t.Fatalf("expected %s deprecated immutable to be %t, got %t", x.uri, x.expectDeprecatedImmutable, actual.DeprecatedImmutable()) - } - } -} diff --git a/swarm/dev/.dockerignore b/swarm/dev/.dockerignore deleted file mode 100644 index f9e69b37f369..000000000000 --- a/swarm/dev/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -bin/* -cluster/* diff --git a/swarm/dev/.gitignore b/swarm/dev/.gitignore deleted file mode 100644 index f9e69b37f369..000000000000 --- a/swarm/dev/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -bin/* -cluster/* diff --git a/swarm/dev/Dockerfile b/swarm/dev/Dockerfile deleted file mode 100644 
index 728bdab1fb30..000000000000 --- a/swarm/dev/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM ubuntu:xenial - -# install build + test dependencies -RUN apt-get update && \ - apt-get install --yes --no-install-recommends \ - ca-certificates \ - curl \ - fuse \ - g++ \ - gcc \ - git \ - iproute2 \ - iputils-ping \ - less \ - libc6-dev \ - make \ - pkg-config \ - && \ - apt-get clean - -# install Go -ENV GO_VERSION 1.8.1 -RUN curl -fSLo golang.tar.gz "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" && \ - tar -xzf golang.tar.gz -C /usr/local && \ - rm golang.tar.gz -ENV GOPATH /go -ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH - -# install docker CLI -RUN curl -fSLo docker.tar.gz https://get.docker.com/builds/Linux/x86_64/docker-17.04.0-ce.tgz && \ - tar -xzf docker.tar.gz -C /usr/local/bin --strip-components=1 docker/docker && \ - rm docker.tar.gz - -# install jq -RUN curl -fSLo /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && \ - chmod +x /usr/local/bin/jq - -# install govendor -RUN go get -u github.com/kardianos/govendor - -# add custom bashrc -ADD bashrc /root/.bashrc diff --git a/swarm/dev/Makefile b/swarm/dev/Makefile deleted file mode 100644 index e28205622ac0..000000000000 --- a/swarm/dev/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -.PHONY: build cluster test - -default: build - -build: - go build -o bin/swarm github.com/XinFinOrg/XDPoSChain/cmd/swarm - go build -o bin/XDC github.com/XinFinOrg/XDPoSChain/cmd/XDC - go build -o bin/bootnode github.com/XinFinOrg/XDPoSChain/cmd/bootnode - -cluster: build - scripts/boot-cluster.sh - -test: - go test -v github.com/XinFinOrg/XDPoSChain/swarm/... 
diff --git a/swarm/dev/README.md b/swarm/dev/README.md deleted file mode 100644 index 81e3b53585e9..000000000000 --- a/swarm/dev/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Swarm development environment -============================= - -The Swarm development environment is a Linux bash shell which can be run in a -Docker container and provides a predictable build and test environment. - -### Start the Docker container - -Run the `run.sh` script to build the Docker image and run it, you will then be -at a bash prompt inside the `swarm/dev` directory. - -### Build binaries - -Run `make` to build the `swarm`, `geth` and `bootnode` binaries into the -`swarm/dev/bin` directory. - -### Boot a cluster - -Run `make cluster` to start a 3 node Swarm cluster, or run -`scripts/boot-cluster.sh --size N` to boot a cluster of size N. diff --git a/swarm/dev/bashrc b/swarm/dev/bashrc deleted file mode 100644 index c79da49f1e68..000000000000 --- a/swarm/dev/bashrc +++ /dev/null @@ -1,21 +0,0 @@ -export ROOT="${GOPATH}/src/github.com/XinFinOrg/XDPoSChain" -export PATH="${ROOT}/swarm/dev/bin:${PATH}" - -cd "${ROOT}/swarm/dev" - -cat <&2 <&2 - exit 1 - fi - name="$2" - shift 2 - ;; - -d | --docker-args) - if [[ -z "$2" ]]; then - echo "ERROR: --docker-args flag requires an argument" >&2 - exit 1 - fi - docker_args="$2" - shift 2 - ;; - *) - break - ;; - esac - done - - if [[ $# -ne 0 ]]; then - usage - echo "ERROR: invalid arguments" >&2 - exit 1 - fi -} - -build_image() { - docker build --tag "${name}" "${ROOT}/swarm/dev" -} - -run_image() { - exec docker run \ - --privileged \ - --interactive \ - --tty \ - --rm \ - --hostname "${name}" \ - --name "${name}" \ - --volume "${ROOT}:/go/src/github.com/XinFinOrg/XDPoSChain" \ - --volume "/var/run/docker.sock:/var/run/docker.sock" \ - ${docker_args} \ - "${name}" \ - /bin/bash -} - -main "$@" diff --git a/swarm/dev/scripts/boot-cluster.sh b/swarm/dev/scripts/boot-cluster.sh deleted file mode 100755 index 98ae3c8023f0..000000000000 --- 
a/swarm/dev/scripts/boot-cluster.sh +++ /dev/null @@ -1,288 +0,0 @@ -#!/bin/bash -# -# A script to boot a dev swarm cluster on a Linux host (typically in a Docker -# container started with swarm/dev/run.sh). -# -# The cluster contains a bootnode, a geth node and multiple swarm nodes, with -# each node having its own data directory in a base directory passed with the -# --dir flag (default is swarm/dev/cluster). -# -# To avoid using different ports for each node and to make networking more -# realistic, each node gets its own network namespace with IPs assigned from -# the 192.168.33.0/24 subnet: -# -# bootnode: 192.168.33.2 -# geth: 192.168.33.3 -# swarm: 192.168.33.10{1,2,...,n} - -set -e - -ROOT="$(cd "$(dirname "$0")/../../.." && pwd)" -source "${ROOT}/swarm/dev/scripts/util.sh" - -# DEFAULT_BASE_DIR is the default base directory to store node data -DEFAULT_BASE_DIR="${ROOT}/swarm/dev/cluster" - -# DEFAULT_CLUSTER_SIZE is the default swarm cluster size -DEFAULT_CLUSTER_SIZE=3 - -# Linux bridge configuration for connecting the node network namespaces -BRIDGE_NAME="swarmbr0" -BRIDGE_IP="192.168.33.1" - -# static bootnode configuration -BOOTNODE_IP="192.168.33.2" -BOOTNODE_PORT="30301" -BOOTNODE_KEY="32078f313bea771848db70745225c52c00981589ad6b5b49163f0f5ee852617d" -BOOTNODE_PUBKEY="760c4460e5336ac9bbd87952a3c7ec4363fc0a97bd31c86430806e287b437fd1b01abc6e1db640cf3106b520344af1d58b00b57823db3e1407cbc433e1b6d04d" -BOOTNODE_URL="enode://${BOOTNODE_PUBKEY}@${BOOTNODE_IP}:${BOOTNODE_PORT}" - -# static geth configuration -GETH_IP="192.168.33.3" -GETH_RPC_PORT="8545" -GETH_RPC_URL="http://${GETH_IP}:${GETH_RPC_PORT}" - -usage() { - cat >&2 < "${key_file}" - - local args=( - --addr "${BOOTNODE_IP}:${BOOTNODE_PORT}" - --nodekey "${key_file}" - --verbosity "6" - ) - - start_node "bootnode" "${BOOTNODE_IP}" "$(which bootnode)" ${args[@]} -} - -# start_geth_node starts a geth node with --datadir pointing at /geth -# and a single, unlocked account with password "geth" 
-start_geth_node() { - local dir="${base_dir}/geth" - mkdir -p "${dir}" - - local password="geth" - echo "${password}" > "${dir}/password" - - # create an account if necessary - if [[ ! -e "${dir}/keystore" ]]; then - info "creating geth account" - create_account "${dir}" "${password}" - fi - - # get the account address - local address="$(jq --raw-output '.address' ${dir}/keystore/*)" - if [[ -z "${address}" ]]; then - fail "failed to get geth account address" - fi - - local args=( - --datadir "${dir}" - --networkid "321" - --bootnodes "${BOOTNODE_URL}" - --unlock "${address}" - --password "${dir}/password" - --rpc - --rpcaddr "${GETH_IP}" - --rpcport "${GETH_RPC_PORT}" - --verbosity "6" - ) - - start_node "geth" "${GETH_IP}" "$(which geth)" ${args[@]} -} - -start_swarm_nodes() { - for i in $(seq 1 ${cluster_size}); do - start_swarm_node "${i}" - done -} - -# start_swarm_node starts a swarm node with a name like "swarmNN" (where NN is -# a zero-padded integer like "07"), --datadir pointing at / -# (e.g. /swarm07) and a single account with as the password -start_swarm_node() { - local num=$1 - local name="swarm$(printf '%02d' ${num})" - local ip="192.168.33.1$(printf '%02d' ${num})" - - local dir="${base_dir}/${name}" - mkdir -p "${dir}" - - local password="${name}" - echo "${password}" > "${dir}/password" - - # create an account if necessary - if [[ ! 
-e "${dir}/keystore" ]]; then - info "creating account for ${name}" - create_account "${dir}" "${password}" - fi - - # get the account address - local address="$(jq --raw-output '.address' ${dir}/keystore/*)" - if [[ -z "${address}" ]]; then - fail "failed to get swarm account address" - fi - - local args=( - --bootnodes "${BOOTNODE_URL}" - --datadir "${dir}" - --identity "${name}" - --ens-api "${GETH_RPC_URL}" - --bzznetworkid "321" - --bzzaccount "${address}" - --password "${dir}/password" - --verbosity "6" - ) - - start_node "${name}" "${ip}" "$(which swarm)" ${args[@]} -} - -# start_node runs the node command as a daemon in a network namespace -start_node() { - local name="$1" - local ip="$2" - local path="$3" - local cmd_args=${@:4} - - info "starting ${name} with IP ${ip}" - - create_node_network "${name}" "${ip}" - - # add a marker to the log file - cat >> "${log_dir}/${name}.log" <>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> -Starting ${name} node - $(date) ->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - -EOF - - # run the command in the network namespace using start-stop-daemon to - # daemonise the process, sending all output to the log file - local daemon_args=( - --start - --background - --no-close - --make-pidfile - --pidfile "${pid_dir}/${name}.pid" - --exec "${path}" - ) - if ! 
ip netns exec "${name}" start-stop-daemon ${daemon_args[@]} -- $cmd_args &>> "${log_dir}/${name}.log"; then - fail "could not start ${name}, check ${log_dir}/${name}.log" - fi -} - -# create_node_network creates a network namespace and connects it to the Linux -# bridge using a veth pair -create_node_network() { - local name="$1" - local ip="$2" - - # create the namespace - ip netns add "${name}" - - # create the veth pair - local veth0="veth${name}0" - local veth1="veth${name}1" - ip link add name "${veth0}" type veth peer name "${veth1}" - - # add one end to the bridge - ip link set dev "${veth0}" master "${BRIDGE_NAME}" - ip link set dev "${veth0}" up - - # add the other end to the namespace, rename it eth0 and give it the ip - ip link set dev "${veth1}" netns "${name}" - ip netns exec "${name}" ip link set dev "${veth1}" name "eth0" - ip netns exec "${name}" ip link set dev "eth0" up - ip netns exec "${name}" ip address add "${ip}/24" dev "eth0" -} - -create_account() { - local dir=$1 - local password=$2 - - geth --datadir "${dir}" --password /dev/stdin account new <<< "${password}" -} - -main "$@" diff --git a/swarm/dev/scripts/random-uploads.sh b/swarm/dev/scripts/random-uploads.sh deleted file mode 100755 index 563a51befcf9..000000000000 --- a/swarm/dev/scripts/random-uploads.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash -# -# A script to upload random data to a swarm cluster. -# -# Example: -# -# random-uploads.sh --addr 192.168.33.101:8500 --size 40k --count 1000 - -set -e - -ROOT="$(cd "$(dirname "$0")/../../.." 
&& pwd)" -source "${ROOT}/swarm/dev/scripts/util.sh" - -DEFAULT_ADDR="localhost:8500" -DEFAULT_UPLOAD_SIZE="40k" -DEFAULT_UPLOAD_COUNT="1000" - -usage() { - cat >&2 </dev/null -} - -parse_args() { - while true; do - case "$1" in - -h | --help) - usage - exit 0 - ;; - -a | --addr) - if [[ -z "$2" ]]; then - fail "--addr flag requires an argument" - fi - addr="$2" - shift 2 - ;; - -s | --size) - if [[ -z "$2" ]]; then - fail "--size flag requires an argument" - fi - upload_size="$2" - shift 2 - ;; - -c | --count) - if [[ -z "$2" ]]; then - fail "--count flag requires an argument" - fi - upload_count="$2" - shift 2 - ;; - *) - break - ;; - esac - done - - if [[ $# -ne 0 ]]; then - usage - fail "ERROR: invalid arguments: $@" - fi -} - -main "$@" diff --git a/swarm/dev/scripts/stop-cluster.sh b/swarm/dev/scripts/stop-cluster.sh deleted file mode 100755 index 89cb7b0c9ab7..000000000000 --- a/swarm/dev/scripts/stop-cluster.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/bash -# -# A script to shutdown a dev swarm cluster. - -set -e - -ROOT="$(cd "$(dirname "$0")/../../.." && pwd)" -source "${ROOT}/swarm/dev/scripts/util.sh" - -DEFAULT_BASE_DIR="${ROOT}/swarm/dev/cluster" - -usage() { - cat >&2 </dev/null; then - ip link delete dev "veth${name}0" - fi -} - -delete_network() { - if ip link show "swarmbr0" &>/dev/null; then - ip link delete dev "swarmbr0" - fi -} - -main "$@" diff --git a/swarm/dev/scripts/util.sh b/swarm/dev/scripts/util.sh deleted file mode 100644 index f17a12e420d0..000000000000 --- a/swarm/dev/scripts/util.sh +++ /dev/null @@ -1,53 +0,0 @@ -# shared shell functions - -info() { - local msg="$@" - local timestamp="$(date +%H:%M:%S)" - say "===> ${timestamp} ${msg}" "green" -} - -warn() { - local msg="$@" - local timestamp=$(date +%H:%M:%S) - say "===> ${timestamp} WARN: ${msg}" "yellow" >&2 -} - -fail() { - local msg="$@" - say "ERROR: ${msg}" "red" >&2 - exit 1 -} - -# say prints the given message to STDOUT, using the optional color if -# STDOUT is a terminal. 
-# -# usage: -# -# say "foo" - prints "foo" -# say "bar" "red" - prints "bar" in red -# say "baz" "green" - prints "baz" in green -# say "qux" "red" | tee - prints "qux" with no colour -# -say() { - local msg=$1 - local color=$2 - - if [[ -n "${color}" ]] && [[ -t 1 ]]; then - case "${color}" in - red) - echo -e "\033[1;31m${msg}\033[0m" - ;; - green) - echo -e "\033[1;32m${msg}\033[0m" - ;; - yellow) - echo -e "\033[1;33m${msg}\033[0m" - ;; - *) - echo "${msg}" - ;; - esac - else - echo "${msg}" - fi -} diff --git a/swarm/fuse/fuse_dir.go b/swarm/fuse/fuse_dir.go deleted file mode 100644 index 91b236ae8af0..000000000000 --- a/swarm/fuse/fuse_dir.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// +build linux darwin freebsd - -package fuse - -import ( - "bazil.org/fuse" - "bazil.org/fuse/fs" - "golang.org/x/net/context" - "os" - "path/filepath" - "sync" -) - -var ( - _ fs.Node = (*SwarmDir)(nil) - _ fs.NodeRequestLookuper = (*SwarmDir)(nil) - _ fs.HandleReadDirAller = (*SwarmDir)(nil) - _ fs.NodeCreater = (*SwarmDir)(nil) - _ fs.NodeRemover = (*SwarmDir)(nil) - _ fs.NodeMkdirer = (*SwarmDir)(nil) -) - -type SwarmDir struct { - inode uint64 - name string - path string - directories []*SwarmDir - files []*SwarmFile - - mountInfo *MountInfo - lock *sync.RWMutex -} - -func NewSwarmDir(fullpath string, minfo *MountInfo) *SwarmDir { - newdir := &SwarmDir{ - inode: NewInode(), - name: filepath.Base(fullpath), - path: fullpath, - directories: []*SwarmDir{}, - files: []*SwarmFile{}, - mountInfo: minfo, - lock: &sync.RWMutex{}, - } - return newdir -} - -func (sd *SwarmDir) Attr(ctx context.Context, a *fuse.Attr) error { - a.Inode = sd.inode - a.Mode = os.ModeDir | 0700 - a.Uid = uint32(os.Getuid()) - a.Gid = uint32(os.Getegid()) - return nil -} - -func (sd *SwarmDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) { - - for _, n := range sd.files { - if n.name == req.Name { - return n, nil - } - } - for _, n := range sd.directories { - if n.name == req.Name { - return n, nil - } - } - return nil, fuse.ENOENT -} - -func (sd *SwarmDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { - var children []fuse.Dirent - for _, file := range sd.files { - children = append(children, fuse.Dirent{Inode: file.inode, Type: fuse.DT_File, Name: file.name}) - } - for _, dir := range sd.directories { - children = append(children, fuse.Dirent{Inode: dir.inode, Type: fuse.DT_Dir, Name: dir.name}) - } - return children, nil -} - -func (sd *SwarmDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { - - newFile := NewSwarmFile(sd.path, req.Name, sd.mountInfo) - 
newFile.fileSize = 0 // 0 means, file is not in swarm yet and it is just created - - sd.lock.Lock() - defer sd.lock.Unlock() - sd.files = append(sd.files, newFile) - - return newFile, newFile, nil -} - -func (sd *SwarmDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { - - if req.Dir && sd.directories != nil { - newDirs := []*SwarmDir{} - for _, dir := range sd.directories { - if dir.name == req.Name { - removeDirectoryFromSwarm(dir) - } else { - newDirs = append(newDirs, dir) - } - } - if len(sd.directories) > len(newDirs) { - sd.lock.Lock() - defer sd.lock.Unlock() - sd.directories = newDirs - } - return nil - } else if !req.Dir && sd.files != nil { - newFiles := []*SwarmFile{} - for _, f := range sd.files { - if f.name == req.Name { - removeFileFromSwarm(f) - } else { - newFiles = append(newFiles, f) - } - } - if len(sd.files) > len(newFiles) { - sd.lock.Lock() - defer sd.lock.Unlock() - sd.files = newFiles - } - return nil - } - return fuse.ENOENT -} - -func (sd *SwarmDir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - - newDir := NewSwarmDir(req.Name, sd.mountInfo) - - sd.lock.Lock() - defer sd.lock.Unlock() - sd.directories = append(sd.directories, newDir) - - return newDir, nil - -} diff --git a/swarm/fuse/fuse_file.go b/swarm/fuse/fuse_file.go deleted file mode 100644 index ea2ed308f5dd..000000000000 --- a/swarm/fuse/fuse_file.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build linux darwin freebsd - -package fuse - -import ( - "errors" - "io" - "os" - "sync" - - "bazil.org/fuse" - "bazil.org/fuse/fs" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" - "golang.org/x/net/context" -) - -const ( - MaxAppendFileSize = 10485760 // 10Mb -) - -var ( - errInvalidOffset = errors.New("Invalid offset during write") - errFileSizeMaxLimixReached = errors.New("File size exceeded max limit") -) - -var ( - _ fs.Node = (*SwarmFile)(nil) - _ fs.HandleReader = (*SwarmFile)(nil) - _ fs.HandleWriter = (*SwarmFile)(nil) -) - -type SwarmFile struct { - inode uint64 - name string - path string - key storage.Key - fileSize int64 - reader storage.LazySectionReader - - mountInfo *MountInfo - lock *sync.RWMutex -} - -func NewSwarmFile(path, fname string, minfo *MountInfo) *SwarmFile { - newFile := &SwarmFile{ - inode: NewInode(), - name: fname, - path: path, - key: nil, - fileSize: -1, // -1 means , file already exists in swarm and you need to just get the size from swarm - reader: nil, - - mountInfo: minfo, - lock: &sync.RWMutex{}, - } - return newFile -} - -func (file *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error { - - a.Inode = file.inode - //TODO: need to get permission as argument - a.Mode = 0700 - a.Uid = uint32(os.Getuid()) - a.Gid = uint32(os.Getegid()) - - if file.fileSize == -1 { - reader := file.mountInfo.swarmApi.Retrieve(file.key) - quitC := make(chan bool) - size, err := reader.Size(quitC) - if err != nil { - log.Warn("Couldnt get size of file %s : %v", file.path, err) - } - file.fileSize = 
size - } - a.Size = uint64(file.fileSize) - return nil -} - -func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - - sf.lock.RLock() - defer sf.lock.RUnlock() - if sf.reader == nil { - sf.reader = sf.mountInfo.swarmApi.Retrieve(sf.key) - } - buf := make([]byte, req.Size) - n, err := sf.reader.ReadAt(buf, req.Offset) - if err == io.ErrUnexpectedEOF || err == io.EOF { - err = nil - } - resp.Data = buf[:n] - sf.reader = nil - return err - -} - -func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { - - if sf.fileSize == 0 && req.Offset == 0 { - - // A new file is created - err := addFileToSwarm(sf, req.Data, len(req.Data)) - if err != nil { - return err - } - resp.Size = len(req.Data) - - } else if req.Offset <= sf.fileSize { - - totalSize := sf.fileSize + int64(len(req.Data)) - if totalSize > MaxAppendFileSize { - log.Warn("Append file size reached (%v) : (%v)", sf.fileSize, len(req.Data)) - return errFileSizeMaxLimixReached - } - - err := appendToExistingFileInSwarm(sf, req.Data, req.Offset, int64(len(req.Data))) - if err != nil { - return err - } - resp.Size = len(req.Data) - } else { - log.Warn("Invalid write request size(%v) : off(%v)", sf.fileSize, req.Offset) - return errInvalidOffset - } - - return nil -} diff --git a/swarm/fuse/fuse_root.go b/swarm/fuse/fuse_root.go deleted file mode 100644 index b2262d1c5a0d..000000000000 --- a/swarm/fuse/fuse_root.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build linux darwin freebsd - -package fuse - -import ( - "bazil.org/fuse/fs" -) - -var ( - _ fs.Node = (*SwarmDir)(nil) -) - -type SwarmRoot struct { - root *SwarmDir -} - -func (filesystem *SwarmRoot) Root() (fs.Node, error) { - return filesystem.root, nil -} diff --git a/swarm/fuse/swarmfs.go b/swarm/fuse/swarmfs.go deleted file mode 100644 index 448ecadfaa03..000000000000 --- a/swarm/fuse/swarmfs.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package fuse - -import ( - "github.com/XinFinOrg/XDPoSChain/swarm/api" - "sync" - "time" -) - -const ( - Swarmfs_Version = "0.1" - mountTimeout = time.Second * 5 - unmountTimeout = time.Second * 10 - maxFuseMounts = 5 -) - -var ( - swarmfs *SwarmFS // Swarm file system singleton - swarmfsLock sync.Once - - inode uint64 = 1 // global inode - inodeLock sync.RWMutex -) - -type SwarmFS struct { - swarmApi *api.Api - activeMounts map[string]*MountInfo - swarmFsLock *sync.RWMutex -} - -func NewSwarmFS(api *api.Api) *SwarmFS { - swarmfsLock.Do(func() { - swarmfs = &SwarmFS{ - swarmApi: api, - swarmFsLock: &sync.RWMutex{}, - activeMounts: map[string]*MountInfo{}, - } - }) - return swarmfs - -} - -// Inode numbers need to be unique, they are used for caching inside fuse -func NewInode() uint64 { - inodeLock.Lock() - defer inodeLock.Unlock() - inode += 1 - return inode -} diff --git a/swarm/fuse/swarmfs_fallback.go b/swarm/fuse/swarmfs_fallback.go deleted file mode 100644 index 4864c8689c27..000000000000 --- a/swarm/fuse/swarmfs_fallback.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// +build !linux,!darwin,!freebsd - -package fuse - -import ( - "errors" -) - -var errNoFUSE = errors.New("FUSE is not supported on this platform") - -func isFUSEUnsupportedError(err error) bool { - return err == errNoFUSE -} - -type MountInfo struct { - MountPoint string - StartManifest string - LatestManifest string -} - -func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) { - return nil, errNoFUSE -} - -func (self *SwarmFS) Unmount(mountpoint string) (bool, error) { - return false, errNoFUSE -} - -func (self *SwarmFS) Listmounts() ([]*MountInfo, error) { - return nil, errNoFUSE -} - -func (self *SwarmFS) Stop() error { - return nil -} diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go deleted file mode 100644 index c9f70e3ea49b..000000000000 --- a/swarm/fuse/swarmfs_test.go +++ /dev/null @@ -1,836 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -//go:build linux || darwin || freebsd -// +build linux darwin freebsd - -package fuse - -import ( - "bytes" - "crypto/rand" - "io" - "os" - "path/filepath" - "testing" - - "github.com/XinFinOrg/XDPoSChain/swarm/api" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -type fileInfo struct { - perm uint64 - uid int - gid int - contents []byte -} - -func createTestFilesAndUploadToSwarm(t *testing.T, api *api.Api, files map[string]fileInfo, uploadDir string) string { - os.RemoveAll(uploadDir) - - for fname, finfo := range files { - actualPath := filepath.Join(uploadDir, fname) - filePath := filepath.Dir(actualPath) - - err := os.MkdirAll(filePath, 0777) - if err != nil { - t.Fatalf("Error creating directory '%v' : %v", filePath, err) - } - - fd, err1 := os.OpenFile(actualPath, os.O_RDWR|os.O_CREATE, os.FileMode(finfo.perm)) - if err1 != nil { - t.Fatalf("Error creating file %v: %v", actualPath, err1) - } - - fd.Write(finfo.contents) - fd.Chown(finfo.uid, finfo.gid) - fd.Chmod(os.FileMode(finfo.perm)) - fd.Sync() - fd.Close() - } - - bzzhash, err := api.Upload(uploadDir, "") - if err != nil { - t.Fatalf("Error uploading directory %v: %v", uploadDir, err) - } - - return bzzhash -} - -func mountDir(t *testing.T, api *api.Api, files map[string]fileInfo, bzzHash string, mountDir string) *SwarmFS { - os.RemoveAll(mountDir) - os.MkdirAll(mountDir, 0777) - swarmfs := NewSwarmFS(api) - _, err := swarmfs.Mount(bzzHash, mountDir) - if isFUSEUnsupportedError(err) { - t.Skip("FUSE not supported:", err) - } else if err != nil { - t.Fatalf("Error mounting hash %v: %v", bzzHash, err) - } - - found := false - mi := swarmfs.Listmounts() - for _, minfo := range mi { - if minfo.MountPoint == mountDir { - if minfo.StartManifest != bzzHash || - minfo.LatestManifest != bzzHash || - minfo.fuseConnection == nil { - t.Fatalf("Error mounting: exp(%s): act(%s)", bzzHash, minfo.StartManifest) - } - found = true - } - } - - // Test listMounts - if !found { - t.Fatalf("Error getting mounts 
information for %v: %v", mountDir, err) - } - - // Check if file and their attributes are as expected - compareGeneratedFileWithFileInMount(t, files, mountDir) - - return swarmfs -} - -func compareGeneratedFileWithFileInMount(t *testing.T, files map[string]fileInfo, mountDir string) { - err := filepath.Walk(mountDir, func(path string, f os.FileInfo, err error) error { - if f.IsDir() { - return nil - } - fname := path[len(mountDir)+1:] - if _, ok := files[fname]; !ok { - t.Fatalf(" file %v present in mount dir and is not expected", fname) - } - return nil - }) - if err != nil { - t.Fatalf("Error walking dir %v", mountDir) - } - - for fname, finfo := range files { - destinationFile := filepath.Join(mountDir, fname) - - dfinfo, err := os.Stat(destinationFile) - if err != nil { - t.Fatalf("Destination file %v missing in mount: %v", fname, err) - } - - if int64(len(finfo.contents)) != dfinfo.Size() { - t.Fatalf("file %v Size mismatch source (%v) vs destination(%v)", fname, int64(len(finfo.contents)), dfinfo.Size()) - } - - if dfinfo.Mode().Perm().String() != "-rwx------" { - t.Fatalf("file %v Permission mismatch source (-rwx------) vs destination(%v)", fname, dfinfo.Mode().Perm()) - } - - fileContents, err := os.ReadFile(filepath.Join(mountDir, fname)) - if err != nil { - t.Fatalf("Could not readfile %v : %v", fname, err) - } - if !bytes.Equal(fileContents, finfo.contents) { - t.Fatalf("File %v contents mismatch: %v , %v", fname, fileContents, finfo.contents) - - } - // TODO: check uid and gid - } -} - -func checkFile(t *testing.T, testMountDir, fname string, contents []byte) { - destinationFile := filepath.Join(testMountDir, fname) - dfinfo, err1 := os.Stat(destinationFile) - if err1 != nil { - t.Fatalf("Could not stat file %v", destinationFile) - } - if dfinfo.Size() != int64(len(contents)) { - t.Fatalf("Mismatch in size actual(%v) vs expected(%v)", dfinfo.Size(), int64(len(contents))) - } - - fd, err2 := os.OpenFile(destinationFile, os.O_RDONLY, os.FileMode(0665)) - 
if err2 != nil { - t.Fatalf("Could not open file %v", destinationFile) - } - newcontent := make([]byte, len(contents)) - fd.Read(newcontent) - fd.Close() - - if !bytes.Equal(contents, newcontent) { - t.Fatalf("File content mismatch expected (%v): received (%v) ", contents, newcontent) - } -} - -func getRandomBtes(size int) []byte { - contents := make([]byte, size) - rand.Read(contents) - return contents -} - -func isDirEmpty(name string) bool { - f, err := os.Open(name) - if err != nil { - return false - } - defer f.Close() - - _, err = f.Readdirnames(1) - - return err == io.EOF -} - -type testAPI struct { - api *api.Api -} - -func (ta *testAPI) mountListAndUnmount(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "fuse-source") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "fuse-dest") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["2.txt"] = fileInfo{0711, 333, 444, getRandomBtes(10)} - files["3.txt"] = fileInfo{0622, 333, 444, getRandomBtes(100)} - files["4.txt"] = fileInfo{0533, 333, 444, getRandomBtes(1024)} - files["5.txt"] = fileInfo{0544, 333, 444, getRandomBtes(10)} - files["6.txt"] = fileInfo{0555, 333, 444, getRandomBtes(10)} - files["7.txt"] = fileInfo{0666, 333, 444, getRandomBtes(10)} - files["8.txt"] = fileInfo{0777, 333, 333, getRandomBtes(10)} - files["11.txt"] = fileInfo{0777, 333, 444, getRandomBtes(10)} - files["111.txt"] = fileInfo{0777, 333, 444, getRandomBtes(10)} - files["two/2.txt"] = fileInfo{0777, 333, 444, getRandomBtes(10)} - files["two/2/2.txt"] = fileInfo{0777, 333, 444, getRandomBtes(10)} - files["two/2./2.txt"] = fileInfo{0777, 444, 444, getRandomBtes(10)} - files["twice/2.txt"] = fileInfo{0777, 444, 333, getRandomBtes(200)} - files["one/two/three/four/five/six/seven/eight/nine/10.txt"] = fileInfo{0777, 333, 444, getRandomBtes(10240)} - files["one/two/three/four/five/six/six"] = fileInfo{0777, 333, 444, getRandomBtes(10)} - bzzHash := 
createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs.Stop() - - // Check unmount - _, err := swarmfs.Unmount(testMountDir) - if err != nil { - t.Fatalf("could not unmount %v", bzzHash) - } - if !isDirEmpty(testMountDir) { - t.Fatalf("unmount didnt work for %v", testMountDir) - } - -} - -func (ta *testAPI) maxMounts(t *testing.T) { - files := make(map[string]fileInfo) - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir1, _ := os.MkdirTemp(os.TempDir(), "max-upload1") - bzzHash1 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir1) - mount1, _ := os.MkdirTemp(os.TempDir(), "max-mount1") - swarmfs1 := mountDir(t, ta.api, files, bzzHash1, mount1) - defer swarmfs1.Stop() - - files["2.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir2, _ := os.MkdirTemp(os.TempDir(), "max-upload2") - bzzHash2 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir2) - mount2, _ := os.MkdirTemp(os.TempDir(), "max-mount2") - swarmfs2 := mountDir(t, ta.api, files, bzzHash2, mount2) - defer swarmfs2.Stop() - - files["3.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir3, _ := os.MkdirTemp(os.TempDir(), "max-upload3") - bzzHash3 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir3) - mount3, _ := os.MkdirTemp(os.TempDir(), "max-mount3") - swarmfs3 := mountDir(t, ta.api, files, bzzHash3, mount3) - defer swarmfs3.Stop() - - files["4.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir4, _ := os.MkdirTemp(os.TempDir(), "max-upload4") - bzzHash4 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir4) - mount4, _ := os.MkdirTemp(os.TempDir(), "max-mount4") - swarmfs4 := mountDir(t, ta.api, files, bzzHash4, mount4) - defer swarmfs4.Stop() - - files["5.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir5, _ := os.MkdirTemp(os.TempDir(), "max-upload5") - bzzHash5 := createTestFilesAndUploadToSwarm(t, 
ta.api, files, uploadDir5) - mount5, _ := os.MkdirTemp(os.TempDir(), "max-mount5") - swarmfs5 := mountDir(t, ta.api, files, bzzHash5, mount5) - defer swarmfs5.Stop() - - files["6.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir6, _ := os.MkdirTemp(os.TempDir(), "max-upload6") - bzzHash6 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir6) - mount6, _ := os.MkdirTemp(os.TempDir(), "max-mount6") - - os.RemoveAll(mount6) - os.MkdirAll(mount6, 0777) - _, err := swarmfs.Mount(bzzHash6, mount6) - if err == nil { - t.Fatalf("Error: Going beyond max mounts %v", bzzHash6) - } - -} - -func (ta *testAPI) remount(t *testing.T) { - files := make(map[string]fileInfo) - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - uploadDir1, _ := os.MkdirTemp(os.TempDir(), "re-upload1") - bzzHash1 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir1) - testMountDir1, _ := os.MkdirTemp(os.TempDir(), "re-mount1") - swarmfs := mountDir(t, ta.api, files, bzzHash1, testMountDir1) - defer swarmfs.Stop() - - uploadDir2, _ := os.MkdirTemp(os.TempDir(), "re-upload2") - bzzHash2 := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir2) - testMountDir2, _ := os.MkdirTemp(os.TempDir(), "re-mount2") - - // try mounting the same hash second time - os.RemoveAll(testMountDir2) - os.MkdirAll(testMountDir2, 0777) - _, err := swarmfs.Mount(bzzHash1, testMountDir2) - if err != nil { - t.Fatalf("Error mounting hash %v", bzzHash1) - } - - // mount a different hash in already mounted point - _, err = swarmfs.Mount(bzzHash2, testMountDir1) - if err == nil { - t.Fatalf("Error mounting hash %v", bzzHash2) - } - - // mount nonexistent hash - _, err = swarmfs.Mount("0xfea11223344", testMountDir1) - if err == nil { - t.Fatalf("Error mounting hash %v", bzzHash2) - } -} - -func (ta *testAPI) unmount(t *testing.T) { - files := make(map[string]fileInfo) - uploadDir, _ := os.MkdirTemp(os.TempDir(), "ex-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), 
"ex-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, uploadDir) - - swarmfs := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs.Stop() - - swarmfs.Unmount(testMountDir) - - mi := swarmfs.Listmounts() - for _, minfo := range mi { - if minfo.MountPoint == testMountDir { - t.Fatalf("mount state not cleaned up in unmount case %v", testMountDir) - } - } -} - -func (ta *testAPI) unmountWhenResourceBusy(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "ex-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "ex-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs.Stop() - - actualPath := filepath.Join(testMountDir, "2.txt") - d, err := os.OpenFile(actualPath, os.O_RDWR, os.FileMode(0700)) - d.Write(getRandomBtes(10)) - - _, err = swarmfs.Unmount(testMountDir) - if err != nil { - t.Fatalf("could not unmount %v", bzzHash) - } - d.Close() - - mi := swarmfs.Listmounts() - for _, minfo := range mi { - if minfo.MountPoint == testMountDir { - t.Fatalf("mount state not cleaned up in unmount case %v", testMountDir) - } - } -} - -func (ta *testAPI) seekInMultiChunkFile(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "seek-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "seek-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10240)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs.Stop() - - // Create a new file seek the second chunk - actualPath := filepath.Join(testMountDir, "1.txt") - d, _ := os.OpenFile(actualPath, os.O_RDONLY, os.FileMode(0700)) - - 
d.Seek(5000, 0) - - contents := make([]byte, 1024) - d.Read(contents) - finfo := files["1.txt"] - - if !bytes.Equal(finfo.contents[:6024][5000:], contents) { - t.Fatalf("File seek contents mismatch") - } - d.Close() -} - -func (ta *testAPI) createNewFile(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "create-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "create-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["five.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - // Create a new file in the root dir and check - actualPath := filepath.Join(testMountDir, "2.txt") - d, err1 := os.OpenFile(actualPath, os.O_RDWR|os.O_CREATE, os.FileMode(0665)) - if err1 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err1) - } - contents := make([]byte, 11) - rand.Read(contents) - d.Write(contents) - d.Close() - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things are okay - files["2.txt"] = fileInfo{0700, 333, 444, contents} - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() - - checkFile(t, testMountDir, "2.txt", contents) -} - -func (ta *testAPI) createNewFileInsideDirectory(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "createinsidedir-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "createinsidedir-mount") - - files["one/1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer 
swarmfs1.Stop() - - // Create a new file inside a existing dir and check - dirToCreate := filepath.Join(testMountDir, "one") - actualPath := filepath.Join(dirToCreate, "2.txt") - d, err1 := os.OpenFile(actualPath, os.O_RDWR|os.O_CREATE, os.FileMode(0665)) - if err1 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err1) - } - contents := make([]byte, 11) - rand.Read(contents) - d.Write(contents) - d.Close() - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things are okay - files["one/2.txt"] = fileInfo{0700, 333, 444, contents} - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() - - checkFile(t, testMountDir, "one/2.txt", contents) -} - -func (ta *testAPI) createNewFileInsideNewDirectory(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "createinsidenewdir-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "createinsidenewdir-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - // Create a new file inside a existing dir and check - dirToCreate := filepath.Join(testMountDir, "one") - os.MkdirAll(dirToCreate, 0777) - actualPath := filepath.Join(dirToCreate, "2.txt") - d, err1 := os.OpenFile(actualPath, os.O_RDWR|os.O_CREATE, os.FileMode(0665)) - if err1 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err1) - } - contents := make([]byte, 11) - rand.Read(contents) - d.Write(contents) - d.Close() - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things are okay - files["one/2.txt"] = fileInfo{0700, 333, 444, contents} - swarmfs2 := mountDir(t, ta.api, files, 
mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() - - checkFile(t, testMountDir, "one/2.txt", contents) -} - -func (ta *testAPI) removeExistingFile(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "remove-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "remove-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["five.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - // Remove a file in the root dir and check - actualPath := filepath.Join(testMountDir, "five.txt") - os.Remove(actualPath) - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things are okay - delete(files, "five.txt") - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() -} - -func (ta *testAPI) removeExistingFileInsideDir(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "remove-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "remove-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["one/five.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["one/six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - // Remove a file in the root dir and check - actualPath := filepath.Join(testMountDir, "one/five.txt") - os.Remove(actualPath) - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things 
are okay - delete(files, "one/five.txt") - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() -} - -func (ta *testAPI) removeNewlyAddedFile(t *testing.T) { - - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "removenew-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "removenew-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["five.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - // Adda a new file and remove it - dirToCreate := filepath.Join(testMountDir, "one") - os.MkdirAll(dirToCreate, os.FileMode(0665)) - actualPath := filepath.Join(dirToCreate, "2.txt") - d, err1 := os.OpenFile(actualPath, os.O_RDWR|os.O_CREATE, os.FileMode(0665)) - if err1 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err1) - } - contents := make([]byte, 11) - rand.Read(contents) - d.Write(contents) - d.Close() - - checkFile(t, testMountDir, "one/2.txt", contents) - - os.Remove(actualPath) - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things are okay - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() - - if bzzHash != mi.LatestManifest { - t.Fatalf("same contents different hash orig(%v): new(%v)", bzzHash, mi.LatestManifest) - } -} - -func (ta *testAPI) addNewFileAndModifyContents(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "modifyfile-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "modifyfile-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["five.txt"] = fileInfo{0700, 333, 444, 
getRandomBtes(10)} - files["six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - // Create a new file in the root dir and check - actualPath := filepath.Join(testMountDir, "2.txt") - d, err1 := os.OpenFile(actualPath, os.O_RDWR|os.O_CREATE, os.FileMode(0665)) - if err1 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err1) - } - line1 := []byte("Line 1") - rand.Read(line1) - d.Write(line1) - d.Close() - - mi1, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v", err2) - } - - // mount again and see if things are okay - files["2.txt"] = fileInfo{0700, 333, 444, line1} - swarmfs2 := mountDir(t, ta.api, files, mi1.LatestManifest, testMountDir) - defer swarmfs2.Stop() - - checkFile(t, testMountDir, "2.txt", line1) - - mi2, err3 := swarmfs2.Unmount(testMountDir) - if err3 != nil { - t.Fatalf("Could not unmount %v", err3) - } - - // mount again and modify - swarmfs3 := mountDir(t, ta.api, files, mi2.LatestManifest, testMountDir) - defer swarmfs3.Stop() - - fd, err4 := os.OpenFile(actualPath, os.O_RDWR|os.O_APPEND, os.FileMode(0665)) - if err4 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err4) - } - line2 := []byte("Line 2") - rand.Read(line2) - fd.Seek(int64(len(line1)), 0) - fd.Write(line2) - fd.Close() - - mi3, err5 := swarmfs3.Unmount(testMountDir) - if err5 != nil { - t.Fatalf("Could not unmount %v", err5) - } - - // mount again and see if things are okay - b := [][]byte{line1, line2} - line1and2 := bytes.Join(b, []byte("")) - files["2.txt"] = fileInfo{0700, 333, 444, line1and2} - swarmfs4 := mountDir(t, ta.api, files, mi3.LatestManifest, testMountDir) - defer swarmfs4.Stop() - - checkFile(t, testMountDir, "2.txt", line1and2) -} - -func (ta *testAPI) removeEmptyDir(t *testing.T) { - files := make(map[string]fileInfo) - 
testUploadDir, _ := os.MkdirTemp(os.TempDir(), "rmdir-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "rmdir-mount") - - files["1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["five.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - os.MkdirAll(filepath.Join(testMountDir, "newdir"), 0777) - - mi, err3 := swarmfs1.Unmount(testMountDir) - if err3 != nil { - t.Fatalf("Could not unmount %v", err3) - } - if bzzHash != mi.LatestManifest { - t.Fatalf("same contents different hash orig(%v): new(%v)", bzzHash, mi.LatestManifest) - } -} - -func (ta *testAPI) removeDirWhichHasFiles(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "rmdir-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "rmdir-mount") - - files["one/1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/five.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/six.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - dirPath := filepath.Join(testMountDir, "two") - os.RemoveAll(dirPath) - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v ", err2) - } - - // mount again and see if things are okay - delete(files, "two/five.txt") - delete(files, "two/six.txt") - - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() -} - -func (ta *testAPI) removeDirWhichHasSubDirs(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "rmsubdir-upload") - testMountDir, _ := 
os.MkdirTemp(os.TempDir(), "rmsubdir-mount") - - files["one/1.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/three/2.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/three/3.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/four/5.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/four/6.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - files["two/four/six/7.txt"] = fileInfo{0700, 333, 444, getRandomBtes(10)} - - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - dirPath := filepath.Join(testMountDir, "two") - os.RemoveAll(dirPath) - - mi, err2 := swarmfs1.Unmount(testMountDir) - if err2 != nil { - t.Fatalf("Could not unmount %v ", err2) - } - - // mount again and see if things are okay - delete(files, "two/three/2.txt") - delete(files, "two/three/3.txt") - delete(files, "two/four/5.txt") - delete(files, "two/four/6.txt") - delete(files, "two/four/six/7.txt") - - swarmfs2 := mountDir(t, ta.api, files, mi.LatestManifest, testMountDir) - defer swarmfs2.Stop() -} - -func (ta *testAPI) appendFileContentsToEnd(t *testing.T) { - files := make(map[string]fileInfo) - testUploadDir, _ := os.MkdirTemp(os.TempDir(), "appendlargefile-upload") - testMountDir, _ := os.MkdirTemp(os.TempDir(), "appendlargefile-mount") - - line1 := make([]byte, 10) - rand.Read(line1) - files["1.txt"] = fileInfo{0700, 333, 444, line1} - bzzHash := createTestFilesAndUploadToSwarm(t, ta.api, files, testUploadDir) - - swarmfs1 := mountDir(t, ta.api, files, bzzHash, testMountDir) - defer swarmfs1.Stop() - - actualPath := filepath.Join(testMountDir, "1.txt") - fd, err4 := os.OpenFile(actualPath, os.O_RDWR|os.O_APPEND, os.FileMode(0665)) - if err4 != nil { - t.Fatalf("Could not create file %s : %v", actualPath, err4) - } - line2 := make([]byte, 5) - rand.Read(line2) - fd.Seek(int64(len(line1)), 0) - fd.Write(line2) - 
fd.Close() - - mi1, err5 := swarmfs1.Unmount(testMountDir) - if err5 != nil { - t.Fatalf("Could not unmount %v ", err5) - } - - // mount again and see if things are okay - b := [][]byte{line1, line2} - line1and2 := bytes.Join(b, []byte("")) - files["1.txt"] = fileInfo{0700, 333, 444, line1and2} - swarmfs2 := mountDir(t, ta.api, files, mi1.LatestManifest, testMountDir) - defer swarmfs2.Stop() - - checkFile(t, testMountDir, "1.txt", line1and2) -} - -func TestFUSE(t *testing.T) { - datadir, err := os.MkdirTemp("", "fuse") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } - os.RemoveAll(datadir) - - dpa, err := storage.NewLocalDPA(datadir) - if err != nil { - t.Fatal(err) - } - ta := &testAPI{api: api.NewApi(dpa, nil)} - dpa.Start() - defer dpa.Stop() - - t.Run("mountListAndUmount", ta.mountListAndUnmount) - t.Run("maxMounts", ta.maxMounts) - t.Run("remount", ta.remount) - t.Run("unmount", ta.unmount) - t.Run("unmountWhenResourceBusy", ta.unmountWhenResourceBusy) - t.Run("seekInMultiChunkFile", ta.seekInMultiChunkFile) - t.Run("createNewFile", ta.createNewFile) - t.Run("createNewFileInsideDirectory", ta.createNewFileInsideDirectory) - t.Run("createNewFileInsideNewDirectory", ta.createNewFileInsideNewDirectory) - t.Run("removeExistingFile", ta.removeExistingFile) - t.Run("removeExistingFileInsideDir", ta.removeExistingFileInsideDir) - t.Run("removeNewlyAddedFile", ta.removeNewlyAddedFile) - t.Run("addNewFileAndModifyContents", ta.addNewFileAndModifyContents) - t.Run("removeEmptyDir", ta.removeEmptyDir) - t.Run("removeDirWhichHasFiles", ta.removeDirWhichHasFiles) - t.Run("removeDirWhichHasSubDirs", ta.removeDirWhichHasSubDirs) - t.Run("appendFileContentsToEnd", ta.appendFileContentsToEnd) -} diff --git a/swarm/fuse/swarmfs_unix.go b/swarm/fuse/swarmfs_unix.go deleted file mode 100644 index 23b64276c6d8..000000000000 --- a/swarm/fuse/swarmfs_unix.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the 
go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build linux darwin freebsd - -package fuse - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "bazil.org/fuse" - "bazil.org/fuse/fs" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/api" -) - -var ( - errEmptyMountPoint = errors.New("need non-empty mount point") - errMaxMountCount = errors.New("max FUSE mount count reached") - errMountTimeout = errors.New("mount timeout") - errAlreadyMounted = errors.New("mount point is already serving") -) - -func isFUSEUnsupportedError(err error) bool { - if perr, ok := err.(*os.PathError); ok { - return perr.Op == "open" && perr.Path == "/dev/fuse" - } - return err == fuse.ErrOSXFUSENotFound -} - -// information about every active mount -type MountInfo struct { - MountPoint string - StartManifest string - LatestManifest string - rootDir *SwarmDir - fuseConnection *fuse.Conn - swarmApi *api.Api - lock *sync.RWMutex -} - -func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo { - newMountInfo := &MountInfo{ - MountPoint: mpoint, - StartManifest: mhash, - LatestManifest: mhash, - rootDir: nil, - fuseConnection: nil, - swarmApi: sapi, - lock: &sync.RWMutex{}, - } - return newMountInfo -} - -func (self *SwarmFS) 
Mount(mhash, mountpoint string) (*MountInfo, error) { - - if mountpoint == "" { - return nil, errEmptyMountPoint - } - cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint)) - if err != nil { - return nil, err - } - - self.swarmFsLock.Lock() - defer self.swarmFsLock.Unlock() - - noOfActiveMounts := len(self.activeMounts) - if noOfActiveMounts >= maxFuseMounts { - return nil, errMaxMountCount - } - - if _, ok := self.activeMounts[cleanedMountPoint]; ok { - return nil, errAlreadyMounted - } - - log.Info(fmt.Sprintf("Attempting to mount %s ", cleanedMountPoint)) - _, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true) - if err != nil { - return nil, err - } - - mi := NewMountInfo(mhash, cleanedMountPoint, self.swarmApi) - - dirTree := map[string]*SwarmDir{} - rootDir := NewSwarmDir("/", mi) - dirTree["/"] = rootDir - mi.rootDir = rootDir - - for suffix, entry := range manifestEntryMap { - key := common.Hex2Bytes(entry.Hash) - fullpath := "/" + suffix - basepath := filepath.Dir(fullpath) - - parentDir := rootDir - dirUntilNow := "" - paths := strings.Split(basepath, "/") - for i := range paths { - if paths[i] != "" { - thisDir := paths[i] - dirUntilNow = dirUntilNow + "/" + thisDir - - if _, ok := dirTree[dirUntilNow]; !ok { - dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi) - parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow]) - parentDir = dirTree[dirUntilNow] - - } else { - parentDir = dirTree[dirUntilNow] - } - - } - } - thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi) - thisFile.key = key - - parentDir.files = append(parentDir.files, thisFile) - } - - fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash)) - if isFUSEUnsupportedError(err) { - log.Warn("Fuse not installed", "mountpoint", cleanedMountPoint, "err", err) - return nil, err - } else if err != nil { - fuse.Unmount(cleanedMountPoint) - log.Warn("Error mounting swarm manifest", "mountpoint", 
cleanedMountPoint, "err", err) - return nil, err - } - mi.fuseConnection = fconn - - serverr := make(chan error, 1) - go func() { - log.Info(fmt.Sprintf("Serving %s at %s", mhash, cleanedMountPoint)) - filesys := &SwarmRoot{root: rootDir} - if err := fs.Serve(fconn, filesys); err != nil { - log.Warn(fmt.Sprintf("Could not Serve SwarmFileSystem error: %v", err)) - serverr <- err - } - - }() - - // Check if the mount process has an error to report. - select { - case <-time.After(mountTimeout): - fuse.Unmount(cleanedMountPoint) - return nil, errMountTimeout - - case err := <-serverr: - fuse.Unmount(cleanedMountPoint) - log.Warn("Error serving swarm FUSE FS", "mountpoint", cleanedMountPoint, "err", err) - return nil, err - - case <-fconn.Ready: - log.Info("Now serving swarm FUSE FS", "manifest", mhash, "mountpoint", cleanedMountPoint) - } - - self.activeMounts[cleanedMountPoint] = mi - return mi, nil -} - -func (self *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) { - - self.swarmFsLock.Lock() - defer self.swarmFsLock.Unlock() - - cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint)) - if err != nil { - return nil, err - } - - mountInfo := self.activeMounts[cleanedMountPoint] - - if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint { - return nil, fmt.Errorf("%s is not mounted", cleanedMountPoint) - } - err = fuse.Unmount(cleanedMountPoint) - if err != nil { - err1 := externalUnmount(cleanedMountPoint) - if err1 != nil { - errStr := fmt.Sprintf("UnMount error: %v", err) - log.Warn(errStr) - return nil, err1 - } - } - - mountInfo.fuseConnection.Close() - delete(self.activeMounts, cleanedMountPoint) - - succString := fmt.Sprintf("UnMounting %v succeeded", cleanedMountPoint) - log.Info(succString) - - return mountInfo, nil -} - -func (self *SwarmFS) Listmounts() []*MountInfo { - self.swarmFsLock.RLock() - defer self.swarmFsLock.RUnlock() - - rows := make([]*MountInfo, 0, len(self.activeMounts)) - for _, mi := range self.activeMounts { - 
rows = append(rows, mi) - } - return rows -} - -func (self *SwarmFS) Stop() bool { - for mp := range self.activeMounts { - mountInfo := self.activeMounts[mp] - self.Unmount(mountInfo.MountPoint) - } - return true -} diff --git a/swarm/fuse/swarmfs_util.go b/swarm/fuse/swarmfs_util.go deleted file mode 100644 index 9817791ffba5..000000000000 --- a/swarm/fuse/swarmfs_util.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build linux darwin freebsd - -package fuse - -import ( - "context" - "fmt" - "os/exec" - "runtime" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -func externalUnmount(mountPoint string) error { - ctx, cancel := context.WithTimeout(context.Background(), unmountTimeout) - defer cancel() - - // Try generic umount. - if err := exec.CommandContext(ctx, "umount", mountPoint).Run(); err == nil { - return nil - } - // Try FUSE-specific commands if umount didn't work. 
- switch runtime.GOOS { - case "darwin": - return exec.CommandContext(ctx, "diskutil", "umount", "force", mountPoint).Run() - case "linux": - return exec.CommandContext(ctx, "fusermount", "-u", mountPoint).Run() - default: - return fmt.Errorf("unmount: unimplemented") - } -} - -func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) - if err != nil { - return err - } - - sf.lock.Lock() - defer sf.lock.Unlock() - sf.key = fkey - sf.fileSize = int64(size) - - sf.mountInfo.lock.Lock() - defer sf.mountInfo.lock.Unlock() - sf.mountInfo.LatestManifest = mhash - - log.Info("Added new file:", "fname", sf.name, "New Manifest hash", mhash) - return nil -} - -func removeFileFromSwarm(sf *SwarmFile) error { - mkey, err := sf.mountInfo.swarmApi.RemoveFile(sf.mountInfo.LatestManifest, sf.path, sf.name, true) - if err != nil { - return err - } - - sf.mountInfo.lock.Lock() - defer sf.mountInfo.lock.Unlock() - sf.mountInfo.LatestManifest = mkey - - log.Info("Removed file:", "fname", sf.name, "New Manifest hash", mkey) - return nil -} - -func removeDirectoryFromSwarm(sd *SwarmDir) error { - if len(sd.directories) == 0 && len(sd.files) == 0 { - return nil - } - - for _, d := range sd.directories { - err := removeDirectoryFromSwarm(d) - if err != nil { - return err - } - } - - for _, f := range sd.files { - err := removeFileFromSwarm(f) - if err != nil { - return err - } - } - - return nil -} - -func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.key, offset, length, true) - if err != nil { - return err - } - - sf.lock.Lock() - defer sf.lock.Unlock() - sf.key = fkey - sf.fileSize = sf.fileSize + int64(len(content)) - - sf.mountInfo.lock.Lock() - defer sf.mountInfo.lock.Unlock() - 
sf.mountInfo.LatestManifest = mhash - - log.Info("Appended file:", "fname", sf.name, "New Manifest hash", mhash) - return nil -} diff --git a/swarm/metrics/flags.go b/swarm/metrics/flags.go deleted file mode 100644 index ebe5f6db287b..000000000000 --- a/swarm/metrics/flags.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package metrics - -import ( - "time" - - "github.com/XinFinOrg/XDPoSChain/cmd/utils" - "github.com/XinFinOrg/XDPoSChain/log" - gethmetrics "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/metrics/influxdb" - "gopkg.in/urfave/cli.v1" -) - -var ( - metricsEnableInfluxDBExportFlag = cli.BoolFlag{ - Name: "metrics.influxdb.export", - Usage: "Enable metrics export/push to an external InfluxDB database", - } - metricsInfluxDBEndpointFlag = cli.StringFlag{ - Name: "metrics.influxdb.endpoint", - Usage: "Metrics InfluxDB endpoint", - Value: "http://127.0.0.1:8086", - } - metricsInfluxDBDatabaseFlag = cli.StringFlag{ - Name: "metrics.influxdb.database", - Usage: "Metrics InfluxDB database", - Value: "metrics", - } - metricsInfluxDBUsernameFlag = cli.StringFlag{ - Name: "metrics.influxdb.username", - Usage: "Metrics InfluxDB username", - Value: "", - } - metricsInfluxDBPasswordFlag = cli.StringFlag{ - Name: "metrics.influxdb.password", - Usage: "Metrics InfluxDB password", - Value: "", - } - // The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB. - // It is used so that we can group all nodes and average a measurement across all of them, but also so - // that we can select a specific node and inspect its measurements. - // https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key - metricsInfluxDBHostTagFlag = cli.StringFlag{ - Name: "metrics.influxdb.host.tag", - Usage: "Metrics InfluxDB `host` tag attached to all measurements", - Value: "localhost", - } -) - -// Flags holds all command-line flags required for metrics collection. 
-var Flags = []cli.Flag{ - utils.MetricsEnabledFlag, - metricsEnableInfluxDBExportFlag, - metricsInfluxDBEndpointFlag, metricsInfluxDBDatabaseFlag, metricsInfluxDBUsernameFlag, metricsInfluxDBPasswordFlag, metricsInfluxDBHostTagFlag, -} - -func Setup(ctx *cli.Context) { - if gethmetrics.Enabled { - log.Info("Enabling swarm metrics collection") - var ( - enableExport = ctx.GlobalBool(metricsEnableInfluxDBExportFlag.Name) - endpoint = ctx.GlobalString(metricsInfluxDBEndpointFlag.Name) - database = ctx.GlobalString(metricsInfluxDBDatabaseFlag.Name) - username = ctx.GlobalString(metricsInfluxDBUsernameFlag.Name) - password = ctx.GlobalString(metricsInfluxDBPasswordFlag.Name) - hosttag = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name) - ) - - if enableExport { - log.Info("Enabling swarm metrics export to InfluxDB") - go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", map[string]string{ - "host": hosttag, - }) - } - } -} diff --git a/swarm/network/depo.go b/swarm/network/depo.go deleted file mode 100644 index 2c20cb493255..000000000000 --- a/swarm/network/depo.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package network - -import ( - "bytes" - "encoding/binary" - "fmt" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -//metrics variables -var ( - syncReceiveCount = metrics.NewRegisteredCounter("network.sync.recv.count", nil) - syncReceiveIgnore = metrics.NewRegisteredCounter("network.sync.recv.ignore", nil) - syncSendCount = metrics.NewRegisteredCounter("network.sync.send.count", nil) - syncSendRefused = metrics.NewRegisteredCounter("network.sync.send.refused", nil) - syncSendNotFound = metrics.NewRegisteredCounter("network.sync.send.notfound", nil) -) - -// Handler for storage/retrieval related protocol requests -// implements the StorageHandler interface used by the bzz protocol -type Depo struct { - hashfunc storage.SwarmHasher - localStore storage.ChunkStore - netStore storage.ChunkStore -} - -func NewDepo(hash storage.SwarmHasher, localStore, remoteStore storage.ChunkStore) *Depo { - return &Depo{ - hashfunc: hash, - localStore: localStore, - netStore: remoteStore, // entrypoint internal - } -} - -// Handles UnsyncedKeysMsg after msg decoding - unsynced hashes upto sync state -// * the remote sync state is just stored and handled in protocol -// * filters through the new syncRequests and send the ones missing -// * back immediately as a deliveryRequest message -// * empty message just pings back for more (is this needed?) -// * strict signed sync states may be needed. -func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error { - unsynced := req.Unsynced - var missing []*syncRequest - var chunk *storage.Chunk - var err error - for _, req := range unsynced { - // skip keys that are found, - chunk, err = self.localStore.Get(req.Key[:]) - if err != nil || chunk.SData == nil { - missing = append(missing, req) - } - } - log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. 
new state: %v", len(unsynced), len(missing), req.State)) - log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced)) - // send delivery request with missing keys - err = p.deliveryRequest(missing) - if err != nil { - return err - } - // set peers state to persist - p.syncState = req.State - return nil -} - -// Handles deliveryRequestMsg -// * serves actual chunks asked by the remote peer -// by pushing to the delivery queue (sync db) of the correct priority -// (remote peer is free to reprioritize) -// * the message implies remote peer wants more, so trigger for -// * new outgoing unsynced keys message is fired -func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error { - deliver := req.Deliver - // queue the actual delivery of a chunk () - log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver)) - for _, sreq := range deliver { - // TODO: look up in cache here or in deliveries - // priorities are taken from the message so the remote party can - // reprioritise to at their leisure - // r = self.pullCached(sreq.Key) // pulls and deletes from cache - Push(p, sreq.Key, sreq.Priority) - } - - // sends it out as unsyncedKeysMsg - p.syncer.sendUnsyncedKeys() - return nil -} - -// the entrypoint for store requests coming from the bzz wire protocol -// if key found locally, return. otherwise -// remote is untrusted, so hash is verified and chunk passed on to NetStore -func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) { - var islocal bool - req.from = p - chunk, err := self.localStore.Get(req.Key) - switch { - case err != nil: - log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. 
create new chunk/request", req.Key)) - // not found in memory cache, ie., a genuine store request - // create chunk - syncReceiveCount.Inc(1) - chunk = storage.NewChunk(req.Key, nil) - - case chunk.SData == nil: - // found chunk in memory store, needs the data, validate now - log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req)) - - default: - // data is found, store request ignored - // this should update access count? - syncReceiveIgnore.Inc(1) - log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req)) - islocal = true - //return - } - - hasher := self.hashfunc() - hasher.Write(req.SData) - if !bytes.Equal(hasher.Sum(nil), req.Key) { - // data does not validate, ignore - // TODO: peer should be penalised/dropped? - log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req)) - return - } - - if islocal { - return - } - // update chunk with size and data - chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size + at least one byte of data) - chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8])) - log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p)) - chunk.Source = p - self.netStore.Put(chunk) -} - -// entrypoint for retrieve requests coming from the bzz wire protocol -// checks swap balance - return if peer has no credit -func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) { - req.from = p - // swap - record credit for 1 request - // note that only charge actual reqsearches - var err error - if p.swap != nil { - err = p.swap.Add(1) - } - if err != nil { - log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err)) - return - } - - // call storage.NetStore#Get which - // blocks until local retrieval finished - // launches cloud retrieval - chunk, _ := self.netStore.Get(req.Key) - req = self.strategyUpdateRequest(chunk.Req, req) - // check if we can 
immediately deliver - if chunk.SData != nil { - log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log())) - - if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size { - sreq := &storeRequestMsgData{ - Id: req.Id, - Key: chunk.Key, - SData: chunk.SData, - requestTimeout: req.timeout, // - } - syncSendCount.Inc(1) - p.syncer.addRequest(sreq, DeliverReq) - } else { - syncSendRefused.Inc(1) - log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log())) - } - } else { - syncSendNotFound.Inc(1) - log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log())) - } -} - -// add peer request the chunk and decides the timeout for the response if still searching -func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) { - log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log())) - // we do not create an alternative one - req = origReq - if rs != nil { - self.addRequester(rs, req) - req.setTimeout(self.searchTimeout(rs, req)) - } - return -} - -// decides the timeout promise sent with the immediate peers response to a retrieve request -// if timeout is explicitly set and expired -func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) { - reqt := req.getTimeout() - t := time.Now().Add(searchTimeout) - if reqt != nil && reqt.Before(t) { - return reqt - } else { - return &t - } -} - -/* -adds a new peer to an existing open request -only add if less than requesterCount peers forwarded the same request id so far -note this is done irrespective of status (searching or found) -*/ -func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) { - log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id)) - list := 
rs.Requesters[req.Id] - rs.Requesters[req.Id] = append(list, req) -} diff --git a/swarm/network/forwarding.go b/swarm/network/forwarding.go deleted file mode 100644 index 447f441126b5..000000000000 --- a/swarm/network/forwarding.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package network - -import ( - "fmt" - "math/rand" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -const requesterCount = 3 - -/* -forwarder implements the CloudStore interface (use by storage.NetStore) -and serves as the cloud store backend orchestrating storage/retrieval/delivery -via the native bzz protocol -which uses an MSB logarithmic distance-based semi-permanent Kademlia table for -* recursive forwarding style routing for retrieval -* smart syncronisation -*/ - -type forwarder struct { - hive *Hive -} - -func NewForwarder(hive *Hive) *forwarder { - return &forwarder{hive: hive} -} - -// generate a unique id uint64 -func generateId() uint64 { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return uint64(r.Int63()) -} - -var searchTimeout = 3 * time.Second - -// forwarding logic -// logic propagating retrieve requests to peers given by the kademlia hive -func (self *forwarder) Retrieve(chunk *storage.Chunk) { - peers := self.hive.getPeers(chunk.Key, 0) - log.Trace(fmt.Sprintf("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers))) -OUT: - for _, p := range peers { - log.Trace(fmt.Sprintf("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p)) - for _, recipients := range chunk.Req.Requesters { - for _, recipient := range recipients { - req := recipient.(*retrieveRequestMsgData) - if req.from.Addr() == p.Addr() { - continue OUT - } - } - } - req := &retrieveRequestMsgData{ - Key: chunk.Key, - Id: generateId(), - } - var err error - if p.swap != nil { - err = p.swap.Add(-1) - } - if err == nil { - p.retrieve(req) - break OUT - } - log.Warn(fmt.Sprintf("forwarder.Retrieve: unable to send retrieveRequest to peer [%v]: %v", chunk.Key.Log(), err)) - } -} - -// requests to specific peers given by the kademlia hive -// except for peers that the store request came from (if any) -// delivery queueing taken care of by syncer -func (self 
*forwarder) Store(chunk *storage.Chunk) { - var n int - msg := &storeRequestMsgData{ - Key: chunk.Key, - SData: chunk.SData, - } - var source *peer - if chunk.Source != nil { - source = chunk.Source.(*peer) - } - for _, p := range self.hive.getPeers(chunk.Key, 0) { - log.Trace(fmt.Sprintf("forwarder.Store: %v %v", p, chunk)) - - if p.syncer != nil && (source == nil || p.Addr() != source.Addr()) { - n++ - Deliver(p, msg, PropagateReq) - } - } - log.Trace(fmt.Sprintf("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk)) -} - -// once a chunk is found deliver it to its requesters unless timed out -func (self *forwarder) Deliver(chunk *storage.Chunk) { - // iterate over request entries - for id, requesters := range chunk.Req.Requesters { - counter := requesterCount - msg := &storeRequestMsgData{ - Key: chunk.Key, - SData: chunk.SData, - } - var n int - var req *retrieveRequestMsgData - // iterate over requesters with the same id - for id, r := range requesters { - req = r.(*retrieveRequestMsgData) - if req.timeout == nil || req.timeout.After(time.Now()) { - log.Trace(fmt.Sprintf("forwarder.Deliver: %v -> %v", req.Id, req.from)) - msg.Id = uint64(id) - Deliver(req.from, msg, DeliverReq) - n++ - counter-- - if counter <= 0 { - break - } - } - } - log.Trace(fmt.Sprintf("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n)) - } -} - -// initiate delivery of a chunk to a particular peer via syncer#addRequest -// depending on syncer mode and priority settings and sync request type -// this either goes via confirmation roundtrip or queued or pushed directly -func Deliver(p *peer, req interface{}, ty int) { - p.syncer.addRequest(req, ty) -} - -// push chunk over to peer -func Push(p *peer, key storage.Key, priority uint) { - p.syncer.doDelivery(key, priority, p.syncer.quit) -} diff --git a/swarm/network/hive.go b/swarm/network/hive.go deleted file mode 100644 index 49b8b7fd71c6..000000000000 --- a/swarm/network/hive.go 
+++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package network - -import ( - "fmt" - "math/rand" - "path/filepath" - "time" - - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/p2p/discover" - "github.com/XinFinOrg/XDPoSChain/p2p/netutil" - "github.com/XinFinOrg/XDPoSChain/swarm/network/kademlia" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -// Hive is the logistic manager of the swarm -// it uses a generic kademlia nodetable to find best peer list -// for any target -// this is used by the netstore to search for content in the swarm -// the bzz protocol peersMsgData exchange is relayed to Kademlia -// for db storage and filtering -// connections and disconnections are reported and relayed -// to keep the nodetable uptodate - -var ( - peersNumGauge = metrics.NewRegisteredGauge("network.peers.num", nil) - addPeerCounter = metrics.NewRegisteredCounter("network.addpeer.count", nil) - removePeerCounter = metrics.NewRegisteredCounter("network.removepeer.count", nil) -) - -type Hive struct { - listenAddr func() string - callInterval uint64 - id discover.NodeID - addr kademlia.Address - kad 
*kademlia.Kademlia - path string - quit chan bool - toggle chan bool - more chan bool - - // for testing only - swapEnabled bool - syncEnabled bool - blockRead bool - blockWrite bool -} - -const ( - callInterval = 3000000000 - // bucketSize = 3 - // maxProx = 8 - // proxBinSize = 4 -) - -type HiveParams struct { - CallInterval uint64 - KadDbPath string - *kademlia.KadParams -} - -//create default params -func NewDefaultHiveParams() *HiveParams { - kad := kademlia.NewDefaultKadParams() - // kad.BucketSize = bucketSize - // kad.MaxProx = maxProx - // kad.ProxBinSize = proxBinSize - - return &HiveParams{ - CallInterval: callInterval, - KadParams: kad, - } -} - -//this can only finally be set after all config options (file, cmd line, env vars) -//have been evaluated -func (self *HiveParams) Init(path string) { - self.KadDbPath = filepath.Join(path, "bzz-peers.json") -} - -func NewHive(addr common.Hash, params *HiveParams, swapEnabled, syncEnabled bool) *Hive { - kad := kademlia.New(kademlia.Address(addr), params.KadParams) - return &Hive{ - callInterval: params.CallInterval, - kad: kad, - addr: kad.Addr(), - path: params.KadDbPath, - swapEnabled: swapEnabled, - syncEnabled: syncEnabled, - } -} - -func (self *Hive) SyncEnabled(on bool) { - self.syncEnabled = on -} - -func (self *Hive) SwapEnabled(on bool) { - self.swapEnabled = on -} - -func (self *Hive) BlockNetworkRead(on bool) { - self.blockRead = on -} - -func (self *Hive) BlockNetworkWrite(on bool) { - self.blockWrite = on -} - -// public accessor to the hive base address -func (self *Hive) Addr() kademlia.Address { - return self.addr -} - -// Start receives network info only at startup -// listedAddr is a function to retrieve listening address to advertise to peers -// connectPeer is a function to connect to a peer based on its NodeID or enode URL -// there are called on the p2p.Server which runs on the node -func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPeer func(string) error) (err 
error) { - self.toggle = make(chan bool) - self.more = make(chan bool) - self.quit = make(chan bool) - self.id = id - self.listenAddr = listenAddr - err = self.kad.Load(self.path, nil) - if err != nil { - log.Warn(fmt.Sprintf("Warning: error reading kaddb '%s' (skipping): %v", self.path, err)) - err = nil - } - // this loop is doing bootstrapping and maintains a healthy table - go self.keepAlive() - go func() { - // whenever toggled ask kademlia about most preferred peer - for alive := range self.more { - if !alive { - // receiving false closes the loop while allowing parallel routines - // to attempt to write to more (remove Peer when shutting down) - return - } - node, need, proxLimit := self.kad.Suggest() - - if node != nil && len(node.Url) > 0 { - log.Trace(fmt.Sprintf("call known bee %v", node.Url)) - // enode or any lower level connection address is unnecessary in future - // discovery table is used to look it up. - connectPeer(node.Url) - } - if need { - // a random peer is taken from the table - peers := self.kad.FindClosest(kademlia.RandomAddressAt(self.addr, rand.Intn(self.kad.MaxProx)), 1) - if len(peers) > 0 { - // a random address at prox bin 0 is sent for lookup - randAddr := kademlia.RandomAddressAt(self.addr, proxLimit) - req := &retrieveRequestMsgData{ - Key: storage.Key(randAddr[:]), - } - log.Trace(fmt.Sprintf("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0])) - peers[0].(*peer).retrieve(req) - } else { - log.Warn(fmt.Sprintf("no peer")) - } - log.Trace(fmt.Sprintf("buzz kept alive")) - } else { - log.Info(fmt.Sprintf("no need for more bees")) - } - select { - case self.toggle <- need: - case <-self.quit: - return - } - log.Debug(fmt.Sprintf("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount())) - } - }() - return -} - -// keepAlive is a forever loop -// in its awake state it periodically triggers connection attempts -// by writing to self.more until Kademlia Table is 
saturated -// wake state is toggled by writing to self.toggle -// it restarts if the table becomes non-full again due to disconnections -func (self *Hive) keepAlive() { - alarm := time.NewTicker(time.Duration(self.callInterval)).C - for { - peersNumGauge.Update(int64(self.kad.Count())) - select { - case <-alarm: - if self.kad.DBCount() > 0 { - select { - case self.more <- true: - log.Debug(fmt.Sprintf("buzz wakeup")) - default: - } - } - case need := <-self.toggle: - if alarm == nil && need { - alarm = time.NewTicker(time.Duration(self.callInterval)).C - } - if alarm != nil && !need { - alarm = nil - - } - case <-self.quit: - return - } - } -} - -func (self *Hive) Stop() error { - // closing toggle channel quits the updateloop - close(self.quit) - return self.kad.Save(self.path, saveSync) -} - -// called at the end of a successful protocol handshake -func (self *Hive) addPeer(p *peer) error { - addPeerCounter.Inc(1) - defer func() { - select { - case self.more <- true: - default: - } - }() - log.Trace(fmt.Sprintf("hi new bee %v", p)) - err := self.kad.On(p, loadSync) - if err != nil { - return err - } - // self lookup (can be encoded as nil/zero key since peers addr known) + no id () - // the most common way of saying hi in bzz is initiation of gossip - // let me know about anyone new from my hood , here is the storageradius - // to send the 6 byte self lookup - // we do not record as request or forward it, just reply with peers - p.retrieve(&retrieveRequestMsgData{}) - log.Trace(fmt.Sprintf("'whatsup wheresdaparty' sent to %v", p)) - - return nil -} - -// called after peer disconnected -func (self *Hive) removePeer(p *peer) { - removePeerCounter.Inc(1) - log.Debug(fmt.Sprintf("bee %v removed", p)) - self.kad.Off(p, saveSync) - select { - case self.more <- true: - default: - } - if self.kad.Count() == 0 { - log.Debug(fmt.Sprintf("empty, all bees gone")) - } -} - -// Retrieve a list of live peers that are closer to target than us -func (self *Hive) getPeers(target 
storage.Key, max int) (peers []*peer) { - var addr kademlia.Address - copy(addr[:], target[:]) - for _, node := range self.kad.FindClosest(addr, max) { - peers = append(peers, node.(*peer)) - } - return -} - -// disconnects all the peers -func (self *Hive) DropAll() { - log.Info(fmt.Sprintf("dropping all bees")) - for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) { - node.Drop() - } -} - -// contructor for kademlia.NodeRecord based on peer address alone -// TODO: should go away and only addr passed to kademlia -func newNodeRecord(addr *peerAddr) *kademlia.NodeRecord { - now := time.Now() - return &kademlia.NodeRecord{ - Addr: addr.Addr, - Url: addr.String(), - Seen: now, - After: now, - } -} - -// called by the protocol when receiving peerset (for target address) -// peersMsgData is converted to a slice of NodeRecords for Kademlia -// this is to store all thats needed -func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) { - var nrs []*kademlia.NodeRecord - for _, p := range req.Peers { - if err := netutil.CheckRelayIP(from.remoteAddr.IP, p.IP); err != nil { - log.Trace(fmt.Sprintf("invalid peer IP %v from %v: %v", from.remoteAddr.IP, p.IP, err)) - continue - } - nrs = append(nrs, newNodeRecord(p)) - } - self.kad.Add(nrs) -} - -// peer wraps the protocol instance to represent a connected peer -// it implements kademlia.Node interface -type peer struct { - *bzz // protocol instance running on peer connection -} - -// protocol instance implements kademlia.Node interface (embedded peer) -func (self *peer) Addr() kademlia.Address { - return self.remoteAddr.Addr -} - -func (self *peer) Url() string { - return self.remoteAddr.String() -} - -// TODO take into account traffic -func (self *peer) LastActive() time.Time { - return self.lastActive -} - -// reads the serialised form of sync state persisted as the 'Meta' attribute -// and sets the decoded syncState on the online node -func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error 
{ - p, ok := node.(*peer) - if !ok { - return fmt.Errorf("invalid type") - } - if record.Meta == nil { - log.Debug(fmt.Sprintf("no sync state for node record %v setting default", record)) - p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}} - return nil - } - state, err := decodeSync(record.Meta) - if err != nil { - return fmt.Errorf("error decoding kddb record meta info into a sync state: %v", err) - } - log.Trace(fmt.Sprintf("sync state for node record %v read from Meta: %s", record, string(*(record.Meta)))) - p.syncState = state - return err -} - -// callback when saving a sync state -func saveSync(record *kademlia.NodeRecord, node kademlia.Node) { - if p, ok := node.(*peer); ok { - meta, err := encodeSync(p.syncState) - if err != nil { - log.Warn(fmt.Sprintf("error saving sync state for %v: %v", node, err)) - return - } - log.Trace(fmt.Sprintf("saved sync state for %v: %s", node, string(*meta))) - record.Meta = meta - } -} - -// the immediate response to a retrieve request, -// sends relevant peer data given by the kademlia hive to the requester -// TODO: remember peers sent for duration of the session, only new peers sent -func (self *Hive) peers(req *retrieveRequestMsgData) { - if req != nil { - var addrs []*peerAddr - if req.timeout == nil || time.Now().Before(*(req.timeout)) { - key := req.Key - // self lookup from remote peer - if storage.IsZeroKey(key) { - addr := req.from.Addr() - key = storage.Key(addr[:]) - req.Key = nil - } - // get peer addresses from hive - for _, peer := range self.getPeers(key, int(req.MaxPeers)) { - addrs = append(addrs, peer.remoteAddr) - } - log.Debug(fmt.Sprintf("Hive sending %d peer addresses to %v. 
req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log())) - - peersData := &peersMsgData{ - Peers: addrs, - Key: req.Key, - Id: req.Id, - } - peersData.setTimeout(req.timeout) - req.from.peers(peersData) - } - } -} - -func (self *Hive) String() string { - return self.kad.String() -} diff --git a/swarm/network/kademlia/address.go b/swarm/network/kademlia/address.go deleted file mode 100644 index 1814d252acec..000000000000 --- a/swarm/network/kademlia/address.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package kademlia - -import ( - "fmt" - "math/rand" - "strings" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -type Address common.Hash - -func (a Address) String() string { - return fmt.Sprintf("%x", a[:]) -} - -func (a *Address) MarshalJSON() (out []byte, err error) { - return []byte(`"` + a.String() + `"`), nil -} - -func (a *Address) UnmarshalJSON(value []byte) error { - *a = Address(common.HexToHash(string(value[1 : len(value)-1]))) - return nil -} - -// the string form of the binary representation of an address (only first 8 bits) -func (a Address) Bin() string { - var bs []string - for _, b := range a[:] { - bs = append(bs, fmt.Sprintf("%08b", b)) - } - return strings.Join(bs, "") -} - -/* -Proximity(x, y) returns the proximity order of the MSB distance between x and y - -The distance metric MSB(x, y) of two equal length byte sequences x an y is the -value of the binary integer cast of the x^y, ie., x and y bitwise xor-ed. -the binary cast is big endian: most significant bit first (=MSB). - -Proximity(x, y) is a discrete logarithmic scaling of the MSB distance. -It is defined as the reverse rank of the integer part of the base 2 -logarithm of the distance. -It is calculated by counting the number of common leading zeros in the (MSB) -binary representation of the x^y. - -(0 farthest, 255 closest, 256 self) -*/ -func proximity(one, other Address) (ret int) { - for i := 0; i < len(one); i++ { - oxo := one[i] ^ other[i] - for j := 0; j < 8; j++ { - if (oxo>>uint8(7-j))&0x01 != 0 { - return i*8 + j - } - } - } - return len(one) * 8 -} - -// Address.ProxCmp compares the distances a->target and b->target. -// Returns -1 if a is closer to target, 1 if b is closer to target -// and 0 if they are equal. 
-func (target Address) ProxCmp(a, b Address) int { - for i := range target { - da := a[i] ^ target[i] - db := b[i] ^ target[i] - if da > db { - return 1 - } else if da < db { - return -1 - } - } - return 0 -} - -// randomAddressAt(address, prox) generates a random address -// at proximity order prox relative to address -// if prox is negative a random address is generated -func RandomAddressAt(self Address, prox int) (addr Address) { - addr = self - var pos int - if prox >= 0 { - pos = prox / 8 - trans := prox % 8 - transbytea := byte(0) - for j := 0; j <= trans; j++ { - transbytea |= 1 << uint8(7-j) - } - flipbyte := byte(1 << uint8(7-trans)) - transbyteb := transbytea ^ byte(255) - randbyte := byte(rand.Intn(255)) - addr[pos] = ((addr[pos] & transbytea) ^ flipbyte) | randbyte&transbyteb - } - for i := pos + 1; i < len(addr); i++ { - addr[i] = byte(rand.Intn(255)) - } - - return -} - -// KeyRange(a0, a1, proxLimit) returns the address inclusive address -// range that contain addresses closer to one than other -func KeyRange(one, other Address, proxLimit int) (start, stop Address) { - prox := proximity(one, other) - if prox >= proxLimit { - prox = proxLimit - } - start = CommonBitsAddrByte(one, other, byte(0x00), prox) - stop = CommonBitsAddrByte(one, other, byte(0xff), prox) - return -} - -func CommonBitsAddrF(self, other Address, f func() byte, p int) (addr Address) { - prox := proximity(self, other) - var pos int - if p <= prox { - prox = p - } - pos = prox / 8 - addr = self - trans := byte(prox % 8) - var transbytea byte - if p > prox { - transbytea = byte(0x7f) - } else { - transbytea = byte(0xff) - } - transbytea >>= trans - transbyteb := transbytea ^ byte(0xff) - addrpos := addr[pos] - addrpos &= transbyteb - if p > prox { - addrpos ^= byte(0x80 >> trans) - } - addrpos |= transbytea & f() - addr[pos] = addrpos - for i := pos + 1; i < len(addr); i++ { - addr[i] = f() - } - - return -} - -func CommonBitsAddr(self, other Address, prox int) (addr Address) { - 
return CommonBitsAddrF(self, other, func() byte { return byte(rand.Intn(255)) }, prox) -} - -func CommonBitsAddrByte(self, other Address, b byte, prox int) (addr Address) { - return CommonBitsAddrF(self, other, func() byte { return b }, prox) -} - -// randomAddressAt() generates a random address -func RandomAddress() Address { - return RandomAddressAt(Address{}, -1) -} diff --git a/swarm/network/kademlia/address_test.go b/swarm/network/kademlia/address_test.go deleted file mode 100644 index b127d5781335..000000000000 --- a/swarm/network/kademlia/address_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package kademlia - -import ( - "math/rand" - "reflect" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -func (Address) Generate(rand *rand.Rand, size int) reflect.Value { - var id Address - for i := 0; i < len(id); i++ { - id[i] = byte(uint8(rand.Intn(255))) - } - return reflect.ValueOf(id) -} - -func TestCommonBitsAddrF(t *testing.T) { - a := Address(common.HexToHash("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) - b := Address(common.HexToHash("0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) - c := Address(common.HexToHash("0x4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) - d := Address(common.HexToHash("0x0023456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) - e := Address(common.HexToHash("0x01A3456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) - ab := CommonBitsAddrF(a, b, func() byte { return byte(0x00) }, 10) - expab := Address(common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000")) - - if ab != expab { - t.Fatalf("%v != %v", ab, expab) - } - ac := CommonBitsAddrF(a, c, func() byte { return byte(0x00) }, 10) - expac := Address(common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000")) - - if ac != expac { - t.Fatalf("%v != %v", ac, expac) - } - ad := CommonBitsAddrF(a, d, func() byte { return byte(0x00) }, 10) - expad := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")) - - if ad != expad { - t.Fatalf("%v != %v", ad, expad) - } - ae := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 10) - expae := Address(common.HexToHash("0x0180000000000000000000000000000000000000000000000000000000000000")) - - if ae != expae { - t.Fatalf("%v != %v", ae, expae) - } - acf := CommonBitsAddrF(a, c, func() byte { return byte(0xff) }, 10) - expacf := Address(common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) - - if acf 
!= expacf { - t.Fatalf("%v != %v", acf, expacf) - } - aeo := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 2) - expaeo := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")) - - if aeo != expaeo { - t.Fatalf("%v != %v", aeo, expaeo) - } - aep := CommonBitsAddrF(a, e, func() byte { return byte(0xff) }, 2) - expaep := Address(common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) - - if aep != expaep { - t.Fatalf("%v != %v", aep, expaep) - } - -} - -func TestRandomAddressAt(t *testing.T) { - var a Address - for i := 0; i < 100; i++ { - a = RandomAddress() - prox := rand.Intn(255) - b := RandomAddressAt(a, prox) - if proximity(a, b) != prox { - t.Fatalf("incorrect address prox(%v, %v) == %v (expected %v)", a, b, proximity(a, b), prox) - } - } -} diff --git a/swarm/network/kademlia/kaddb.go b/swarm/network/kademlia/kaddb.go deleted file mode 100644 index 0eeb1899cc84..000000000000 --- a/swarm/network/kademlia/kaddb.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package kademlia - -import ( - "encoding/json" - "fmt" - "os" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -type NodeData interface { - json.Marshaler - json.Unmarshaler -} - -// allow inactive peers under -type NodeRecord struct { - Addr Address // address of node - Url string // Url, used to connect to node - After time.Time // next call after time - Seen time.Time // last connected at time - Meta *json.RawMessage // arbitrary metadata saved for a peer - - node Node -} - -func (self *NodeRecord) setSeen() { - t := time.Now() - self.Seen = t - self.After = t -} - -func (self *NodeRecord) String() string { - return fmt.Sprintf("<%v>", self.Addr) -} - -// persisted node record database () -type KadDb struct { - Address Address - Nodes [][]*NodeRecord - index map[Address]*NodeRecord - cursors []int - lock sync.RWMutex - purgeInterval time.Duration - initialRetryInterval time.Duration - connRetryExp int -} - -func newKadDb(addr Address, params *KadParams) *KadDb { - return &KadDb{ - Address: addr, - Nodes: make([][]*NodeRecord, params.MaxProx+1), // overwritten by load - cursors: make([]int, params.MaxProx+1), - index: make(map[Address]*NodeRecord), - purgeInterval: params.PurgeInterval, - initialRetryInterval: params.InitialRetryInterval, - connRetryExp: params.ConnRetryExp, - } -} - -func (self *KadDb) findOrCreate(index int, a Address, url string) *NodeRecord { - defer self.lock.Unlock() - self.lock.Lock() - - record, found := self.index[a] - if !found { - record = &NodeRecord{ - Addr: a, - Url: url, - } - log.Info(fmt.Sprintf("add new record %v to kaddb", record)) - // insert in kaddb - self.index[a] = record - self.Nodes[index] = append(self.Nodes[index], record) - } else { - log.Info(fmt.Sprintf("found record %v in kaddb", record)) - } - // update last seen time - record.setSeen() - // update with url in case IP/port changes - record.Url = url - return record -} - -// add adds node records to kaddb (persisted node record db) -func (self 
*KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) { - defer self.lock.Unlock() - self.lock.Lock() - var n int - var nodes []*NodeRecord - for _, node := range nrs { - _, found := self.index[node.Addr] - if !found && node.Addr != self.Address { - node.setSeen() - self.index[node.Addr] = node - index := proximityBin(node.Addr) - dbcursor := self.cursors[index] - nodes = self.Nodes[index] - // this is inefficient for allocation, need to just append then shift - newnodes := make([]*NodeRecord, len(nodes)+1) - copy(newnodes[:], nodes[:dbcursor]) - newnodes[dbcursor] = node - copy(newnodes[dbcursor+1:], nodes[dbcursor:]) - log.Trace(fmt.Sprintf("new nodes: %v, nodes: %v", newnodes, nodes)) - self.Nodes[index] = newnodes - n++ - } - } - if n > 0 { - log.Debug(fmt.Sprintf("%d/%d node records (new/known)", n, len(nrs))) - } -} - -/* -next return one node record with the highest priority for desired -connection. -This is used to pick candidates for live nodes that are most wanted for -a higly connected low centrality network structure for Swarm which best suits -for a Kademlia-style routing. - -* Starting as naive node with empty db, this implements Kademlia bootstrapping -* As a mature node, it fills short lines. All on demand. - -The candidate is chosen using the following strategy: -We check for missing online nodes in the buckets for 1 upto Max BucketSize rounds. -On each round we proceed from the low to high proximity order buckets. -If the number of active nodes (=connected peers) is < rounds, then start looking -for a known candidate. To determine if there is a candidate to recommend the -kaddb node record database row corresponding to the bucket is checked. - -If the row cursor is on position i, the ith element in the row is chosen. -If the record is scheduled not to be retried before NOW, the next element is taken. -If the record is scheduled to be retried, it is set as checked, scheduled for -checking and is returned. 
The time of the next check is in X (duration) such that -X = ConnRetryExp * delta where delta is the time past since the last check and -ConnRetryExp is constant obsoletion factor. (Note that when node records are added -from peer messages, they are marked as checked and placed at the cursor, ie. -given priority over older entries). Entries which were checked more than -purgeInterval ago are deleted from the kaddb row. If no candidate is found after -a full round of checking the next bucket up is considered. If no candidate is -found when we reach the maximum-proximity bucket, the next round starts. - -node record a is more favoured to b a > b iff a is a passive node (record of -offline past peer) -|proxBin(a)| < |proxBin(b)| -|| (proxBin(a) < proxBin(b) && |proxBin(a)| == |proxBin(b)|) -|| (proxBin(a) == proxBin(b) && lastChecked(a) < lastChecked(b)) - -The second argument returned names the first missing slot found -*/ -func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRecord, need bool, proxLimit int) { - // return nil, proxLimit indicates that all buckets are filled - defer self.lock.Unlock() - self.lock.Lock() - - var interval time.Duration - var found bool - var purge []bool - var delta time.Duration - var cursor int - var count int - var after time.Time - - // iterate over columns maximum bucketsize times - for rounds := 1; rounds <= maxBinSize; rounds++ { - ROUND: - // iterate over rows from PO 0 upto MaxProx - for po, dbrow := range self.Nodes { - // if row has rounds connected peers, then take the next - if binSize(po) >= rounds { - continue ROUND - } - if !need { - // set proxlimit to the PO where the first missing slot is found - proxLimit = po - need = true - } - purge = make([]bool, len(dbrow)) - - // there is a missing slot - finding a node to connect to - // select a node record from the relavant kaddb row (of identical prox order) - ROW: - for cursor = self.cursors[po]; !found && count < len(dbrow); cursor = (cursor + 1) 
% len(dbrow) { - count++ - node = dbrow[cursor] - - // skip already connected nodes - if node.node != nil { - log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow))) - continue ROW - } - - // if node is scheduled to connect - if node.After.After(time.Now()) { - log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)) - continue ROW - } - - delta = time.Since(node.Seen) - if delta < self.initialRetryInterval { - delta = self.initialRetryInterval - } - if delta > self.purgeInterval { - // remove node - purge[cursor] = true - log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen)) - continue ROW - } - - log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)) - - // scheduling next check - interval = delta * time.Duration(self.connRetryExp) - after = time.Now().Add(interval) - - log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. 
seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval)) - node.After = after - found = true - } // ROW - self.cursors[po] = cursor - self.delete(po, purge) - if found { - return node, need, proxLimit - } - } // ROUND - } // ROUNDS - - return nil, need, proxLimit -} - -// deletes the noderecords of a kaddb row corresponding to the indexes -// caller must hold the dblock -// the call is unsafe, no index checks -func (self *KadDb) delete(row int, purge []bool) { - var nodes []*NodeRecord - dbrow := self.Nodes[row] - for i, del := range purge { - if i == self.cursors[row] { - //reset cursor - self.cursors[row] = len(nodes) - } - // delete the entry to be purged - if del { - delete(self.index, dbrow[i].Addr) - continue - } - // otherwise append to new list - nodes = append(nodes, dbrow[i]) - } - self.Nodes[row] = nodes -} - -// save persists kaddb on disk (written to file on path in json format. -func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error { - defer self.lock.Unlock() - self.lock.Lock() - - var n int - - for _, b := range self.Nodes { - for _, node := range b { - n++ - node.After = time.Now() - node.Seen = time.Now() - if cb != nil { - cb(node, node.node) - } - } - } - - data, err := json.MarshalIndent(self, "", " ") - if err != nil { - return err - } - err = os.WriteFile(path, data, os.ModePerm) - if err != nil { - log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: %v", n, path, err)) - } else { - log.Info(fmt.Sprintf("saved kaddb with %v nodes to %v", n, path)) - } - return err -} - -// Load(path) loads the node record database (kaddb) from file on path. 
-func (self *KadDb) load(path string, cb func(*NodeRecord, Node) error) (err error) { - defer self.lock.Unlock() - self.lock.Lock() - - var data []byte - data, err = os.ReadFile(path) - if err != nil { - return - } - - err = json.Unmarshal(data, self) - if err != nil { - return - } - var n int - var purge []bool - for po, b := range self.Nodes { - purge = make([]bool, len(b)) - ROW: - for i, node := range b { - if cb != nil { - err = cb(node, node.node) - if err != nil { - purge[i] = true - continue ROW - } - } - n++ - if node.After.IsZero() { - node.After = time.Now() - } - self.index[node.Addr] = node - } - self.delete(po, purge) - } - log.Info(fmt.Sprintf("loaded kaddb with %v nodes from %v", n, path)) - - return -} - -// accessor for KAD offline db count -func (self *KadDb) count() int { - defer self.lock.Unlock() - self.lock.Lock() - return len(self.index) -} diff --git a/swarm/network/kademlia/kademlia.go b/swarm/network/kademlia/kademlia.go deleted file mode 100644 index f294b4805356..000000000000 --- a/swarm/network/kademlia/kademlia.go +++ /dev/null @@ -1,454 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package kademlia - -import ( - "fmt" - "sort" - "strings" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" -) - -//metrics variables -//For metrics, we want to count how many times peers are added/removed -//at a certain index. Thus we do that with an array of counters with -//entry for each index -var ( - bucketAddIndexCount []metrics.Counter - bucketRmIndexCount []metrics.Counter -) - -const ( - bucketSize = 4 - proxBinSize = 2 - maxProx = 8 - connRetryExp = 2 - maxPeers = 100 -) - -var ( - purgeInterval = 42 * time.Hour - initialRetryInterval = 42 * time.Millisecond - maxIdleInterval = 42 * 1000 * time.Millisecond - // maxIdleInterval = 42 * 10 0 * time.Millisecond -) - -type KadParams struct { - // adjustable parameters - MaxProx int - ProxBinSize int - BucketSize int - PurgeInterval time.Duration - InitialRetryInterval time.Duration - MaxIdleInterval time.Duration - ConnRetryExp int -} - -func NewDefaultKadParams() *KadParams { - return &KadParams{ - MaxProx: maxProx, - ProxBinSize: proxBinSize, - BucketSize: bucketSize, - PurgeInterval: purgeInterval, - InitialRetryInterval: initialRetryInterval, - MaxIdleInterval: maxIdleInterval, - ConnRetryExp: connRetryExp, - } -} - -// Kademlia is a table of active nodes -type Kademlia struct { - addr Address // immutable baseaddress of the table - *KadParams // Kademlia configuration parameters - proxLimit int // state, the PO of the first row of the most proximate bin - proxSize int // state, the number of peers in the most proximate bin - count int // number of active peers (w live connection) - buckets [][]Node // the actual bins - db *KadDb // kaddb, node record database - lock sync.RWMutex // mutex to access buckets -} - -type Node interface { - Addr() Address - Url() string - LastActive() time.Time - Drop() -} - -// public constructor -// add is the base address of the table -// params is KadParams configuration -func New(addr Address, params *KadParams) 
*Kademlia { - buckets := make([][]Node, params.MaxProx+1) - kad := &Kademlia{ - addr: addr, - KadParams: params, - buckets: buckets, - db: newKadDb(addr, params), - } - kad.initMetricsVariables() - return kad -} - -// accessor for KAD base address -func (self *Kademlia) Addr() Address { - return self.addr -} - -// accessor for KAD active node count -func (self *Kademlia) Count() int { - defer self.lock.Unlock() - self.lock.Lock() - return self.count -} - -// accessor for KAD active node count -func (self *Kademlia) DBCount() int { - return self.db.count() -} - -// On is the entry point called when a new nodes is added -// unsafe in that node is not checked to be already active node (to be called once) -func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error) { - log.Debug(fmt.Sprintf("%v", self)) - defer self.lock.Unlock() - self.lock.Lock() - - index := self.proximityBin(node.Addr()) - record := self.db.findOrCreate(index, node.Addr(), node.Url()) - - if cb != nil { - err = cb(record, node) - log.Trace(fmt.Sprintf("cb(%v, %v) ->%v", record, node, err)) - if err != nil { - return fmt.Errorf("unable to add node %v, callback error: %v", node.Addr(), err) - } - log.Debug(fmt.Sprintf("add node record %v with node %v", record, node)) - } - - // insert in kademlia table of active nodes - bucket := self.buckets[index] - // if bucket is full insertion replaces the worst node - // TODO: give priority to peers with active traffic - if len(bucket) < self.BucketSize { // >= allows us to add peers beyond the bucketsize limitation - self.buckets[index] = append(bucket, node) - bucketAddIndexCount[index].Inc(1) - log.Debug(fmt.Sprintf("add node %v to table", node)) - self.setProxLimit(index, true) - record.node = node - self.count++ - return nil - } - - // always rotate peers - idle := self.MaxIdleInterval - var pos int - var replaced Node - for i, p := range bucket { - idleInt := time.Since(p.LastActive()) - if idleInt > idle { - idle = idleInt - pos = i 
- replaced = p - } - } - if replaced == nil { - log.Debug(fmt.Sprintf("all peers wanted, PO%03d bucket full", index)) - return fmt.Errorf("bucket full") - } - log.Debug(fmt.Sprintf("node %v replaced by %v (idle for %v > %v)", replaced, node, idle, self.MaxIdleInterval)) - replaced.Drop() - // actually replace in the row. When off(node) is called, the peer is no longer in the row - bucket[pos] = node - // there is no change in bucket cardinalities so no prox limit adjustment is needed - record.node = node - self.count++ - return nil - -} - -// Off is the called when a node is taken offline (from the protocol main loop exit) -func (self *Kademlia) Off(node Node, cb func(*NodeRecord, Node)) (err error) { - self.lock.Lock() - defer self.lock.Unlock() - - index := self.proximityBin(node.Addr()) - bucketRmIndexCount[index].Inc(1) - bucket := self.buckets[index] - for i := 0; i < len(bucket); i++ { - if node.Addr() == bucket[i].Addr() { - self.buckets[index] = append(bucket[:i], bucket[(i+1):]...) 
- self.setProxLimit(index, false) - break - } - } - - record := self.db.index[node.Addr()] - // callback on remove - if cb != nil { - cb(record, record.node) - } - record.node = nil - self.count-- - log.Debug(fmt.Sprintf("remove node %v from table, population now is %v", node, self.count)) - - return -} - -// proxLimit is dynamically adjusted so that -// 1) there is no empty buckets in bin < proxLimit and -// 2) the sum of all items are the minimum possible but higher than ProxBinSize -// adjust Prox (proxLimit and proxSize after an insertion/removal of nodes) -// caller holds the lock -func (self *Kademlia) setProxLimit(r int, on bool) { - // if the change is outside the core (PO lower) - // and the change does not leave a bucket empty then - // no adjustment needed - if r < self.proxLimit && len(self.buckets[r]) > 0 { - return - } - // if on=a node was added, then r must be within prox limit so increment cardinality - if on { - self.proxSize++ - curr := len(self.buckets[self.proxLimit]) - // if now core is big enough without the furthest bucket, then contract - // this can result in more than one bucket change - for self.proxSize >= self.ProxBinSize+curr && curr > 0 { - self.proxSize -= curr - self.proxLimit++ - curr = len(self.buckets[self.proxLimit]) - - log.Trace(fmt.Sprintf("proxbin contraction (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)) - } - return - } - // otherwise - if r >= self.proxLimit { - self.proxSize-- - } - // expand core by lowering prox limit until hit zero or cover the empty bucket or reached target cardinality - for (self.proxSize < self.ProxBinSize || r < self.proxLimit) && - self.proxLimit > 0 { - // - self.proxLimit-- - self.proxSize += len(self.buckets[self.proxLimit]) - log.Trace(fmt.Sprintf("proxbin expansion (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)) - } -} - -/* -returns the list of nodes belonging to the same proximity bin -as the target. 
The most proximate bin will be the union of the bins between -proxLimit and MaxProx. -*/ -func (self *Kademlia) FindClosest(target Address, max int) []Node { - self.lock.Lock() - defer self.lock.Unlock() - - r := nodesByDistance{ - target: target, - } - - po := self.proximityBin(target) - index := po - step := 1 - log.Trace(fmt.Sprintf("serving %v nodes at %v (PO%02d)", max, index, po)) - - // if max is set to 0, just want a full bucket, dynamic number - min := max - // set limit to max - limit := max - if max == 0 { - min = 1 - limit = maxPeers - } - - var n int - for index >= 0 { - // add entire bucket - for _, p := range self.buckets[index] { - r.push(p, limit) - n++ - } - // terminate if index reached the bottom or enough peers > min - log.Trace(fmt.Sprintf("add %v -> %v (PO%02d, PO%03d)", len(self.buckets[index]), n, index, po)) - if n >= min && (step < 0 || max == 0) { - break - } - // reach top most non-empty PO bucket, turn around - if index == self.MaxProx { - index = po - step = -1 - } - index += step - } - log.Trace(fmt.Sprintf("serve %d (<=%d) nodes for target lookup %v (PO%03d)", n, max, target, po)) - return r.nodes -} - -func (self *Kademlia) Suggest() (*NodeRecord, bool, int) { - defer self.lock.RUnlock() - self.lock.RLock() - return self.db.findBest(self.BucketSize, func(i int) int { return len(self.buckets[i]) }) -} - -// adds node records to kaddb (persisted node record db) -func (self *Kademlia) Add(nrs []*NodeRecord) { - self.db.add(nrs, self.proximityBin) -} - -// nodesByDistance is a list of nodes, ordered by distance to target. -type nodesByDistance struct { - nodes []Node - target Address -} - -func sortedByDistanceTo(target Address, slice []Node) bool { - var last Address - for i, node := range slice { - if i > 0 { - if target.ProxCmp(node.Addr(), last) < 0 { - return false - } - } - last = node.Addr() - } - return true -} - -// push(node, max) adds the given node to the list, keeping the total size -// below max elements. 
-func (h *nodesByDistance) push(node Node, max int) { - // returns the firt index ix such that func(i) returns true - ix := sort.Search(len(h.nodes), func(i int) bool { - return h.target.ProxCmp(h.nodes[i].Addr(), node.Addr()) >= 0 - }) - - if len(h.nodes) < max { - h.nodes = append(h.nodes, node) - } - if ix < len(h.nodes) { - copy(h.nodes[ix+1:], h.nodes[ix:]) - h.nodes[ix] = node - } -} - -/* -Taking the proximity order relative to a fix point x classifies the points in -the space (n byte long byte sequences) into bins. Items in each are at -most half as distant from x as items in the previous bin. Given a sample of -uniformly distributed items (a hash function over arbitrary sequence) the -proximity scale maps onto series of subsets with cardinalities on a negative -exponential scale. - -It also has the property that any two item belonging to the same bin are at -most half as distant from each other as they are from x. - -If we think of random sample of items in the bins as connections in a network of interconnected nodes than relative proximity can serve as the basis for local -decisions for graph traversal where the task is to find a route between two -points. Since in every hop, the finite distance halves, there is -a guaranteed constant maximum limit on the number of hops needed to reach one -node from the other. -*/ - -func (self *Kademlia) proximityBin(other Address) (ret int) { - ret = proximity(self.addr, other) - if ret > self.MaxProx { - ret = self.MaxProx - } - return -} - -// provides keyrange for chunk db iteration -func (self *Kademlia) KeyRange(other Address) (start, stop Address) { - defer self.lock.RUnlock() - self.lock.RLock() - return KeyRange(self.addr, other, self.proxLimit) -} - -// save persists kaddb on disk (written to file on path in json format. -func (self *Kademlia) Save(path string, cb func(*NodeRecord, Node)) error { - return self.db.save(path, cb) -} - -// Load(path) loads the node record database (kaddb) from file on path. 
-func (self *Kademlia) Load(path string, cb func(*NodeRecord, Node) error) (err error) { - return self.db.load(path, cb) -} - -// kademlia table + kaddb table displayed with ascii -func (self *Kademlia) String() string { - defer self.lock.RUnlock() - self.lock.RLock() - defer self.db.lock.RUnlock() - self.db.lock.RLock() - - var rows []string - rows = append(rows, "=========================================================================") - rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %v", time.Now().UTC().Format(time.UnixDate), self.addr.String()[:6])) - rows = append(rows, fmt.Sprintf("population: %d (%d), proxLimit: %d, proxSize: %d", self.count, len(self.db.index), self.proxLimit, self.proxSize)) - rows = append(rows, fmt.Sprintf("MaxProx: %d, ProxBinSize: %d, BucketSize: %d", self.MaxProx, self.ProxBinSize, self.BucketSize)) - - for i, bucket := range self.buckets { - - if i == self.proxLimit { - rows = append(rows, fmt.Sprintf("============ PROX LIMIT: %d ==========================================", i)) - } - row := []string{fmt.Sprintf("%03d", i), fmt.Sprintf("%2d", len(bucket))} - var k int - c := self.db.cursors[i] - for ; k < len(bucket); k++ { - p := bucket[(c+k)%len(bucket)] - row = append(row, p.Addr().String()[:6]) - if k == 4 { - break - } - } - for ; k < 4; k++ { - row = append(row, " ") - } - row = append(row, fmt.Sprintf("| %2d %2d", len(self.db.Nodes[i]), self.db.cursors[i])) - - for j, p := range self.db.Nodes[i] { - row = append(row, p.Addr.String()[:6]) - if j == 3 { - break - } - } - rows = append(rows, strings.Join(row, " ")) - if i == self.MaxProx { - } - } - rows = append(rows, "=========================================================================") - return strings.Join(rows, "\n") -} - -//We have to build up the array of counters for each index -func (self *Kademlia) initMetricsVariables() { - //create the arrays - bucketAddIndexCount = make([]metrics.Counter, self.MaxProx+1) - bucketRmIndexCount = 
make([]metrics.Counter, self.MaxProx+1) - //at each index create a metrics counter - for i := 0; i < (self.KadParams.MaxProx + 1); i++ { - bucketAddIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.add.%d.index", i), nil) - bucketRmIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.rm.%d.index", i), nil) - } -} diff --git a/swarm/network/kademlia/kademlia_test.go b/swarm/network/kademlia/kademlia_test.go deleted file mode 100644 index 88858908a4cd..000000000000 --- a/swarm/network/kademlia/kademlia_test.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package kademlia - -import ( - "fmt" - "math" - "math/rand" - "os" - "path/filepath" - "reflect" - "testing" - "testing/quick" - "time" -) - -var ( - quickrand = rand.New(rand.NewSource(time.Now().Unix())) - quickcfgFindClosest = &quick.Config{MaxCount: 50, Rand: quickrand} - quickcfgBootStrap = &quick.Config{MaxCount: 100, Rand: quickrand} -) - -type testNode struct { - addr Address -} - -func (n *testNode) String() string { - return fmt.Sprintf("%x", n.addr[:]) -} - -func (n *testNode) Addr() Address { - return n.addr -} - -func (n *testNode) Drop() { -} - -func (n *testNode) Url() string { - return "" -} - -func (n *testNode) LastActive() time.Time { - return time.Now() -} - -func TestOn(t *testing.T) { - addr, ok1 := gen(Address{}, quickrand).(Address) - other, ok2 := gen(Address{}, quickrand).(Address) - if !ok1 || !ok2 { - t.Errorf("oops") - } - kad := New(addr, NewDefaultKadParams()) - err := kad.On(&testNode{addr: other}, nil) - _ = err -} - -func TestBootstrap(t *testing.T) { - - test := func(test *bootstrapTest) bool { - // for any node kad.le, Target and N - params := NewDefaultKadParams() - params.MaxProx = test.MaxProx - params.BucketSize = test.BucketSize - params.ProxBinSize = test.BucketSize - kad := New(test.Self, params) - var err error - - for p := 0; p < 9; p++ { - var nrs []*NodeRecord - n := math.Pow(float64(2), float64(7-p)) - for i := 0; i < int(n); i++ { - addr := RandomAddressAt(test.Self, p) - nrs = append(nrs, &NodeRecord{ - Addr: addr, - }) - } - kad.Add(nrs) - } - - node := &testNode{test.Self} - - n := 0 - for n < 100 { - err = kad.On(node, nil) - if err != nil { - t.Fatalf("backend not accepting node: %v", err) - } - - record, need, _ := kad.Suggest() - if !need { - break - } - n++ - if record == nil { - continue - } - node = &testNode{record.Addr} - } - exp := test.BucketSize * (test.MaxProx + 1) - if kad.Count() != exp { - t.Errorf("incorrect number of peers, expected %d, got %d\n%v", exp, kad.Count(), kad) - return false - } - 
return true - } - if err := quick.Check(test, quickcfgBootStrap); err != nil { - t.Error(err) - } - -} - -func TestFindClosest(t *testing.T) { - - test := func(test *FindClosestTest) bool { - // for any node kad.le, Target and N - params := NewDefaultKadParams() - params.MaxProx = 7 - kad := New(test.Self, params) - var err error - for _, node := range test.All { - err = kad.On(node, nil) - if err != nil && err.Error() != "bucket full" { - t.Fatalf("backend not accepting node: %v", err) - } - } - - if len(test.All) == 0 || test.N == 0 { - return true - } - nodes := kad.FindClosest(test.Target, test.N) - - // check that the number of results is min(N, kad.len) - wantN := test.N - if tlen := kad.Count(); tlen < test.N { - wantN = tlen - } - - if len(nodes) != wantN { - t.Errorf("wrong number of nodes: got %d, want %d", len(nodes), wantN) - return false - } - - if hasDuplicates(nodes) { - t.Errorf("result contains duplicates") - return false - } - - if !sortedByDistanceTo(test.Target, nodes) { - t.Errorf("result is not sorted by distance to target") - return false - } - - // check that the result nodes have minimum distance to target. 
- farthestResult := nodes[len(nodes)-1].Addr() - for i, b := range kad.buckets { - for j, n := range b { - if contains(nodes, n.Addr()) { - continue // don't run the check below for nodes in result - } - if test.Target.ProxCmp(n.Addr(), farthestResult) < 0 { - _ = i * j - t.Errorf("kad.le contains node that is closer to target but it's not in result") - return false - } - } - } - return true - } - if err := quick.Check(test, quickcfgFindClosest); err != nil { - t.Error(err) - } -} - -type proxTest struct { - add bool - index int - addr Address -} - -var ( - addresses []Address -) - -func TestProxAdjust(t *testing.T) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - self := gen(Address{}, r).(Address) - params := NewDefaultKadParams() - params.MaxProx = 7 - kad := New(self, params) - - var err error - for i := 0; i < 100; i++ { - a := gen(Address{}, r).(Address) - addresses = append(addresses, a) - err = kad.On(&testNode{addr: a}, nil) - if err != nil && err.Error() != "bucket full" { - t.Fatalf("backend not accepting node: %v", err) - } - if !kad.proxCheck(t) { - return - } - } - test := func(test *proxTest) bool { - node := &testNode{test.addr} - if test.add { - kad.On(node, nil) - } else { - kad.Off(node, nil) - } - return kad.proxCheck(t) - } - if err := quick.Check(test, quickcfgFindClosest); err != nil { - t.Error(err) - } -} - -func TestSaveLoad(t *testing.T) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - addresses := gen([]Address{}, r).([]Address) - self := RandomAddress() - params := NewDefaultKadParams() - params.MaxProx = 7 - kad := New(self, params) - - var err error - - for _, a := range addresses { - err = kad.On(&testNode{addr: a}, nil) - if err != nil && err.Error() != "bucket full" { - t.Fatalf("backend not accepting node: %v", err) - } - } - nodes := kad.FindClosest(self, 100) - - path := filepath.Join(os.TempDir(), "bzz-kad-test-save-load.peers") - err = kad.Save(path, nil) - if err != nil && err.Error() != "bucket full" { - 
t.Fatalf("unepected error saving kaddb: %v", err) - } - kad = New(self, params) - err = kad.Load(path, nil) - if err != nil && err.Error() != "bucket full" { - t.Fatalf("unepected error loading kaddb: %v", err) - } - for _, b := range kad.db.Nodes { - for _, node := range b { - err = kad.On(&testNode{node.Addr}, nil) - if err != nil && err.Error() != "bucket full" { - t.Fatalf("backend not accepting node: %v", err) - } - } - } - loadednodes := kad.FindClosest(self, 100) - for i, node := range loadednodes { - if nodes[i].Addr() != node.Addr() { - t.Errorf("node mismatch at %d/%d: %v != %v", i, len(nodes), nodes[i].Addr(), node.Addr()) - } - } -} - -func (self *Kademlia) proxCheck(t *testing.T) bool { - var sum int - for i, b := range self.buckets { - l := len(b) - // if we are in the high prox multibucket - if i >= self.proxLimit { - sum += l - } else if l == 0 { - t.Errorf("bucket %d empty, yet proxLimit is %d\n%v", len(b), self.proxLimit, self) - return false - } - } - // check if merged high prox bucket does not exceed size - if sum > 0 { - if sum != self.proxSize { - t.Errorf("proxSize incorrect, expected %v, got %v", sum, self.proxSize) - return false - } - last := len(self.buckets[self.proxLimit]) - if last > 0 && sum >= self.ProxBinSize+last { - t.Errorf("proxLimit %v incorrect, redundant non-empty bucket %d added to proxBin with %v (target %v)\n%v", self.proxLimit, last, sum-last, self.ProxBinSize, self) - return false - } - if self.proxLimit > 0 && sum < self.ProxBinSize { - t.Errorf("proxLimit %v incorrect. 
proxSize %v is less than target %v, yet there is more peers", self.proxLimit, sum, self.ProxBinSize) - return false - } - } - return true -} - -type bootstrapTest struct { - MaxProx int - BucketSize int - Self Address -} - -func (*bootstrapTest) Generate(rand *rand.Rand, size int) reflect.Value { - t := &bootstrapTest{ - Self: gen(Address{}, rand).(Address), - MaxProx: 5 + rand.Intn(2), - BucketSize: rand.Intn(3) + 1, - } - return reflect.ValueOf(t) -} - -type FindClosestTest struct { - Self Address - Target Address - All []Node - N int -} - -func (c FindClosestTest) String() string { - return fmt.Sprintf("A: %064x\nT: %064x\n(%d)\n", c.Self[:], c.Target[:], c.N) -} - -func (*FindClosestTest) Generate(rand *rand.Rand, size int) reflect.Value { - t := &FindClosestTest{ - Self: gen(Address{}, rand).(Address), - Target: gen(Address{}, rand).(Address), - N: rand.Intn(bucketSize), - } - for _, a := range gen([]Address{}, rand).([]Address) { - t.All = append(t.All, &testNode{addr: a}) - } - return reflect.ValueOf(t) -} - -func (*proxTest) Generate(rand *rand.Rand, size int) reflect.Value { - var add bool - if rand.Intn(1) == 0 { - add = true - } - var t *proxTest - if add { - t = &proxTest{ - addr: gen(Address{}, rand).(Address), - add: add, - } - } else { - t = &proxTest{ - index: rand.Intn(len(addresses)), - add: add, - } - } - return reflect.ValueOf(t) -} - -func hasDuplicates(slice []Node) bool { - seen := make(map[Address]bool) - for _, node := range slice { - if seen[node.Addr()] { - return true - } - seen[node.Addr()] = true - } - return false -} - -func contains(nodes []Node, addr Address) bool { - for _, n := range nodes { - if n.Addr() == addr { - return true - } - } - return false -} - -// gen wraps quick.Value so it's easier to use. -// it generates a random value of the given value's type. 
-func gen(typ interface{}, rand *rand.Rand) interface{} { - v, ok := quick.Value(reflect.TypeOf(typ), rand) - if !ok { - panic(fmt.Sprintf("couldn't generate random value of type %T", typ)) - } - return v.Interface() -} diff --git a/swarm/network/messages.go b/swarm/network/messages.go deleted file mode 100644 index 86a17515d5aa..000000000000 --- a/swarm/network/messages.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package network - -import ( - "fmt" - "net" - "time" - - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook" - "github.com/XinFinOrg/XDPoSChain/p2p/discover" - "github.com/XinFinOrg/XDPoSChain/swarm/network/kademlia" - "github.com/XinFinOrg/XDPoSChain/swarm/services/swap" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -/* -BZZ protocol Message Types and Message Data Types -*/ - -// bzz protocol message codes -const ( - statusMsg = iota // 0x01 - storeRequestMsg // 0x02 - retrieveRequestMsg // 0x03 - peersMsg // 0x04 - syncRequestMsg // 0x05 - deliveryRequestMsg // 0x06 - unsyncedKeysMsg // 0x07 - paymentMsg // 0x08 -) - -/* - Handshake - -* Version: 8 byte integer version of the protocol -* ID: arbitrary byte sequence client identifier human readable -* Addr: the address advertised by the node, format similar to DEVp2p wire protocol -* Swap: info for the swarm accounting protocol -* NetworkID: 8 byte integer network identifier -* Caps: swarm-specific capabilities, format identical to devp2p -* SyncState: syncronisation state (db iterator key and address space etc) persisted about the peer - -*/ -type statusMsgData struct { - Version uint64 - ID string - Addr *peerAddr - Swap *swap.SwapProfile - NetworkId uint64 -} - -func (self *statusMsgData) String() string { - return fmt.Sprintf("Status: Version: %v, ID: %v, Addr: %v, Swap: %v, NetworkId: %v", self.Version, self.ID, self.Addr, self.Swap, self.NetworkId) -} - -/* - store requests are forwarded to the peers in their kademlia proximity bin - if they are distant - if they are within our storage radius or have any incentive to store it - then attach your nodeID to the metadata - if the storage request is sufficiently close (within our proxLimit, i. e., the - last row of the routing table) -*/ -type storeRequestMsgData struct { - Key storage.Key // hash of datasize | data - SData []byte // the actual chunk Data - // optional - Id uint64 // request ID. 
if delivery, the ID is retrieve request ID - requestTimeout *time.Time // expiry for forwarding - [not serialised][not currently used] - storageTimeout *time.Time // expiry of content - [not serialised][not currently used] - from *peer // [not serialised] protocol registers the requester -} - -func (self storeRequestMsgData) String() string { - var from string - if self.from == nil { - from = "self" - } else { - from = self.from.Addr().String() - } - end := len(self.SData) - if len(self.SData) > 10 { - end = 10 - } - return fmt.Sprintf("from: %v, Key: %v; ID: %v, requestTimeout: %v, storageTimeout: %v, SData %x", from, self.Key, self.Id, self.requestTimeout, self.storageTimeout, self.SData[:end]) -} - -/* -Retrieve request - -Timeout in milliseconds. Note that zero timeout retrieval requests do not request forwarding, but prompt for a peers message response. therefore they serve also -as messages to retrieve peers. - -MaxSize specifies the maximum size that the peer will accept. This is useful in -particular if we allow storage and delivery of multichunk payload representing -the entire or partial subtree unfolding from the requested root key. -So when only interested in limited part of a stream (infinite trees) or only -testing chunk availability etc etc, we can indicate it by limiting the size here. - -Request ID can be newly generated or kept from the request originator. -If request ID Is missing or zero, the request is handled as a lookup only -prompting a peers response but not launching a search. Lookup requests are meant -to be used to bootstrap kademlia tables. - -In the special case that the key is the zero value as well, the remote peer's -address is assumed (the message is to be handled as a self lookup request). -The response is a PeersMsg with the peers in the kademlia proximity bin -corresponding to the address. 
-*/ - -type retrieveRequestMsgData struct { - Key storage.Key // target Key address of chunk to be retrieved - Id uint64 // request id, request is a lookup if missing or zero - MaxSize uint64 // maximum size of delivery accepted - MaxPeers uint64 // maximum number of peers returned - Timeout uint64 // the longest time we are expecting a response - timeout *time.Time // [not serialied] - from *peer // -} - -func (self *retrieveRequestMsgData) String() string { - var from string - if self.from == nil { - from = "ourselves" - } else { - from = self.from.Addr().String() - } - var target []byte - if len(self.Key) > 3 { - target = self.Key[:4] - } - return fmt.Sprintf("from: %v, Key: %x; ID: %v, MaxSize: %v, MaxPeers: %d", from, target, self.Id, self.MaxSize, self.MaxPeers) -} - -// lookups are encoded by missing request ID -func (self *retrieveRequestMsgData) isLookup() bool { - return self.Id == 0 -} - -// sets timeout fields -func (self *retrieveRequestMsgData) setTimeout(t *time.Time) { - self.timeout = t - if t != nil { - self.Timeout = uint64(t.UnixNano()) - } else { - self.Timeout = 0 - } -} - -func (self *retrieveRequestMsgData) getTimeout() (t *time.Time) { - if self.Timeout > 0 && self.timeout == nil { - timeout := time.Unix(int64(self.Timeout), 0) - t = &timeout - self.timeout = t - } - return -} - -// peerAddr is sent in StatusMsg as part of the handshake -type peerAddr struct { - IP net.IP - Port uint16 - ID []byte // the 64 byte NodeID (ECDSA Public Key) - Addr kademlia.Address -} - -// peerAddr pretty prints as enode -func (self *peerAddr) String() string { - var nodeid discover.NodeID - copy(nodeid[:], self.ID) - return discover.NewNode(nodeid, self.IP, 0, self.Port).String() -} - -/* -peers Msg is one response to retrieval; it is always encouraged after a retrieval -request to respond with a list of peers in the same kademlia proximity bin. 
-The encoding of a peer is identical to that in the devp2p base protocol peers -messages: [IP, Port, NodeID] -note that a node's DPA address is not the NodeID but the hash of the NodeID. - -Timeout serves to indicate whether the responder is forwarding the query within -the timeout or not. - -NodeID serves as the owner of payment contracts and signer of proofs of transfer. - -The Key is the target (if response to a retrieval request) or missing (zero value) -peers address (hash of NodeID) if retrieval request was a self lookup. - -Peers message is requested by retrieval requests with a missing or zero value request ID -*/ -type peersMsgData struct { - Peers []*peerAddr // - Timeout uint64 // - timeout *time.Time // indicate whether responder is expected to deliver content - Key storage.Key // present if a response to a retrieval request - Id uint64 // present if a response to a retrieval request - from *peer -} - -// peers msg pretty printer -func (self *peersMsgData) String() string { - var from string - if self.from == nil { - from = "ourselves" - } else { - from = self.from.Addr().String() - } - var target []byte - if len(self.Key) > 3 { - target = self.Key[:4] - } - return fmt.Sprintf("from: %v, Key: %x; ID: %v, Peers: %v", from, target, self.Id, self.Peers) -} - -func (self *peersMsgData) setTimeout(t *time.Time) { - self.timeout = t - if t != nil { - self.Timeout = uint64(t.UnixNano()) - } else { - self.Timeout = 0 - } -} - -/* -syncRequest - -is sent after the handshake to initiate syncing -the syncState of the remote node is persisted in kaddb and set on the -peer/protocol instance when the node is registered by hive as online{ -*/ - -type syncRequestMsgData struct { - SyncState *syncState `rlp:"nil"` -} - -func (self *syncRequestMsgData) String() string { - return fmt.Sprintf("%v", self.SyncState) -} - -/* -deliveryRequest - -is sent once a batch of sync keys is filtered. 
The ones not found are -sent as a list of syncReuest (hash, priority) in the Deliver field. -When the source receives the sync request it continues to iterate -and fetch at most N items as yet unsynced. -At the same time responds with deliveries of the items. -*/ -type deliveryRequestMsgData struct { - Deliver []*syncRequest -} - -func (self *deliveryRequestMsgData) String() string { - return fmt.Sprintf("sync request for new chunks\ndelivery request for %v chunks", len(self.Deliver)) -} - -/* -unsyncedKeys - -is sent first after the handshake if SyncState iterator brings up hundreds, thousands? -and subsequently sent as a response to deliveryRequestMsgData. - -Syncing is the iterative process of exchanging unsyncedKeys and deliveryRequestMsgs -both ways. - -State contains the sync state sent by the source. When the source receives the -sync state it continues to iterate and fetch at most N items as yet unsynced. -At the same time responds with deliveries of the items. -*/ -type unsyncedKeysMsgData struct { - Unsynced []*syncRequest - State *syncState -} - -func (self *unsyncedKeysMsgData) String() string { - return fmt.Sprintf("sync: keys of %d new chunks (state %v) => synced: %v", len(self.Unsynced), self.State, self.State.Synced) -} - -/* -payment - -is sent when the swap balance is tilted in favour of the remote peer -and in absolute units exceeds the PayAt parameter in the remote peer's profile -*/ - -type paymentMsgData struct { - Units uint // units actually paid for (checked against amount by swap) - Promise *chequebook.Cheque // payment with cheque -} - -func (self *paymentMsgData) String() string { - return fmt.Sprintf("payment for %d units: %v", self.Units, self.Promise) -} diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go deleted file mode 100644 index be793b40a146..000000000000 --- a/swarm/network/protocol.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package network - -/* -bzz implements the swarm wire protocol [bzz] (sister of eth and shh) -the protocol instance is launched on each peer by the network layer if the -bzz protocol handler is registered on the p2p server. - -The bzz protocol component speaks the bzz protocol -* handle the protocol handshake -* register peers in the KΛÐΞMLIΛ table via the hive logistic manager -* dispatch to hive for handling the DHT logic -* encode and decode requests for storage and retrieval -* handle sync protocol messages via the syncer -* talks the SWAP payment protocol (swap accounting is done within NetStore) -*/ - -import ( - "errors" - "fmt" - "net" - "strconv" - "time" - - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/p2p" - bzzswap "github.com/XinFinOrg/XDPoSChain/swarm/services/swap" - "github.com/XinFinOrg/XDPoSChain/swarm/services/swap/swap" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -//metrics variables -var ( - storeRequestMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.storerequest.count", nil) - retrieveRequestMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.retrieverequest.count", nil) - peersMsgCounter = 
metrics.NewRegisteredCounter("network.protocol.msg.peers.count", nil) - syncRequestMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.syncrequest.count", nil) - unsyncedKeysMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.unsyncedkeys.count", nil) - deliverRequestMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.deliverrequest.count", nil) - paymentMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.payment.count", nil) - invalidMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.invalid.count", nil) - handleStatusMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.handlestatus.count", nil) -) - -const ( - Version = 0 - ProtocolLength = uint64(8) - ProtocolMaxMsgSize = 10 * 1024 * 1024 - NetworkId = 3 -) - -// bzz represents the swarm wire protocol -// an instance is running on each peer -type bzz struct { - storage StorageHandler // handler storage/retrieval related requests coming via the bzz wire protocol - hive *Hive // the logistic manager, peerPool, routing service and peer handler - dbAccess *DbAccess // access to db storage counter and iterator for syncing - requestDb *storage.LDBDatabase // db to persist backlog of deliveries to aid syncing - remoteAddr *peerAddr // remote peers address - peer *p2p.Peer // the p2p peer object - rw p2p.MsgReadWriter // messageReadWriter to send messages to - backend chequebook.Backend - lastActive time.Time - NetworkId uint64 - - swap *swap.Swap // swap instance for the peer connection - swapParams *bzzswap.SwapParams // swap settings both local and remote - swapEnabled bool // flag to enable SWAP (will be set via Caps in handshake) - syncEnabled bool // flag to enable SYNC (will be set via Caps in handshake) - syncer *syncer // syncer instance for the peer connection - syncParams *SyncParams // syncer params - syncState *syncState // outgoing syncronisation state (contains reference to remote peers db counter) -} - -// interface type for handler 
of storage/retrieval related requests coming -// via the bzz wire protocol -// messages: UnsyncedKeys, DeliveryRequest, StoreRequest, RetrieveRequest -type StorageHandler interface { - HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error - HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error - HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) - HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) -} - -/* -main entrypoint, wrappers starting a server that will run the bzz protocol -use this constructor to attach the protocol ("class") to server caps -This is done by node.Node#Register(func(node.ServiceContext) (Service, error)) -Service implements Protocols() which is an array of protocol constructors -at node startup the protocols are initialised -the Dev p2p layer then calls Run(p *p2p.Peer, rw p2p.MsgReadWriter) error -on each peer connection -The Run function of the Bzz protocol class creates a bzz instance -which will represent the peer for the swarm hive and all peer-aware components -*/ -func Bzz(cloud StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, networkId uint64) (p2p.Protocol, error) { - - // a single global request db is created for all peer connections - // this is to persist delivery backlog and aid syncronisation - requestDb, err := storage.NewLDBDatabase(sy.RequestDbPath) - if err != nil { - return p2p.Protocol{}, fmt.Errorf("error setting up request db: %v", err) - } - if networkId == 0 { - networkId = NetworkId - } - return p2p.Protocol{ - Name: "bzz", - Version: Version, - Length: ProtocolLength, - Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - return run(requestDb, cloud, backend, hive, dbaccess, sp, sy, networkId, p, rw) - }, - }, nil -} - -/* -the main protocol loop that - * does the handshake by exchanging statusMsg - * if peer is valid and accepted, registers with the hive - * then enters into a forever loop handling 
incoming messages - * storage and retrieval related queries coming via bzz are dispatched to StorageHandler - * peer-related messages are dispatched to the hive - * payment related messages are relayed to SWAP service - * on disconnect, unregister the peer in the hive (note RemovePeer in the post-disconnect hook) - * whenever the loop terminates, the peer will disconnect with Subprotocol error - * whenever handlers return an error the loop terminates -*/ -func run(requestDb *storage.LDBDatabase, depo StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, networkId uint64, p *p2p.Peer, rw p2p.MsgReadWriter) (err error) { - - self := &bzz{ - storage: depo, - backend: backend, - hive: hive, - dbAccess: dbaccess, - requestDb: requestDb, - peer: p, - rw: rw, - swapParams: sp, - syncParams: sy, - swapEnabled: hive.swapEnabled, - syncEnabled: true, - NetworkId: networkId, - } - - // handle handshake - err = self.handleStatus() - if err != nil { - return err - } - defer func() { - // if the handler loop exits, the peer is disconnecting - // deregister the peer in the hive - self.hive.removePeer(&peer{bzz: self}) - if self.syncer != nil { - self.syncer.stop() // quits request db and delivery loops, save requests - } - if self.swap != nil { - self.swap.Stop() // quits chequebox autocash etc - } - }() - - // the main forever loop that handles incoming requests - for { - if self.hive.blockRead { - log.Warn(fmt.Sprintf("Cannot read network")) - time.Sleep(100 * time.Millisecond) - continue - } - err = self.handle() - if err != nil { - return - } - } -} - -// TODO: may need to implement protocol drop only? 
don't want to kick off the peer -// if they are useful for other protocols -func (self *bzz) Drop() { - self.peer.Disconnect(p2p.DiscSubprotocolError) -} - -// one cycle of the main forever loop that handles and dispatches incoming messages -func (self *bzz) handle() error { - msg, err := self.rw.ReadMsg() - log.Debug(fmt.Sprintf("<- %v", msg)) - if err != nil { - return err - } - if msg.Size > ProtocolMaxMsgSize { - return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize) - } - // make sure that the payload has been fully consumed - defer msg.Discard() - - switch msg.Code { - - case statusMsg: - // no extra status message allowed. The one needed already handled by - // handleStatus - log.Debug(fmt.Sprintf("Status message: %v", msg)) - return errors.New("extra status message") - - case storeRequestMsg: - // store requests are dispatched to netStore - storeRequestMsgCounter.Inc(1) - var req storeRequestMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - if n := len(req.SData); n < 9 { - return fmt.Errorf("<- %v: Data too short (%v)", msg, n) - } - // last Active time is set only when receiving chunks - self.lastActive = time.Now() - log.Trace(fmt.Sprintf("incoming store request: %s", req.String())) - // swap accounting is done within forwarding - self.storage.HandleStoreRequestMsg(&req, &peer{bzz: self}) - - case retrieveRequestMsg: - // retrieve Requests are dispatched to netStore - retrieveRequestMsgCounter.Inc(1) - var req retrieveRequestMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - req.from = &peer{bzz: self} - // if request is lookup and not to be delivered - if req.isLookup() { - log.Trace(fmt.Sprintf("self lookup for %v: responding with peers only...", req.from)) - } else if req.Key == nil { - return fmt.Errorf("protocol handler: req.Key == nil || req.Timeout == nil") - } else { - // swap accounting is done within netStore - 
self.storage.HandleRetrieveRequestMsg(&req, &peer{bzz: self}) - } - // direct response with peers, TODO: sort this out - self.hive.peers(&req) - - case peersMsg: - // response to lookups and immediate response to retrieve requests - // dispatches new peer data to the hive that adds them to KADDB - peersMsgCounter.Inc(1) - var req peersMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - req.from = &peer{bzz: self} - log.Trace(fmt.Sprintf("<- peer addresses: %v", req)) - self.hive.HandlePeersMsg(&req, &peer{bzz: self}) - - case syncRequestMsg: - syncRequestMsgCounter.Inc(1) - var req syncRequestMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - log.Debug(fmt.Sprintf("<- sync request: %v", req)) - self.lastActive = time.Now() - self.sync(req.SyncState) - - case unsyncedKeysMsg: - // coming from parent node offering - unsyncedKeysMsgCounter.Inc(1) - var req unsyncedKeysMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - log.Debug(fmt.Sprintf("<- unsynced keys : %s", req.String())) - err := self.storage.HandleUnsyncedKeysMsg(&req, &peer{bzz: self}) - self.lastActive = time.Now() - if err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - - case deliveryRequestMsg: - // response to syncKeysMsg hashes filtered not existing in db - // also relays the last synced state to the source - deliverRequestMsgCounter.Inc(1) - var req deliveryRequestMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<-msg %v: %v", msg, err) - } - log.Debug(fmt.Sprintf("<- delivery request: %s", req.String())) - err := self.storage.HandleDeliveryRequestMsg(&req, &peer{bzz: self}) - self.lastActive = time.Now() - if err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - - case paymentMsg: - // swap protocol message for payment, Units paid for, Cheque paid with - paymentMsgCounter.Inc(1) - if self.swapEnabled { - var req 
paymentMsgData - if err := msg.Decode(&req); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - log.Debug(fmt.Sprintf("<- payment: %s", req.String())) - self.swap.Receive(int(req.Units), req.Promise) - } - - default: - // no other message is allowed - invalidMsgCounter.Inc(1) - return fmt.Errorf("invalid message code: %v", msg.Code) - } - return nil -} - -func (self *bzz) handleStatus() (err error) { - - handshake := &statusMsgData{ - Version: uint64(Version), - ID: "honey", - Addr: self.selfAddr(), - NetworkId: self.NetworkId, - Swap: &bzzswap.SwapProfile{ - Profile: self.swapParams.Profile, - PayProfile: self.swapParams.PayProfile, - }, - } - - err = p2p.Send(self.rw, statusMsg, handshake) - if err != nil { - return err - } - - // read and handle remote status - var msg p2p.Msg - msg, err = self.rw.ReadMsg() - if err != nil { - return err - } - - if msg.Code != statusMsg { - return fmt.Errorf("first msg has code %x (!= %x)", msg.Code, statusMsg) - } - - handleStatusMsgCounter.Inc(1) - - if msg.Size > ProtocolMaxMsgSize { - return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize) - } - - var status statusMsgData - if err := msg.Decode(&status); err != nil { - return fmt.Errorf("<- %v: %v", msg, err) - } - - if status.NetworkId != self.NetworkId { - return fmt.Errorf("network id mismatch: %d (!= %d)", status.NetworkId, self.NetworkId) - } - - if Version != status.Version { - return fmt.Errorf("protocol version mismatch: %d (!= %d)", status.Version, Version) - } - - self.remoteAddr = self.peerAddr(status.Addr) - log.Trace(fmt.Sprintf("self: advertised IP: %v, peer advertised: %v, local address: %v\npeer: advertised IP: %v, remote address: %v\n", self.selfAddr(), self.remoteAddr, self.peer.LocalAddr(), status.Addr.IP, self.peer.RemoteAddr())) - - if self.swapEnabled { - // set remote profile for accounting - self.swap, err = bzzswap.NewSwap(self.swapParams, status.Swap, self.backend, self) - if err != nil { - return err - } - } - - 
log.Info(fmt.Sprintf("Peer %08x is capable (%d/%d)", self.remoteAddr.Addr[:4], status.Version, status.NetworkId)) - err = self.hive.addPeer(&peer{bzz: self}) - if err != nil { - return err - } - - // hive sets syncstate so sync should start after node added - log.Info(fmt.Sprintf("syncronisation request sent with %v", self.syncState)) - self.syncRequest() - - return nil -} - -func (self *bzz) sync(state *syncState) error { - // syncer setup - if self.syncer != nil { - return errors.New("sync request can only be sent once") - } - - cnt := self.dbAccess.counter() - remoteaddr := self.remoteAddr.Addr - start, stop := self.hive.kad.KeyRange(remoteaddr) - - // an explicitly received nil syncstate disables syncronisation - if state == nil { - self.syncEnabled = false - log.Warn(fmt.Sprintf("syncronisation disabled for peer %v", self)) - state = &syncState{DbSyncState: &storage.DbSyncState{}, Synced: true} - } else { - state.synced = make(chan bool) - state.SessionAt = cnt - if storage.IsZeroKey(state.Stop) && state.Synced { - state.Start = storage.Key(start[:]) - state.Stop = storage.Key(stop[:]) - } - log.Debug(fmt.Sprintf("syncronisation requested by peer %v at state %v", self, state)) - } - var err error - self.syncer, err = newSyncer( - self.requestDb, - storage.Key(remoteaddr[:]), - self.dbAccess, - self.unsyncedKeys, self.store, - self.syncParams, state, func() bool { return self.syncEnabled }, - ) - if err != nil { - return nil - } - log.Trace(fmt.Sprintf("syncer set for peer %v", self)) - return nil -} - -func (self *bzz) String() string { - return self.remoteAddr.String() -} - -// repair reported address if IP missing -func (self *bzz) peerAddr(base *peerAddr) *peerAddr { - if base.IP.IsUnspecified() { - host, _, _ := net.SplitHostPort(self.peer.RemoteAddr().String()) - base.IP = net.ParseIP(host) - } - return base -} - -// returns self advertised node connection info (listening address w enodes) -// IP will get repaired on the other end if missing -// or 
resolved via ID by discovery at dialout -func (self *bzz) selfAddr() *peerAddr { - id := self.hive.id - host, port, _ := net.SplitHostPort(self.hive.listenAddr()) - intport, _ := strconv.Atoi(port) - addr := &peerAddr{ - Addr: self.hive.addr, - ID: id[:], - IP: net.ParseIP(host), - Port: uint16(intport), - } - return addr -} - -// outgoing messages -// send retrieveRequestMsg -func (self *bzz) retrieve(req *retrieveRequestMsgData) error { - return self.send(retrieveRequestMsg, req) -} - -// send storeRequestMsg -func (self *bzz) store(req *storeRequestMsgData) error { - return self.send(storeRequestMsg, req) -} - -func (self *bzz) syncRequest() error { - req := &syncRequestMsgData{} - if self.hive.syncEnabled { - log.Debug(fmt.Sprintf("syncronisation request to peer %v at state %v", self, self.syncState)) - req.SyncState = self.syncState - } - if self.syncState == nil { - log.Warn(fmt.Sprintf("syncronisation disabled for peer %v at state %v", self, self.syncState)) - } - return self.send(syncRequestMsg, req) -} - -// queue storeRequestMsg in request db -func (self *bzz) deliveryRequest(reqs []*syncRequest) error { - req := &deliveryRequestMsgData{ - Deliver: reqs, - } - return self.send(deliveryRequestMsg, req) -} - -// batch of syncRequests to send off -func (self *bzz) unsyncedKeys(reqs []*syncRequest, state *syncState) error { - req := &unsyncedKeysMsgData{ - Unsynced: reqs, - State: state, - } - return self.send(unsyncedKeysMsg, req) -} - -// send paymentMsg -func (self *bzz) Pay(units int, promise swap.Promise) { - req := &paymentMsgData{uint(units), promise.(*chequebook.Cheque)} - self.payment(req) -} - -// send paymentMsg -func (self *bzz) payment(req *paymentMsgData) error { - return self.send(paymentMsg, req) -} - -// sends peersMsg -func (self *bzz) peers(req *peersMsgData) error { - return self.send(peersMsg, req) -} - -func (self *bzz) send(msg uint64, data interface{}) error { - if self.hive.blockWrite { - return fmt.Errorf("network write blocked") - } 
- log.Trace(fmt.Sprintf("-> %v: %v (%T) to %v", msg, data, data, self)) - err := p2p.Send(self.rw, msg, data) - if err != nil { - self.Drop() - } - return err -} diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go deleted file mode 100644 index 988d0ac923c9..000000000000 --- a/swarm/network/protocol_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package network diff --git a/swarm/network/syncdb.go b/swarm/network/syncdb.go deleted file mode 100644 index d6ccb40e31b9..000000000000 --- a/swarm/network/syncdb.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package network - -import ( - "encoding/binary" - "fmt" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -const counterKeyPrefix = 0x01 - -/* -syncDb is a queueing service for outgoing deliveries. -One instance per priority queue for each peer - -a syncDb instance maintains an in-memory buffer (of capacity bufferSize) -once its in-memory buffer is full it switches to persisting in db -and dbRead iterator iterates through the items keeping their order -once the db read catches up (there is no more items in the db) then -it switches back to in-memory buffer. - -when syncdb is stopped all items in the buffer are saved to the db -*/ -type syncDb struct { - start []byte // this syncdb starting index in requestdb - key storage.Key // remote peers address key - counterKey []byte // db key to persist counter - priority uint // priotity High|Medium|Low - buffer chan interface{} // incoming request channel - db *storage.LDBDatabase // underlying db (TODO should be interface) - done chan bool // chan to signal goroutines finished quitting - quit chan bool // chan to signal quitting to goroutines - total, dbTotal int // counts for one session - batch chan chan int // channel for batch requests - dbBatchSize uint // number of items before batch is saved -} - -// constructor needs a shared request db (leveldb) -// priority is used in the index key -// uses a buffer and a leveldb for persistent storage -// bufferSize, dbBatchSize are config parameters -func newSyncDb(db *storage.LDBDatabase, key storage.Key, priority uint, bufferSize, dbBatchSize uint, deliver func(interface{}, chan bool) bool) *syncDb { - start := make([]byte, 42) - start[1] = byte(priorities - priority) - copy(start[2:34], key) - - 
counterKey := make([]byte, 34) - counterKey[0] = counterKeyPrefix - copy(counterKey[1:], start[1:34]) - - syncdb := &syncDb{ - start: start, - key: key, - counterKey: counterKey, - priority: priority, - buffer: make(chan interface{}, bufferSize), - db: db, - done: make(chan bool), - quit: make(chan bool), - batch: make(chan chan int), - dbBatchSize: dbBatchSize, - } - log.Trace(fmt.Sprintf("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority)) - - // starts the main forever loop reading from buffer - go syncdb.bufferRead(deliver) - return syncdb -} - -/* -bufferRead is a forever iterator loop that takes care of delivering -outgoing store requests reads from incoming buffer - -its argument is the deliver function taking the item as first argument -and a quit channel as second. -Closing of this channel is supposed to abort all waiting for delivery -(typically network write) - -The iteration switches between 2 modes, -* buffer mode reads the in-memory buffer and delivers the items directly -* db mode reads from the buffer and writes to the db, parallelly another -routine is started that reads from the db and delivers items - -If there is buffer contention in buffer mode (slow network, high upload volume) -syncdb switches to db mode and starts dbRead -Once db backlog is delivered, it reverts back to in-memory buffer - -It is automatically started when syncdb is initialised. - -It saves the buffer to db upon receiving quit signal. 
syncDb#stop() -*/ -func (self *syncDb) bufferRead(deliver func(interface{}, chan bool) bool) { - var buffer, db chan interface{} // channels representing the two read modes - var more bool - var req interface{} - var entry *syncDbEntry - var inBatch, inDb int - batch := new(leveldb.Batch) - var dbSize chan int - quit := self.quit - counterValue := make([]byte, 8) - - // counter is used for keeping the items in order, persisted to db - // start counter where db was at, 0 if not found - data, err := self.db.Get(self.counterKey) - var counter uint64 - if err == nil { - counter = binary.BigEndian.Uint64(data) - log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter)) - } else { - log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter)) - } - -LOOP: - for { - // waiting for item next in the buffer, or quit signal or batch request - select { - // buffer only closes when writing to db - case req = <-buffer: - // deliver request : this is blocking on network write so - // it is passed the quit channel as argument, so that it returns - // if syncdb is stopped. In this case we need to save the item to the db - more = deliver(req, self.quit) - if !more { - log.Debug(fmt.Sprintf("syncDb[%v/%v] quit: switching to db. 
session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)) - // received quit signal, save request currently waiting delivery - // by switching to db mode and closing the buffer - buffer = nil - db = self.buffer - close(db) - quit = nil // needs to block the quit case in select - break // break from select, this item will be written to the db - } - self.total++ - log.Trace(fmt.Sprintf("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)) - // by the time deliver returns, there were new writes to the buffer - // if buffer contention is detected, switch to db mode which drains - // the buffer so no process will block on pushing store requests - if len(buffer) == cap(buffer) { - log.Debug(fmt.Sprintf("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total)) - buffer = nil - db = self.buffer - } - continue LOOP - - // incoming entry to put into db - case req, more = <-db: - if !more { - // only if quit is called, saved all the buffer - binary.BigEndian.PutUint64(counterValue, counter) - batch.Put(self.counterKey, counterValue) // persist counter in batch - self.writeSyncBatch(batch) // save batch - log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority)) - break LOOP - } - self.dbTotal++ - self.total++ - // otherwise break after select - case dbSize = <-self.batch: - // explicit request for batch - if inBatch == 0 && quit != nil { - // there was no writes since the last batch so db depleted - // switch to buffer mode - log.Debug(fmt.Sprintf("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority)) - db = nil - buffer = self.buffer - dbSize <- 0 // indicates to 'caller' that batch has been written - inDb = 0 - continue LOOP - } - binary.BigEndian.PutUint64(counterValue, counter) - batch.Put(self.counterKey, counterValue) - 
log.Debug(fmt.Sprintf("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, inBatch, counter, self.counterKey, counterValue)) - batch = self.writeSyncBatch(batch) - dbSize <- inBatch // indicates to 'caller' that batch has been written - inBatch = 0 - continue LOOP - - // closing syncDb#quit channel is used to signal to all goroutines to quit - case <-quit: - // need to save backlog, so switch to db mode - db = self.buffer - buffer = nil - quit = nil - log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority)) - close(db) - continue LOOP - } - - // only get here if we put req into db - entry, err = self.newSyncDbEntry(req, counter) - if err != nil { - log.Warn(fmt.Sprintf("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err)) - continue LOOP - } - batch.Put(entry.key, entry.val) - log.Trace(fmt.Sprintf("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter)) - // if just switched to db mode and not quitting, then launch dbRead - // in a parallel go routine to send deliveries from db - if inDb == 0 && quit != nil { - log.Trace(fmt.Sprintf("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority)) - go self.dbRead(true, counter, deliver) - } - inDb++ - inBatch++ - counter++ - // need to save the batch if it gets too large (== dbBatchSize) - if inBatch%int(self.dbBatchSize) == 0 { - batch = self.writeSyncBatch(batch) - } - } - log.Info(fmt.Sprintf("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter)) - close(self.done) -} - -// writes the batch to the db and returns a new batch object -func (self *syncDb) writeSyncBatch(batch *leveldb.Batch) *leveldb.Batch { - err := self.db.Write(batch) - if err != nil { - log.Warn(fmt.Sprintf("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err)) - return batch - } - return 
new(leveldb.Batch) -} - -// abstract type for db entries (TODO could be a feature of Receipts) -type syncDbEntry struct { - key, val []byte -} - -func (self syncDbEntry) String() string { - return fmt.Sprintf("key: %x, value: %x", self.key, self.val) -} - -/* - dbRead is iterating over store requests to be sent over to the peer - this is mainly to prevent crashes due to network output buffer contention (???) - as well as to make syncronisation resilient to disconnects - the messages are supposed to be sent in the p2p priority queue. - - the request DB is shared between peers, but domains for each syncdb - are disjoint. dbkeys (42 bytes) are structured: - * 0: 0x00 (0x01 reserved for counter key) - * 1: priorities - priority (so that high priority can be replayed first) - * 2-33: peers address - * 34-41: syncdb counter to preserve order (this field is missing for the counter key) - - values (40 bytes) are: - * 0-31: key - * 32-39: request id - -dbRead needs a boolean to indicate if on first round all the historical -record is synced. 
Second argument to indicate current db counter -The third is the function to apply -*/ -func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}, chan bool) bool) { - key := make([]byte, 42) - copy(key, self.start) - binary.BigEndian.PutUint64(key[34:], counter) - var batches, n, cnt, total int - var more bool - var entry *syncDbEntry - var it iterator.Iterator - var del *leveldb.Batch - batchSizes := make(chan int) - - for { - // if useBatches is false, cnt is not set - if useBatches { - // this could be called before all cnt items sent out - // so that loop is not blocking while delivering - // only relevant if cnt is large - select { - case self.batch <- batchSizes: - case <-self.quit: - return - } - // wait for the write to finish and get the item count in the next batch - cnt = <-batchSizes - batches++ - if cnt == 0 { - // empty - return - } - } - it = self.db.NewIterator() - it.Seek(key) - if !it.Valid() { - copy(key, self.start) - useBatches = true - continue - } - del = new(leveldb.Batch) - log.Trace(fmt.Sprintf("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt)) - - for n = 0; !useBatches || n < cnt; it.Next() { - copy(key, it.Key()) - if len(key) == 0 || key[0] != 0 { - copy(key, self.start) - useBatches = true - break - } - val := make([]byte, 40) - copy(val, it.Value()) - entry = &syncDbEntry{key, val} - // log.Trace(fmt.Sprintf("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total)) - more = fun(entry, self.quit) - if !more { - // quit received when waiting to deliver entry, the entry will not be deleted - log.Trace(fmt.Sprintf("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt)) - break - } - // since subsequent batches of the same db session are indexed incrementally - // deleting earlier batches can be delayed and 
parallelised - // this could be batch delete when db is idle (but added complexity esp when quitting) - del.Delete(key) - n++ - total++ - } - log.Debug(fmt.Sprintf("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total)) - self.db.Write(del) // this could be async called only when db is idle - it.Release() - } -} - -// -func (self *syncDb) stop() { - close(self.quit) - <-self.done -} - -// calculate a dbkey for the request, for the db to work -// see syncdb for db key structure -// polimorphic: accepted types, see syncer#addRequest -func (self *syncDb) newSyncDbEntry(req interface{}, counter uint64) (entry *syncDbEntry, err error) { - var key storage.Key - var chunk *storage.Chunk - var id uint64 - var ok bool - var sreq *storeRequestMsgData - - if key, ok = req.(storage.Key); ok { - id = generateId() - } else if chunk, ok = req.(*storage.Chunk); ok { - key = chunk.Key - id = generateId() - } else if sreq, ok = req.(*storeRequestMsgData); ok { - key = sreq.Key - id = sreq.Id - } else if entry, ok = req.(*syncDbEntry); !ok { - return nil, fmt.Errorf("type not allowed: %v (%T)", req, req) - } - - // order by peer > priority > seqid - // value is request id if exists - if entry == nil { - dbkey := make([]byte, 42) - dbval := make([]byte, 40) - - // encode key - copy(dbkey[:], self.start[:34]) // db peer - binary.BigEndian.PutUint64(dbkey[34:], counter) - // encode value - copy(dbval, key[:]) - binary.BigEndian.PutUint64(dbval[32:], id) - - entry = &syncDbEntry{dbkey, dbval} - } - return -} diff --git a/swarm/network/syncdb_test.go b/swarm/network/syncdb_test.go deleted file mode 100644 index 44588c002c92..000000000000 --- a/swarm/network/syncdb_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package network - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "testing" - "time" - - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -func init() { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlCrit, log.StreamHandler(os.Stderr, log.TerminalFormat(false)))) -} - -type testSyncDb struct { - *syncDb - c int - t *testing.T - fromDb chan bool - delivered [][]byte - sent []int - dbdir string - at int -} - -func newTestSyncDb(priority, bufferSize, batchSize int, dbdir string, t *testing.T) *testSyncDb { - if len(dbdir) == 0 { - tmp, err := os.MkdirTemp(os.TempDir(), "syncdb-test") - if err != nil { - t.Fatalf("unable to create temporary direcory %v: %v", tmp, err) - } - dbdir = tmp - } - db, err := storage.NewLDBDatabase(filepath.Join(dbdir, "requestdb")) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } - self := &testSyncDb{ - fromDb: make(chan bool), - dbdir: dbdir, - t: t, - } - h := crypto.Keccak256Hash([]byte{0}) - key := storage.Key(h[:]) - self.syncDb = newSyncDb(db, key, uint(priority), uint(bufferSize), uint(batchSize), self.deliver) - // kick off db iterator right away, if no items on db this will allow - // reading from the buffer - return self - -} - -func (self 
*testSyncDb) close() { - self.db.Close() - os.RemoveAll(self.dbdir) -} - -func (self *testSyncDb) push(n int) { - for i := 0; i < n; i++ { - self.buffer <- storage.Key(crypto.Keccak256([]byte{byte(self.c)})) - self.sent = append(self.sent, self.c) - self.c++ - } - log.Debug(fmt.Sprintf("pushed %v requests", n)) -} - -func (self *testSyncDb) draindb() { - it := self.db.NewIterator() - defer it.Release() - for { - it.Seek(self.start) - if !it.Valid() { - return - } - k := it.Key() - if len(k) == 0 || k[0] == 1 { - return - } - it.Release() - it = self.db.NewIterator() - } -} - -func (self *testSyncDb) deliver(req interface{}, quit chan bool) bool { - _, db := req.(*syncDbEntry) - key, _, _, _, err := parseRequest(req) - if err != nil { - self.t.Fatalf("unexpected error of key %v: %v", key, err) - } - self.delivered = append(self.delivered, key) - select { - case self.fromDb <- db: - return true - case <-quit: - return false - } -} - -func (self *testSyncDb) expect(n int, db bool) { - var ok bool - // for n items - for i := 0; i < n; i++ { - ok = <-self.fromDb - if self.at+1 > len(self.delivered) { - self.t.Fatalf("expected %v, got %v", self.at+1, len(self.delivered)) - } - if len(self.sent) > self.at && !bytes.Equal(crypto.Keccak256([]byte{byte(self.sent[self.at])}), self.delivered[self.at]) { - self.t.Fatalf("expected delivery %v/%v/%v to be hash of %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db) - log.Debug(fmt.Sprintf("%v/%v/%v to be hash of %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db)) - } - if !ok && db { - self.t.Fatalf("expected delivery %v/%v/%v from db", i, n, self.at) - } - if ok && !db { - self.t.Fatalf("expected delivery %v/%v/%v from cache", i, n, self.at) - } - self.at++ - } -} - -func TestSyncDb(t *testing.T) { - t.Skip("fails randomly on all platforms") - - priority := High - bufferSize := 5 - batchSize := 2 * bufferSize - s := newTestSyncDb(priority, bufferSize, batchSize, "", t) - defer s.close() - defer 
s.stop() - s.dbRead(false, 0, s.deliver) - s.draindb() - - s.push(4) - s.expect(1, false) - // 3 in buffer - time.Sleep(100 * time.Millisecond) - s.push(3) - // push over limit - s.expect(1, false) - // one popped from the buffer, then contention detected - s.expect(4, true) - s.push(4) - s.expect(5, true) - // depleted db, switch back to buffer - s.draindb() - s.push(5) - s.expect(4, false) - s.push(3) - s.expect(4, false) - // buffer depleted - time.Sleep(100 * time.Millisecond) - s.push(6) - s.expect(1, false) - // push into buffer full, switch to db - s.expect(5, true) - s.draindb() - s.push(1) - s.expect(1, false) -} - -func TestSaveSyncDb(t *testing.T) { - amount := 30 - priority := High - bufferSize := amount - batchSize := 10 - s := newTestSyncDb(priority, bufferSize, batchSize, "", t) - go s.dbRead(false, 0, s.deliver) - s.push(amount) - s.stop() - s.db.Close() - - s = newTestSyncDb(priority, bufferSize, batchSize, s.dbdir, t) - go s.dbRead(false, 0, s.deliver) - s.expect(amount, true) - for i, key := range s.delivered { - expKey := crypto.Keccak256([]byte{byte(i)}) - if !bytes.Equal(key, expKey) { - t.Fatalf("delivery %v expected to be key %x, got %x", i, expKey, key) - } - } - s.push(amount) - s.expect(amount, false) - for i := amount; i < 2*amount; i++ { - key := s.delivered[i] - expKey := crypto.Keccak256([]byte{byte(i - amount)}) - if !bytes.Equal(key, expKey) { - t.Fatalf("delivery %v expected to be key %x, got %x", i, expKey, key) - } - } - s.stop() - s.db.Close() - - s = newTestSyncDb(priority, bufferSize, batchSize, s.dbdir, t) - defer s.close() - defer s.stop() - - go s.dbRead(false, 0, s.deliver) - s.push(1) - s.expect(1, false) - -} diff --git a/swarm/network/syncer.go b/swarm/network/syncer.go deleted file mode 100644 index abdb33954be9..000000000000 --- a/swarm/network/syncer.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package network - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "path/filepath" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -// syncer parameters (global, not peer specific) default values -const ( - requestDbBatchSize = 512 // size of batch before written to request db - keyBufferSize = 1024 // size of buffer for unsynced keys - syncBatchSize = 128 // maximum batchsize for outgoing requests - syncBufferSize = 128 // size of buffer for delivery requests - syncCacheSize = 1024 // cache capacity to store request queue in memory -) - -// priorities -const ( - Low = iota // 0 - Medium // 1 - High // 2 - priorities // 3 number of priority levels -) - -// request types -const ( - DeliverReq = iota // 0 - PushReq // 1 - PropagateReq // 2 - HistoryReq // 3 - BacklogReq // 4 -) - -// json serialisable struct to record the syncronisation state between 2 peers -type syncState struct { - *storage.DbSyncState // embeds the following 4 fields: - // Start Key // lower limit of address space - // Stop Key // upper limit of address space - // First uint64 // counter taken from last sync state - // Last uint64 // counter of remote peer dbStore at the time of last connection - SessionAt uint64 // set at the time of connection - LastSeenAt uint64 
// set at the time of connection - Latest storage.Key // cursor of dbstore when last (continuously set by syncer) - Synced bool // true iff Sync is done up to the last disconnect - synced chan bool // signal that sync stage finished -} - -// wrapper of db-s to provide mockable custom local chunk store access to syncer -type DbAccess struct { - db *storage.DbStore - loc *storage.LocalStore -} - -func NewDbAccess(loc *storage.LocalStore) *DbAccess { - return &DbAccess{loc.DbStore.(*storage.DbStore), loc} -} - -// to obtain the chunks from key or request db entry only -func (self *DbAccess) get(key storage.Key) (*storage.Chunk, error) { - return self.loc.Get(key) -} - -// current storage counter of chunk db -func (self *DbAccess) counter() uint64 { - return self.db.Counter() -} - -// implemented by dbStoreSyncIterator -type keyIterator interface { - Next() storage.Key -} - -// generator function for iteration by address range and storage counter -func (self *DbAccess) iterator(s *syncState) keyIterator { - it, err := self.db.NewSyncIterator(*(s.DbSyncState)) - if err != nil { - return nil - } - return keyIterator(it) -} - -func (self syncState) String() string { - if self.Synced { - return fmt.Sprintf( - "session started at: %v, last seen at: %v, latest key: %v", - self.SessionAt, self.LastSeenAt, - self.Latest.Log(), - ) - } else { - return fmt.Sprintf( - "address: %v-%v, index: %v-%v, session started at: %v, last seen at: %v, latest key: %v", - self.Start.Log(), self.Stop.Log(), - self.First, self.Last, - self.SessionAt, self.LastSeenAt, - self.Latest.Log(), - ) - } -} - -// syncer parameters (global, not peer specific) -type SyncParams struct { - RequestDbPath string // path for request db (leveldb) - RequestDbBatchSize uint // nuber of items before batch is saved to requestdb - KeyBufferSize uint // size of key buffer - SyncBatchSize uint // maximum batchsize for outgoing requests - SyncBufferSize uint // size of buffer for - SyncCacheSize uint // cache capacity 
to store request queue in memory - SyncPriorities []uint // list of priority levels for req types 0-3 - SyncModes []bool // list of sync modes for for req types 0-3 -} - -// constructor with default values -func NewDefaultSyncParams() *SyncParams { - return &SyncParams{ - RequestDbBatchSize: requestDbBatchSize, - KeyBufferSize: keyBufferSize, - SyncBufferSize: syncBufferSize, - SyncBatchSize: syncBatchSize, - SyncCacheSize: syncCacheSize, - SyncPriorities: []uint{High, Medium, Medium, Low, Low}, - SyncModes: []bool{true, true, true, true, false}, - } -} - -//this can only finally be set after all config options (file, cmd line, env vars) -//have been evaluated -func (self *SyncParams) Init(path string) { - self.RequestDbPath = filepath.Join(path, "requests") -} - -// syncer is the agent that manages content distribution/storage replication/chunk storeRequest forwarding -type syncer struct { - *SyncParams // sync parameters - syncF func() bool // if syncing is needed - key storage.Key // remote peers address key - state *syncState // sync state for our dbStore - syncStates chan *syncState // different stages of sync - deliveryRequest chan bool // one of two triggers needed to send unsyncedKeys - newUnsyncedKeys chan bool // one of two triggers needed to send unsynced keys - quit chan bool // signal to quit loops - - // DB related fields - dbAccess *DbAccess // access to dbStore - - // native fields - queues [priorities]*syncDb // in-memory cache / queues for sync reqs - keys [priorities]chan interface{} // buffer for unsynced keys - deliveries [priorities]chan *storeRequestMsgData // delivery - - // bzz protocol instance outgoing message callbacks (mockable for testing) - unsyncedKeys func([]*syncRequest, *syncState) error // send unsyncedKeysMsg - store func(*storeRequestMsgData) error // send storeRequestMsg -} - -// a syncer instance is linked to each peer connection -// constructor is called from protocol after successful handshake -// the returned instance is 
attached to the peer and can be called -// by the forwarder -func newSyncer( - db *storage.LDBDatabase, remotekey storage.Key, - dbAccess *DbAccess, - unsyncedKeys func([]*syncRequest, *syncState) error, - store func(*storeRequestMsgData) error, - params *SyncParams, - state *syncState, - syncF func() bool, -) (*syncer, error) { - - syncBufferSize := params.SyncBufferSize - keyBufferSize := params.KeyBufferSize - dbBatchSize := params.RequestDbBatchSize - - self := &syncer{ - syncF: syncF, - key: remotekey, - dbAccess: dbAccess, - syncStates: make(chan *syncState, 20), - deliveryRequest: make(chan bool, 1), - newUnsyncedKeys: make(chan bool, 1), - SyncParams: params, - state: state, - quit: make(chan bool), - unsyncedKeys: unsyncedKeys, - store: store, - } - - // initialising - for i := 0; i < priorities; i++ { - self.keys[i] = make(chan interface{}, keyBufferSize) - self.deliveries[i] = make(chan *storeRequestMsgData) - // initialise a syncdb instance for each priority queue - self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i))) - } - log.Info(fmt.Sprintf("syncer started: %v", state)) - // launch chunk delivery service - go self.syncDeliveries() - // launch sync task manager - if self.syncF() { - go self.sync() - } - // process unsynced keys to broadcast - go self.syncUnsyncedKeys() - - return self, nil -} - -// metadata serialisation -func encodeSync(state *syncState) (*json.RawMessage, error) { - data, err := json.MarshalIndent(state, "", " ") - if err != nil { - return nil, err - } - meta := json.RawMessage(data) - return &meta, nil -} - -func decodeSync(meta *json.RawMessage) (*syncState, error) { - if meta == nil { - return nil, fmt.Errorf("unable to deserialise sync state from ") - } - data := []byte(*(meta)) - if len(data) == 0 { - return nil, fmt.Errorf("unable to deserialise sync state from ") - } - state := &syncState{DbSyncState: &storage.DbSyncState{}} - err := json.Unmarshal(data, state) - return 
state, err -} - -/* - sync implements the syncing script - * first all items left in the request Db are replayed - * type = StaleSync - * Mode: by default once again via confirmation roundtrip - * Priority: the items are replayed as the proirity specified for StaleSync - * but within the order respects earlier priority level of request - * after all items are consumed for a priority level, the the respective - queue for delivery requests is open (this way new reqs not written to db) - (TODO: this should be checked) - * the sync state provided by the remote peer is used to sync history - * all the backlog from earlier (aborted) syncing is completed starting from latest - * if Last < LastSeenAt then all items in between then process all - backlog from upto last disconnect - * if Last > 0 && - - sync is called from the syncer constructor and is not supposed to be used externally -*/ -func (self *syncer) sync() { - state := self.state - // sync finished - defer close(self.syncStates) - - // 0. first replay stale requests from request db - if state.SessionAt == 0 { - log.Debug(fmt.Sprintf("syncer[%v]: nothing to sync", self.key.Log())) - return - } - log.Debug(fmt.Sprintf("syncer[%v]: start replaying stale requests from request db", self.key.Log())) - for p := priorities - 1; p >= 0; p-- { - self.queues[p].dbRead(false, 0, self.replay()) - } - log.Debug(fmt.Sprintf("syncer[%v]: done replaying stale requests from request db", self.key.Log())) - - // unless peer is synced sync unfinished history beginning on - if !state.Synced { - start := state.Start - - if !storage.IsZeroKey(state.Latest) { - // 1. 
there is unfinished earlier sync - state.Start = state.Latest - log.Debug(fmt.Sprintf("syncer[%v]: start syncronising backlog (unfinished sync: %v)", self.key.Log(), state)) - // blocks while the entire history upto state is synced - self.syncState(state) - if state.Last < state.SessionAt { - state.First = state.Last + 1 - } - } - state.Latest = storage.ZeroKey - state.Start = start - // 2. sync up to last disconnect1 - if state.First < state.LastSeenAt { - state.Last = state.LastSeenAt - log.Debug(fmt.Sprintf("syncer[%v]: start syncronising history upto last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state)) - self.syncState(state) - state.First = state.LastSeenAt - } - state.Latest = storage.ZeroKey - - } else { - // synchronisation starts at end of last session - state.First = state.LastSeenAt - } - - // 3. sync up to current session start - // if there have been new chunks since last session - if state.LastSeenAt < state.SessionAt { - state.Last = state.SessionAt - log.Debug(fmt.Sprintf("syncer[%v]: start syncronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state)) - // blocks until state syncing is finished - self.syncState(state) - } - log.Info(fmt.Sprintf("syncer[%v]: syncing all history complete", self.key.Log())) - -} - -// wait till syncronised block uptil state is synced -func (self *syncer) syncState(state *syncState) { - self.syncStates <- state - select { - case <-state.synced: - case <-self.quit: - } -} - -// stop quits both request processor and saves the request cache to disk -func (self *syncer) stop() { - close(self.quit) - log.Trace(fmt.Sprintf("syncer[%v]: stop and save sync request db backlog", self.key.Log())) - for _, db := range self.queues { - db.stop() - } -} - -// rlp serialisable sync request -type syncRequest struct { - Key storage.Key - Priority uint -} - -func (self *syncRequest) String() string { - return fmt.Sprintf("", self.Key.Log(), 
self.Priority) -} - -func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error) { - key, _, _, _, err := parseRequest(req) - // TODO: if req has chunk, it should be put in a cache - // create - if err != nil { - return nil, err - } - return &syncRequest{key, uint(p)}, nil -} - -// serves historical items from the DB -// * read is on demand, blocking unless history channel is read -// * accepts sync requests (syncStates) to create new db iterator -// * closes the channel one iteration finishes -func (self *syncer) syncHistory(state *syncState) chan interface{} { - var n uint - history := make(chan interface{}) - log.Debug(fmt.Sprintf("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop)) - it := self.dbAccess.iterator(state) - if it != nil { - go func() { - // signal end of the iteration ended - defer close(history) - IT: - for { - key := it.Next() - if key == nil { - break IT - } - select { - // blocking until history channel is read from - case history <- key: - n++ - log.Trace(fmt.Sprintf("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n)) - state.Latest = key - case <-self.quit: - return - } - } - log.Debug(fmt.Sprintf("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n)) - }() - } - return history -} - -// triggers key syncronisation -func (self *syncer) sendUnsyncedKeys() { - select { - case self.deliveryRequest <- true: - default: - } -} - -// assembles a new batch of unsynced keys -// * keys are drawn from the key buffers in order of priority queue -// * if the queues of priority for History (HistoryReq) or higher are depleted, -// historical data is used so historical items are lower priority within -// their priority group. 
-// * Order of historical data is unspecified -func (self *syncer) syncUnsyncedKeys() { - // send out new - var unsynced []*syncRequest - var more, justSynced bool - var keyCount, historyCnt int - var history chan interface{} - - priority := High - keys := self.keys[priority] - var newUnsyncedKeys, deliveryRequest chan bool - keyCounts := make([]int, priorities) - histPrior := self.SyncPriorities[HistoryReq] - syncStates := self.syncStates - state := self.state - -LOOP: - for { - - var req interface{} - // select the highest priority channel to read from - // keys channels are buffered so the highest priority ones - // are checked first - integrity can only be guaranteed if writing - // is locked while selecting - if priority != High || len(keys) == 0 { - // selection is not needed if the High priority queue has items - keys = nil - PRIORITIES: - for priority = High; priority >= 0; priority-- { - // the first priority channel that is non-empty will be assigned to keys - if len(self.keys[priority]) > 0 { - log.Trace(fmt.Sprintf("syncer[%v]: reading request with priority %v", self.key.Log(), priority)) - keys = self.keys[priority] - break PRIORITIES - } - log.Trace(fmt.Sprintf("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low]))) - // if the input queue is empty on this level, resort to history if there is any - if uint(priority) == histPrior && history != nil { - log.Trace(fmt.Sprintf("syncer[%v]: reading history for %v", self.key.Log(), self.key)) - keys = history - break PRIORITIES - } - } - } - - // if peer ready to receive but nothing to send - if keys == nil && deliveryRequest == nil { - // if no items left and switch to waiting mode - log.Trace(fmt.Sprintf("syncer[%v]: buffers consumed. 
Waiting", self.key.Log())) - newUnsyncedKeys = self.newUnsyncedKeys - } - - // send msg iff - // * peer is ready to receive keys AND ( - // * all queues and history are depleted OR - // * batch full OR - // * all history have been consumed, synced) - if deliveryRequest == nil && - (justSynced || - len(unsynced) > 0 && keys == nil || - len(unsynced) == int(self.SyncBatchSize)) { - justSynced = false - // listen to requests - deliveryRequest = self.deliveryRequest - newUnsyncedKeys = nil // not care about data until next req comes in - // set sync to current counter - // (all nonhistorical outgoing traffic sheduled and persisted - state.LastSeenAt = self.dbAccess.counter() - state.Latest = storage.ZeroKey - log.Trace(fmt.Sprintf("syncer[%v]: sending %v", self.key.Log(), unsynced)) - // send the unsynced keys - stateCopy := *state - err := self.unsyncedKeys(unsynced, &stateCopy) - if err != nil { - log.Warn(fmt.Sprintf("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err)) - } - self.state = state - log.Debug(fmt.Sprintf("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy)) - unsynced = nil - keys = nil - } - - // process item and add it to the batch - select { - case <-self.quit: - break LOOP - case req, more = <-keys: - if keys == history && !more { - log.Trace(fmt.Sprintf("syncer[%v]: syncing history segment complete", self.key.Log())) - // history channel is closed, waiting for new state (called from sync()) - syncStates = self.syncStates - state.Synced = true // this signals that the current segment is complete - select { - case state.synced <- false: - case <-self.quit: - break LOOP - } - justSynced = true - history = nil - } - case <-deliveryRequest: - log.Trace(fmt.Sprintf("syncer[%v]: peer ready to receive", self.key.Log())) - - // this 1 cap channel can wake up the loop - // signaling that peer is ready to receive unsynced Keys - // the 
channel is set to nil any further writes will be ignored - deliveryRequest = nil - - case <-newUnsyncedKeys: - log.Trace(fmt.Sprintf("syncer[%v]: new unsynced keys available", self.key.Log())) - // this 1 cap channel can wake up the loop - // signals that data is available to send if peer is ready to receive - newUnsyncedKeys = nil - keys = self.keys[High] - - case state, more = <-syncStates: - // this resets the state - if !more { - state = self.state - log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing complete upto %v)", self.key.Log(), priority, state)) - state.Synced = true - syncStates = nil - } else { - log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) syncing history upto %v priority %v)", self.key.Log(), priority, state, histPrior)) - state.Synced = false - history = self.syncHistory(state) - // only one history at a time, only allow another one once the - // history channel is closed - syncStates = nil - } - } - if req == nil { - continue LOOP - } - - log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req)) - keyCounts[priority]++ - keyCount++ - if keys == history { - log.Trace(fmt.Sprintf("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced)) - historyCnt++ - } - if sreq, err := self.newSyncRequest(req, priority); err == nil { - // extract key from req - log.Trace(fmt.Sprintf("syncer[%v]: (priority %v): request %v (synced = %v)", self.key.Log(), priority, req, state.Synced)) - unsynced = append(unsynced, sreq) - } else { - log.Warn(fmt.Sprintf("syncer[%v]: (priority %v): error creating request for %v: %v)", self.key.Log(), priority, req, err)) - } - - } -} - -// delivery loop -// takes into account priority, send store Requests with chunk (delivery) -// idle blocking if no new deliveries in any of the queues -func (self *syncer) syncDeliveries() { - var req *storeRequestMsgData - p := High - var deliveries chan *storeRequestMsgData - var msg 
*storeRequestMsgData - var err error - var c = [priorities]int{} - var n = [priorities]int{} - var total, success uint - - for { - deliveries = self.deliveries[p] - select { - case req = <-deliveries: - n[p]++ - c[p]++ - default: - if p == Low { - // blocking, depletion on all channels, no preference for priority - select { - case req = <-self.deliveries[High]: - n[High]++ - case req = <-self.deliveries[Medium]: - n[Medium]++ - case req = <-self.deliveries[Low]: - n[Low]++ - case <-self.quit: - return - } - p = High - } else { - p-- - continue - } - } - total++ - msg, err = self.newStoreRequestMsgData(req) - if err != nil { - log.Warn(fmt.Sprintf("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err)) - } else { - err = self.store(msg) - if err != nil { - log.Warn(fmt.Sprintf("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err)) - } else { - success++ - log.Trace(fmt.Sprintf("syncer[%v]: %v successfully delivered", self.key.Log(), req)) - } - } - if total%self.SyncBatchSize == 0 { - log.Debug(fmt.Sprintf("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low])) - } - } -} - -/* - addRequest handles requests for delivery - it accepts 4 types: - - * storeRequestMsgData: coming from netstore propagate response - * chunk: coming from forwarding (questionable: id?) 
- * key: from incoming syncRequest - * syncDbEntry: key,id encoded in db - - If sync mode is on for the type of request, then - it sends the request to the keys queue of the correct priority - channel buffered with capacity (SyncBufferSize) - - If sync mode is off then, requests are directly sent to deliveries -*/ -func (self *syncer) addRequest(req interface{}, ty int) { - // retrieve priority for request type name int8 - - priority := self.SyncPriorities[ty] - // sync mode for this type ON - if self.syncF() || ty == DeliverReq { - if self.SyncModes[ty] { - self.addKey(req, priority, self.quit) - } else { - self.addDelivery(req, priority, self.quit) - } - } -} - -// addKey queues sync request for sync confirmation with given priority -// ie the key will go out in an unsyncedKeys message -func (self *syncer) addKey(req interface{}, priority uint, quit chan bool) bool { - select { - case self.keys[priority] <- req: - // this wakes up the unsynced keys loop if idle - select { - case self.newUnsyncedKeys <- true: - default: - } - return true - case <-quit: - return false - } -} - -// addDelivery queues delivery request for with given priority -// ie the chunk will be delivered ASAP mod priority queueing handled by syncdb -// requests are persisted across sessions for correct sync -func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool) bool { - select { - case self.queues[priority].buffer <- req: - return true - case <-quit: - return false - } -} - -// doDelivery delivers the chunk for the request with given priority -// without queuing -func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool { - msgdata, err := self.newStoreRequestMsgData(req) - if err != nil { - log.Warn(fmt.Sprintf("unable to deliver request %v: %v", msgdata, err)) - return false - } - select { - case self.deliveries[priority] <- msgdata: - return true - case <-quit: - return false - } -} - -// returns the delivery function for given priority -// 
passed on to syncDb -func (self *syncer) deliver(priority uint) func(req interface{}, quit chan bool) bool { - return func(req interface{}, quit chan bool) bool { - return self.doDelivery(req, priority, quit) - } -} - -// returns the replay function passed on to syncDb -// depending on sync mode settings for BacklogReq, -// re play of request db backlog sends items via confirmation -// or directly delivers -func (self *syncer) replay() func(req interface{}, quit chan bool) bool { - sync := self.SyncModes[BacklogReq] - priority := self.SyncPriorities[BacklogReq] - // sync mode for this type ON - if sync { - return func(req interface{}, quit chan bool) bool { - return self.addKey(req, priority, quit) - } - } else { - return func(req interface{}, quit chan bool) bool { - return self.doDelivery(req, priority, quit) - } - - } -} - -// given a request, extends it to a full storeRequestMsgData -// polimorphic: see addRequest for the types accepted -func (self *syncer) newStoreRequestMsgData(req interface{}) (*storeRequestMsgData, error) { - - key, id, chunk, sreq, err := parseRequest(req) - if err != nil { - return nil, err - } - - if sreq == nil { - if chunk == nil { - var err error - chunk, err = self.dbAccess.get(key) - if err != nil { - return nil, err - } - } - - sreq = &storeRequestMsgData{ - Id: id, - Key: chunk.Key, - SData: chunk.SData, - } - } - - return sreq, nil -} - -// parse request types and extracts, key, id, chunk, request if available -// does not do chunk lookup ! 
-func parseRequest(req interface{}) (storage.Key, uint64, *storage.Chunk, *storeRequestMsgData, error) { - var key storage.Key - var entry *syncDbEntry - var chunk *storage.Chunk - var id uint64 - var ok bool - var sreq *storeRequestMsgData - var err error - - if key, ok = req.(storage.Key); ok { - id = generateId() - - } else if entry, ok = req.(*syncDbEntry); ok { - id = binary.BigEndian.Uint64(entry.val[32:]) - key = storage.Key(entry.val[:32]) - - } else if chunk, ok = req.(*storage.Chunk); ok { - key = chunk.Key - id = generateId() - - } else if sreq, ok = req.(*storeRequestMsgData); ok { - key = sreq.Key - } else { - err = fmt.Errorf("type not allowed: %v (%T)", req, req) - } - - return key, id, chunk, sreq, err -} diff --git a/swarm/services/swap/swap.go b/swarm/services/swap/swap.go deleted file mode 100644 index 153f058968a0..000000000000 --- a/swarm/services/swap/swap.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package swap - -import ( - "context" - "crypto/ecdsa" - "fmt" - "math/big" - "os" - "path/filepath" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook" - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook/contract" - "github.com/XinFinOrg/XDPoSChain/core/types" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/swarm/services/swap/swap" -) - -// SwAP Swarm Accounting Protocol with -// SWAP^2 Strategies of Withholding Automatic Payments -// SWAP^3 Accreditation: payment via credit SWAP -// using chequebook pkg for delayed payments -// default parameters - -var ( - autoCashInterval = 300 * time.Second // default interval for autocash - autoCashThreshold = big.NewInt(50000000000000) // threshold that triggers autocash (wei) - autoDepositInterval = 300 * time.Second // default interval for autocash - autoDepositThreshold = big.NewInt(50000000000000) // threshold that triggers autodeposit (wei) - autoDepositBuffer = big.NewInt(100000000000000) // buffer that is surplus for fork protection etc (wei) - buyAt = big.NewInt(20000000000) // maximum chunk price host is willing to pay (wei) - sellAt = big.NewInt(20000000000) // minimum chunk price host requires (wei) - payAt = 100 // threshold that triggers payment {request} (units) - dropAt = 10000 // threshold that triggers disconnect (units) -) - -const ( - chequebookDeployRetries = 5 - chequebookDeployDelay = 1 * time.Second // delay between retries -) - -type SwapParams struct { - *swap.Params - *PayProfile -} - -type SwapProfile struct { - *swap.Profile - *PayProfile -} - -type PayProfile struct { - PublicKey string // check against signature of promise - Contract common.Address // address of chequebook contract - Beneficiary common.Address // recipient address for swarm sales revenue - privateKey *ecdsa.PrivateKey - publicKey 
*ecdsa.PublicKey - owner common.Address - chbook *chequebook.Chequebook - lock sync.RWMutex -} - -// create params with default values -func NewDefaultSwapParams() *SwapParams { - return &SwapParams{ - PayProfile: &PayProfile{}, - Params: &swap.Params{ - Profile: &swap.Profile{ - BuyAt: buyAt, - SellAt: sellAt, - PayAt: uint(payAt), - DropAt: uint(dropAt), - }, - Strategy: &swap.Strategy{ - AutoCashInterval: autoCashInterval, - AutoCashThreshold: autoCashThreshold, - AutoDepositInterval: autoDepositInterval, - AutoDepositThreshold: autoDepositThreshold, - AutoDepositBuffer: autoDepositBuffer, - }, - }, - } -} - -// this can only finally be set after all config options (file, cmd line, env vars) -// have been evaluated -func (self *SwapParams) Init(contract common.Address, prvkey *ecdsa.PrivateKey) { - pubkey := &prvkey.PublicKey - - self.PayProfile = &PayProfile{ - PublicKey: common.ToHex(crypto.FromECDSAPub(pubkey)), - Contract: contract, - Beneficiary: crypto.PubkeyToAddress(*pubkey), - privateKey: prvkey, - publicKey: pubkey, - owner: crypto.PubkeyToAddress(*pubkey), - } -} - -// swap constructor, parameters -// * global chequebook, assume deployed service and -// * the balance is at buffer. -// swap.Add(n) called in netstore -// n > 0 called when sending chunks = receiving retrieve requests -// OR sending cheques. -// n < 0 called when receiving chunks = receiving delivery responses -// OR receiving cheques. 
- -func NewSwap(local *SwapParams, remote *SwapProfile, backend chequebook.Backend, proto swap.Protocol) (self *swap.Swap, err error) { - var ( - ctx = context.TODO() - ok bool - in *chequebook.Inbox - out *chequebook.Outbox - ) - - // check if remote chequebook is valid - // insolvent chequebooks suicide so will signal as invalid - // TODO: monitoring a chequebooks events - ok, err = chequebook.ValidateCode(ctx, backend, remote.Contract) - if !ok { - log.Info(fmt.Sprintf("invalid contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err)) - } else { - pub, err := crypto.UnmarshalPubkey(common.FromHex(remote.PublicKey)) - if err != nil { - return nil, err - } - // remote contract valid, create inbox - in, err = chequebook.NewInbox(local.privateKey, remote.Contract, local.Beneficiary, pub, backend) - if err != nil { - log.Warn(fmt.Sprintf("unable to set up inbox for chequebook contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err)) - } - } - - // check if local chequebook contract is valid - ok, err = chequebook.ValidateCode(ctx, backend, local.Contract) - if !ok { - log.Warn(fmt.Sprintf("unable to set up outbox for peer %v: chequebook contract (owner: %v): %v)", proto, local.owner.Hex(), err)) - } else { - out = chequebook.NewOutbox(local.Chequebook(), remote.Beneficiary) - } - - pm := swap.Payment{ - In: in, - Out: out, - Buys: out != nil, - Sells: in != nil, - } - self, err = swap.New(local.Params, pm, proto) - if err != nil { - return - } - // remote profile given (first) in handshake - self.SetRemote(remote.Profile) - var buy, sell string - if self.Buys { - buy = "purchase from peer enabled at " + remote.SellAt.String() + " wei/chunk" - } else { - buy = "purchase from peer disabled" - } - if self.Sells { - sell = "selling to peer enabled at " + local.SellAt.String() + " wei/chunk" - } else { - sell = "selling to peer disabled" - } - log.Warn(fmt.Sprintf("SWAP arrangement with <%v>: %v; %v)", proto, buy, sell)) - - return -} - -func 
(self *SwapParams) Chequebook() *chequebook.Chequebook { - defer self.lock.Unlock() - self.lock.Lock() - return self.chbook -} - -func (self *SwapParams) PrivateKey() *ecdsa.PrivateKey { - return self.privateKey -} - -// func (self *SwapParams) PublicKey() *ecdsa.PublicKey { -// return self.publicKey -// } - -func (self *SwapParams) SetKey(prvkey *ecdsa.PrivateKey) { - self.privateKey = prvkey - self.publicKey = &prvkey.PublicKey -} - -// setChequebook(path, backend) wraps the -// chequebook initialiser and sets up autoDeposit to cover spending. -func (self *SwapParams) SetChequebook(ctx context.Context, backend chequebook.Backend, path string) error { - self.lock.Lock() - contract := self.Contract - self.lock.Unlock() - - valid, err := chequebook.ValidateCode(ctx, backend, contract) - if err != nil { - return err - } else if valid { - return self.newChequebookFromContract(path, backend) - } - return self.deployChequebook(ctx, backend, path) -} - -func (self *SwapParams) deployChequebook(ctx context.Context, backend chequebook.Backend, path string) error { - opts := bind.NewKeyedTransactor(self.privateKey) - opts.Value = self.AutoDepositBuffer - opts.Context = ctx - - log.Info(fmt.Sprintf("Deploying new chequebook (owner: %v)", opts.From.Hex())) - contract, err := deployChequebookLoop(opts, backend) - if err != nil { - log.Error(fmt.Sprintf("unable to deploy new chequebook: %v", err)) - return err - } - log.Info(fmt.Sprintf("new chequebook deployed at %v (owner: %v)", contract.Hex(), opts.From.Hex())) - - // need to save config at this point - self.lock.Lock() - self.Contract = contract - err = self.newChequebookFromContract(path, backend) - self.lock.Unlock() - if err != nil { - log.Warn(fmt.Sprintf("error initialising cheque book (owner: %v): %v", opts.From.Hex(), err)) - } - return err -} - -// repeatedly tries to deploy a chequebook. 
-func deployChequebookLoop(opts *bind.TransactOpts, backend chequebook.Backend) (addr common.Address, err error) { - var tx *types.Transaction - for try := 0; try < chequebookDeployRetries; try++ { - if try > 0 { - time.Sleep(chequebookDeployDelay) - } - if _, tx, _, err = contract.DeployChequebook(opts, backend); err != nil { - log.Warn(fmt.Sprintf("can't send chequebook deploy tx (try %d): %v", try, err)) - continue - } - if addr, err = bind.WaitDeployed(opts.Context, backend, tx); err != nil { - log.Warn(fmt.Sprintf("chequebook deploy error (try %d): %v", try, err)) - continue - } - return addr, nil - } - return addr, err -} - -// initialise the chequebook from a persisted json file or create a new one -// caller holds the lock -func (self *SwapParams) newChequebookFromContract(path string, backend chequebook.Backend) error { - hexkey := common.Bytes2Hex(self.Contract.Bytes()) - err := os.MkdirAll(filepath.Join(path, "chequebooks"), os.ModePerm) - if err != nil { - return fmt.Errorf("unable to create directory for chequebooks: %v", err) - } - - chbookpath := filepath.Join(path, "chequebooks", hexkey+".json") - self.chbook, err = chequebook.LoadChequebook(chbookpath, self.privateKey, backend, true) - - if err != nil { - self.chbook, err = chequebook.NewChequebook(chbookpath, self.Contract, self.privateKey, backend) - if err != nil { - log.Warn(fmt.Sprintf("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err)) - return fmt.Errorf("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err) - } - } - - self.chbook.AutoDeposit(self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer) - log.Info(fmt.Sprintf("auto deposit ON for %v -> %v: interval = %v, threshold = %v, buffer = %v)", crypto.PubkeyToAddress(*(self.publicKey)).Hex()[:8], self.Contract.Hex()[:8], self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer)) - - return nil -} diff --git a/swarm/services/swap/swap/swap.go 
b/swarm/services/swap/swap/swap.go deleted file mode 100644 index 4169d72826d7..000000000000 --- a/swarm/services/swap/swap/swap.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package swap - -import ( - "fmt" - "math/big" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -// SwAP Swarm Accounting Protocol with -// Swift Automatic Payments -// a peer to peer micropayment system - -// public swap profile -// public parameters for SWAP, serializable config struct passed in handshake -type Profile struct { - BuyAt *big.Int // accepted max price for chunk - SellAt *big.Int // offered sale price for chunk - PayAt uint // threshold that triggers payment request - DropAt uint // threshold that triggers disconnect -} - -// Strategy encapsulates parameters relating to -// automatic deposit and automatic cashing -type Strategy struct { - AutoCashInterval time.Duration // default interval for autocash - AutoCashThreshold *big.Int // threshold that triggers autocash (wei) - AutoDepositInterval time.Duration // default interval for autocash - AutoDepositThreshold *big.Int // threshold that triggers autodeposit (wei) - AutoDepositBuffer *big.Int // buffer that is surplus for fork protection etc (wei) -} - -// 
Params extends the public profile with private parameters relating to -// automatic deposit and automatic cashing -type Params struct { - *Profile - *Strategy -} - -// Promise -// 3rd party Provable Promise of Payment -// issued by outPayment -// serialisable to send with Protocol -type Promise interface{} - -// interface for the peer protocol for testing or external alternative payment -type Protocol interface { - Pay(int, Promise) // units, payment proof - Drop() - String() string -} - -// interface for the (delayed) ougoing payment system with autodeposit -type OutPayment interface { - Issue(amount *big.Int) (promise Promise, err error) - AutoDeposit(interval time.Duration, threshold, buffer *big.Int) - Stop() -} - -// interface for the (delayed) incoming payment system with autocash -type InPayment interface { - Receive(promise Promise) (*big.Int, error) - AutoCash(cashInterval time.Duration, maxUncashed *big.Int) - Stop() -} - -// swap is the swarm accounting protocol instance -// * pairwise accounting and payments -type Swap struct { - lock sync.Mutex // mutex for balance access - balance int // units of chunk/retrieval request - local *Params // local peer's swap parameters - remote *Profile // remote peer's swap profile - proto Protocol // peer communication protocol - Payment -} - -type Payment struct { - Out OutPayment // outgoing payment handler - In InPayment // incoming payment handler - Buys, Sells bool -} - -// swap constructor -func New(local *Params, pm Payment, proto Protocol) (self *Swap, err error) { - - self = &Swap{ - local: local, - Payment: pm, - proto: proto, - } - - self.SetParams(local) - - return -} - -// entry point for setting remote swap profile (e.g from handshake or other message) -func (self *Swap) SetRemote(remote *Profile) { - defer self.lock.Unlock() - self.lock.Lock() - - self.remote = remote - if self.Sells && (remote.BuyAt.Sign() <= 0 || self.local.SellAt.Sign() <= 0 || remote.BuyAt.Cmp(self.local.SellAt) < 0) { - 
self.Out.Stop() - self.Sells = false - } - if self.Buys && (remote.SellAt.Sign() <= 0 || self.local.BuyAt.Sign() <= 0 || self.local.BuyAt.Cmp(self.remote.SellAt) < 0) { - self.In.Stop() - self.Buys = false - } - - log.Debug(fmt.Sprintf("<%v> remote profile set: pay at: %v, drop at: %v, buy at: %v, sell at: %v", self.proto, remote.PayAt, remote.DropAt, remote.BuyAt, remote.SellAt)) - -} - -// to set strategy dynamically -func (self *Swap) SetParams(local *Params) { - defer self.lock.Unlock() - self.lock.Lock() - self.local = local - self.setParams(local) -} - -// caller holds the lock - -func (self *Swap) setParams(local *Params) { - - if self.Sells { - self.In.AutoCash(local.AutoCashInterval, local.AutoCashThreshold) - log.Info(fmt.Sprintf("<%v> set autocash to every %v, max uncashed limit: %v", self.proto, local.AutoCashInterval, local.AutoCashThreshold)) - } else { - log.Info(fmt.Sprintf("<%v> autocash off (not selling)", self.proto)) - } - if self.Buys { - self.Out.AutoDeposit(local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer) - log.Info(fmt.Sprintf("<%v> set autodeposit to every %v, pay at: %v, buffer: %v", self.proto, local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer)) - } else { - log.Info(fmt.Sprintf("<%v> autodeposit off (not buying)", self.proto)) - } -} - -// Add(n) -// n > 0 called when promised/provided n units of service -// n < 0 called when used/requested n units of service -func (self *Swap) Add(n int) error { - defer self.lock.Unlock() - self.lock.Lock() - self.balance += n - if !self.Sells && self.balance > 0 { - log.Trace(fmt.Sprintf("<%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance)) - self.proto.Drop() - return fmt.Errorf("[SWAP] <%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance) - } - if !self.Buys && self.balance < 0 { - log.Trace(fmt.Sprintf("<%v> we cannot have debt (balance: %v)", self.proto, self.balance)) - return 
fmt.Errorf("[SWAP] <%v> we cannot have debt (balance: %v)", self.proto, self.balance) - } - if self.balance >= int(self.local.DropAt) { - log.Trace(fmt.Sprintf("<%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt)) - self.proto.Drop() - return fmt.Errorf("[SWAP] <%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt) - } else if self.balance <= -int(self.remote.PayAt) { - self.send() - } - return nil -} - -func (self *Swap) Balance() int { - defer self.lock.Unlock() - self.lock.Lock() - return self.balance -} - -// send(units) is called when payment is due -// In case of insolvency no promise is issued and sent, safe against fraud -// No return value: no error = payment is opportunistic = hang in till dropped -func (self *Swap) send() { - if self.local.BuyAt != nil && self.balance < 0 { - amount := big.NewInt(int64(-self.balance)) - amount.Mul(amount, self.remote.SellAt) - promise, err := self.Out.Issue(amount) - if err != nil { - log.Warn(fmt.Sprintf("<%v> cannot issue cheque (amount: %v, channel: %v): %v", self.proto, amount, self.Out, err)) - } else { - log.Warn(fmt.Sprintf("<%v> cheque issued (amount: %v, channel: %v)", self.proto, amount, self.Out)) - self.proto.Pay(-self.balance, promise) - self.balance = 0 - } - } -} - -// receive(units, promise) is called by the protocol when a payment msg is received -// returns error if promise is invalid. 
-func (self *Swap) Receive(units int, promise Promise) error { - if units <= 0 { - return fmt.Errorf("invalid units: %v <= 0", units) - } - - price := new(big.Int).SetInt64(int64(units)) - price.Mul(price, self.local.SellAt) - - amount, err := self.In.Receive(promise) - - if err != nil { - err = fmt.Errorf("invalid promise: %v", err) - } else if price.Cmp(amount) != 0 { - // verify amount = units * unit sale price - return fmt.Errorf("invalid amount: %v = %v * %v (units sent in msg * agreed sale unit price) != %v (signed in cheque)", price, units, self.local.SellAt, amount) - } - if err != nil { - log.Trace(fmt.Sprintf("<%v> invalid promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, err)) - return err - } - - // credit remote peer with units - self.Add(-units) - log.Trace(fmt.Sprintf("<%v> received promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, promise)) - - return nil -} - -// stop() causes autocash loop to terminate. -// Called after protocol handle loop terminates. -func (self *Swap) Stop() { - defer self.lock.Unlock() - self.lock.Lock() - if self.Buys { - self.Out.Stop() - } - if self.Sells { - self.In.Stop() - } -} diff --git a/swarm/services/swap/swap/swap_test.go b/swarm/services/swap/swap/swap_test.go deleted file mode 100644 index b0ab835836b2..000000000000 --- a/swarm/services/swap/swap/swap_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package swap - -import ( - "math/big" - "testing" - "time" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -type testInPayment struct { - received []*testPromise - autocashInterval time.Duration - autocashLimit *big.Int -} - -type testPromise struct { - amount *big.Int -} - -func (self *testInPayment) Receive(promise Promise) (*big.Int, error) { - p := promise.(*testPromise) - self.received = append(self.received, p) - return p.amount, nil -} - -func (self *testInPayment) AutoCash(interval time.Duration, limit *big.Int) { - self.autocashInterval = interval - self.autocashLimit = limit -} - -func (self *testInPayment) Cash() (string, error) { return "", nil } - -func (self *testInPayment) Stop() {} - -type testOutPayment struct { - deposits []*big.Int - autodepositInterval time.Duration - autodepositThreshold *big.Int - autodepositBuffer *big.Int -} - -func (self *testOutPayment) Issue(amount *big.Int) (promise Promise, err error) { - return &testPromise{amount}, nil -} - -func (self *testOutPayment) Deposit(amount *big.Int) (string, error) { - self.deposits = append(self.deposits, amount) - return "", nil -} - -func (self *testOutPayment) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) { - self.autodepositInterval = interval - self.autodepositThreshold = threshold - self.autodepositBuffer = buffer -} - -func (self *testOutPayment) Stop() {} - -type testProtocol struct { - drop bool - amounts []int - promises []*testPromise -} - -func (self *testProtocol) Drop() { - self.drop = true -} - -func (self *testProtocol) String() string { - return "" -} - -func (self *testProtocol) Pay(amount int, promise Promise) { - p := promise.(*testPromise) - self.promises = append(self.promises, p) - self.amounts = append(self.amounts, amount) -} - -func TestSwap(t 
*testing.T) { - - strategy := &Strategy{ - AutoCashInterval: 1 * time.Second, - AutoCashThreshold: big.NewInt(20), - AutoDepositInterval: 1 * time.Second, - AutoDepositThreshold: big.NewInt(20), - AutoDepositBuffer: big.NewInt(40), - } - - local := &Params{ - Profile: &Profile{ - PayAt: 5, - DropAt: 10, - BuyAt: common.Big3, - SellAt: common.Big2, - }, - Strategy: strategy, - } - - in := &testInPayment{} - out := &testOutPayment{} - proto := &testProtocol{} - - swap, _ := New(local, Payment{In: in, Out: out, Buys: true, Sells: true}, proto) - - if in.autocashInterval != strategy.AutoCashInterval { - t.Fatalf("autocash interval not properly set, expect %v, got %v", strategy.AutoCashInterval, in.autocashInterval) - } - if out.autodepositInterval != strategy.AutoDepositInterval { - t.Fatalf("autodeposit interval not properly set, expect %v, got %v", strategy.AutoDepositInterval, out.autodepositInterval) - } - - remote := &Profile{ - PayAt: 3, - DropAt: 10, - BuyAt: common.Big2, - SellAt: common.Big3, - } - swap.SetRemote(remote) - - swap.Add(9) - if proto.drop { - t.Fatalf("not expected peer to be dropped") - } - swap.Add(1) - if !proto.drop { - t.Fatalf("expected peer to be dropped") - } - if !proto.drop { - t.Fatalf("expected peer to be dropped") - } - proto.drop = false - - swap.Receive(10, &testPromise{big.NewInt(20)}) - if swap.balance != 0 { - t.Fatalf("expected zero balance, got %v", swap.balance) - } - - if len(proto.amounts) != 0 { - t.Fatalf("expected zero balance, got %v", swap.balance) - } - - swap.Add(-2) - if len(proto.amounts) > 0 { - t.Fatalf("expected no payments yet, got %v", proto.amounts) - } - - swap.Add(-1) - if len(proto.amounts) != 1 { - t.Fatalf("expected one payment, got %v", len(proto.amounts)) - } - - if proto.amounts[0] != 3 { - t.Fatalf("expected payment for %v units, got %v", proto.amounts[0], 3) - } - - exp := new(big.Int).Mul(big.NewInt(int64(proto.amounts[0])), remote.SellAt) - if proto.promises[0].amount.Cmp(exp) != 0 { - 
t.Fatalf("expected payment amount %v, got %v", exp, proto.promises[0].amount) - } - - swap.SetParams(&Params{ - Profile: &Profile{ - PayAt: 5, - DropAt: 10, - BuyAt: common.Big3, - SellAt: common.Big2, - }, - Strategy: &Strategy{ - AutoCashInterval: 2 * time.Second, - AutoCashThreshold: big.NewInt(40), - AutoDepositInterval: 2 * time.Second, - AutoDepositThreshold: big.NewInt(40), - AutoDepositBuffer: big.NewInt(60), - }, - }) - -} diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go deleted file mode 100644 index bc0856d59316..000000000000 --- a/swarm/storage/chunker.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/metrics" -) - -/* -The distributed storage implemented in this package requires fix sized chunks of content. - -Chunker is the interface to a component that is responsible for disassembling and assembling larger data. - -TreeChunker implements a Chunker based on a tree structure defined as follows: - -1 each node in the tree including the root and other branching nodes are stored as a chunk. 
- -2 branching nodes encode data contents that includes the size of the dataslice covered by its entire subtree under the node as well as the hash keys of all its children : -data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1} - -3 Leaf nodes encode an actual subslice of the input data. - -4 if data size is not more than maximum chunksize, the data is stored in a single chunk - key = hash(int64(size) + data) - -5 if data size is more than chunksize*branches^l, but no more than chunksize* - branches^(l+1), the data vector is split into slices of chunksize* - branches^l length (except the last one). - key = hash(int64(size) + key(slice0) + key(slice1) + ...) - - The underlying hash function is configurable -*/ - -/* -Tree chunker is a concrete implementation of data chunking. -This chunker works in a simple way, it builds a tree out of the document so that each node either represents a chunk of real data or a chunk of data representing an branching non-leaf node of the tree. In particular each such non-leaf chunk will represent is a concatenation of the hash of its respective children. This scheme simultaneously guarantees data integrity as well as self addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree. - -If all is well it is possible to implement this by simply composing readers so that no extra allocation or buffering is necessary for the data splitting and joining. This means that in principle there can be direct IO between : memory, file system, network socket (bzz peers storage request is read from the socket). In practice there may be need for several stages of internal buffering. -The hashing itself does use extra copies and allocation though, since it does need it. 
-*/ - -var ( - errAppendOppNotSuported = errors.New("Append operation not supported") - errOperationTimedOut = errors.New("operation timed out") -) - -//metrics variables -var ( - newChunkCounter = metrics.NewRegisteredCounter("storage.chunks.new", nil) -) - -type TreeChunker struct { - branches int64 - hashFunc SwarmHasher - // calculated - hashSize int64 // self.hashFunc.New().Size() - chunkSize int64 // hashSize* branches - workerCount int64 // the number of worker routines used - workerLock sync.RWMutex // lock for the worker count -} - -func NewTreeChunker(params *ChunkerParams) (self *TreeChunker) { - self = &TreeChunker{} - self.hashFunc = MakeHashFunc(params.Hash) - self.branches = params.Branches - self.hashSize = int64(self.hashFunc().Size()) - self.chunkSize = self.hashSize * self.branches - self.workerCount = 0 - - return -} - -// func (self *TreeChunker) KeySize() int64 { -// return self.hashSize -// } - -// String() for pretty printing -func (self *Chunk) String() string { - return fmt.Sprintf("Key: %v TreeSize: %v Chunksize: %v", self.Key.Log(), self.Size, len(self.SData)) -} - -type hashJob struct { - key Key - chunk []byte - size int64 - parentWg *sync.WaitGroup -} - -func (self *TreeChunker) incrementWorkerCount() { - self.workerLock.Lock() - defer self.workerLock.Unlock() - self.workerCount += 1 -} - -func (self *TreeChunker) getWorkerCount() int64 { - self.workerLock.RLock() - defer self.workerLock.RUnlock() - return self.workerCount -} - -func (self *TreeChunker) decrementWorkerCount() { - self.workerLock.Lock() - defer self.workerLock.Unlock() - self.workerCount -= 1 -} - -func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, swg, wwg *sync.WaitGroup) (Key, error) { - if self.chunkSize <= 0 { - panic("chunker must be initialised") - } - - jobC := make(chan *hashJob, 2*ChunkProcessors) - wg := &sync.WaitGroup{} - errC := make(chan error) - quitC := make(chan bool) - - // wwg = workers waitgroup keeps track of 
hashworkers spawned by this split call - if wwg != nil { - wwg.Add(1) - } - - self.incrementWorkerCount() - go self.hashWorker(jobC, chunkC, errC, quitC, swg, wwg) - - depth := 0 - treeSize := self.chunkSize - - // takes lowest depth such that chunksize*HashCount^(depth+1) > size - // power series, will find the order of magnitude of the data size in base hashCount or numbers of levels of branching in the resulting tree. - for ; treeSize < size; treeSize *= self.branches { - depth++ - } - - key := make([]byte, self.hashFunc().Size()) - // this waitgroup member is released after the root hash is calculated - wg.Add(1) - //launch actual recursive function passing the waitgroups - go self.split(depth, treeSize/self.branches, key, data, size, jobC, chunkC, errC, quitC, wg, swg, wwg) - - // closes internal error channel if all subprocesses in the workgroup finished - go func() { - // waiting for all threads to finish - wg.Wait() - // if storage waitgroup is non-nil, we wait for storage to finish too - if swg != nil { - swg.Wait() - } - close(errC) - }() - - defer close(quitC) - select { - case err := <-errC: - if err != nil { - return nil, err - } - case <-time.NewTimer(splitTimeout).C: - return nil, errOperationTimedOut - } - - return key, nil -} - -func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reader, size int64, jobC chan *hashJob, chunkC chan *Chunk, errC chan error, quitC chan bool, parentWg, swg, wwg *sync.WaitGroup) { - - // - - for depth > 0 && size < treeSize { - treeSize /= self.branches - depth-- - } - - if depth == 0 { - // leaf nodes -> content chunks - chunkData := make([]byte, size+8) - binary.LittleEndian.PutUint64(chunkData[0:8], uint64(size)) - var readBytes int64 - for readBytes < size { - n, err := data.Read(chunkData[8+readBytes:]) - readBytes += int64(n) - if err != nil && !(err == io.EOF && readBytes == size) { - errC <- err - return - } - } - select { - case jobC <- &hashJob{key, chunkData, size, parentWg}: - case 
<-quitC: - } - return - } - // dept > 0 - // intermediate chunk containing child nodes hashes - branchCnt := (size + treeSize - 1) / treeSize - - var chunk = make([]byte, branchCnt*self.hashSize+8) - var pos, i int64 - - binary.LittleEndian.PutUint64(chunk[0:8], uint64(size)) - - childrenWg := &sync.WaitGroup{} - var secSize int64 - for i < branchCnt { - // the last item can have shorter data - if size-pos < treeSize { - secSize = size - pos - } else { - secSize = treeSize - } - // the hash of that data - subTreeKey := chunk[8+i*self.hashSize : 8+(i+1)*self.hashSize] - - childrenWg.Add(1) - self.split(depth-1, treeSize/self.branches, subTreeKey, data, secSize, jobC, chunkC, errC, quitC, childrenWg, swg, wwg) - - i++ - pos += treeSize - } - // wait for all the children to complete calculating their hashes and copying them onto sections of the chunk - // parentWg.Add(1) - // go func() { - childrenWg.Wait() - - worker := self.getWorkerCount() - if int64(len(jobC)) > worker && worker < ChunkProcessors { - if wwg != nil { - wwg.Add(1) - } - self.incrementWorkerCount() - go self.hashWorker(jobC, chunkC, errC, quitC, swg, wwg) - - } - select { - case jobC <- &hashJob{key, chunk, size, parentWg}: - case <-quitC: - } -} - -func (self *TreeChunker) hashWorker(jobC chan *hashJob, chunkC chan *Chunk, errC chan error, quitC chan bool, swg, wwg *sync.WaitGroup) { - defer self.decrementWorkerCount() - - hasher := self.hashFunc() - if wwg != nil { - defer wwg.Done() - } - for { - select { - - case job, ok := <-jobC: - if !ok { - return - } - // now we got the hashes in the chunk, then hash the chunks - self.hashChunk(hasher, job, chunkC, swg) - case <-quitC: - return - } - } -} - -// The treeChunkers own Hash hashes together -// - the size (of the subtree encoded in the Chunk) -// - the Chunk, ie. 
the contents read from the input reader -func (self *TreeChunker) hashChunk(hasher SwarmHash, job *hashJob, chunkC chan *Chunk, swg *sync.WaitGroup) { - hasher.ResetWithLength(job.chunk[:8]) // 8 bytes of length - hasher.Write(job.chunk[8:]) // minus 8 []byte length - h := hasher.Sum(nil) - - newChunk := &Chunk{ - Key: h, - SData: job.chunk, - Size: job.size, - wg: swg, - } - - // report hash of this chunk one level up (keys corresponds to the proper subslice of the parent chunk) - copy(job.key, h) - // send off new chunk to storage - if chunkC != nil { - if swg != nil { - swg.Add(1) - } - } - job.parentWg.Done() - - if chunkC != nil { - //NOTE: this increases the chunk count even if the local node already has this chunk; - //on file upload the node will increase this counter even if the same file has already been uploaded - //So it should be evaluated whether it is worth keeping this counter - //and/or actually better track when the chunk is Put to the local database - //(which may question the need for disambiguation when a completely new chunk has been created - //and/or a chunk is being put to the local DB; for chunk tracking it may be worth distinguishing - newChunkCounter.Inc(1) - chunkC <- newChunk - } -} - -func (self *TreeChunker) Append(key Key, data io.Reader, chunkC chan *Chunk, swg, wwg *sync.WaitGroup) (Key, error) { - return nil, errAppendOppNotSuported -} - -// LazyChunkReader implements LazySectionReader -type LazyChunkReader struct { - key Key // root key - chunkC chan *Chunk // chunk channel to send retrieve requests on - chunk *Chunk // size of the entire subtree - off int64 // offset - chunkSize int64 // inherit from chunker - branches int64 // inherit from chunker - hashSize int64 // inherit from chunker -} - -// implements the Joiner interface -func (self *TreeChunker) Join(key Key, chunkC chan *Chunk) LazySectionReader { - return &LazyChunkReader{ - key: key, - chunkC: chunkC, - chunkSize: self.chunkSize, - branches: self.branches, - 
hashSize: self.hashSize, - } -} - -// Size is meant to be called on the LazySectionReader -func (self *LazyChunkReader) Size(quitC chan bool) (n int64, err error) { - if self.chunk != nil { - return self.chunk.Size, nil - } - chunk := retrieve(self.key, self.chunkC, quitC) - if chunk == nil { - select { - case <-quitC: - return 0, errors.New("aborted") - default: - return 0, fmt.Errorf("root chunk not found for %v", self.key.Hex()) - } - } - self.chunk = chunk - return chunk.Size, nil -} - -// read at can be called numerous times -// concurrent reads are allowed -// Size() needs to be called synchronously on the LazyChunkReader first -func (self *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { - // this is correct, a swarm doc cannot be zero length, so no EOF is expected - if len(b) == 0 { - return 0, nil - } - quitC := make(chan bool) - size, err := self.Size(quitC) - if err != nil { - return 0, err - } - - errC := make(chan error) - - // } - var treeSize int64 - var depth int - // calculate depth and max treeSize - treeSize = self.chunkSize - for ; treeSize < size; treeSize *= self.branches { - depth++ - } - wg := sync.WaitGroup{} - wg.Add(1) - go self.join(b, off, off+int64(len(b)), depth, treeSize/self.branches, self.chunk, &wg, errC, quitC) - go func() { - wg.Wait() - close(errC) - }() - - err = <-errC - if err != nil { - close(quitC) - - return 0, err - } - if off+int64(len(b)) >= size { - return len(b), io.EOF - } - return len(b), nil -} - -func (self *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunk *Chunk, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { - defer parentWg.Done() - // return NewDPA(&LocalStore{}) - - // chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - - // find appropriate block level - for chunk.Size < treeSize && depth > 0 { - treeSize /= self.branches - depth-- - } - - // leaf chunk found - if depth == 0 { - extra := 8 + eoff - 
int64(len(chunk.SData)) - if extra > 0 { - eoff -= extra - } - copy(b, chunk.SData[8+off:8+eoff]) - return // simply give back the chunks reader for content chunks - } - - // subtree - start := off / treeSize - end := (eoff + treeSize - 1) / treeSize - - wg := &sync.WaitGroup{} - defer wg.Wait() - - for i := start; i < end; i++ { - soff := i * treeSize - roff := soff - seoff := soff + treeSize - - if soff < off { - soff = off - } - if seoff > eoff { - seoff = eoff - } - if depth > 1 { - wg.Wait() - } - wg.Add(1) - go func(j int64) { - childKey := chunk.SData[8+j*self.hashSize : 8+(j+1)*self.hashSize] - chunk := retrieve(childKey, self.chunkC, quitC) - if chunk == nil { - select { - case errC <- fmt.Errorf("chunk %v-%v not found", off, off+treeSize): - case <-quitC: - } - return - } - if soff < off { - soff = off - } - self.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/self.branches, chunk, wg, errC, quitC) - }(i) - } //for -} - -// the helper method submits chunks for a key to a oueue (DPA) and -// block until they time out or arrive -// abort if quitC is readable -func retrieve(key Key, chunkC chan *Chunk, quitC chan bool) *Chunk { - chunk := &Chunk{ - Key: key, - C: make(chan bool), // close channel to signal data delivery - } - // submit chunk for retrieval - select { - case chunkC <- chunk: // submit retrieval request, someone should be listening on the other side (or we will time out globally) - case <-quitC: - return nil - } - // waiting for the chunk retrieval - select { // chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - - case <-quitC: - // this is how we control process leakage (quitC is closed once join is finished (after timeout)) - return nil - case <-chunk.C: // bells are ringing, data have been delivered - } - if len(chunk.SData) == 0 { - return nil // chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - - } - return chunk -} - -// Read keeps a cursor so cannot be called simulateously, see ReadAt 
-func (self *LazyChunkReader) Read(b []byte) (read int, err error) { - read, err = self.ReadAt(b, self.off) - - self.off += int64(read) - return -} - -// completely analogous to standard SectionReader implementation -var errWhence = errors.New("Seek: invalid whence") -var errOffset = errors.New("Seek: invalid offset") - -func (s *LazyChunkReader) Seek(offset int64, whence int) (int64, error) { - switch whence { - default: - return 0, errWhence - case 0: - offset += 0 - case 1: - offset += s.off - case 2: - if s.chunk == nil { //seek from the end requires rootchunk for size. call Size first - _, err := s.Size(nil) - if err != nil { - return 0, fmt.Errorf("can't get size: %v", err) - } - } - offset += s.chunk.Size - } - - if offset < 0 { - return 0, errOffset - } - s.off = offset - return offset, nil -} diff --git a/swarm/storage/chunker_test.go b/swarm/storage/chunker_test.go deleted file mode 100644 index 2dcbbe79caf9..000000000000 --- a/swarm/storage/chunker_test.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - "testing" - "time" - - "github.com/XinFinOrg/XDPoSChain/crypto/sha3" -) - -/* -Tests TreeChunker by splitting and joining a random byte slice -*/ - -type test interface { - Fatalf(string, ...interface{}) - Logf(string, ...interface{}) -} - -type chunkerTester struct { - inputs map[uint64][]byte - chunks map[string]*Chunk - t test -} - -func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key, err error) { - // reset - self.chunks = make(map[string]*Chunk) - - if self.inputs == nil { - self.inputs = make(map[uint64][]byte) - } - - quitC := make(chan bool) - timeout := time.After(600 * time.Second) - if chunkC != nil { - go func() error { - for { - select { - case <-timeout: - return errors.New("Split timeout error") - case <-quitC: - return nil - case chunk := <-chunkC: - // self.chunks = append(self.chunks, chunk) - self.chunks[chunk.Key.String()] = chunk - if chunk.wg != nil { - chunk.wg.Done() - } - } - - } - }() - } - - key, err = chunker.Split(data, size, chunkC, swg, nil) - if err != nil && expectedError == nil { - err = fmt.Errorf("Split error: %v", err) - } - - if chunkC != nil { - if swg != nil { - swg.Wait() - } - close(quitC) - } - return key, err -} - -func (self *chunkerTester) Append(chunker Splitter, rootKey Key, data io.Reader, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key, err error) { - quitC := make(chan bool) - timeout := time.After(60 * time.Second) - if chunkC != nil { - go func() error { - for { - select { - case <-timeout: - return errors.New("Append timeout error") - case <-quitC: - return nil - case chunk := <-chunkC: - if chunk != nil { - stored, success := self.chunks[chunk.Key.String()] - if !success { - // Requesting data - self.chunks[chunk.Key.String()] = chunk - if chunk.wg != nil { - chunk.wg.Done() - } - 
} else { - // getting data - chunk.SData = stored.SData - chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - close(chunk.C) - } - } - } - } - }() - } - - key, err = chunker.Append(rootKey, data, chunkC, swg, nil) - if err != nil && expectedError == nil { - err = fmt.Errorf("Append error: %v", err) - } - - if chunkC != nil { - if swg != nil { - swg.Wait() - } - close(quitC) - } - return key, err -} - -func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Chunk, quitC chan bool) LazySectionReader { - // reset but not the chunks - - reader := chunker.Join(key, chunkC) - - timeout := time.After(600 * time.Second) - i := 0 - go func() error { - for { - select { - case <-timeout: - return errors.New("Join timeout error") - case chunk, ok := <-chunkC: - if !ok { - close(quitC) - return nil - } - // this just mocks the behaviour of a chunk store retrieval - stored, success := self.chunks[chunk.Key.String()] - if !success { - return errors.New("Not found") - } - chunk.SData = stored.SData - chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - close(chunk.C) - i++ - } - } - }() - return reader -} - -func testRandomBrokenData(splitter Splitter, n int, tester *chunkerTester) { - data := io.LimitReader(rand.Reader, int64(n)) - brokendata := brokenLimitReader(data, n, n/2) - - buf := make([]byte, n) - _, err := brokendata.Read(buf) - if err == nil || err.Error() != "Broken reader" { - tester.t.Fatalf("Broken reader is not broken, hence broken. Returns: %v", err) - } - - data = io.LimitReader(rand.Reader, int64(n)) - brokendata = brokenLimitReader(data, n, n/2) - - chunkC := make(chan *Chunk, 1000) - swg := &sync.WaitGroup{} - - expectedError := fmt.Errorf("Broken reader") - key, err := tester.Split(splitter, brokendata, int64(n), chunkC, swg, expectedError) - if err == nil || err.Error() != expectedError.Error() { - tester.t.Fatalf("Not receiving the correct error! 
Expected %v, received %v", expectedError, err) - } - tester.t.Logf(" Key = %v\n", key) -} - -func testRandomData(splitter Splitter, n int, tester *chunkerTester) Key { - if tester.inputs == nil { - tester.inputs = make(map[uint64][]byte) - } - input, found := tester.inputs[uint64(n)] - var data io.Reader - if !found { - data, input = testDataReaderAndSlice(n) - tester.inputs[uint64(n)] = input - } else { - data = io.LimitReader(bytes.NewReader(input), int64(n)) - } - - chunkC := make(chan *Chunk, 1000) - swg := &sync.WaitGroup{} - - key, err := tester.Split(splitter, data, int64(n), chunkC, swg, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - tester.t.Logf(" Key = %v\n", key) - - chunkC = make(chan *Chunk, 1000) - quitC := make(chan bool) - - chunker := NewTreeChunker(NewChunkerParams()) - reader := tester.Join(chunker, key, 0, chunkC, quitC) - output := make([]byte, n) - r, err := reader.Read(output) - if r != n || err != io.EOF { - tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err) - } - if input != nil { - if !bytes.Equal(output, input) { - tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input, output) - } - } - close(chunkC) - <-quitC - - return key -} - -func testRandomDataAppend(splitter Splitter, n, m int, tester *chunkerTester) { - if tester.inputs == nil { - tester.inputs = make(map[uint64][]byte) - } - input, found := tester.inputs[uint64(n)] - var data io.Reader - if !found { - data, input = testDataReaderAndSlice(n) - tester.inputs[uint64(n)] = input - } else { - data = io.LimitReader(bytes.NewReader(input), int64(n)) - } - - chunkC := make(chan *Chunk, 1000) - swg := &sync.WaitGroup{} - - key, err := tester.Split(splitter, data, int64(n), chunkC, swg, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - tester.t.Logf(" Key = %v\n", key) - - //create a append data stream - appendInput, found := tester.inputs[uint64(m)] - var appendData io.Reader - if !found { - appendData, appendInput = 
testDataReaderAndSlice(m) - tester.inputs[uint64(m)] = appendInput - } else { - appendData = io.LimitReader(bytes.NewReader(appendInput), int64(m)) - } - - chunkC = make(chan *Chunk, 1000) - swg = &sync.WaitGroup{} - - newKey, err := tester.Append(splitter, key, appendData, chunkC, swg, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - tester.t.Logf(" NewKey = %v\n", newKey) - - chunkC = make(chan *Chunk, 1000) - quitC := make(chan bool) - - chunker := NewTreeChunker(NewChunkerParams()) - reader := tester.Join(chunker, newKey, 0, chunkC, quitC) - newOutput := make([]byte, n+m) - r, err := reader.Read(newOutput) - if r != (n + m) { - tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err) - } - - newInput := append(input, appendInput...) - if !bytes.Equal(newOutput, newInput) { - tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", newInput, newOutput) - } - - close(chunkC) -} - -func TestSha3ForCorrectness(t *testing.T) { - tester := &chunkerTester{t: t} - - size := 4096 - input := make([]byte, size+8) - binary.LittleEndian.PutUint64(input[:8], uint64(size)) - - io.LimitReader(bytes.NewReader(input[8:]), int64(size)) - - rawSha3 := sha3.NewKeccak256() - rawSha3.Reset() - rawSha3.Write(input) - rawSha3Output := rawSha3.Sum(nil) - - sha3FromMakeFunc := MakeHashFunc(SHA3Hash)() - sha3FromMakeFunc.ResetWithLength(input[:8]) - sha3FromMakeFunc.Write(input[8:]) - sha3FromMakeFuncOutput := sha3FromMakeFunc.Sum(nil) - - if len(rawSha3Output) != len(sha3FromMakeFuncOutput) { - tester.t.Fatalf("Original SHA3 and abstracted Sha3 has different length %v:%v\n", len(rawSha3Output), len(sha3FromMakeFuncOutput)) - } - - if !bytes.Equal(rawSha3Output, sha3FromMakeFuncOutput) { - tester.t.Fatalf("Original SHA3 and abstracted Sha3 mismatch %v:%v\n", rawSha3Output, sha3FromMakeFuncOutput) - } - -} - -func TestDataAppend(t *testing.T) { - sizes := []int{1, 1, 1, 4095, 4096, 4097, 1, 1, 1, 123456, 2345678, 2345678} - appendSizes := []int{4095, 4096, 
4097, 1, 1, 1, 8191, 8192, 8193, 9000, 3000, 5000} - - tester := &chunkerTester{t: t} - chunker := NewPyramidChunker(NewChunkerParams()) - for i, s := range sizes { - testRandomDataAppend(chunker, s, appendSizes[i], tester) - - } -} - -func TestRandomData(t *testing.T) { - sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 123456, 2345678} - tester := &chunkerTester{t: t} - - chunker := NewTreeChunker(NewChunkerParams()) - pyramid := NewPyramidChunker(NewChunkerParams()) - for _, s := range sizes { - treeChunkerKey := testRandomData(chunker, s, tester) - pyramidChunkerKey := testRandomData(pyramid, s, tester) - if treeChunkerKey.String() != pyramidChunkerKey.String() { - tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String()) - } - } - - cp := NewChunkerParams() - cp.Hash = BMTHash - chunker = NewTreeChunker(cp) - pyramid = NewPyramidChunker(cp) - for _, s := range sizes { - treeChunkerKey := testRandomData(chunker, s, tester) - pyramidChunkerKey := testRandomData(pyramid, s, tester) - if treeChunkerKey.String() != pyramidChunkerKey.String() { - tester.t.Fatalf("tree chunker BMT and pyramid chunker BMT key mismatch for size %v \n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String()) - } - } - -} - -func XTestRandomBrokenData(t *testing.T) { - sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 123456, 2345678} - tester := &chunkerTester{t: t} - chunker := NewTreeChunker(NewChunkerParams()) - for _, s := range sizes { - testRandomBrokenData(chunker, s, tester) - } -} - -func benchReadAll(reader LazySectionReader) { - size, _ := reader.Size(nil) - output := make([]byte, 1000) - for pos := int64(0); pos < size; pos += 1000 { - reader.ReadAt(output, pos) - } -} - -func benchmarkJoin(n int, t *testing.B) { - t.ReportAllocs() - for i := 0; i < t.N; i++ { - 
chunker := NewTreeChunker(NewChunkerParams()) - tester := &chunkerTester{t: t} - data := testDataReader(n) - - chunkC := make(chan *Chunk, 1000) - swg := &sync.WaitGroup{} - - key, err := tester.Split(chunker, data, int64(n), chunkC, swg, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - chunkC = make(chan *Chunk, 1000) - quitC := make(chan bool) - reader := tester.Join(chunker, key, i, chunkC, quitC) - benchReadAll(reader) - close(chunkC) - <-quitC - } -} - -func benchmarkSplitTreeSHA3(n int, t *testing.B) { - t.ReportAllocs() - for i := 0; i < t.N; i++ { - chunker := NewTreeChunker(NewChunkerParams()) - tester := &chunkerTester{t: t} - data := testDataReader(n) - _, err := tester.Split(chunker, data, int64(n), nil, nil, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - } -} - -func benchmarkSplitTreeBMT(n int, t *testing.B) { - t.ReportAllocs() - for i := 0; i < t.N; i++ { - cp := NewChunkerParams() - cp.Hash = BMTHash - chunker := NewTreeChunker(cp) - tester := &chunkerTester{t: t} - data := testDataReader(n) - _, err := tester.Split(chunker, data, int64(n), nil, nil, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - } -} - -func benchmarkSplitPyramidSHA3(n int, t *testing.B) { - t.ReportAllocs() - for i := 0; i < t.N; i++ { - splitter := NewPyramidChunker(NewChunkerParams()) - tester := &chunkerTester{t: t} - data := testDataReader(n) - _, err := tester.Split(splitter, data, int64(n), nil, nil, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - } -} - -func benchmarkSplitPyramidBMT(n int, t *testing.B) { - t.ReportAllocs() - for i := 0; i < t.N; i++ { - cp := NewChunkerParams() - cp.Hash = BMTHash - splitter := NewPyramidChunker(cp) - tester := &chunkerTester{t: t} - data := testDataReader(n) - _, err := tester.Split(splitter, data, int64(n), nil, nil, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - } -} - -func benchmarkAppendPyramid(n, m int, t *testing.B) { - t.ReportAllocs() - for i := 0; i < t.N; i++ { - 
chunker := NewPyramidChunker(NewChunkerParams()) - tester := &chunkerTester{t: t} - data := testDataReader(n) - data1 := testDataReader(m) - - chunkC := make(chan *Chunk, 1000) - swg := &sync.WaitGroup{} - key, err := tester.Split(chunker, data, int64(n), chunkC, swg, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - - chunkC = make(chan *Chunk, 1000) - swg = &sync.WaitGroup{} - - _, err = tester.Append(chunker, key, data1, chunkC, swg, nil) - if err != nil { - tester.t.Fatalf(err.Error()) - } - - close(chunkC) - } -} - -func BenchmarkJoin_2(t *testing.B) { benchmarkJoin(100, t) } -func BenchmarkJoin_3(t *testing.B) { benchmarkJoin(1000, t) } -func BenchmarkJoin_4(t *testing.B) { benchmarkJoin(10000, t) } -func BenchmarkJoin_5(t *testing.B) { benchmarkJoin(100000, t) } -func BenchmarkJoin_6(t *testing.B) { benchmarkJoin(1000000, t) } -func BenchmarkJoin_7(t *testing.B) { benchmarkJoin(10000000, t) } -func BenchmarkJoin_8(t *testing.B) { benchmarkJoin(100000000, t) } - -func BenchmarkSplitTreeSHA3_2(t *testing.B) { benchmarkSplitTreeSHA3(100, t) } -func BenchmarkSplitTreeSHA3_2h(t *testing.B) { benchmarkSplitTreeSHA3(500, t) } -func BenchmarkSplitTreeSHA3_3(t *testing.B) { benchmarkSplitTreeSHA3(1000, t) } -func BenchmarkSplitTreeSHA3_3h(t *testing.B) { benchmarkSplitTreeSHA3(5000, t) } -func BenchmarkSplitTreeSHA3_4(t *testing.B) { benchmarkSplitTreeSHA3(10000, t) } -func BenchmarkSplitTreeSHA3_4h(t *testing.B) { benchmarkSplitTreeSHA3(50000, t) } -func BenchmarkSplitTreeSHA3_5(t *testing.B) { benchmarkSplitTreeSHA3(100000, t) } -func BenchmarkSplitTreeSHA3_6(t *testing.B) { benchmarkSplitTreeSHA3(1000000, t) } -func BenchmarkSplitTreeSHA3_7(t *testing.B) { benchmarkSplitTreeSHA3(10000000, t) } -func BenchmarkSplitTreeSHA3_8(t *testing.B) { benchmarkSplitTreeSHA3(100000000, t) } - -func BenchmarkSplitTreeBMT_2(t *testing.B) { benchmarkSplitTreeBMT(100, t) } -func BenchmarkSplitTreeBMT_2h(t *testing.B) { benchmarkSplitTreeBMT(500, t) } -func 
BenchmarkSplitTreeBMT_3(t *testing.B) { benchmarkSplitTreeBMT(1000, t) } -func BenchmarkSplitTreeBMT_3h(t *testing.B) { benchmarkSplitTreeBMT(5000, t) } -func BenchmarkSplitTreeBMT_4(t *testing.B) { benchmarkSplitTreeBMT(10000, t) } -func BenchmarkSplitTreeBMT_4h(t *testing.B) { benchmarkSplitTreeBMT(50000, t) } -func BenchmarkSplitTreeBMT_5(t *testing.B) { benchmarkSplitTreeBMT(100000, t) } -func BenchmarkSplitTreeBMT_6(t *testing.B) { benchmarkSplitTreeBMT(1000000, t) } -func BenchmarkSplitTreeBMT_7(t *testing.B) { benchmarkSplitTreeBMT(10000000, t) } -func BenchmarkSplitTreeBMT_8(t *testing.B) { benchmarkSplitTreeBMT(100000000, t) } - -func BenchmarkSplitPyramidSHA3_2(t *testing.B) { benchmarkSplitPyramidSHA3(100, t) } -func BenchmarkSplitPyramidSHA3_2h(t *testing.B) { benchmarkSplitPyramidSHA3(500, t) } -func BenchmarkSplitPyramidSHA3_3(t *testing.B) { benchmarkSplitPyramidSHA3(1000, t) } -func BenchmarkSplitPyramidSHA3_3h(t *testing.B) { benchmarkSplitPyramidSHA3(5000, t) } -func BenchmarkSplitPyramidSHA3_4(t *testing.B) { benchmarkSplitPyramidSHA3(10000, t) } -func BenchmarkSplitPyramidSHA3_4h(t *testing.B) { benchmarkSplitPyramidSHA3(50000, t) } -func BenchmarkSplitPyramidSHA3_5(t *testing.B) { benchmarkSplitPyramidSHA3(100000, t) } -func BenchmarkSplitPyramidSHA3_6(t *testing.B) { benchmarkSplitPyramidSHA3(1000000, t) } -func BenchmarkSplitPyramidSHA3_7(t *testing.B) { benchmarkSplitPyramidSHA3(10000000, t) } -func BenchmarkSplitPyramidSHA3_8(t *testing.B) { benchmarkSplitPyramidSHA3(100000000, t) } - -func BenchmarkSplitPyramidBMT_2(t *testing.B) { benchmarkSplitPyramidBMT(100, t) } -func BenchmarkSplitPyramidBMT_2h(t *testing.B) { benchmarkSplitPyramidBMT(500, t) } -func BenchmarkSplitPyramidBMT_3(t *testing.B) { benchmarkSplitPyramidBMT(1000, t) } -func BenchmarkSplitPyramidBMT_3h(t *testing.B) { benchmarkSplitPyramidBMT(5000, t) } -func BenchmarkSplitPyramidBMT_4(t *testing.B) { benchmarkSplitPyramidBMT(10000, t) } -func BenchmarkSplitPyramidBMT_4h(t 
*testing.B) { benchmarkSplitPyramidBMT(50000, t) } -func BenchmarkSplitPyramidBMT_5(t *testing.B) { benchmarkSplitPyramidBMT(100000, t) } -func BenchmarkSplitPyramidBMT_6(t *testing.B) { benchmarkSplitPyramidBMT(1000000, t) } -func BenchmarkSplitPyramidBMT_7(t *testing.B) { benchmarkSplitPyramidBMT(10000000, t) } -func BenchmarkSplitPyramidBMT_8(t *testing.B) { benchmarkSplitPyramidBMT(100000000, t) } - -func BenchmarkAppendPyramid_2(t *testing.B) { benchmarkAppendPyramid(100, 1000, t) } -func BenchmarkAppendPyramid_2h(t *testing.B) { benchmarkAppendPyramid(500, 1000, t) } -func BenchmarkAppendPyramid_3(t *testing.B) { benchmarkAppendPyramid(1000, 1000, t) } -func BenchmarkAppendPyramid_4(t *testing.B) { benchmarkAppendPyramid(10000, 1000, t) } -func BenchmarkAppendPyramid_4h(t *testing.B) { benchmarkAppendPyramid(50000, 1000, t) } -func BenchmarkAppendPyramid_5(t *testing.B) { benchmarkAppendPyramid(1000000, 1000, t) } -func BenchmarkAppendPyramid_6(t *testing.B) { benchmarkAppendPyramid(1000000, 1000, t) } -func BenchmarkAppendPyramid_7(t *testing.B) { benchmarkAppendPyramid(10000000, 1000, t) } -func BenchmarkAppendPyramid_8(t *testing.B) { benchmarkAppendPyramid(100000000, 1000, t) } - -// go test -timeout 20m -cpu 4 -bench=./swarm/storage -run no -// If you dont add the timeout argument above .. the benchmark will timeout and dump diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go deleted file mode 100644 index 6a3e6e06d0d7..000000000000 --- a/swarm/storage/common_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "sync" - "testing" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -type brokenLimitedReader struct { - lr io.Reader - errAt int - off int - size int -} - -func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader { - return &brokenLimitedReader{ - lr: data, - errAt: errAt, - size: size, - } -} - -func testDataReader(l int) (r io.Reader) { - return io.LimitReader(rand.Reader, int64(l)) -} - -func (self *brokenLimitedReader) Read(buf []byte) (int, error) { - if self.off+len(buf) > self.errAt { - return 0, fmt.Errorf("Broken reader") - } - self.off += len(buf) - return self.lr.Read(buf) -} - -func testDataReaderAndSlice(l int) (r io.Reader, slice []byte) { - slice = make([]byte, l) - if _, err := rand.Read(slice); err != nil { - panic("rand error") - } - r = io.LimitReader(bytes.NewReader(slice), int64(l)) - return -} - -func testStore(m ChunkStore, l int64, branches int64, t *testing.T) { - - chunkC := make(chan *Chunk) - go func() { - for chunk := range chunkC { - m.Put(chunk) - if chunk.wg != nil { - chunk.wg.Done() - } - } - }() - chunker := NewTreeChunker(&ChunkerParams{ - Branches: branches, - Hash: SHA3Hash, - }) - swg := &sync.WaitGroup{} - key, _ := chunker.Split(rand.Reader, l, chunkC, swg, nil) - swg.Wait() - close(chunkC) - chunkC = make(chan *Chunk) - - quit := make(chan bool) - - go func() { - for ch := range chunkC { - go func(chunk *Chunk) { - storedChunk, err := m.Get(chunk.Key) - if err == notFound { - log.Trace(fmt.Sprintf("chunk '%v' not found", 
chunk.Key.Log())) - } else if err != nil { - log.Trace(fmt.Sprintf("error retrieving chunk %v: %v", chunk.Key.Log(), err)) - } else { - chunk.SData = storedChunk.SData - chunk.Size = storedChunk.Size - } - log.Trace(fmt.Sprintf("chunk '%v' not found", chunk.Key.Log())) - close(chunk.C) - }(ch) - } - close(quit) - }() - r := chunker.Join(key, chunkC) - - b := make([]byte, l) - n, err := r.ReadAt(b, 0) - if err != io.EOF { - t.Fatalf("read error (%v/%v) %v", n, l, err) - } - close(chunkC) - <-quit -} diff --git a/swarm/storage/database.go b/swarm/storage/database.go deleted file mode 100644 index 1340f6fc5d17..000000000000 --- a/swarm/storage/database.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -// this is a clone of an earlier state of the ethereum ethdb/database -// no need for queueing/caching - -import ( - "fmt" - - "github.com/XinFinOrg/XDPoSChain/compression/rle" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -const openFileLimit = 128 - -type LDBDatabase struct { - db *leveldb.DB - comp bool -} - -func NewLDBDatabase(file string) (*LDBDatabase, error) { - // Open the db - db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: openFileLimit}) - if err != nil { - return nil, err - } - - database := &LDBDatabase{db: db, comp: false} - - return database, nil -} - -func (self *LDBDatabase) Put(key []byte, value []byte) { - if self.comp { - value = rle.Compress(value) - } - - err := self.db.Put(key, value, nil) - if err != nil { - fmt.Println("Error put", err) - } -} - -func (self *LDBDatabase) Get(key []byte) ([]byte, error) { - dat, err := self.db.Get(key, nil) - if err != nil { - return nil, err - } - - if self.comp { - return rle.Decompress(dat) - } - - return dat, nil -} - -func (self *LDBDatabase) Delete(key []byte) error { - return self.db.Delete(key, nil) -} - -func (self *LDBDatabase) LastKnownTD() []byte { - data, _ := self.Get([]byte("LTD")) - - if len(data) == 0 { - data = []byte{0x0} - } - - return data -} - -func (self *LDBDatabase) NewIterator() iterator.Iterator { - return self.db.NewIterator(nil, nil) -} - -func (self *LDBDatabase) Write(batch *leveldb.Batch) error { - return self.db.Write(batch, nil) -} - -func (self *LDBDatabase) Close() { - // Close the leveldb database - self.db.Close() -} diff --git a/swarm/storage/dbstore.go b/swarm/storage/dbstore.go deleted file mode 100644 index caac05e1379c..000000000000 --- a/swarm/storage/dbstore.go +++ /dev/null @@ -1,601 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// disk storage layer for the package bzz -// DbStore implements the ChunkStore interface and is used by the DPA as -// persistent storage of chunks -// it implements purging based on access count allowing for external control of -// max capacity - -package storage - -import ( - "archive/tar" - "bytes" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "sync" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/rlp" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -// metrics variables -var ( - gcCounter = metrics.NewRegisteredCounter("storage.db.dbstore.gc.count", nil) - dbStoreDeleteCounter = metrics.NewRegisteredCounter("storage.db.dbstore.rm.count", nil) -) - -const ( - defaultDbCapacity = 5000000 - defaultRadius = 0 // not yet used - - gcArraySize = 10000 - gcArrayFreeRatio = 0.1 - - // key prefixes for leveldb storage - kpIndex = 0 - kpData = 1 -) - -var ( - keyAccessCnt = []byte{2} - keyEntryCnt = []byte{3} - keyDataIdx = []byte{4} - keyGCPos = []byte{5} -) - -type gcItem struct { - idx uint64 - value uint64 - idxKey []byte -} - -type DbStore struct { - db *LDBDatabase - - // this should be stored in db, accessed transactionally - entryCnt, accessCnt, dataIdx, capacity 
uint64 - - gcPos, gcStartPos []byte - gcArray []*gcItem - - hashfunc SwarmHasher - - lock sync.Mutex -} - -func NewDbStore(path string, hash SwarmHasher, capacity uint64, radius int) (s *DbStore, err error) { - s = new(DbStore) - - s.hashfunc = hash - - s.db, err = NewLDBDatabase(path) - if err != nil { - return - } - - s.setCapacity(capacity) - - s.gcStartPos = make([]byte, 1) - s.gcStartPos[0] = kpIndex - s.gcArray = make([]*gcItem, gcArraySize) - - data, _ := s.db.Get(keyEntryCnt) - s.entryCnt = BytesToU64(data) - data, _ = s.db.Get(keyAccessCnt) - s.accessCnt = BytesToU64(data) - data, _ = s.db.Get(keyDataIdx) - s.dataIdx = BytesToU64(data) - s.gcPos, _ = s.db.Get(keyGCPos) - if s.gcPos == nil { - s.gcPos = s.gcStartPos - } - return -} - -type dpaDBIndex struct { - Idx uint64 - Access uint64 -} - -func BytesToU64(data []byte) uint64 { - if len(data) < 8 { - return 0 - } - return binary.LittleEndian.Uint64(data) -} - -func U64ToBytes(val uint64) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val) - return data -} - -func getIndexGCValue(index *dpaDBIndex) uint64 { - return index.Access -} - -func (s *DbStore) updateIndexAccess(index *dpaDBIndex) { - index.Access = s.accessCnt -} - -func getIndexKey(hash Key) []byte { - HashSize := len(hash) - key := make([]byte, HashSize+1) - key[0] = 0 - copy(key[1:], hash[:]) - return key -} - -func getDataKey(idx uint64) []byte { - key := make([]byte, 9) - key[0] = 1 - binary.BigEndian.PutUint64(key[1:9], idx) - - return key -} - -func encodeIndex(index *dpaDBIndex) []byte { - data, _ := rlp.EncodeToBytes(index) - return data -} - -func encodeData(chunk *Chunk) []byte { - return chunk.SData -} - -func decodeIndex(data []byte, index *dpaDBIndex) { - dec := rlp.NewStream(bytes.NewReader(data), 0) - dec.Decode(index) -} - -func decodeData(data []byte, chunk *Chunk) { - chunk.SData = data - chunk.Size = int64(binary.LittleEndian.Uint64(data[0:8])) -} - -func gcListPartition(list []*gcItem, left int, 
right int, pivotIndex int) int { - pivotValue := list[pivotIndex].value - dd := list[pivotIndex] - list[pivotIndex] = list[right] - list[right] = dd - storeIndex := left - for i := left; i < right; i++ { - if list[i].value < pivotValue { - dd = list[storeIndex] - list[storeIndex] = list[i] - list[i] = dd - storeIndex++ - } - } - dd = list[storeIndex] - list[storeIndex] = list[right] - list[right] = dd - return storeIndex -} - -func gcListSelect(list []*gcItem, left int, right int, n int) int { - if left == right { - return left - } - pivotIndex := (left + right) / 2 - pivotIndex = gcListPartition(list, left, right, pivotIndex) - if n == pivotIndex { - return n - } else { - if n < pivotIndex { - return gcListSelect(list, left, pivotIndex-1, n) - } else { - return gcListSelect(list, pivotIndex+1, right, n) - } - } -} - -func (s *DbStore) collectGarbage(ratio float32) { - it := s.db.NewIterator() - it.Seek(s.gcPos) - if it.Valid() { - s.gcPos = it.Key() - } else { - s.gcPos = nil - } - gcnt := 0 - - for (gcnt < gcArraySize) && (uint64(gcnt) < s.entryCnt) { - - if (s.gcPos == nil) || (s.gcPos[0] != kpIndex) { - it.Seek(s.gcStartPos) - if it.Valid() { - s.gcPos = it.Key() - } else { - s.gcPos = nil - } - } - - if (s.gcPos == nil) || (s.gcPos[0] != kpIndex) { - break - } - - gci := new(gcItem) - gci.idxKey = s.gcPos - var index dpaDBIndex - decodeIndex(it.Value(), &index) - gci.idx = index.Idx - // the smaller, the more likely to be gc'd - gci.value = getIndexGCValue(&index) - s.gcArray[gcnt] = gci - gcnt++ - it.Next() - if it.Valid() { - s.gcPos = it.Key() - } else { - s.gcPos = nil - } - } - it.Release() - - cutidx := gcListSelect(s.gcArray, 0, gcnt-1, int(float32(gcnt)*ratio)) - cutval := s.gcArray[cutidx].value - - // fmt.Print(gcnt, " ", s.entryCnt, " ") - - // actual gc - for i := 0; i < gcnt; i++ { - if s.gcArray[i].value <= cutval { - gcCounter.Inc(1) - s.delete(s.gcArray[i].idx, s.gcArray[i].idxKey) - } - } - - // fmt.Println(s.entryCnt) - - s.db.Put(keyGCPos, 
s.gcPos) -} - -// Export writes all chunks from the store to a tar archive, returning the -// number of chunks written. -func (s *DbStore) Export(out io.Writer) (int64, error) { - tw := tar.NewWriter(out) - defer tw.Close() - - it := s.db.NewIterator() - defer it.Release() - var count int64 - for ok := it.Seek([]byte{kpIndex}); ok; ok = it.Next() { - key := it.Key() - if (key == nil) || (key[0] != kpIndex) { - break - } - - var index dpaDBIndex - decodeIndex(it.Value(), &index) - - data, err := s.db.Get(getDataKey(index.Idx)) - if err != nil { - log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err)) - continue - } - - hdr := &tar.Header{ - Name: hex.EncodeToString(key[1:]), - Mode: 0644, - Size: int64(len(data)), - } - if err := tw.WriteHeader(hdr); err != nil { - return count, err - } - if _, err := tw.Write(data); err != nil { - return count, err - } - count++ - } - - return count, nil -} - -// Import reads chunks into the store from a tar archive, returning the number -// of chunks read. 
-func (s *DbStore) Import(in io.Reader) (int64, error) { - tr := tar.NewReader(in) - - var count int64 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return count, err - } - - if len(hdr.Name) != 64 { - log.Warn("ignoring non-chunk file", "name", hdr.Name) - continue - } - - key, err := hex.DecodeString(hdr.Name) - if err != nil { - log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err) - continue - } - - data, err := io.ReadAll(tr) - if err != nil { - return count, err - } - - s.Put(&Chunk{Key: key, SData: data}) - count++ - } - - return count, nil -} - -func (s *DbStore) Cleanup() { - //Iterates over the database and checks that there are no faulty chunks - it := s.db.NewIterator() - startPosition := []byte{kpIndex} - it.Seek(startPosition) - var key []byte - var errorsFound, total int - for it.Valid() { - key = it.Key() - if (key == nil) || (key[0] != kpIndex) { - break - } - total++ - var index dpaDBIndex - decodeIndex(it.Value(), &index) - - data, err := s.db.Get(getDataKey(index.Idx)) - if err != nil { - log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err)) - s.delete(index.Idx, getIndexKey(key[1:])) - errorsFound++ - } else { - hasher := s.hashfunc() - hasher.Write(data) - hash := hasher.Sum(nil) - if !bytes.Equal(hash, key[1:]) { - log.Warn(fmt.Sprintf("Found invalid chunk. Hash mismatch. 
hash=%x, key=%x", hash, key[:])) - s.delete(index.Idx, getIndexKey(key[1:])) - errorsFound++ - } - } - it.Next() - } - it.Release() - log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total)) -} - -func (s *DbStore) delete(idx uint64, idxKey []byte) { - batch := new(leveldb.Batch) - batch.Delete(idxKey) - batch.Delete(getDataKey(idx)) - dbStoreDeleteCounter.Inc(1) - s.entryCnt-- - batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt)) - s.db.Write(batch) -} - -func (s *DbStore) Counter() uint64 { - s.lock.Lock() - defer s.lock.Unlock() - return s.dataIdx -} - -func (s *DbStore) Put(chunk *Chunk) { - s.lock.Lock() - defer s.lock.Unlock() - - ikey := getIndexKey(chunk.Key) - var index dpaDBIndex - - if s.tryAccessIdx(ikey, &index) { - if chunk.dbStored != nil { - close(chunk.dbStored) - } - log.Trace(fmt.Sprintf("Storing to DB: chunk already exists, only update access")) - return // already exists, only update access - } - - data := encodeData(chunk) - //data := ethutil.Encode([]interface{}{entry}) - - if s.entryCnt >= s.capacity { - s.collectGarbage(gcArrayFreeRatio) - } - - batch := new(leveldb.Batch) - - batch.Put(getDataKey(s.dataIdx), data) - - index.Idx = s.dataIdx - s.updateIndexAccess(&index) - - idata := encodeIndex(&index) - batch.Put(ikey, idata) - - batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt)) - s.entryCnt++ - batch.Put(keyDataIdx, U64ToBytes(s.dataIdx)) - s.dataIdx++ - batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt)) - s.accessCnt++ - - s.db.Write(batch) - if chunk.dbStored != nil { - close(chunk.dbStored) - } - log.Trace(fmt.Sprintf("DbStore.Put: %v. 
db storage counter: %v ", chunk.Key.Log(), s.dataIdx)) -} - -// try to find index; if found, update access cnt and return true -func (s *DbStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool { - idata, err := s.db.Get(ikey) - if err != nil { - return false - } - decodeIndex(idata, index) - - batch := new(leveldb.Batch) - - batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt)) - s.accessCnt++ - s.updateIndexAccess(index) - idata = encodeIndex(index) - batch.Put(ikey, idata) - - s.db.Write(batch) - - return true -} - -func (s *DbStore) Get(key Key) (chunk *Chunk, err error) { - s.lock.Lock() - defer s.lock.Unlock() - - var index dpaDBIndex - - if s.tryAccessIdx(getIndexKey(key), &index) { - var data []byte - data, err = s.db.Get(getDataKey(index.Idx)) - if err != nil { - log.Trace(fmt.Sprintf("DBStore: Chunk %v found but could not be accessed: %v", key.Log(), err)) - s.delete(index.Idx, getIndexKey(key)) - return - } - - hasher := s.hashfunc() - hasher.Write(data) - hash := hasher.Sum(nil) - if !bytes.Equal(hash, key) { - s.delete(index.Idx, getIndexKey(key)) - log.Warn("Invalid Chunk in Database. 
Please repair with command: 'swarm cleandb'") - } - - chunk = &Chunk{ - Key: key, - } - decodeData(data, chunk) - } else { - err = notFound - } - - return - -} - -func (s *DbStore) updateAccessCnt(key Key) { - - s.lock.Lock() - defer s.lock.Unlock() - - var index dpaDBIndex - s.tryAccessIdx(getIndexKey(key), &index) // result_chn == nil, only update access cnt - -} - -func (s *DbStore) setCapacity(c uint64) { - - s.lock.Lock() - defer s.lock.Unlock() - - s.capacity = c - - if s.entryCnt > c { - ratio := float32(1.01) - float32(c)/float32(s.entryCnt) - if ratio < gcArrayFreeRatio { - ratio = gcArrayFreeRatio - } - if ratio > 1 { - ratio = 1 - } - for s.entryCnt > c { - s.collectGarbage(ratio) - } - } -} - -func (s *DbStore) Close() { - s.db.Close() -} - -// describes a section of the DbStore representing the unsynced -// -// domain relevant to a peer -// Start - Stop designate a continuous area Keys in an address space -// typically the addresses closer to us than to the peer but not closer -// another closer peer in between -// From - To designates a time interval typically from the last disconnect -// till the latest connection (real time traffic is relayed) -type DbSyncState struct { - Start, Stop Key - First, Last uint64 -} - -// implements the syncer iterator interface -// iterates by storage index (~ time of storage = first entry to db) -type dbSyncIterator struct { - it iterator.Iterator - DbSyncState -} - -// initialises a sync iterator from a syncToken (passed in with the handshake) -func (self *DbStore) NewSyncIterator(state DbSyncState) (si *dbSyncIterator, err error) { - if state.First > state.Last { - return nil, fmt.Errorf("no entries found") - } - si = &dbSyncIterator{ - it: self.db.NewIterator(), - DbSyncState: state, - } - si.it.Seek(getIndexKey(state.Start)) - return si, nil -} - -// walk the area from Start to Stop and returns items within time interval -// First to Last -func (self *dbSyncIterator) Next() (key Key) { - for self.it.Valid() { - 
dbkey := self.it.Key() - if dbkey[0] != 0 { - break - } - key = Key(make([]byte, len(dbkey)-1)) - copy(key[:], dbkey[1:]) - if bytes.Compare(key[:], self.Start) <= 0 { - self.it.Next() - continue - } - if bytes.Compare(key[:], self.Stop) > 0 { - break - } - var index dpaDBIndex - decodeIndex(self.it.Value(), &index) - self.it.Next() - if (index.Idx >= self.First) && (index.Idx < self.Last) { - return - } - } - self.it.Release() - return nil -} diff --git a/swarm/storage/dbstore_test.go b/swarm/storage/dbstore_test.go deleted file mode 100644 index c2c807f2d35b..000000000000 --- a/swarm/storage/dbstore_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -import ( - "bytes" - "os" - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -func initDbStore(t *testing.T) *DbStore { - dir, err := os.MkdirTemp("", "bzz-storage-test") - if err != nil { - t.Fatal(err) - } - m, err := NewDbStore(dir, MakeHashFunc(SHA3Hash), defaultDbCapacity, defaultRadius) - if err != nil { - t.Fatal("can't create store:", err) - } - return m -} - -func testDbStore(l int64, branches int64, t *testing.T) { - m := initDbStore(t) - defer m.Close() - testStore(m, l, branches, t) -} - -func TestDbStore128_0x1000000(t *testing.T) { - testDbStore(0x1000000, 128, t) -} - -func TestDbStore128_10000_(t *testing.T) { - testDbStore(10000, 128, t) -} - -func TestDbStore128_1000_(t *testing.T) { - testDbStore(1000, 128, t) -} - -func TestDbStore128_100_(t *testing.T) { - testDbStore(100, 128, t) -} - -func TestDbStore2_100_(t *testing.T) { - testDbStore(100, 2, t) -} - -func TestDbStoreNotFound(t *testing.T) { - m := initDbStore(t) - defer m.Close() - _, err := m.Get(ZeroKey) - if err != notFound { - t.Errorf("Expected notFound, got %v", err) - } -} - -func TestDbStoreSyncIterator(t *testing.T) { - m := initDbStore(t) - defer m.Close() - keys := []Key{ - Key(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")), - Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")), - Key(common.Hex2Bytes("5000000000000000000000000000000000000000000000000000000000000000")), - Key(common.Hex2Bytes("3000000000000000000000000000000000000000000000000000000000000000")), - Key(common.Hex2Bytes("2000000000000000000000000000000000000000000000000000000000000000")), - Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")), - } - for _, key := range keys { - m.Put(NewChunk(key, nil)) - } - it, err := m.NewSyncIterator(DbSyncState{ - Start: Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")), - Stop: 
Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")), - First: 2, - Last: 4, - }) - if err != nil { - t.Fatalf("unexpected error creating NewSyncIterator") - } - - var chunk Key - var res []Key - for { - chunk = it.Next() - if chunk == nil { - break - } - res = append(res, chunk) - } - if len(res) != 1 { - t.Fatalf("Expected 1 chunk, got %v: %v", len(res), res) - } - if !bytes.Equal(res[0][:], keys[3]) { - t.Fatalf("Expected %v chunk, got %v", keys[3], res[0]) - } - - if err != nil { - t.Fatalf("unexpected error creating NewSyncIterator") - } - - it, err = m.NewSyncIterator(DbSyncState{ - Start: Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")), - Stop: Key(common.Hex2Bytes("5000000000000000000000000000000000000000000000000000000000000000")), - First: 2, - Last: 4, - }) - - res = nil - for { - chunk = it.Next() - if chunk == nil { - break - } - res = append(res, chunk) - } - if len(res) != 2 { - t.Fatalf("Expected 2 chunk, got %v: %v", len(res), res) - } - if !bytes.Equal(res[0][:], keys[3]) { - t.Fatalf("Expected %v chunk, got %v", keys[3], res[0]) - } - if !bytes.Equal(res[1][:], keys[2]) { - t.Fatalf("Expected %v chunk, got %v", keys[2], res[1]) - } - - if err != nil { - t.Fatalf("unexpected error creating NewSyncIterator") - } - - it, _ = m.NewSyncIterator(DbSyncState{ - Start: Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")), - Stop: Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")), - First: 2, - Last: 5, - }) - res = nil - for { - chunk = it.Next() - if chunk == nil { - break - } - res = append(res, chunk) - } - if len(res) != 2 { - t.Fatalf("Expected 2 chunk, got %v", len(res)) - } - if !bytes.Equal(res[0][:], keys[4]) { - t.Fatalf("Expected %v chunk, got %v", keys[4], res[0]) - } - if !bytes.Equal(res[1][:], keys[3]) { - t.Fatalf("Expected %v chunk, got %v", keys[3], res[1]) - } - - it, _ = 
m.NewSyncIterator(DbSyncState{ - Start: Key(common.Hex2Bytes("2000000000000000000000000000000000000000000000000000000000000000")), - Stop: Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")), - First: 2, - Last: 5, - }) - res = nil - for { - chunk = it.Next() - if chunk == nil { - break - } - res = append(res, chunk) - } - if len(res) != 1 { - t.Fatalf("Expected 1 chunk, got %v", len(res)) - } - if !bytes.Equal(res[0][:], keys[3]) { - t.Fatalf("Expected %v chunk, got %v", keys[3], res[0]) - } -} diff --git a/swarm/storage/dpa.go b/swarm/storage/dpa.go deleted file mode 100644 index 1b67d78e64fd..000000000000 --- a/swarm/storage/dpa.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "errors" - "fmt" - "io" - "sync" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -/* -DPA provides the client API entrypoints Store and Retrieve to store and retrieve -It can store anything that has a byte slice representation, so files or serialised objects etc. - -Storage: DPA calls the Chunker to segment the input datastream of any size to a merkle hashed tree of chunks. The key of the root block is returned to the client. 
- -Retrieval: given the key of the root block, the DPA retrieves the block chunks and reconstructs the original data and passes it back as a lazy reader. A lazy reader is a reader with on-demand delayed processing, i.e. the chunks needed to reconstruct a large file are only fetched and processed if that particular part of the document is actually read. - -As the chunker produces chunks, DPA dispatches them to its own chunk store -implementation for storage or retrieval. -*/ - -const ( - storeChanCapacity = 100 - retrieveChanCapacity = 100 - singletonSwarmDbCapacity = 50000 - singletonSwarmCacheCapacity = 500 - maxStoreProcesses = 8 - maxRetrieveProcesses = 8 -) - -var ( - notFound = errors.New("not found") -) - -type DPA struct { - ChunkStore - storeC chan *Chunk - retrieveC chan *Chunk - Chunker Chunker - - lock sync.Mutex - running bool - quitC chan bool -} - -// for testing locally -func NewLocalDPA(datadir string) (*DPA, error) { - - hash := MakeHashFunc("SHA256") - - dbStore, err := NewDbStore(datadir, hash, singletonSwarmDbCapacity, 0) - if err != nil { - return nil, err - } - - return NewDPA(&LocalStore{ - NewMemStore(dbStore, singletonSwarmCacheCapacity), - dbStore, - }, NewChunkerParams()), nil -} - -func NewDPA(store ChunkStore, params *ChunkerParams) *DPA { - chunker := NewTreeChunker(params) - return &DPA{ - Chunker: chunker, - ChunkStore: store, - } -} - -// Public API. Main entry point for document retrieval directly. Used by the -// FS-aware API and httpaccess -// Chunk retrieval blocks on netStore requests with a timeout so reader will -// report error if retrieval of chunks within requested range time out. -func (self *DPA) Retrieve(key Key) LazySectionReader { - return self.Chunker.Join(key, self.retrieveC) -} - -// Public API. Main entry point for document storage directly. 
Used by the -// FS-aware API and httpaccess -func (self *DPA) Store(data io.Reader, size int64, swg *sync.WaitGroup, wwg *sync.WaitGroup) (key Key, err error) { - return self.Chunker.Split(data, size, self.storeC, swg, wwg) -} - -func (self *DPA) Start() { - self.lock.Lock() - defer self.lock.Unlock() - if self.running { - return - } - self.running = true - self.retrieveC = make(chan *Chunk, retrieveChanCapacity) - self.storeC = make(chan *Chunk, storeChanCapacity) - self.quitC = make(chan bool) - self.storeLoop() - self.retrieveLoop() -} - -func (self *DPA) Stop() { - self.lock.Lock() - defer self.lock.Unlock() - if !self.running { - return - } - self.running = false - close(self.quitC) -} - -// retrieveLoop dispatches the parallel chunk retrieval requests received on the -// retrieve channel to its ChunkStore (NetStore or LocalStore) -func (self *DPA) retrieveLoop() { - for i := 0; i < maxRetrieveProcesses; i++ { - go self.retrieveWorker() - } - log.Trace(fmt.Sprintf("dpa: retrieve loop spawning %v workers", maxRetrieveProcesses)) -} - -func (self *DPA) retrieveWorker() { - for chunk := range self.retrieveC { - log.Trace(fmt.Sprintf("dpa: retrieve loop : chunk %v", chunk.Key.Log())) - storedChunk, err := self.Get(chunk.Key) - if err == notFound { - log.Trace(fmt.Sprintf("chunk %v not found", chunk.Key.Log())) - } else if err != nil { - log.Trace(fmt.Sprintf("error retrieving chunk %v: %v", chunk.Key.Log(), err)) - } else { - chunk.SData = storedChunk.SData - chunk.Size = storedChunk.Size - } - close(chunk.C) - - select { - case <-self.quitC: - return - default: - } - } -} - -// storeLoop dispatches the parallel chunk store request processors -// received on the store channel to its ChunkStore (NetStore or LocalStore) -func (self *DPA) storeLoop() { - for i := 0; i < maxStoreProcesses; i++ { - go self.storeWorker() - } - log.Trace(fmt.Sprintf("dpa: store spawning %v workers", maxStoreProcesses)) -} - -func (self *DPA) storeWorker() { - - for chunk := range 
self.storeC { - self.Put(chunk) - if chunk.wg != nil { - log.Trace(fmt.Sprintf("dpa: store processor %v", chunk.Key.Log())) - chunk.wg.Done() - - } - select { - case <-self.quitC: - return - default: - } - } -} - -// DpaChunkStore implements the ChunkStore interface, -// this chunk access layer assumed 2 chunk stores -// local storage eg. LocalStore and network storage eg., NetStore -// access by calling network is blocking with a timeout - -type dpaChunkStore struct { - n int - localStore ChunkStore - netStore ChunkStore -} - -func NewDpaChunkStore(localStore, netStore ChunkStore) *dpaChunkStore { - return &dpaChunkStore{0, localStore, netStore} -} - -// Get is the entrypoint for local retrieve requests -// waits for response or times out -func (self *dpaChunkStore) Get(key Key) (chunk *Chunk, err error) { - chunk, err = self.netStore.Get(key) - // timeout := time.Now().Add(searchTimeout) - if chunk.SData != nil { - log.Trace(fmt.Sprintf("DPA.Get: %v found locally, %d bytes", key.Log(), len(chunk.SData))) - return - } - // TODO: use self.timer time.Timer and reset with defer disableTimer - timer := time.After(searchTimeout) - select { - case <-timer: - log.Trace(fmt.Sprintf("DPA.Get: %v request time out ", key.Log())) - err = notFound - case <-chunk.Req.C: - log.Trace(fmt.Sprintf("DPA.Get: %v retrieved, %d bytes (%p)", key.Log(), len(chunk.SData), chunk)) - } - return -} - -// Put is the entrypoint for local store requests coming from storeLoop -func (self *dpaChunkStore) Put(entry *Chunk) { - chunk, err := self.localStore.Get(entry.Key) - if err != nil { - log.Trace(fmt.Sprintf("DPA.Put: %v new chunk. 
call netStore.Put", entry.Key.Log())) - chunk = entry - } else if chunk.SData == nil { - log.Trace(fmt.Sprintf("DPA.Put: %v request entry found", entry.Key.Log())) - chunk.SData = entry.SData - chunk.Size = entry.Size - } else { - log.Trace(fmt.Sprintf("DPA.Put: %v chunk already known", entry.Key.Log())) - return - } - // from this point on the storage logic is the same with network storage requests - log.Trace(fmt.Sprintf("DPA.Put %v: %v", self.n, chunk.Key.Log())) - self.n++ - self.netStore.Put(chunk) -} - -// Close chunk store -func (self *dpaChunkStore) Close() {} diff --git a/swarm/storage/dpa_test.go b/swarm/storage/dpa_test.go deleted file mode 100644 index c24488f46b54..000000000000 --- a/swarm/storage/dpa_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -import ( - "bytes" - "io" - "os" - "sync" - "testing" -) - -const testDataSize = 0x1000000 - -func TestDPArandom(t *testing.T) { - dbStore := initDbStore(t) - dbStore.setCapacity(50000) - memStore := NewMemStore(dbStore, defaultCacheCapacity) - localStore := &LocalStore{ - memStore, - dbStore, - } - chunker := NewTreeChunker(NewChunkerParams()) - dpa := &DPA{ - Chunker: chunker, - ChunkStore: localStore, - } - dpa.Start() - defer dpa.Stop() - defer os.RemoveAll("/tmp/bzz") - - reader, slice := testDataReaderAndSlice(testDataSize) - wg := &sync.WaitGroup{} - key, err := dpa.Store(reader, testDataSize, wg, nil) - if err != nil { - t.Errorf("Store error: %v", err) - } - wg.Wait() - resultReader := dpa.Retrieve(key) - resultSlice := make([]byte, len(slice)) - n, err := resultReader.ReadAt(resultSlice, 0) - if err != io.EOF { - t.Errorf("Retrieve error: %v", err) - } - if n != len(slice) { - t.Errorf("Slice size error got %d, expected %d.", n, len(slice)) - } - if !bytes.Equal(slice, resultSlice) { - t.Errorf("Comparison error.") - } - os.WriteFile("/tmp/slice.bzz.16M", slice, 0666) - os.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666) - localStore.memStore = NewMemStore(dbStore, defaultCacheCapacity) - resultReader = dpa.Retrieve(key) - for i := range resultSlice { - resultSlice[i] = 0 - } - n, err = resultReader.ReadAt(resultSlice, 0) - if err != io.EOF { - t.Errorf("Retrieve error after removing memStore: %v", err) - } - if n != len(slice) { - t.Errorf("Slice size error after removing memStore got %d, expected %d.", n, len(slice)) - } - if !bytes.Equal(slice, resultSlice) { - t.Errorf("Comparison error after removing memStore.") - } -} - -func TestDPA_capacity(t *testing.T) { - dbStore := initDbStore(t) - memStore := NewMemStore(dbStore, defaultCacheCapacity) - localStore := &LocalStore{ - memStore, - dbStore, - } - memStore.setCapacity(0) - chunker := NewTreeChunker(NewChunkerParams()) - dpa := &DPA{ - Chunker: chunker, - ChunkStore: 
localStore, - } - dpa.Start() - reader, slice := testDataReaderAndSlice(testDataSize) - wg := &sync.WaitGroup{} - key, err := dpa.Store(reader, testDataSize, wg, nil) - if err != nil { - t.Errorf("Store error: %v", err) - } - wg.Wait() - resultReader := dpa.Retrieve(key) - resultSlice := make([]byte, len(slice)) - n, err := resultReader.ReadAt(resultSlice, 0) - if err != io.EOF { - t.Errorf("Retrieve error: %v", err) - } - if n != len(slice) { - t.Errorf("Slice size error got %d, expected %d.", n, len(slice)) - } - if !bytes.Equal(slice, resultSlice) { - t.Errorf("Comparison error.") - } - // Clear memStore - memStore.setCapacity(0) - // check whether it is, indeed, empty - dpa.ChunkStore = memStore - resultReader = dpa.Retrieve(key) - if _, err = resultReader.ReadAt(resultSlice, 0); err == nil { - t.Errorf("Was able to read %d bytes from an empty memStore.", len(slice)) - } - // check how it works with localStore - dpa.ChunkStore = localStore - // localStore.dbStore.setCapacity(0) - resultReader = dpa.Retrieve(key) - for i := range resultSlice { - resultSlice[i] = 0 - } - n, err = resultReader.ReadAt(resultSlice, 0) - if err != io.EOF { - t.Errorf("Retrieve error after clearing memStore: %v", err) - } - if n != len(slice) { - t.Errorf("Slice size error after clearing memStore got %d, expected %d.", n, len(slice)) - } - if !bytes.Equal(slice, resultSlice) { - t.Errorf("Comparison error after clearing memStore.") - } -} diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go deleted file mode 100644 index fa9a32d98a88..000000000000 --- a/swarm/storage/localstore.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "encoding/binary" - - "github.com/XinFinOrg/XDPoSChain/metrics" -) - -//metrics variables -var ( - dbStorePutCounter = metrics.NewRegisteredCounter("storage.db.dbstore.put.count", nil) -) - -// LocalStore is a combination of inmemory db over a disk persisted db -// implements a Get/Put with fallback (caching) logic using any 2 ChunkStores -type LocalStore struct { - memStore ChunkStore - DbStore ChunkStore -} - -// This constructor uses MemStore and DbStore as components -func NewLocalStore(hash SwarmHasher, params *StoreParams) (*LocalStore, error) { - dbStore, err := NewDbStore(params.ChunkDbPath, hash, params.DbCapacity, params.Radius) - if err != nil { - return nil, err - } - return &LocalStore{ - memStore: NewMemStore(dbStore, params.CacheCapacity), - DbStore: dbStore, - }, nil -} - -func (self *LocalStore) CacheCounter() uint64 { - return uint64(self.memStore.(*MemStore).Counter()) -} - -func (self *LocalStore) DbCounter() uint64 { - return self.DbStore.(*DbStore).Counter() -} - -// LocalStore is itself a chunk store -// unsafe, in that the data is not integrity checked -func (self *LocalStore) Put(chunk *Chunk) { - chunk.dbStored = make(chan bool) - self.memStore.Put(chunk) - if chunk.wg != nil { - chunk.wg.Add(1) - } - go func() { - 
dbStorePutCounter.Inc(1) - self.DbStore.Put(chunk) - if chunk.wg != nil { - chunk.wg.Done() - } - }() -} - -// Get(chunk *Chunk) looks up a chunk in the local stores -// This method is blocking until the chunk is retrieved -// so additional timeout may be needed to wrap this call if -// ChunkStores are remote and can have long latency -func (self *LocalStore) Get(key Key) (chunk *Chunk, err error) { - chunk, err = self.memStore.Get(key) - if err == nil { - return - } - chunk, err = self.DbStore.Get(key) - if err != nil { - return - } - chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) - self.memStore.Put(chunk) - return -} - -// Close local store -func (self *LocalStore) Close() {} diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go deleted file mode 100644 index b2ab3484764b..000000000000 --- a/swarm/storage/memstore.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// memory storage layer for the package blockhash - -package storage - -import ( - "fmt" - "sync" - - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" -) - -//metrics variables -var ( - memstorePutCounter = metrics.NewRegisteredCounter("storage.db.memstore.put.count", nil) - memstoreRemoveCounter = metrics.NewRegisteredCounter("storage.db.memstore.rm.count", nil) -) - -const ( - memTreeLW = 2 // log2(subtree count) of the subtrees - memTreeFLW = 14 // log2(subtree count) of the root layer - dbForceUpdateAccessCnt = 1000 - defaultCacheCapacity = 5000 -) - -type MemStore struct { - memtree *memTree - entryCnt, capacity uint // stored entries - accessCnt uint64 // access counter; oldest is thrown away when full - dbAccessCnt uint64 - dbStore *DbStore - lock sync.Mutex -} - -/* -a hash prefix subtree containing subtrees or one storage entry (but never both) - -- access[0] stores the smallest (oldest) access count value in this subtree -- if it contains more subtrees and its subtree count is at least 4, access[1:2] - stores the smallest access count in the first and second halves of subtrees - (so that access[0] = min(access[1], access[2]) -- likewise, if subtree count is at least 8, - access[1] = min(access[3], access[4]) - access[2] = min(access[5], access[6]) - (access[] is a binary tree inside the multi-bit leveled hash tree) -*/ - -func NewMemStore(d *DbStore, capacity uint) (m *MemStore) { - m = &MemStore{} - m.memtree = newMemTree(memTreeFLW, nil, 0) - m.dbStore = d - m.setCapacity(capacity) - return -} - -type memTree struct { - subtree []*memTree - parent *memTree - parentIdx uint - - bits uint // log2(subtree count) - width uint // subtree count - - entry *Chunk // if subtrees are present, entry should be nil - lastDBaccess uint64 - access []uint64 -} - -func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) { - node = new(memTree) - node.bits = b - node.width = 1 << b - node.subtree = make([]*memTree, 
node.width) - node.access = make([]uint64, node.width-1) - node.parent = parent - node.parentIdx = pidx - if parent != nil { - parent.subtree[pidx] = node - } - - return node -} - -func (node *memTree) updateAccess(a uint64) { - aidx := uint(0) - var aa uint64 - oa := node.access[0] - for node.access[aidx] == oa { - node.access[aidx] = a - if aidx > 0 { - aa = node.access[((aidx-1)^1)+1] - aidx = (aidx - 1) >> 1 - } else { - pidx := node.parentIdx - node = node.parent - if node == nil { - return - } - nn := node.subtree[pidx^1] - if nn != nil { - aa = nn.access[0] - } else { - aa = 0 - } - aidx = (node.width + pidx - 2) >> 1 - } - - if (aa != 0) && (aa < a) { - a = aa - } - } -} - -func (s *MemStore) setCapacity(c uint) { - s.lock.Lock() - defer s.lock.Unlock() - - for c < s.entryCnt { - s.removeOldest() - } - s.capacity = c -} - -func (s *MemStore) Counter() uint { - return s.entryCnt -} - -// entry (not its copy) is going to be in MemStore -func (s *MemStore) Put(entry *Chunk) { - if s.capacity == 0 { - return - } - - s.lock.Lock() - defer s.lock.Unlock() - - if s.entryCnt >= s.capacity { - s.removeOldest() - } - - s.accessCnt++ - - memstorePutCounter.Inc(1) - - node := s.memtree - bitpos := uint(0) - for node.entry == nil { - l := entry.Key.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - bitpos += node.bits - node = st - break - } - bitpos += node.bits - node = st - } - - if node.entry != nil { - - if node.entry.Key.isEqual(entry.Key) { - node.updateAccess(s.accessCnt) - if entry.SData == nil { - entry.Size = node.entry.Size - entry.SData = node.entry.SData - } - if entry.Req == nil { - entry.Req = node.entry.Req - } - entry.C = node.entry.C - node.entry = entry - return - } - - for node.entry != nil { - - l := node.entry.Key.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - } - st.entry = node.entry - node.entry = nil - st.updateAccess(node.access[0]) 
- - l = entry.Key.bits(bitpos, node.bits) - st = node.subtree[l] - if st == nil { - st = newMemTree(memTreeLW, node, l) - } - bitpos += node.bits - node = st - - } - } - - node.entry = entry - node.lastDBaccess = s.dbAccessCnt - node.updateAccess(s.accessCnt) - s.entryCnt++ -} - -func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) { - s.lock.Lock() - defer s.lock.Unlock() - - node := s.memtree - bitpos := uint(0) - for node.entry == nil { - l := hash.bits(bitpos, node.bits) - st := node.subtree[l] - if st == nil { - return nil, notFound - } - bitpos += node.bits - node = st - } - - if node.entry.Key.isEqual(hash) { - s.accessCnt++ - node.updateAccess(s.accessCnt) - chunk = node.entry - if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt { - s.dbAccessCnt++ - node.lastDBaccess = s.dbAccessCnt - if s.dbStore != nil { - s.dbStore.updateAccessCnt(hash) - } - } - } else { - err = notFound - } - - return -} - -func (s *MemStore) removeOldest() { - node := s.memtree - - for node.entry == nil { - - aidx := uint(0) - av := node.access[aidx] - - for aidx < node.width/2-1 { - if av == node.access[aidx*2+1] { - node.access[aidx] = node.access[aidx*2+2] - aidx = aidx*2 + 1 - } else if av == node.access[aidx*2+2] { - node.access[aidx] = node.access[aidx*2+1] - aidx = aidx*2 + 2 - } else { - panic(nil) - } - } - pidx := aidx*2 + 2 - node.width - if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) { - if node.subtree[pidx+1] != nil { - node.access[aidx] = node.subtree[pidx+1].access[0] - } else { - node.access[aidx] = 0 - } - } else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) { - if node.subtree[pidx] != nil { - node.access[aidx] = node.subtree[pidx].access[0] - } else { - node.access[aidx] = 0 - } - pidx++ - } else { - panic(nil) - } - - //fmt.Println(pidx) - node = node.subtree[pidx] - - } - - if node.entry.dbStored != nil { - log.Trace(fmt.Sprintf("Memstore Clean: Waiting for chunk %v to be saved", 
node.entry.Key.Log())) - <-node.entry.dbStored - log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v saved to DBStore. Ready to clear from mem.", node.entry.Key.Log())) - } else { - log.Trace(fmt.Sprintf("Memstore Clean: Chunk %v already in DB. Ready to delete.", node.entry.Key.Log())) - } - - if node.entry.SData != nil { - memstoreRemoveCounter.Inc(1) - node.entry = nil - s.entryCnt-- - } - - node.access[0] = 0 - - //--- - - aidx := uint(0) - for { - aa := node.access[aidx] - if aidx > 0 { - aidx = (aidx - 1) >> 1 - } else { - pidx := node.parentIdx - node = node.parent - if node == nil { - return - } - aidx = (node.width + pidx - 2) >> 1 - } - if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) { - node.access[aidx] = aa - } - } -} - -// Close memstore -func (s *MemStore) Close() {} diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go deleted file mode 100644 index 2e0ab535af2d..000000000000 --- a/swarm/storage/memstore_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -import ( - "testing" -) - -func testMemStore(l int64, branches int64, t *testing.T) { - m := NewMemStore(nil, defaultCacheCapacity) - testStore(m, l, branches, t) -} - -func TestMemStore128_10000(t *testing.T) { - testMemStore(10000, 128, t) -} - -func TestMemStore128_1000(t *testing.T) { - testMemStore(1000, 128, t) -} - -func TestMemStore128_100(t *testing.T) { - testMemStore(100, 128, t) -} - -func TestMemStore2_100(t *testing.T) { - testMemStore(100, 2, t) -} - -func TestMemStoreNotFound(t *testing.T) { - m := NewMemStore(nil, defaultCacheCapacity) - _, err := m.Get(ZeroKey) - if err != notFound { - t.Errorf("Expected notFound, got %v", err) - } -} diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go deleted file mode 100644 index 4ec1b738c069..000000000000 --- a/swarm/storage/netstore.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -import ( - "fmt" - "path/filepath" - "time" - - "github.com/XinFinOrg/XDPoSChain/log" -) - -/* -NetStore is a cloud storage access abstaction layer for swarm -it contains the shared logic of network served chunk store/retrieval requests -both local (coming from DPA api) and remote (coming from peers via bzz protocol) -it implements the ChunkStore interface and embeds LocalStore - -It is called by the bzz protocol instances via Depo (the store/retrieve request handler) -a protocol instance is running on each peer, so this is heavily parallelised. -NetStore falls back to a backend (CloudStorage interface) -implemented by bzz/network/forwarder. forwarder or IPFS or IPΞS -*/ -type NetStore struct { - hashfunc SwarmHasher - localStore *LocalStore - cloud CloudStore -} - -// backend engine for cloud store -// It can be aggregate dispatching to several parallel implementations: -// bzz/network/forwarder. forwarder or IPFS or IPΞS -type CloudStore interface { - Store(*Chunk) - Deliver(*Chunk) - Retrieve(*Chunk) -} - -type StoreParams struct { - ChunkDbPath string - DbCapacity uint64 - CacheCapacity uint - Radius int -} - -//create params with default values -func NewDefaultStoreParams() (self *StoreParams) { - return &StoreParams{ - DbCapacity: defaultDbCapacity, - CacheCapacity: defaultCacheCapacity, - Radius: defaultRadius, - } -} - -//this can only finally be set after all config options (file, cmd line, env vars) -//have been evaluated -func (self *StoreParams) Init(path string) { - self.ChunkDbPath = filepath.Join(path, "chunks") -} - -// netstore contructor, takes path argument that is used to initialise dbStore, -// the persistent (disk) storage component of LocalStore -// the second argument is the hive, the connection/logistics manager for the node -func NewNetStore(hash SwarmHasher, lstore *LocalStore, cloud CloudStore, params *StoreParams) *NetStore { - return &NetStore{ - hashfunc: hash, - localStore: lstore, - cloud: cloud, - } -} - 
-const ( - // maximum number of peers that a retrieved message is delivered to - requesterCount = 3 -) - -var ( - // timeout interval before retrieval is timed out - searchTimeout = 3 * time.Second -) - -// store logic common to local and network chunk store requests -// ~ unsafe put in localdb no check if exists no extra copy no hash validation -// the chunk is forced to propagate (Cloud.Store) even if locally found! -// caller needs to make sure if that is wanted -func (self *NetStore) Put(entry *Chunk) { - self.localStore.Put(entry) - - // handle deliveries - if entry.Req != nil { - log.Trace(fmt.Sprintf("NetStore.Put: localStore.Put %v hit existing request...delivering", entry.Key.Log())) - // closing C signals to other routines (local requests) - // that the chunk is has been retrieved - close(entry.Req.C) - // deliver the chunk to requesters upstream - go self.cloud.Deliver(entry) - } else { - log.Trace(fmt.Sprintf("NetStore.Put: localStore.Put %v stored locally", entry.Key.Log())) - // handle propagating store requests - // go self.cloud.Store(entry) - go self.cloud.Store(entry) - } -} - -// retrieve logic common for local and network chunk retrieval requests -func (self *NetStore) Get(key Key) (*Chunk, error) { - var err error - chunk, err := self.localStore.Get(key) - if err == nil { - if chunk.Req == nil { - log.Trace(fmt.Sprintf("NetStore.Get: %v found locally", key)) - } else { - log.Trace(fmt.Sprintf("NetStore.Get: %v hit on an existing request", key)) - // no need to launch again - } - return chunk, err - } - // no data and no request status - log.Trace(fmt.Sprintf("NetStore.Get: %v not found locally. 
open new request", key)) - chunk = NewChunk(key, newRequestStatus(key)) - self.localStore.memStore.Put(chunk) - go self.cloud.Retrieve(chunk) - return chunk, nil -} - -// Close netstore -func (self *NetStore) Close() {} diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go deleted file mode 100644 index 19d493405af4..000000000000 --- a/swarm/storage/pyramid.go +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "encoding/binary" - "errors" - "io" - "sync" - "time" -) - -/* - The main idea of a pyramid chunker is to process the input data without knowing the entire size apriori. - For this to be achieved, the chunker tree is built from the ground up until the data is exhausted. - This opens up new aveneus such as easy append and other sort of modifications to the tree thereby avoiding - duplication of data chunks. - - - Below is an example of a two level chunks tree. The leaf chunks are called data chunks and all the above - chunks are called tree chunks. The tree chunk above data chunks is level 0 and so on until it reaches - the root tree chunk. 
- - - - T10 <- Tree chunk lvl1 - | - __________________________|_____________________________ - / | | \ - / | \ \ - __T00__ ___T01__ ___T02__ ___T03__ <- Tree chunks lvl 0 - / / \ / / \ / / \ / / \ - / / \ / / \ / / \ / / \ - D1 D2 ... D128 D1 D2 ... D128 D1 D2 ... D128 D1 D2 ... D128 <- Data Chunks - - - The split function continuously read the data and creates data chunks and send them to storage. - When certain no of data chunks are created (defaultBranches), a signal is sent to create a tree - entry. When the level 0 tree entries reaches certain threshold (defaultBranches), another signal - is sent to a tree entry one level up.. and so on... until only the data is exhausted AND only one - tree entry is present in certain level. The key of tree entry is given out as the rootKey of the file. - -*/ - -var ( - errLoadingTreeRootChunk = errors.New("LoadTree Error: Could not load root chunk") - errLoadingTreeChunk = errors.New("LoadTree Error: Could not load chunk") -) - -const ( - ChunkProcessors = 8 - DefaultBranches int64 = 128 - splitTimeout = time.Minute * 5 -) - -const ( - DataChunk = 0 - TreeChunk = 1 -) - -type ChunkerParams struct { - Branches int64 - Hash string -} - -func NewChunkerParams() *ChunkerParams { - return &ChunkerParams{ - Branches: DefaultBranches, - Hash: SHA3Hash, - } -} - -// Entry to create a tree node -type TreeEntry struct { - level int - branchCount int64 - subtreeSize uint64 - chunk []byte - key []byte - index int // used in append to indicate the index of existing tree entry - updatePending bool // indicates if the entry is loaded from existing tree -} - -func NewTreeEntry(pyramid *PyramidChunker) *TreeEntry { - return &TreeEntry{ - level: 0, - branchCount: 0, - subtreeSize: 0, - chunk: make([]byte, pyramid.chunkSize+8), - key: make([]byte, pyramid.hashSize), - index: 0, - updatePending: false, - } -} - -// Used by the hash processor to create a data/tree chunk and send to storage -type chunkJob struct { - key Key - chunk []byte - size 
int64 - parentWg *sync.WaitGroup - chunkType int // used to identify the tree related chunks for debugging - chunkLvl int // leaf-1 is level 0 and goes upwards until it reaches root -} - -type PyramidChunker struct { - hashFunc SwarmHasher - chunkSize int64 - hashSize int64 - branches int64 - workerCount int64 - workerLock sync.RWMutex -} - -func NewPyramidChunker(params *ChunkerParams) (self *PyramidChunker) { - self = &PyramidChunker{} - self.hashFunc = MakeHashFunc(params.Hash) - self.branches = params.Branches - self.hashSize = int64(self.hashFunc().Size()) - self.chunkSize = self.hashSize * self.branches - self.workerCount = 0 - return -} - -func (self *PyramidChunker) Join(key Key, chunkC chan *Chunk) LazySectionReader { - return &LazyChunkReader{ - key: key, - chunkC: chunkC, - chunkSize: self.chunkSize, - branches: self.branches, - hashSize: self.hashSize, - } -} - -func (self *PyramidChunker) incrementWorkerCount() { - self.workerLock.Lock() - defer self.workerLock.Unlock() - self.workerCount += 1 -} - -func (self *PyramidChunker) getWorkerCount() int64 { - self.workerLock.Lock() - defer self.workerLock.Unlock() - return self.workerCount -} - -func (self *PyramidChunker) decrementWorkerCount() { - self.workerLock.Lock() - defer self.workerLock.Unlock() - self.workerCount -= 1 -} - -func (self *PyramidChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, storageWG, processorWG *sync.WaitGroup) (Key, error) { - jobC := make(chan *chunkJob, 2*ChunkProcessors) - wg := &sync.WaitGroup{} - errC := make(chan error) - quitC := make(chan bool) - rootKey := make([]byte, self.hashSize) - chunkLevel := make([][]*TreeEntry, self.branches) - - wg.Add(1) - go self.prepareChunks(false, chunkLevel, data, rootKey, quitC, wg, jobC, processorWG, chunkC, errC, storageWG) - - // closes internal error channel if all subprocesses in the workgroup finished - go func() { - - // waiting for all chunks to finish - wg.Wait() - - // if storage waitgroup is non-nil, we wait for 
storage to finish too - if storageWG != nil { - storageWG.Wait() - } - //We close errC here because this is passed down to 8 parallel routines underneath. - // if a error happens in one of them.. that particular routine raises error... - // once they all complete successfully, the control comes back and we can safely close this here. - close(errC) - }() - - defer close(quitC) - - select { - case err := <-errC: - if err != nil { - return nil, err - } - case <-time.NewTimer(splitTimeout).C: - } - return rootKey, nil - -} - -func (self *PyramidChunker) Append(key Key, data io.Reader, chunkC chan *Chunk, storageWG, processorWG *sync.WaitGroup) (Key, error) { - quitC := make(chan bool) - rootKey := make([]byte, self.hashSize) - chunkLevel := make([][]*TreeEntry, self.branches) - - // Load the right most unfinished tree chunks in every level - self.loadTree(chunkLevel, key, chunkC, quitC) - - jobC := make(chan *chunkJob, 2*ChunkProcessors) - wg := &sync.WaitGroup{} - errC := make(chan error) - - wg.Add(1) - go self.prepareChunks(true, chunkLevel, data, rootKey, quitC, wg, jobC, processorWG, chunkC, errC, storageWG) - - // closes internal error channel if all subprocesses in the workgroup finished - go func() { - - // waiting for all chunks to finish - wg.Wait() - - // if storage waitgroup is non-nil, we wait for storage to finish too - if storageWG != nil { - storageWG.Wait() - } - close(errC) - }() - - defer close(quitC) - - select { - case err := <-errC: - if err != nil { - return nil, err - } - case <-time.NewTimer(splitTimeout).C: - } - return rootKey, nil - -} - -func (self *PyramidChunker) processor(id int64, jobC chan *chunkJob, chunkC chan *Chunk, errC chan error, quitC chan bool, swg, wwg *sync.WaitGroup) { - defer self.decrementWorkerCount() - - hasher := self.hashFunc() - if wwg != nil { - defer wwg.Done() - } - for { - select { - - case job, ok := <-jobC: - if !ok { - return - } - self.processChunk(id, hasher, job, chunkC, swg) - case <-quitC: - return - } - 
} -} - -func (self *PyramidChunker) processChunk(id int64, hasher SwarmHash, job *chunkJob, chunkC chan *Chunk, swg *sync.WaitGroup) { - hasher.ResetWithLength(job.chunk[:8]) // 8 bytes of length - hasher.Write(job.chunk[8:]) // minus 8 []byte length - h := hasher.Sum(nil) - - newChunk := &Chunk{ - Key: h, - SData: job.chunk, - Size: job.size, - wg: swg, - } - - // report hash of this chunk one level up (keys corresponds to the proper subslice of the parent chunk) - copy(job.key, h) - - // send off new chunk to storage - if chunkC != nil { - if swg != nil { - swg.Add(1) - } - } - job.parentWg.Done() - - if chunkC != nil { - chunkC <- newChunk - } -} - -func (self *PyramidChunker) loadTree(chunkLevel [][]*TreeEntry, key Key, chunkC chan *Chunk, quitC chan bool) error { - // Get the root chunk to get the total size - chunk := retrieve(key, chunkC, quitC) - if chunk == nil { - return errLoadingTreeRootChunk - } - - //if data size is less than a chunk... add a parent with update as pending - if chunk.Size <= self.chunkSize { - newEntry := &TreeEntry{ - level: 0, - branchCount: 1, - subtreeSize: uint64(chunk.Size), - chunk: make([]byte, self.chunkSize+8), - key: make([]byte, self.hashSize), - index: 0, - updatePending: true, - } - copy(newEntry.chunk[8:], chunk.Key) - chunkLevel[0] = append(chunkLevel[0], newEntry) - return nil - } - - var treeSize int64 - var depth int - treeSize = self.chunkSize - for ; treeSize < chunk.Size; treeSize *= self.branches { - depth++ - } - - // Add the root chunk entry - branchCount := int64(len(chunk.SData)-8) / self.hashSize - newEntry := &TreeEntry{ - level: depth - 1, - branchCount: branchCount, - subtreeSize: uint64(chunk.Size), - chunk: chunk.SData, - key: key, - index: 0, - updatePending: true, - } - chunkLevel[depth-1] = append(chunkLevel[depth-1], newEntry) - - // Add the rest of the tree - for lvl := depth - 1; lvl >= 1; lvl-- { - - //TODO(jmozah): instead of loading finished branches and then trim in the end, - //avoid loading 
them in the first place - for _, ent := range chunkLevel[lvl] { - branchCount = int64(len(ent.chunk)-8) / self.hashSize - for i := int64(0); i < branchCount; i++ { - key := ent.chunk[8+(i*self.hashSize) : 8+((i+1)*self.hashSize)] - newChunk := retrieve(key, chunkC, quitC) - if newChunk == nil { - return errLoadingTreeChunk - } - bewBranchCount := int64(len(newChunk.SData)-8) / self.hashSize - newEntry := &TreeEntry{ - level: lvl - 1, - branchCount: bewBranchCount, - subtreeSize: uint64(newChunk.Size), - chunk: newChunk.SData, - key: key, - index: 0, - updatePending: true, - } - chunkLevel[lvl-1] = append(chunkLevel[lvl-1], newEntry) - - } - - // We need to get only the right most unfinished branch.. so trim all finished branches - if int64(len(chunkLevel[lvl-1])) >= self.branches { - chunkLevel[lvl-1] = nil - } - } - } - - return nil -} - -func (self *PyramidChunker) prepareChunks(isAppend bool, chunkLevel [][]*TreeEntry, data io.Reader, rootKey []byte, quitC chan bool, wg *sync.WaitGroup, jobC chan *chunkJob, processorWG *sync.WaitGroup, chunkC chan *Chunk, errC chan error, storageWG *sync.WaitGroup) { - defer wg.Done() - - chunkWG := &sync.WaitGroup{} - totalDataSize := 0 - - // processorWG keeps track of workers spawned for hashing chunks - if processorWG != nil { - processorWG.Add(1) - } - - self.incrementWorkerCount() - go self.processor(self.workerCount, jobC, chunkC, errC, quitC, storageWG, processorWG) - - parent := NewTreeEntry(self) - var unFinishedChunk *Chunk - - if isAppend && len(chunkLevel[0]) != 0 { - - lastIndex := len(chunkLevel[0]) - 1 - ent := chunkLevel[0][lastIndex] - - if ent.branchCount < self.branches { - parent = &TreeEntry{ - level: 0, - branchCount: ent.branchCount, - subtreeSize: ent.subtreeSize, - chunk: ent.chunk, - key: ent.key, - index: lastIndex, - updatePending: true, - } - - lastBranch := parent.branchCount - 1 - lastKey := parent.chunk[8+lastBranch*self.hashSize : 8+(lastBranch+1)*self.hashSize] - - unFinishedChunk = 
retrieve(lastKey, chunkC, quitC) - if unFinishedChunk.Size < self.chunkSize { - - parent.subtreeSize = parent.subtreeSize - uint64(unFinishedChunk.Size) - parent.branchCount = parent.branchCount - 1 - } else { - unFinishedChunk = nil - } - } - } - - for index := 0; ; index++ { - - var n int - var err error - chunkData := make([]byte, self.chunkSize+8) - if unFinishedChunk != nil { - copy(chunkData, unFinishedChunk.SData) - n, err = data.Read(chunkData[8+unFinishedChunk.Size:]) - n += int(unFinishedChunk.Size) - unFinishedChunk = nil - } else { - n, err = data.Read(chunkData[8:]) - } - - totalDataSize += n - if err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - if parent.branchCount == 1 { - // Data is exactly one chunk.. pick the last chunk key as root - chunkWG.Wait() - lastChunksKey := parent.chunk[8 : 8+self.hashSize] - copy(rootKey, lastChunksKey) - break - } - } else { - close(quitC) - break - } - } - - // Data ended in chunk boundary.. just signal to start bulding tree - if n == 0 { - self.buildTree(isAppend, chunkLevel, parent, chunkWG, jobC, quitC, true, rootKey) - break - } else { - - pkey := self.enqueueDataChunk(chunkData, uint64(n), parent, chunkWG, jobC, quitC) - - // update tree related parent data structures - parent.subtreeSize += uint64(n) - parent.branchCount++ - - // Data got exhausted... signal to send any parent tree related chunks - if int64(n) < self.chunkSize { - - // only one data chunk .. 
so dont add any parent chunk - if parent.branchCount <= 1 { - chunkWG.Wait() - copy(rootKey, pkey) - break - } - - self.buildTree(isAppend, chunkLevel, parent, chunkWG, jobC, quitC, true, rootKey) - break - } - - if parent.branchCount == self.branches { - self.buildTree(isAppend, chunkLevel, parent, chunkWG, jobC, quitC, false, rootKey) - parent = NewTreeEntry(self) - } - - } - - workers := self.getWorkerCount() - if int64(len(jobC)) > workers && workers < ChunkProcessors { - if processorWG != nil { - processorWG.Add(1) - } - self.incrementWorkerCount() - go self.processor(self.workerCount, jobC, chunkC, errC, quitC, storageWG, processorWG) - } - - } - -} - -func (self *PyramidChunker) buildTree(isAppend bool, chunkLevel [][]*TreeEntry, ent *TreeEntry, chunkWG *sync.WaitGroup, jobC chan *chunkJob, quitC chan bool, last bool, rootKey []byte) { - chunkWG.Wait() - self.enqueueTreeChunk(chunkLevel, ent, chunkWG, jobC, quitC, last) - - compress := false - endLvl := self.branches - for lvl := int64(0); lvl < self.branches; lvl++ { - lvlCount := int64(len(chunkLevel[lvl])) - if lvlCount >= self.branches { - endLvl = lvl + 1 - compress = true - break - } - } - - if !compress && !last { - return - } - - // Wait for all the keys to be processed before compressing the tree - chunkWG.Wait() - - for lvl := int64(ent.level); lvl < endLvl; lvl++ { - - lvlCount := int64(len(chunkLevel[lvl])) - if lvlCount == 1 && last { - copy(rootKey, chunkLevel[lvl][0].key) - return - } - - for startCount := int64(0); startCount < lvlCount; startCount += self.branches { - - endCount := startCount + self.branches - if endCount > lvlCount { - endCount = lvlCount - } - - var nextLvlCount int64 - var tempEntry *TreeEntry - if len(chunkLevel[lvl+1]) > 0 { - nextLvlCount = int64(len(chunkLevel[lvl+1]) - 1) - tempEntry = chunkLevel[lvl+1][nextLvlCount] - } - if isAppend && tempEntry != nil && tempEntry.updatePending { - updateEntry := &TreeEntry{ - level: int(lvl + 1), - branchCount: 0, - subtreeSize: 
0, - chunk: make([]byte, self.chunkSize+8), - key: make([]byte, self.hashSize), - index: int(nextLvlCount), - updatePending: true, - } - for index := int64(0); index < lvlCount; index++ { - updateEntry.branchCount++ - updateEntry.subtreeSize += chunkLevel[lvl][index].subtreeSize - copy(updateEntry.chunk[8+(index*self.hashSize):8+((index+1)*self.hashSize)], chunkLevel[lvl][index].key[:self.hashSize]) - } - - self.enqueueTreeChunk(chunkLevel, updateEntry, chunkWG, jobC, quitC, last) - - } else { - - noOfBranches := endCount - startCount - newEntry := &TreeEntry{ - level: int(lvl + 1), - branchCount: noOfBranches, - subtreeSize: 0, - chunk: make([]byte, (noOfBranches*self.hashSize)+8), - key: make([]byte, self.hashSize), - index: int(nextLvlCount), - updatePending: false, - } - - index := int64(0) - for i := startCount; i < endCount; i++ { - entry := chunkLevel[lvl][i] - newEntry.subtreeSize += entry.subtreeSize - copy(newEntry.chunk[8+(index*self.hashSize):8+((index+1)*self.hashSize)], entry.key[:self.hashSize]) - index++ - } - - self.enqueueTreeChunk(chunkLevel, newEntry, chunkWG, jobC, quitC, last) - - } - - } - - if !isAppend { - chunkWG.Wait() - if compress { - chunkLevel[lvl] = nil - } - } - } - -} - -func (self *PyramidChunker) enqueueTreeChunk(chunkLevel [][]*TreeEntry, ent *TreeEntry, chunkWG *sync.WaitGroup, jobC chan *chunkJob, quitC chan bool, last bool) { - if ent != nil { - - // wait for data chunks to get over before processing the tree chunk - if last { - chunkWG.Wait() - } - - binary.LittleEndian.PutUint64(ent.chunk[:8], ent.subtreeSize) - ent.key = make([]byte, self.hashSize) - chunkWG.Add(1) - select { - case jobC <- &chunkJob{ent.key, ent.chunk[:ent.branchCount*self.hashSize+8], int64(ent.subtreeSize), chunkWG, TreeChunk, 0}: - case <-quitC: - } - - // Update or append based on weather it is a new entry or being reused - if ent.updatePending { - chunkWG.Wait() - chunkLevel[ent.level][ent.index] = ent - } else { - chunkLevel[ent.level] = 
append(chunkLevel[ent.level], ent) - } - - } -} - -func (self *PyramidChunker) enqueueDataChunk(chunkData []byte, size uint64, parent *TreeEntry, chunkWG *sync.WaitGroup, jobC chan *chunkJob, quitC chan bool) Key { - binary.LittleEndian.PutUint64(chunkData[:8], size) - pkey := parent.chunk[8+parent.branchCount*self.hashSize : 8+(parent.branchCount+1)*self.hashSize] - - chunkWG.Add(1) - select { - case jobC <- &chunkJob{pkey, chunkData[:size+8], int64(size), chunkWG, DataChunk, -1}: - case <-quitC: - } - - return pkey - -} diff --git a/swarm/storage/swarmhasher.go b/swarm/storage/swarmhasher.go deleted file mode 100644 index 38b86373c52d..000000000000 --- a/swarm/storage/swarmhasher.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package storage - -import ( - "hash" -) - -const ( - BMTHash = "BMT" - SHA3Hash = "SHA3" // http://golang.org/pkg/hash/#Hash -) - -type SwarmHash interface { - hash.Hash - ResetWithLength([]byte) -} - -type HashWithLength struct { - hash.Hash -} - -func (self *HashWithLength) ResetWithLength(length []byte) { - self.Reset() - self.Write(length) -} diff --git a/swarm/storage/types.go b/swarm/storage/types.go deleted file mode 100644 index c4fdb178a1aa..000000000000 --- a/swarm/storage/types.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package storage - -import ( - "bytes" - "crypto" - "fmt" - "hash" - "io" - "sync" - - "github.com/XinFinOrg/XDPoSChain/bmt" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/crypto/sha3" -) - -type Hasher func() hash.Hash -type SwarmHasher func() SwarmHash - -// Peer is the recorded as Source on the chunk -// should probably not be here? 
but network should wrap chunk object -type Peer interface{} - -type Key []byte - -func (x Key) Size() uint { - return uint(len(x)) -} - -func (x Key) isEqual(y Key) bool { - return bytes.Equal(x, y) -} - -func (h Key) bits(i, j uint) uint { - ii := i >> 3 - jj := i & 7 - if ii >= h.Size() { - return 0 - } - - if jj+j <= 8 { - return uint((h[ii] >> jj) & ((1 << j) - 1)) - } - - res := uint(h[ii] >> jj) - jj = 8 - jj - j -= jj - for j != 0 { - ii++ - if j < 8 { - res += uint(h[ii]&((1<. - -package swarm - -import ( - "bytes" - "context" - "crypto/ecdsa" - "fmt" - "math/big" - "net" - "strings" - "time" - "unicode" - - "github.com/XinFinOrg/XDPoSChain/accounts/abi/bind" - "github.com/XinFinOrg/XDPoSChain/common" - "github.com/XinFinOrg/XDPoSChain/contracts/chequebook" - "github.com/XinFinOrg/XDPoSChain/contracts/ens" - "github.com/XinFinOrg/XDPoSChain/crypto" - "github.com/XinFinOrg/XDPoSChain/ethclient" - "github.com/XinFinOrg/XDPoSChain/log" - "github.com/XinFinOrg/XDPoSChain/metrics" - "github.com/XinFinOrg/XDPoSChain/node" - "github.com/XinFinOrg/XDPoSChain/p2p" - "github.com/XinFinOrg/XDPoSChain/p2p/discover" - "github.com/XinFinOrg/XDPoSChain/params" - "github.com/XinFinOrg/XDPoSChain/rpc" - "github.com/XinFinOrg/XDPoSChain/swarm/api" - httpapi "github.com/XinFinOrg/XDPoSChain/swarm/api/http" - "github.com/XinFinOrg/XDPoSChain/swarm/fuse" - "github.com/XinFinOrg/XDPoSChain/swarm/network" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -var ( - startTime time.Time - updateGaugesPeriod = 5 * time.Second - startCounter = metrics.NewRegisteredCounter("stack,start", nil) - stopCounter = metrics.NewRegisteredCounter("stack,stop", nil) - uptimeGauge = metrics.NewRegisteredGauge("stack.uptime", nil) - dbSizeGauge = metrics.NewRegisteredGauge("storage.db.chunks.size", nil) - cacheSizeGauge = metrics.NewRegisteredGauge("storage.db.cache.size", nil) -) - -// the swarm stack -type Swarm struct { - config *api.Config // swarm configuration - api *api.Api // high level 
api layer (fs/manifest) - dns api.Resolver // DNS registrar - dbAccess *network.DbAccess // access to local chunk db iterator and storage counter - storage storage.ChunkStore // internal access to storage, common interface to cloud storage backends - dpa *storage.DPA // distributed preimage archive, the local API to the storage with document level storage/retrieval support - depo network.StorageHandler // remote request handler, interface between bzz protocol and the storage - cloud storage.CloudStore // procurement, cloud storage backend (can multi-cloud) - hive *network.Hive // the logistic manager - backend chequebook.Backend // simple blockchain Backend - privateKey *ecdsa.PrivateKey - corsString string - swapEnabled bool - lstore *storage.LocalStore // local store, needs to store for releasing resources after node stopped - sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit -} - -type SwarmAPI struct { - Api *api.Api - Backend chequebook.Backend - PrvKey *ecdsa.PrivateKey -} - -func (self *Swarm) API() *SwarmAPI { - return &SwarmAPI{ - Api: self.api, - Backend: self.backend, - PrvKey: self.privateKey, - } -} - -// creates a new swarm service instance -// implements node.Service -func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, config *api.Config) (self *Swarm, err error) { - if bytes.Equal(common.FromHex(config.PublicKey), storage.ZeroKey) { - return nil, fmt.Errorf("empty public key") - } - if bytes.Equal(common.FromHex(config.BzzKey), storage.ZeroKey) { - return nil, fmt.Errorf("empty bzz key") - } - - self = &Swarm{ - config: config, - swapEnabled: config.SwapEnabled, - backend: backend, - privateKey: config.Swap.PrivateKey(), - corsString: config.Cors, - } - log.Debug(fmt.Sprintf("Setting up Swarm service components")) - - hash := storage.MakeHashFunc(config.ChunkerParams.Hash) - self.lstore, err = storage.NewLocalStore(hash, config.StoreParams) - if err != nil { - return - } - - // setup local store - 
log.Debug(fmt.Sprintf("Set up local storage")) - - self.dbAccess = network.NewDbAccess(self.lstore) - log.Debug(fmt.Sprintf("Set up local db access (iterator/counter)")) - - // set up the kademlia hive - self.hive = network.NewHive( - common.HexToHash(self.config.BzzKey), // key to hive (kademlia base address) - config.HiveParams, // configuration parameters - config.SwapEnabled, // SWAP enabled - config.SyncEnabled, // syncronisation enabled - ) - log.Debug(fmt.Sprintf("Set up swarm network with Kademlia hive")) - - // setup cloud storage backend - self.cloud = network.NewForwarder(self.hive) - log.Debug(fmt.Sprintf("-> set swarm forwarder as cloud storage backend")) - - // setup cloud storage internal access layer - self.storage = storage.NewNetStore(hash, self.lstore, self.cloud, config.StoreParams) - log.Debug(fmt.Sprintf("-> swarm net store shared access layer to Swarm Chunk Store")) - - // set up Depo (storage handler = cloud storage access layer for incoming remote requests) - self.depo = network.NewDepo(hash, self.lstore, self.storage) - log.Debug(fmt.Sprintf("-> REmote Access to CHunks")) - - // set up DPA, the cloud storage local access layer - dpaChunkStore := storage.NewDpaChunkStore(self.lstore, self.storage) - log.Debug(fmt.Sprintf("-> Local Access to Swarm")) - // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage - self.dpa = storage.NewDPA(dpaChunkStore, self.config.ChunkerParams) - log.Debug(fmt.Sprintf("-> Content Store API")) - - if len(config.EnsAPIs) > 0 { - opts := []api.MultiResolverOption{} - for _, c := range config.EnsAPIs { - tld, endpoint, addr := parseEnsAPIAddress(c) - r, err := newEnsClient(endpoint, addr, config) - if err != nil { - return nil, err - } - opts = append(opts, api.MultiResolverOptionWithResolver(r, tld)) - } - self.dns = api.NewMultiResolver(opts...) 
- } - - self.api = api.NewApi(self.dpa, self.dns) - // Manifests for Smart Hosting - log.Debug(fmt.Sprintf("-> Web3 virtual server API")) - - self.sfs = fuse.NewSwarmFS(self.api) - log.Debug("-> Initializing Fuse file system") - - return self, nil -} - -// parseEnsAPIAddress parses string according to format -// [tld:][contract-addr@]url and returns ENSClientConfig structure -// with endpoint, contract address and TLD. -func parseEnsAPIAddress(s string) (tld, endpoint string, addr common.Address) { - isAllLetterString := func(s string) bool { - for _, r := range s { - if !unicode.IsLetter(r) { - return false - } - } - return true - } - endpoint = s - if i := strings.Index(endpoint, ":"); i > 0 { - if isAllLetterString(endpoint[:i]) && len(endpoint) > i+2 && endpoint[i+1:i+3] != "//" { - tld = endpoint[:i] - endpoint = endpoint[i+1:] - } - } - if i := strings.Index(endpoint, "@"); i > 0 { - addr = common.HexToAddress(endpoint[:i]) - endpoint = endpoint[i+1:] - } - return -} - -// newEnsClient creates a new ENS client for that is a consumer of -// a ENS API on a specific endpoint. It is used as a helper function -// for creating multiple resolvers in NewSwarm function. 
-func newEnsClient(endpoint string, addr common.Address, config *api.Config) (*ens.ENS, error) { - log.Info("connecting to ENS API", "url", endpoint) - client, err := rpc.Dial(endpoint) - if err != nil { - return nil, fmt.Errorf("error connecting to ENS API %s: %s", endpoint, err) - } - ensClient := ethclient.NewClient(client) - - ensRoot := config.EnsRoot - if addr != (common.Address{}) { - ensRoot = addr - } else { - a, err := detectEnsAddr(client) - if err == nil { - ensRoot = a - } else { - log.Warn(fmt.Sprintf("could not determine ENS contract address, using default %s", ensRoot), "err", err) - } - } - transactOpts := bind.NewKeyedTransactor(config.Swap.PrivateKey()) - dns, err := ens.NewENS(transactOpts, ensRoot, ensClient) - if err != nil { - return nil, err - } - log.Debug(fmt.Sprintf("-> Swarm Domain Name Registrar %v @ address %v", endpoint, ensRoot.Hex())) - return dns, err -} - -// detectEnsAddr determines the ENS contract address by getting both the -// version and genesis hash using the client and matching them to either -// mainnet or testnet addresses -func detectEnsAddr(client *rpc.Client) (common.Address, error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var version string - if err := client.CallContext(ctx, &version, "net_version"); err != nil { - return common.Address{}, err - } - - block, err := ethclient.NewClient(client).BlockByNumber(ctx, big.NewInt(0)) - if err != nil { - return common.Address{}, err - } - - switch { - - case version == "1" && block.Hash() == params.MainnetGenesisHash: - log.Info("using Mainnet ENS contract address", "addr", ens.MainNetAddress) - return ens.MainNetAddress, nil - - case version == "3" && block.Hash() == params.TestnetGenesisHash: - log.Info("using Testnet ENS contract address", "addr", ens.TestNetAddress) - return ens.TestNetAddress, nil - - default: - return common.Address{}, fmt.Errorf("unknown version and genesis hash: %s %s", version, block.Hash()) - 
} -} - -/* -Start is called when the stack is started -* starts the network kademlia hive peer management -* (starts netStore level 0 api) -* starts DPA level 1 api (chunking -> store/retrieve requests) -* (starts level 2 api) -* starts http proxy server -* registers url scheme handlers for bzz, etc -* TODO: start subservices like sword, swear, swarmdns -*/ -// implements the node.Service interface -func (self *Swarm) Start(srv *p2p.Server) error { - startTime = time.Now() - connectPeer := func(url string) error { - node, err := discover.ParseNode(url) - if err != nil { - return fmt.Errorf("invalid node URL: %v", err) - } - srv.AddPeer(node) - return nil - } - // set chequebook - if self.swapEnabled { - ctx := context.Background() // The initial setup has no deadline. - err := self.SetChequebook(ctx) - if err != nil { - return fmt.Errorf("Unable to set chequebook for SWAP: %v", err) - } - log.Debug(fmt.Sprintf("-> cheque book for SWAP: %v", self.config.Swap.Chequebook())) - } else { - log.Debug(fmt.Sprintf("SWAP disabled: no cheque book set")) - } - - log.Warn(fmt.Sprintf("Starting Swarm service")) - self.hive.Start( - discover.PubkeyID(&srv.PrivateKey.PublicKey), - func() string { return srv.ListenAddr }, - connectPeer, - ) - log.Info(fmt.Sprintf("Swarm network started on bzz address: %v", self.hive.Addr())) - - self.dpa.Start() - log.Debug(fmt.Sprintf("Swarm DPA started")) - - // start swarm http proxy server - if self.config.Port != "" { - addr := net.JoinHostPort(self.config.ListenAddr, self.config.Port) - go httpapi.StartHttpServer(self.api, &httpapi.ServerConfig{ - Addr: addr, - CorsString: self.corsString, - }) - log.Info(fmt.Sprintf("Swarm http proxy started on %v", addr)) - - if self.corsString != "" { - log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.corsString)) - } - } - - self.periodicallyUpdateGauges() - - startCounter.Inc(1) - return nil -} - -func (self *Swarm) periodicallyUpdateGauges() { - ticker := 
time.NewTicker(updateGaugesPeriod) - - go func() { - for range ticker.C { - self.updateGauges() - } - }() -} - -func (self *Swarm) updateGauges() { - dbSizeGauge.Update(int64(self.lstore.DbCounter())) - cacheSizeGauge.Update(int64(self.lstore.CacheCounter())) - uptimeGauge.Update(time.Since(startTime).Nanoseconds()) -} - -func (self *Swarm) SaveData() { -} - -// implements the node.Service interface -// stops all component services. -func (self *Swarm) Stop() error { - self.dpa.Stop() - err := self.hive.Stop() - if ch := self.config.Swap.Chequebook(); ch != nil { - ch.Stop() - ch.Save() - } - - if self.lstore != nil { - self.lstore.DbStore.Close() - } - self.sfs.Stop() - stopCounter.Inc(1) - return err -} - -// implements the node.Service interface -func (self *Swarm) Protocols() []p2p.Protocol { - proto, err := network.Bzz(self.depo, self.backend, self.hive, self.dbAccess, self.config.Swap, self.config.SyncParams, self.config.NetworkId) - if err != nil { - return nil - } - return []p2p.Protocol{proto} -} - -// implements node.Service -// Apis returns the RPC Api descriptors the Swarm implementation offers -func (self *Swarm) APIs() []rpc.API { - return []rpc.API{ - // public APIs - { - Namespace: "bzz", - Version: "0.1", - Service: &Info{self.config, chequebook.ContractParams}, - Public: true, - }, - // admin APIs - { - Namespace: "bzz", - Version: "0.1", - Service: api.NewControl(self.api, self.hive), - Public: false, - }, - { - Namespace: "chequebook", - Version: chequebook.Version, - Service: chequebook.NewApi(self.config.Swap.Chequebook), - Public: false, - }, - { - Namespace: "swarmfs", - Version: fuse.Swarmfs_Version, - Service: self.sfs, - Public: false, - }, - // storage APIs - // DEPRECATED: Use the HTTP API instead - { - Namespace: "bzz", - Version: "0.1", - Service: api.NewStorage(self.api), - Public: true, - }, - { - Namespace: "bzz", - Version: "0.1", - Service: api.NewFileSystem(self.api), - Public: false, - }, - // {Namespace, Version, 
api.NewAdmin(self), false}, - } -} - -func (self *Swarm) Api() *api.Api { - return self.api -} - -// SetChequebook ensures that the local checquebook is set up on chain. -func (self *Swarm) SetChequebook(ctx context.Context) error { - err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path) - if err != nil { - return err - } - log.Info(fmt.Sprintf("new chequebook set (%v): saving config file, resetting all connections in the hive", self.config.Swap.Contract.Hex())) - self.hive.DropAll() - return nil -} - -// Local swarm without netStore -func NewLocalSwarm(datadir, port string) (self *Swarm, err error) { - - prvKey, err := crypto.GenerateKey() - if err != nil { - return - } - - config := api.NewDefaultConfig() - config.Path = datadir - config.Init(prvKey) - config.Port = port - - dpa, err := storage.NewLocalDPA(datadir) - if err != nil { - return - } - - self = &Swarm{ - api: api.NewApi(dpa, nil), - config: config, - } - - return -} - -// serialisable info about swarm -type Info struct { - *api.Config - *chequebook.Params -} - -func (self *Info) Info() *Info { - return self -} diff --git a/swarm/swarm_test.go b/swarm/swarm_test.go deleted file mode 100644 index 5f22936558ff..000000000000 --- a/swarm/swarm_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package swarm - -import ( - "testing" - - "github.com/XinFinOrg/XDPoSChain/common" -) - -func TestParseEnsAPIAddress(t *testing.T) { - for _, x := range []struct { - description string - value string - tld string - endpoint string - addr common.Address - }{ - { - description: "IPC endpoint", - value: "/data/testnet/geth.ipc", - endpoint: "/data/testnet/geth.ipc", - }, - { - description: "HTTP endpoint", - value: "http://127.0.0.1:1234", - endpoint: "http://127.0.0.1:1234", - }, - { - description: "WS endpoint", - value: "ws://127.0.0.1:1234", - endpoint: "ws://127.0.0.1:1234", - }, - { - description: "IPC Endpoint and TLD", - value: "test:/data/testnet/geth.ipc", - endpoint: "/data/testnet/geth.ipc", - tld: "test", - }, - { - description: "HTTP endpoint and TLD", - value: "test:http://127.0.0.1:1234", - endpoint: "http://127.0.0.1:1234", - tld: "test", - }, - { - description: "WS endpoint and TLD", - value: "test:ws://127.0.0.1:1234", - endpoint: "ws://127.0.0.1:1234", - tld: "test", - }, - { - description: "IPC Endpoint and contract address", - value: "314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc", - endpoint: "/data/testnet/geth.ipc", - addr: common.HexToAddress("314159265dD8dbb310642f98f50C066173C1259b"), - }, - { - description: "HTTP endpoint and contract address", - value: "314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234", - endpoint: "http://127.0.0.1:1234", - addr: common.HexToAddress("314159265dD8dbb310642f98f50C066173C1259b"), - }, - { - description: "WS endpoint and contract address", - value: "314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234", - endpoint: "ws://127.0.0.1:1234", - addr: common.HexToAddress("314159265dD8dbb310642f98f50C066173C1259b"), - }, - { - description: "IPC Endpoint, TLD and contract address", - value: 
"test:314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc", - endpoint: "/data/testnet/geth.ipc", - addr: common.HexToAddress("314159265dD8dbb310642f98f50C066173C1259b"), - tld: "test", - }, - { - description: "HTTP endpoint, TLD and contract address", - value: "eth:314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234", - endpoint: "http://127.0.0.1:1234", - addr: common.HexToAddress("314159265dD8dbb310642f98f50C066173C1259b"), - tld: "eth", - }, - { - description: "WS endpoint, TLD and contract address", - value: "eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234", - endpoint: "ws://127.0.0.1:1234", - addr: common.HexToAddress("314159265dD8dbb310642f98f50C066173C1259b"), - tld: "eth", - }, - } { - t.Run(x.description, func(t *testing.T) { - tld, endpoint, addr := parseEnsAPIAddress(x.value) - if endpoint != x.endpoint { - t.Errorf("expected Endpoint %q, got %q", x.endpoint, endpoint) - } - if addr != x.addr { - t.Errorf("expected ContractAddress %q, got %q", x.addr.String(), addr.String()) - } - if tld != x.tld { - t.Errorf("expected TLD %q, got %q", x.tld, tld) - } - }) - } -} diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go deleted file mode 100644 index 4d99a0457a6a..000000000000 --- a/swarm/testutil/http.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package testutil - -import ( - "net/http/httptest" - "os" - "testing" - - "github.com/XinFinOrg/XDPoSChain/swarm/api" - httpapi "github.com/XinFinOrg/XDPoSChain/swarm/api/http" - "github.com/XinFinOrg/XDPoSChain/swarm/storage" -) - -func NewTestSwarmServer(t *testing.T) *TestSwarmServer { - dir, err := os.MkdirTemp("", "swarm-storage-test") - if err != nil { - t.Fatal(err) - } - storeparams := &storage.StoreParams{ - ChunkDbPath: dir, - DbCapacity: 5000000, - CacheCapacity: 5000, - Radius: 0, - } - localStore, err := storage.NewLocalStore(storage.MakeHashFunc("SHA3"), storeparams) - if err != nil { - os.RemoveAll(dir) - t.Fatal(err) - } - chunker := storage.NewTreeChunker(storage.NewChunkerParams()) - dpa := &storage.DPA{ - Chunker: chunker, - ChunkStore: localStore, - } - dpa.Start() - a := api.NewApi(dpa, nil) - srv := httptest.NewServer(httpapi.NewServer(a)) - return &TestSwarmServer{ - Server: srv, - Dpa: dpa, - dir: dir, - } -} - -type TestSwarmServer struct { - *httptest.Server - - Dpa *storage.DPA - dir string -} - -func (t *TestSwarmServer) Close() { - t.Server.Close() - t.Dpa.Stop() - os.RemoveAll(t.dir) -} diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 684fbf1d66e7..72e9f210809a 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -21,10 +21,12 @@ import ( "bytes" "encoding/hex" "encoding/json" + "errors" "fmt" - "github.com/XinFinOrg/XDPoSChain/core/rawdb" "math/big" + "github.com/XinFinOrg/XDPoSChain/core/rawdb" + "github.com/XinFinOrg/XDPoSChain/common" "github.com/XinFinOrg/XDPoSChain/common/hexutil" "github.com/XinFinOrg/XDPoSChain/common/math" @@ -150,17 +152,18 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis { } } -/* See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II +/* +See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II 
- Whether a block is valid or not is a bit subtle, it's defined by presence of - blockHeader, transactions and uncleHeaders fields. If they are missing, the block is - invalid and we must verify that we do not accept it. + Whether a block is valid or not is a bit subtle, it's defined by presence of + blockHeader, transactions and uncleHeaders fields. If they are missing, the block is + invalid and we must verify that we do not accept it. - Since some tests mix valid and invalid blocks we need to check this for every block. + Since some tests mix valid and invalid blocks we need to check this for every block. - If a block is invalid it does not necessarily fail the test, if it's invalidness is - expected we are expected to ignore it and continue processing and then validate the - post state. + If a block is invalid it does not necessarily fail the test, if it's invalidness is + expected we are expected to ignore it and continue processing and then validate the + post state. */ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error) { validBlocks := make([]btBlock, 0) @@ -185,7 +188,7 @@ func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error) } } if b.BlockHeader == nil { - return nil, fmt.Errorf("Block insertion should have failed") + return nil, errors.New("Block insertion should have failed") } // validate RLP decoding by checking all values against test file JSON diff --git a/tests/init_test.go b/tests/init_test.go index 77a084d95ef7..53fe8b41201c 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -18,6 +18,7 @@ package tests import ( "encoding/json" + "errors" "fmt" "io" "os" @@ -169,7 +170,7 @@ func (tm *testMatcher) checkFailure(t *testing.T, name string, err error) error t.Logf("error: %v", err) return nil } else { - return fmt.Errorf("test succeeded unexpectedly") + return errors.New("test succeeded unexpectedly") } } return err diff --git a/tests/rlp_test_util.go b/tests/rlp_test_util.go index 
9fb53097f38a..9315d8d8ba50 100644 --- a/tests/rlp_test_util.go +++ b/tests/rlp_test_util.go @@ -46,7 +46,7 @@ type RLPTest struct { func (t *RLPTest) Run() error { outb, err := hex.DecodeString(t.Out) if err != nil { - return fmt.Errorf("invalid hex in Out") + return errors.New("invalid hex in Out") } // Handle simple decoding tests with no actual In value. @@ -74,7 +74,7 @@ func checkDecodeInterface(b []byte, isValid bool) error { case isValid && err != nil: return fmt.Errorf("decoding failed: %v", err) case !isValid && err == nil: - return fmt.Errorf("decoding of invalid value succeeded") + return errors.New("decoding of invalid value succeeded") } return nil } diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go index bd516a182721..b9bdffe48552 100644 --- a/tests/vm_test_util.go +++ b/tests/vm_test_util.go @@ -19,6 +19,7 @@ package tests import ( "bytes" "encoding/json" + "errors" "fmt" "math/big" @@ -85,10 +86,10 @@ func (t *VMTest) Run(vmconfig vm.Config) error { if t.json.GasRemaining == nil { if err == nil { - return fmt.Errorf("gas unspecified (indicating an error), but VM returned no error") + return errors.New("gas unspecified (indicating an error), but VM returned no error") } if gasRemaining > 0 { - return fmt.Errorf("gas unspecified (indicating an error), but VM returned gas remaining > 0") + return errors.New("gas unspecified (indicating an error), but VM returned gas remaining > 0") } return nil } diff --git a/trie/proof.go b/trie/proof.go index fff4142401ee..0db68c103c77 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -395,11 +395,11 @@ func hasRightElement(node Node, key []byte) bool { // Expect the normal case, this function can also be used to verify the following // range proofs(note this function doesn't accept zero element proof): // -// - All elements proof. In this case the left and right proof can be nil, but the -// range should be all the leaves in the trie. +// - All elements proof. 
In this case the left and right proof can be nil, but the +// range should be all the leaves in the trie. // -// - One element proof. In this case no matter the left edge proof is a non-existent -// proof or not, we can always verify the correctness of the proof. +// - One element proof. In this case no matter the left edge proof is a non-existent +// proof or not, we can always verify the correctness of the proof. // // Except returning the error to indicate the proof is valid or not, the function will // also return a flag to indicate whether there exists more accounts/slots in the trie. @@ -439,7 +439,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu return err, false } if !bytes.Equal(val, values[0]) { - return fmt.Errorf("correct proof but invalid data"), false + return errors.New("correct proof but invalid data"), false } return nil, hasRightElement(root, keys[0]) } diff --git a/trie/trie_test.go b/trie/trie_test.go index 89d47bf7dcac..202fc76d809e 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -19,6 +19,7 @@ package trie import ( "bytes" "encoding/binary" + "errors" "fmt" "math/big" "math/rand" @@ -446,7 +447,7 @@ func runRandTest(rt randTest) bool { checktr.Update(it.Key, it.Value) } if tr.Hash() != checktr.Hash() { - rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash") + rt[i].err = errors.New("hash mismatch in opItercheckhash") } } // Abort the test on error. 
diff --git a/whisper/whisperv5/api.go b/whisper/whisperv5/api.go index 37c04e70aada..b28ea5075d5d 100644 --- a/whisper/whisperv5/api.go +++ b/whisper/whisperv5/api.go @@ -471,7 +471,7 @@ func (api *PublicWhisperAPI) GetFilterMessages(id string) ([]*Message, error) { f := api.w.GetFilter(id) if f == nil { api.mu.Unlock() - return nil, fmt.Errorf("filter not found") + return nil, errors.New("filter not found") } api.lastUsed[id] = time.Now() api.mu.Unlock() diff --git a/whisper/whisperv5/filter.go b/whisper/whisperv5/filter.go index d91e9747bf6d..5c64d46910d1 100644 --- a/whisper/whisperv5/filter.go +++ b/whisper/whisperv5/filter.go @@ -18,6 +18,7 @@ package whisperv5 import ( "crypto/ecdsa" + "errors" "fmt" "sync" @@ -66,7 +67,7 @@ func (fs *Filters) Install(watcher *Filter) (string, error) { defer fs.mutex.Unlock() if fs.watchers[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } if watcher.expectsSymmetricEncryption() { diff --git a/whisper/whisperv5/whisper.go b/whisper/whisperv5/whisper.go index 66901034f6ec..9bbb1cae01d0 100644 --- a/whisper/whisperv5/whisper.go +++ b/whisper/whisperv5/whisper.go @@ -247,7 +247,7 @@ func (w *Whisper) NewKeyPair() (string, error) { return "", err } if !validatePrivateKey(key) { - return "", fmt.Errorf("failed to generate valid key") + return "", errors.New("failed to generate valid key") } id, err := GenerateRandomID() @@ -259,7 +259,7 @@ func (w *Whisper) NewKeyPair() (string, error) { defer w.keyMu.Unlock() if w.privateKeys[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } w.privateKeys[id] = key return id, nil @@ -305,7 +305,7 @@ func (w *Whisper) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) { defer w.keyMu.RUnlock() key := w.privateKeys[id] if key == nil { - return nil, fmt.Errorf("invalid id") + return nil, errors.New("invalid id") } return key, nil } @@ -318,7 
+318,7 @@ func (w *Whisper) GenerateSymKey() (string, error) { if err != nil { return "", err } else if !validateSymmetricKey(key) { - return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data") + return "", errors.New("error in GenerateSymKey: crypto/rand failed to generate random data") } id, err := GenerateRandomID() @@ -330,7 +330,7 @@ func (w *Whisper) GenerateSymKey() (string, error) { defer w.keyMu.Unlock() if w.symKeys[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } w.symKeys[id] = key return id, nil @@ -351,7 +351,7 @@ func (w *Whisper) AddSymKeyDirect(key []byte) (string, error) { defer w.keyMu.Unlock() if w.symKeys[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } w.symKeys[id] = key return id, nil @@ -364,7 +364,7 @@ func (w *Whisper) AddSymKeyFromPassword(password string) (string, error) { return "", fmt.Errorf("failed to generate ID: %s", err) } if w.HasSymKey(id) { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } derived, err := deriveKeyMaterial([]byte(password), EnvelopeVersion) @@ -377,7 +377,7 @@ func (w *Whisper) AddSymKeyFromPassword(password string) (string, error) { // double check is necessary, because deriveKeyMaterial() is very slow if w.symKeys[id] != nil { - return "", fmt.Errorf("critical error: failed to generate unique ID") + return "", errors.New("critical error: failed to generate unique ID") } w.symKeys[id] = derived return id, nil @@ -409,7 +409,7 @@ func (w *Whisper) GetSymKey(id string) ([]byte, error) { if w.symKeys[id] != nil { return w.symKeys[id], nil } - return nil, fmt.Errorf("non-existent key ID") + return nil, errors.New("non-existent key ID") } // Subscribe installs a new message handler used for filtering, decrypting @@ -427,7 +427,7 @@ func (w *Whisper) GetFilter(id 
string) *Filter { func (w *Whisper) Unsubscribe(id string) error { ok := w.filters.Uninstall(id) if !ok { - return fmt.Errorf("Unsubscribe: Invalid ID") + return errors.New("Unsubscribe: Invalid ID") } return nil } @@ -440,7 +440,7 @@ func (w *Whisper) Send(envelope *Envelope) error { return err } if !ok { - return fmt.Errorf("failed to add envelope") + return errors.New("failed to add envelope") } return err } @@ -576,7 +576,7 @@ func (wh *Whisper) add(envelope *Envelope) (bool, error) { if envelope.Expiry < now { if envelope.Expiry+SynchAllowance*2 < now { - return false, fmt.Errorf("very old message") + return false, errors.New("very old message") } else { log.Debug("expired envelope dropped", "hash", envelope.Hash().Hex()) return false, nil // drop envelope without error @@ -851,7 +851,7 @@ func GenerateRandomID() (id string, err error) { return "", err } if !validateSymmetricKey(buf) { - return "", fmt.Errorf("error in generateRandomID: crypto/rand failed to generate random data") + return "", errors.New("error in generateRandomID: crypto/rand failed to generate random data") } id = common.Bytes2Hex(buf) return id, err diff --git a/whisper/whisperv6/api.go b/whisper/whisperv6/api.go index 0ea7e0fc524b..8711d6b19538 100644 --- a/whisper/whisperv6/api.go +++ b/whisper/whisperv6/api.go @@ -490,7 +490,7 @@ func (api *PublicWhisperAPI) GetFilterMessages(id string) ([]*Message, error) { f := api.w.GetFilter(id) if f == nil { api.mu.Unlock() - return nil, fmt.Errorf("filter not found") + return nil, errors.New("filter not found") } api.lastUsed[id] = time.Now() api.mu.Unlock() diff --git a/whisper/whisperv6/filter.go b/whisper/whisperv6/filter.go index aae1de848001..801954c7c309 100644 --- a/whisper/whisperv6/filter.go +++ b/whisper/whisperv6/filter.go @@ -18,6 +18,7 @@ package whisperv6 import ( "crypto/ecdsa" + "errors" "fmt" "sync" @@ -65,7 +66,7 @@ func NewFilters(w *Whisper) *Filters { // Install will add a new filter to the filter collection func (fs *Filters) 
Install(watcher *Filter) (string, error) { if watcher.KeySym != nil && watcher.KeyAsym != nil { - return "", fmt.Errorf("filters must choose between symmetric and asymmetric keys") + return "", errors.New("filters must choose between symmetric and asymmetric keys") } if watcher.Messages == nil { @@ -81,7 +82,7 @@ func (fs *Filters) Install(watcher *Filter) (string, error) { defer fs.mutex.Unlock() if fs.watchers[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } if watcher.expectsSymmetricEncryption() { diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go index 53cf11a90973..0b83c89d13d0 100644 --- a/whisper/whisperv6/whisper.go +++ b/whisper/whisperv6/whisper.go @@ -379,7 +379,7 @@ func (whisper *Whisper) NewKeyPair() (string, error) { return "", err } if !validatePrivateKey(key) { - return "", fmt.Errorf("failed to generate valid key") + return "", errors.New("failed to generate valid key") } id, err := GenerateRandomID() @@ -391,7 +391,7 @@ func (whisper *Whisper) NewKeyPair() (string, error) { defer whisper.keyMu.Unlock() if whisper.privateKeys[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } whisper.privateKeys[id] = key return id, nil @@ -437,7 +437,7 @@ func (whisper *Whisper) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) { defer whisper.keyMu.RUnlock() key := whisper.privateKeys[id] if key == nil { - return nil, fmt.Errorf("invalid id") + return nil, errors.New("invalid id") } return key, nil } @@ -449,7 +449,7 @@ func (whisper *Whisper) GenerateSymKey() (string, error) { if err != nil { return "", err } else if !validateDataIntegrity(key, aesKeyLength) { - return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data") + return "", errors.New("error in GenerateSymKey: crypto/rand failed to generate random data") } id, err := GenerateRandomID() @@ 
-461,7 +461,7 @@ func (whisper *Whisper) GenerateSymKey() (string, error) { defer whisper.keyMu.Unlock() if whisper.symKeys[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } whisper.symKeys[id] = key return id, nil @@ -482,7 +482,7 @@ func (whisper *Whisper) AddSymKeyDirect(key []byte) (string, error) { defer whisper.keyMu.Unlock() if whisper.symKeys[id] != nil { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } whisper.symKeys[id] = key return id, nil @@ -495,7 +495,7 @@ func (whisper *Whisper) AddSymKeyFromPassword(password string) (string, error) { return "", fmt.Errorf("failed to generate ID: %s", err) } if whisper.HasSymKey(id) { - return "", fmt.Errorf("failed to generate unique ID") + return "", errors.New("failed to generate unique ID") } // kdf should run no less than 0.1 seconds on an average computer, @@ -510,7 +510,7 @@ func (whisper *Whisper) AddSymKeyFromPassword(password string) (string, error) { // double check is necessary, because deriveKeyMaterial() is very slow if whisper.symKeys[id] != nil { - return "", fmt.Errorf("critical error: failed to generate unique ID") + return "", errors.New("critical error: failed to generate unique ID") } whisper.symKeys[id] = derived return id, nil @@ -542,7 +542,7 @@ func (whisper *Whisper) GetSymKey(id string) ([]byte, error) { if whisper.symKeys[id] != nil { return whisper.symKeys[id], nil } - return nil, fmt.Errorf("non-existent key ID") + return nil, errors.New("non-existent key ID") } // Subscribe installs a new message handler used for filtering, decrypting @@ -581,7 +581,7 @@ func (whisper *Whisper) GetFilter(id string) *Filter { func (whisper *Whisper) Unsubscribe(id string) error { ok := whisper.filters.Uninstall(id) if !ok { - return fmt.Errorf("Unsubscribe: Invalid ID") + return errors.New("Unsubscribe: Invalid ID") } return nil } @@ -591,7 +591,7 @@ func (whisper 
*Whisper) Unsubscribe(id string) error { func (whisper *Whisper) Send(envelope *Envelope) error { ok, err := whisper.add(envelope, false) if err == nil && !ok { - return fmt.Errorf("failed to add envelope") + return errors.New("failed to add envelope") } return err } @@ -762,7 +762,7 @@ func (whisper *Whisper) add(envelope *Envelope, isP2P bool) (bool, error) { if envelope.Expiry < now { if envelope.Expiry+DefaultSyncAllowance*2 < now { - return false, fmt.Errorf("very old message") + return false, errors.New("very old message") } log.Debug("expired envelope dropped", "hash", envelope.Hash().Hex()) return false, nil // drop envelope without error @@ -1009,7 +1009,7 @@ func GenerateRandomID() (id string, err error) { return "", err } if !validateDataIntegrity(buf, keyIDSize) { - return "", fmt.Errorf("error in generateRandomID: crypto/rand failed to generate random data") + return "", errors.New("error in generateRandomID: crypto/rand failed to generate random data") } id = common.Bytes2Hex(buf) return id, err