
Commit

fix the most annoying bug ever
whyrusleeping committed Jul 31, 2019
1 parent 0f2334f commit a8b434a
Showing 9 changed files with 163 additions and 273 deletions.
4 changes: 1 addition & 3 deletions api/api.go
@@ -81,6 +81,7 @@ type FullNode interface {

MpoolPending(context.Context, *types.TipSet) ([]*types.SignedMessage, error)
MpoolPush(context.Context, *types.SignedMessage) error
+ MpoolGetNonce(context.Context, address.Address) (uint64, error)

// FullNodeStruct

@@ -99,9 +100,6 @@ type FullNode interface {
WalletSign(context.Context, address.Address, []byte) (*types.Signature, error)
WalletDefaultAddress(context.Context) (address.Address, error)

- // Really not sure where this belongs. It could go on the wallet, or the message pool, or the chain...
- MpoolGetNonce(context.Context, address.Address) (uint64, error)

// Other

// ClientImport imports file under the specified path into filestore
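The api.go change above only relocates MpoolGetNonce so it sits with the other mpool methods. For context, a minimal sketch of how a caller might pair it with MpoolPush when preparing a message; the api import path, the node handle, and the sign callback are assumptions for illustration, not part of this commit.

package example

import (
    "context"

    "github.com/filecoin-project/go-lotus/api"
    "github.com/filecoin-project/go-lotus/chain/address"
    "github.com/filecoin-project/go-lotus/chain/types"
)

// pushWithNextNonce asks the message pool for the next nonce for "from",
// lets the caller sign a message carrying that nonce, and pushes the result.
func pushWithNextNonce(ctx context.Context, node api.FullNode, from address.Address,
    sign func(nonce uint64) (*types.SignedMessage, error)) error {

    nonce, err := node.MpoolGetNonce(ctx, from)
    if err != nil {
        return err
    }

    smsg, err := sign(nonce)
    if err != nil {
        return err
    }

    return node.MpoolPush(ctx, smsg)
}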
3 changes: 0 additions & 3 deletions chain/blocksync.go
@@ -129,19 +129,16 @@ func (bss *BlockSyncService) collectChainSegment(start []cid.Cid, length uint64,
}

if opts.IncludeMessages {
- log.Info("INCLUDING MESSAGES IN SYNC RESPONSE")
msgs, mincl, err := bss.gatherMessages(ts)
if err != nil {
return nil, err
}
- log.Infof("messages: ", msgs)

bst.Messages = msgs
bst.MsgIncludes = mincl
}

if opts.IncludeBlocks {
- log.Info("INCLUDING BLOCKS IN SYNC RESPONSE")
bst.Blocks = ts.Blocks()
}

2 changes: 1 addition & 1 deletion chain/gen/gen.go
@@ -57,7 +57,7 @@ func (m mybs) Get(c cid.Cid) (block.Block, error) {
b, err := m.Blockstore.Get(c)
if err != nil {
// change to error for stacktraces, don't commit with that pls
- log.Warn("Get failed: %s %s", c, err)
+ log.Warnf("Get failed: %s %s", c, err)
return nil, err
}

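The gen.go change swaps log.Warn for log.Warnf so the format verbs are actually applied. Assuming the logger follows the usual Go split between Sprint-style and Sprintf-style calls, the difference is the same as fmt.Sprint versus fmt.Sprintf, sketched below with made-up values.

package main

import "fmt"

func main() {
    c := "bafyexamplecid"                // hypothetical CID string
    err := fmt.Errorf("block not found") // hypothetical error

    // Sprint-style call (what a non-formatting Warn sees): the verbs stay
    // literal and the arguments are simply appended.
    fmt.Println(fmt.Sprint("Get failed: %s %s", c, err))

    // Sprintf-style call (what Warnf does): the verbs are substituted.
    fmt.Println(fmt.Sprintf("Get failed: %s %s", c, err))
}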
5 changes: 3 additions & 2 deletions chain/messagepool.go
@@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-lotus/chain/address"
"github.com/filecoin-project/go-lotus/chain/store"
"github.com/filecoin-project/go-lotus/chain/types"
+ "github.com/pkg/errors"
)

type MessagePool struct {
@@ -128,7 +129,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
for _, b := range ts.Blocks() {
msgs, err := mp.cs.MessagesForBlock(b)
if err != nil {
- return err
+ return errors.Wrapf(err, "failed to get messages for revert block %s(height %d)", b.Cid(), b.Height)
}
for _, msg := range msgs {
if err := mp.Add(msg); err != nil {
@@ -142,7 +143,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
for _, b := range ts.Blocks() {
msgs, err := mp.cs.MessagesForBlock(b)
if err != nil {
- return err
+ return errors.Wrapf(err, "failed to get messages for apply block %s(height %d) (msgroot = %s)", b.Cid(), b.Height, b.Messages)
}
for _, msg := range msgs {
mp.Remove(msg)
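The messagepool.go change imports github.com/pkg/errors so the bare "return err" sites report which block failed. A small self-contained sketch of that wrapping pattern; the failing helper and its values are invented for illustration.

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

// messagesForBlock stands in for a store lookup that can fail.
func messagesForBlock(blk string) error {
    return fmt.Errorf("ipld: not found")
}

func main() {
    if err := messagesForBlock("bafyexampleblock"); err != nil {
        wrapped := errors.Wrapf(err, "failed to get messages for block %s", "bafyexampleblock")

        // Prints "failed to get messages for block bafyexampleblock: ipld: not found".
        fmt.Println(wrapped)

        // The original cause stays recoverable for callers that inspect it.
        fmt.Println(errors.Cause(wrapped))
    }
}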
27 changes: 20 additions & 7 deletions chain/store/store.go
@@ -187,9 +187,12 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if cs.heaviest == nil || cs.Weight(ts) > cs.Weight(cs.heaviest) {
+ // TODO: don't do this for initial sync. Now that we don't have a
+ // difference between 'bootstrap sync' and 'caught up' sync, we need
+ // some other heuristic.
revert, apply, err := cs.ReorgOps(cs.heaviest, ts)
if err != nil {
- return err
+ return errors.Wrap(err, "computing reorg ops failed")
}
for _, hcf := range cs.headChangeNotifs {
if err := hcf(revert, apply); err != nil {
@@ -290,6 +293,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
rightChain = append(rightChain, right)
par, err := cs.LoadTipSet(right.Parents())
if err != nil {
+ log.Infof("failed to fetch right.Parents: %s", err)
return nil, nil, err
}

@@ -336,13 +340,21 @@ type storable interface {
ToStorageBlock() (block.Block, error)
}

- func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) {
- sb, err := m.ToStorageBlock()
+ func PutMessage(bs blockstore.Blockstore, m storable) (cid.Cid, error) {
+ b, err := m.ToStorageBlock()
if err != nil {
return cid.Undef, err
}

- return sb.Cid(), cs.bs.Put(sb)
+ if err := bs.Put(b); err != nil {
+ return cid.Undef, err
+ }
+
+ return b.Cid(), nil
}
+
+ func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) {
+ return PutMessage(cs.bs, m)
+ }

func (cs *ChainStore) AddBlock(b *types.BlockHeader) error {
@@ -395,6 +407,7 @@ func (cs *ChainStore) TipSetState(cids []cid.Cid) (cid.Cid, error) {
func (cs *ChainStore) GetMessage(c cid.Cid) (*types.SignedMessage, error) {
sb, err := cs.bs.Get(c)
if err != nil {
+ log.Errorf("get message get failed: %s: %s", c, err)
return nil, err
}

@@ -428,7 +441,7 @@ func (cs *ChainStore) MessageCidsForBlock(b *types.BlockHeader) ([]cid.Cid, erro
func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.SignedMessage, error) {
cids, err := cs.MessageCidsForBlock(b)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "loading message cids for block")
}

return cs.LoadMessagesFromCids(cids)
@@ -461,10 +474,10 @@ func (cs *ChainStore) GetReceipt(b *types.BlockHeader, i int) (*types.MessageRec

func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) {
msgs := make([]*types.SignedMessage, 0, len(cids))
- for _, c := range cids {
+ for i, c := range cids {
m, err := cs.GetMessage(c)
if err != nil {
- return nil, err
+ return nil, errors.Wrapf(err, "failed to get message: (%s):%d", c, i)
}

msgs = append(msgs, m)
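The store.go refactor extracts a package-level PutMessage so code that only holds a blockstore can persist a message, with the ChainStore method now delegating to it. A minimal sketch of calling it from another package; the blockstore import path and the assumption that *types.SignedMessage satisfies the storable interface are mine, not stated by the diff.

package example

import (
    "github.com/filecoin-project/go-lotus/chain/store"
    "github.com/filecoin-project/go-lotus/chain/types"

    cid "github.com/ipfs/go-cid"
    blockstore "github.com/ipfs/go-ipfs-blockstore"
)

// persistMessage writes a signed message into any blockstore and returns its
// CID, without needing a ChainStore.
func persistMessage(bs blockstore.Blockstore, smsg *types.SignedMessage) (cid.Cid, error) {
    return store.PutMessage(bs, smsg)
}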
