diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6c18cfbd1..05f7d5063 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1,29 +1,31 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: abci/types/types.proto -package types +package types // import "github.com/tendermint/tendermint/abci/types" + +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/golang/protobuf/ptypes/duration" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import merkle "github.com/tendermint/tendermint/crypto/merkle" +import kv "github.com/tendermint/tendermint/libs/kv" + +import time "time" + +import bytes "bytes" import ( - bytes "bytes" - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - golang_proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" - merkle "github.com/tendermint/tendermint/crypto/merkle" - kv "github.com/tendermint/tendermint/libs/kv" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - time "time" ) +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import io "io" + // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = golang_proto.Marshal @@ -35,7 +37,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type CheckTxType int32 @@ -48,7 +50,6 @@ var CheckTxType_name = map[int32]string{ 0: "New", 1: "Recheck", } - var CheckTxType_value = map[string]int32{ "New": 0, "Recheck": 1, @@ -57,9 +58,8 @@ var CheckTxType_value = map[string]int32{ func (x CheckTxType) String() string { return proto.EnumName(CheckTxType_name, int32(x)) } - func (CheckTxType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{0} + return fileDescriptor_types_45ae25b95d0ccb51, []int{0} } type Request struct { @@ -85,7 +85,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{0} + return fileDescriptor_types_45ae25b95d0ccb51, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -95,15 +95,15 @@ func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Request.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(m, src) +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) } func (m *Request) XXX_Size() int { return m.Size() @@ -122,37 +122,37 @@ type isRequest_Value interface { } type Request_Echo struct { - Echo *RequestEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` + Echo *RequestEcho `protobuf:"bytes,2,opt,name=echo,oneof"` } type Request_Flush struct { - Flush *RequestFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` + Flush *RequestFlush `protobuf:"bytes,3,opt,name=flush,oneof"` } type Request_Info struct { - Info *RequestInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` + Info *RequestInfo `protobuf:"bytes,4,opt,name=info,oneof"` } type Request_SetOption struct { - SetOption *RequestSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,proto3,oneof" json:"set_option,omitempty"` + SetOption *RequestSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,oneof"` } type Request_InitChain struct { - InitChain *RequestInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` + InitChain *RequestInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,oneof"` } type Request_Query struct { - Query *RequestQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` + Query *RequestQuery `protobuf:"bytes,7,opt,name=query,oneof"` } type Request_BeginBlock struct { - BeginBlock *RequestBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` + BeginBlock *RequestBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,oneof"` } type Request_CheckTx struct { - CheckTx *RequestCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` + CheckTx *RequestCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,oneof"` } type Request_DeliverTx struct { - DeliverTx *RequestDeliverTx `protobuf:"bytes,19,opt,name=deliver_tx,json=deliverTx,proto3,oneof" 
json:"deliver_tx,omitempty"` + DeliverTx *RequestDeliverTx `protobuf:"bytes,19,opt,name=deliver_tx,json=deliverTx,oneof"` } type Request_EndBlock struct { - EndBlock *RequestEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` + EndBlock *RequestEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,oneof"` } type Request_Commit struct { - Commit *RequestCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` + Commit *RequestCommit `protobuf:"bytes,12,opt,name=commit,oneof"` } func (*Request_Echo) isRequest_Value() {} @@ -251,9 +251,9 @@ func (m *Request) GetCommit() *RequestCommit { return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Request) XXX_OneofWrappers() []interface{} { - return []interface{}{ +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Request) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Request_OneofMarshaler, _Request_OneofUnmarshaler, _Request_OneofSizer, []interface{}{ (*Request_Echo)(nil), (*Request_Flush)(nil), (*Request_Info)(nil), @@ -268,6 +268,234 @@ func (*Request) XXX_OneofWrappers() []interface{} { } } +func _Request_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Request) + // value + switch x := m.Value.(type) { + case *Request_Echo: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Echo); err != nil { + return err + } + case *Request_Flush: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Flush); err != nil { + return err + } + case *Request_Info: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Info); err != nil { + return err + } + case *Request_SetOption: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetOption); err != nil { + return err + } + case *Request_InitChain: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitChain); err != nil { + return err + } + case *Request_Query: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *Request_BeginBlock: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BeginBlock); err != nil { + return err + } + case *Request_CheckTx: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CheckTx); err != nil { + return err + } + case *Request_DeliverTx: + _ = b.EncodeVarint(19<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeliverTx); err != nil { + return err + } + case *Request_EndBlock: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndBlock); err != nil { + return err + } + case *Request_Commit: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Commit); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Request.Value has unexpected type %T", x) + } + return nil +} + +func _Request_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Request) + switch tag { + case 2: // value.echo + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestEcho) + err := b.DecodeMessage(msg) + m.Value = &Request_Echo{msg} + return true, err + case 3: // value.flush + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(RequestFlush) + err := b.DecodeMessage(msg) + m.Value = &Request_Flush{msg} + return true, err + case 4: // value.info + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestInfo) + err := b.DecodeMessage(msg) + m.Value = &Request_Info{msg} + return true, err + case 5: // value.set_option + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestSetOption) + err := b.DecodeMessage(msg) + m.Value = &Request_SetOption{msg} + return true, err + case 6: // value.init_chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestInitChain) + err := b.DecodeMessage(msg) + m.Value = &Request_InitChain{msg} + return true, err + case 7: // value.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestQuery) + err := b.DecodeMessage(msg) + m.Value = &Request_Query{msg} + return true, err + case 8: // value.begin_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestBeginBlock) + err := b.DecodeMessage(msg) + m.Value = &Request_BeginBlock{msg} + return true, err + case 9: // value.check_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestCheckTx) + err := b.DecodeMessage(msg) + m.Value = &Request_CheckTx{msg} + return true, err + case 19: // value.deliver_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestDeliverTx) + err := b.DecodeMessage(msg) + m.Value = &Request_DeliverTx{msg} + return true, err + case 11: // value.end_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestEndBlock) + err := b.DecodeMessage(msg) + m.Value = &Request_EndBlock{msg} + return true, err + case 12: // value.commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RequestCommit) + err := b.DecodeMessage(msg) + m.Value = &Request_Commit{msg} + return true, err + default: + return false, nil + } +} + +func _Request_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Request) + // value + switch x := m.Value.(type) { + case *Request_Echo: + s := proto.Size(x.Echo) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Flush: + s := proto.Size(x.Flush) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Info: + s := proto.Size(x.Info) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_SetOption: + s := proto.Size(x.SetOption) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_InitChain: + s := proto.Size(x.InitChain) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Query: + s := proto.Size(x.Query) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_BeginBlock: + s := proto.Size(x.BeginBlock) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_CheckTx: + s := proto.Size(x.CheckTx) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_DeliverTx: + s := proto.Size(x.DeliverTx) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_EndBlock: + s := proto.Size(x.EndBlock) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Request_Commit: + s := 
proto.Size(x.Commit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type RequestEcho struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -279,7 +507,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{1} + return fileDescriptor_types_45ae25b95d0ccb51, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -289,15 +517,15 @@ func (m *RequestEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_RequestEcho.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestEcho) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEcho.Merge(m, src) +func (dst *RequestEcho) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestEcho.Merge(dst, src) } func (m *RequestEcho) XXX_Size() int { return m.Size() @@ -325,7 +553,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{2} + return fileDescriptor_types_45ae25b95d0ccb51, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -335,15 +563,15 @@ func (m *RequestFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_RequestFlush.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestFlush) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestFlush.Merge(m, src) +func (dst *RequestFlush) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestFlush.Merge(dst, src) } func (m *RequestFlush) XXX_Size() int { return m.Size() @@ -367,7 +595,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{3} + return fileDescriptor_types_45ae25b95d0ccb51, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -377,15 +605,15 @@ func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestInfo.Merge(m, src) +func (dst *RequestInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInfo.Merge(dst, src) } func (m *RequestInfo) XXX_Size() int { return m.Size() @@ -430,7 +658,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func 
(*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{4} + return fileDescriptor_types_45ae25b95d0ccb51, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,15 +668,15 @@ func (m *RequestSetOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_RequestSetOption.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestSetOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestSetOption.Merge(m, src) +func (dst *RequestSetOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestSetOption.Merge(dst, src) } func (m *RequestSetOption) XXX_Size() int { return m.Size() @@ -474,10 +702,10 @@ func (m *RequestSetOption) GetValue() string { } type RequestInitChain struct { - Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + Time time.Time `protobuf:"bytes,1,opt,name=time,stdtime" json:"time"` ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators" json:"validators"` AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -488,7 +716,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{5} + return fileDescriptor_types_45ae25b95d0ccb51, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,15 +726,15 @@ func (m *RequestInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_RequestInitChain.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestInitChain) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestInitChain.Merge(m, src) +func (dst *RequestInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInitChain.Merge(dst, src) } func (m *RequestInitChain) XXX_Size() int { return m.Size() @@ -566,7 +794,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{6} + return fileDescriptor_types_45ae25b95d0ccb51, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -576,15 +804,15 @@ func (m *RequestQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_RequestQuery.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestQuery.Merge(m, src) +func (dst *RequestQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestQuery.Merge(dst, src) } func (m *RequestQuery) XXX_Size() int { return m.Size() @@ -625,9 +853,9 @@ func (m *RequestQuery) GetProve() bool { type RequestBeginBlock struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Header Header `protobuf:"bytes,2,opt,name=header" json:"header"` + LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo" json:"last_commit_info"` + ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators" json:"byzantine_validators"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -637,7 +865,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{7} + return fileDescriptor_types_45ae25b95d0ccb51, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -647,15 +875,15 @@ func (m *RequestBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_RequestBeginBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBeginBlock.Merge(m, src) +func (dst *RequestBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBeginBlock.Merge(dst, src) } func (m *RequestBeginBlock) XXX_Size() int { return m.Size() @@ -706,7 +934,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{8} + return fileDescriptor_types_45ae25b95d0ccb51, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -716,15 +944,15 @@ func (m *RequestCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_RequestCheckTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestCheckTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestCheckTx.Merge(m, src) +func (dst *RequestCheckTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestCheckTx.Merge(dst, src) } func (m *RequestCheckTx) XXX_Size() int { return m.Size() @@ -760,7 +988,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return 
proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{9} + return fileDescriptor_types_45ae25b95d0ccb51, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -770,15 +998,15 @@ func (m *RequestDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_RequestDeliverTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestDeliverTx.Merge(m, src) +func (dst *RequestDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestDeliverTx.Merge(dst, src) } func (m *RequestDeliverTx) XXX_Size() int { return m.Size() @@ -807,7 +1035,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{10} + return fileDescriptor_types_45ae25b95d0ccb51, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -817,15 +1045,15 @@ func (m *RequestEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_RequestEndBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestEndBlock.Merge(m, src) +func (dst *RequestEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestEndBlock.Merge(dst, src) } func (m *RequestEndBlock) XXX_Size() int { return m.Size() @@ -853,7 +1081,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{11} + return fileDescriptor_types_45ae25b95d0ccb51, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,15 +1091,15 @@ func (m *RequestCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_RequestCommit.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestCommit.Merge(m, src) +func (dst *RequestCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestCommit.Merge(dst, src) } func (m *RequestCommit) XXX_Size() int { return m.Size() @@ -906,7 +1134,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{12} + return fileDescriptor_types_45ae25b95d0ccb51, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -916,15 +1144,15 @@ func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Response.Marshal(b, m, 
deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(m, src) +func (dst *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(dst, src) } func (m *Response) XXX_Size() int { return m.Size() @@ -943,40 +1171,40 @@ type isResponse_Value interface { } type Response_Exception struct { - Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` + Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,oneof"` } type Response_Echo struct { - Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` + Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,oneof"` } type Response_Flush struct { - Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` + Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,oneof"` } type Response_Info struct { - Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` + Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,oneof"` } type Response_SetOption struct { - SetOption *ResponseSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,proto3,oneof" json:"set_option,omitempty"` + SetOption *ResponseSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,oneof"` } type Response_InitChain struct { - InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` + InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,oneof"` } type Response_Query struct { - Query *ResponseQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` + Query *ResponseQuery `protobuf:"bytes,7,opt,name=query,oneof"` } type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` + BeginBlock *ResponseBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,oneof"` } type Response_CheckTx struct { - CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` + CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,oneof"` } type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,10,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` + DeliverTx *ResponseDeliverTx `protobuf:"bytes,10,opt,name=deliver_tx,json=deliverTx,oneof"` } type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` + EndBlock *ResponseEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,oneof"` } type Response_Commit struct { - Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` + Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,oneof"` } func (*Response_Exception) isResponse_Value() {} @@ -1083,9 +1311,9 @@ func (m *Response) GetCommit() *ResponseCommit { return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Response) XXX_OneofWrappers() []interface{} { - return []interface{}{ +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Response) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Response_OneofMarshaler, _Response_OneofUnmarshaler, _Response_OneofSizer, []interface{}{ (*Response_Exception)(nil), (*Response_Echo)(nil), (*Response_Flush)(nil), @@ -1101,6 +1329,252 @@ func (*Response) XXX_OneofWrappers() []interface{} { } } +func _Response_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Response) + // value + switch x := m.Value.(type) { + case *Response_Exception: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Exception); err != nil { + return err + } + case *Response_Echo: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Echo); err != nil { + return err + } + case *Response_Flush: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Flush); err != nil { + return err + } + case *Response_Info: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Info); err != nil { + return err + } + case *Response_SetOption: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetOption); err != nil { + return err + } + case *Response_InitChain: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitChain); err != nil { + return err + } + case *Response_Query: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *Response_BeginBlock: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BeginBlock); err != nil { + return err + } + case *Response_CheckTx: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CheckTx); err != nil { + return err + } + case *Response_DeliverTx: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeliverTx); err != nil { + return err + } + case *Response_EndBlock: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndBlock); err != nil { + return err + } + case *Response_Commit: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Commit); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Response.Value has unexpected type %T", x) + } + return nil +} + +func _Response_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Response) + switch tag { + case 1: // value.exception + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseException) + err := b.DecodeMessage(msg) + m.Value = &Response_Exception{msg} + return true, err + case 2: // value.echo + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseEcho) + err := b.DecodeMessage(msg) + m.Value = &Response_Echo{msg} + return true, err + case 3: // value.flush + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseFlush) + err := b.DecodeMessage(msg) + m.Value = &Response_Flush{msg} + return true, err + case 4: // value.info + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseInfo) + err := b.DecodeMessage(msg) + m.Value = &Response_Info{msg} + return true, err + case 5: // value.set_option + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseSetOption) + err 
:= b.DecodeMessage(msg) + m.Value = &Response_SetOption{msg} + return true, err + case 6: // value.init_chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseInitChain) + err := b.DecodeMessage(msg) + m.Value = &Response_InitChain{msg} + return true, err + case 7: // value.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseQuery) + err := b.DecodeMessage(msg) + m.Value = &Response_Query{msg} + return true, err + case 8: // value.begin_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseBeginBlock) + err := b.DecodeMessage(msg) + m.Value = &Response_BeginBlock{msg} + return true, err + case 9: // value.check_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseCheckTx) + err := b.DecodeMessage(msg) + m.Value = &Response_CheckTx{msg} + return true, err + case 10: // value.deliver_tx + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseDeliverTx) + err := b.DecodeMessage(msg) + m.Value = &Response_DeliverTx{msg} + return true, err + case 11: // value.end_block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseEndBlock) + err := b.DecodeMessage(msg) + m.Value = &Response_EndBlock{msg} + return true, err + case 12: // value.commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResponseCommit) + err := b.DecodeMessage(msg) + m.Value = &Response_Commit{msg} + return true, err + default: + return false, nil + } +} + +func _Response_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Response) + // value + switch x := m.Value.(type) { + case *Response_Exception: + s := proto.Size(x.Exception) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Echo: + s := proto.Size(x.Echo) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Flush: + s := proto.Size(x.Flush) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Info: + s := proto.Size(x.Info) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_SetOption: + s := proto.Size(x.SetOption) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_InitChain: + s := proto.Size(x.InitChain) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Query: + s := proto.Size(x.Query) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_BeginBlock: + s := proto.Size(x.BeginBlock) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_CheckTx: + s := proto.Size(x.CheckTx) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_DeliverTx: + s := proto.Size(x.DeliverTx) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_EndBlock: + s := proto.Size(x.EndBlock) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Response_Commit: + s := proto.Size(x.Commit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + // nondeterministic type ResponseException struct { Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` @@ -1113,7 +1587,7 @@ 
func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{13} + return fileDescriptor_types_45ae25b95d0ccb51, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1123,15 +1597,15 @@ func (m *ResponseException) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_ResponseException.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseException) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseException.Merge(m, src) +func (dst *ResponseException) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseException.Merge(dst, src) } func (m *ResponseException) XXX_Size() int { return m.Size() @@ -1160,7 +1634,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{14} + return fileDescriptor_types_45ae25b95d0ccb51, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1170,15 +1644,15 @@ func (m *ResponseEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_ResponseEcho.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseEcho) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEcho.Merge(m, src) +func (dst *ResponseEcho) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEcho.Merge(dst, src) } func (m *ResponseEcho) XXX_Size() int { return m.Size() @@ -1206,7 +1680,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{15} + return fileDescriptor_types_45ae25b95d0ccb51, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1216,15 +1690,15 @@ func (m *ResponseFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_ResponseFlush.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseFlush) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseFlush.Merge(m, src) +func (dst *ResponseFlush) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFlush.Merge(dst, src) } func (m *ResponseFlush) XXX_Size() int { return m.Size() @@ -1250,7 +1724,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{16} + return fileDescriptor_types_45ae25b95d0ccb51, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1260,15 +1734,15 @@ func (m 
*ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_ResponseInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseInfo.Merge(m, src) +func (dst *ResponseInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInfo.Merge(dst, src) } func (m *ResponseInfo) XXX_Size() int { return m.Size() @@ -1329,7 +1803,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{17} + return fileDescriptor_types_45ae25b95d0ccb51, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1339,15 +1813,15 @@ func (m *ResponseSetOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_ResponseSetOption.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseSetOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseSetOption.Merge(m, src) +func (dst *ResponseSetOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseSetOption.Merge(dst, src) } func (m *ResponseSetOption) XXX_Size() int { return m.Size() @@ -1380,8 +1854,8 @@ func (m *ResponseSetOption) GetInfo() string { } type ResponseInitChain struct { - ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators" json:"validators"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1391,7 +1865,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{18} + return fileDescriptor_types_45ae25b95d0ccb51, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1401,15 +1875,15 @@ func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseInitChain) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseInitChain.Merge(m, src) +func (dst *ResponseInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInitChain.Merge(dst, src) } func (m *ResponseInitChain) XXX_Size() int { return m.Size() @@ -1442,7 +1916,7 @@ type ResponseQuery struct { Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` Key []byte `protobuf:"bytes,6,opt,name=key,proto3" 
json:"key,omitempty"` Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"` + Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof" json:"proof,omitempty"` Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1454,7 +1928,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{19} + return fileDescriptor_types_45ae25b95d0ccb51, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1464,15 +1938,15 @@ func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_ResponseQuery.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseQuery.Merge(m, src) +func (dst *ResponseQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseQuery.Merge(dst, src) } func (m *ResponseQuery) XXX_Size() int { return m.Size() @@ -1547,7 +2021,7 @@ func (m *ResponseQuery) GetCodespace() string { } type ResponseBeginBlock struct { - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + Events []Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1557,7 +2031,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{20} + return fileDescriptor_types_45ae25b95d0ccb51, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1567,15 +2041,15 @@ func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBeginBlock.Merge(m, src) +func (dst *ResponseBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBeginBlock.Merge(dst, src) } func (m *ResponseBeginBlock) XXX_Size() int { return m.Size() @@ -1600,7 +2074,7 @@ type ResponseCheckTx struct { Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` 
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1611,7 +2085,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{21} + return fileDescriptor_types_45ae25b95d0ccb51, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1621,15 +2095,15 @@ func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseCheckTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCheckTx.Merge(m, src) +func (dst *ResponseCheckTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCheckTx.Merge(dst, src) } func (m *ResponseCheckTx) XXX_Size() int { return m.Size() @@ -1703,7 +2177,7 @@ type ResponseDeliverTx struct { Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1714,7 +2188,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{22} + return fileDescriptor_types_45ae25b95d0ccb51, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1724,15 +2198,15 @@ func (m *ResponseDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return xxx_messageInfo_ResponseDeliverTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseDeliverTx.Merge(m, src) +func (dst *ResponseDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDeliverTx.Merge(dst, src) } func (m *ResponseDeliverTx) XXX_Size() int { return m.Size() @@ -1800,9 +2274,9 @@ func (m *ResponseDeliverTx) GetCodespace() string { } type ResponseEndBlock struct { - ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` + ConsensusParamUpdates *ConsensusParams 
`protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events" json:"events,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1812,7 +2286,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{23} + return fileDescriptor_types_45ae25b95d0ccb51, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1822,15 +2296,15 @@ func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEndBlock.Merge(m, src) +func (dst *ResponseEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEndBlock.Merge(dst, src) } func (m *ResponseEndBlock) XXX_Size() int { return m.Size() @@ -1874,7 +2348,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{24} + return fileDescriptor_types_45ae25b95d0ccb51, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1884,15 +2358,15 @@ func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCommit.Merge(m, src) +func (dst *ResponseCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCommit.Merge(dst, src) } func (m *ResponseCommit) XXX_Size() int { return m.Size() @@ -1913,9 +2387,9 @@ func (m *ResponseCommit) GetData() []byte { // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app type ConsensusParams struct { - Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` - Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` + Block *BlockParams `protobuf:"bytes,1,opt,name=block" json:"block,omitempty"` + Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence" json:"evidence,omitempty"` + Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator" json:"validator,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1925,7 +2399,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return 
fileDescriptor_9f1eaa49c51fa1ac, []int{25} + return fileDescriptor_types_45ae25b95d0ccb51, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1935,15 +2409,15 @@ func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ConsensusParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConsensusParams.Merge(m, src) +func (dst *ConsensusParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParams.Merge(dst, src) } func (m *ConsensusParams) XXX_Size() int { return m.Size() @@ -1990,7 +2464,7 @@ func (m *BlockParams) Reset() { *m = BlockParams{} } func (m *BlockParams) String() string { return proto.CompactTextString(m) } func (*BlockParams) ProtoMessage() {} func (*BlockParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{26} + return fileDescriptor_types_45ae25b95d0ccb51, []int{26} } func (m *BlockParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2000,15 +2474,15 @@ func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *BlockParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockParams.Merge(m, src) +func (dst *BlockParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockParams.Merge(dst, src) } func (m *BlockParams) XXX_Size() int { return m.Size() @@ -2036,7 +2510,7 @@ func (m *BlockParams) GetMaxGas() int64 { type EvidenceParams struct { // Note: must be greater than 0 MaxAgeNumBlocks int64 `protobuf:"varint,1,opt,name=max_age_num_blocks,json=maxAgeNumBlocks,proto3" json:"max_age_num_blocks,omitempty"` - MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` + MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,stdduration" json:"max_age_duration"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2046,7 +2520,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{27} + return fileDescriptor_types_45ae25b95d0ccb51, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2056,15 +2530,15 @@ func (m *EvidenceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_EvidenceParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *EvidenceParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_EvidenceParams.Merge(m, src) +func (dst *EvidenceParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceParams.Merge(dst, src) } func (m *EvidenceParams) XXX_Size() int { return m.Size() @@ -2091,7 +2565,7 @@ func (m *EvidenceParams) GetMaxAgeDuration() time.Duration { // 
ValidatorParams contains limits on validators. type ValidatorParams struct { - PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes,proto3" json:"pub_key_types,omitempty"` + PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes" json:"pub_key_types,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2101,7 +2575,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } func (*ValidatorParams) ProtoMessage() {} func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{28} + return fileDescriptor_types_45ae25b95d0ccb51, []int{28} } func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2111,15 +2585,15 @@ func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_ValidatorParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ValidatorParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorParams.Merge(m, src) +func (dst *ValidatorParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorParams.Merge(dst, src) } func (m *ValidatorParams) XXX_Size() int { return m.Size() @@ -2139,7 +2613,7 @@ func (m *ValidatorParams) GetPubKeyTypes() []string { type LastCommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` - Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes" json:"votes"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2149,7 +2623,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{29} + return fileDescriptor_types_45ae25b95d0ccb51, []int{29} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2159,15 +2633,15 @@ func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_LastCommitInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *LastCommitInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LastCommitInfo.Merge(m, src) +func (dst *LastCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastCommitInfo.Merge(dst, src) } func (m *LastCommitInfo) XXX_Size() int { return m.Size() @@ -2194,7 +2668,7 @@ func (m *LastCommitInfo) GetVotes() []VoteInfo { type Event struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Attributes []kv.Pair `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + Attributes []kv.Pair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2204,7 +2678,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) 
ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{30} + return fileDescriptor_types_45ae25b95d0ccb51, []int{30} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2214,15 +2688,15 @@ func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Event.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) +func (dst *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(dst, src) } func (m *Event) XXX_Size() int { return m.Size() @@ -2249,12 +2723,12 @@ func (m *Event) GetAttributes() []kv.Pair { type Header struct { // basic block info - Version Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + Time time.Time `protobuf:"bytes,4,opt,name=time,stdtime" json:"time"` // prev block info - LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId" json:"last_block_id"` // hashes of block data LastCommitHash []byte `protobuf:"bytes,6,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` DataHash []byte `protobuf:"bytes,7,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` @@ -2276,7 +2750,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{31} + return fileDescriptor_types_45ae25b95d0ccb51, []int{31} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2286,15 +2760,15 @@ func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Header.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) +func (dst *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(dst, src) } func (m *Header) XXX_Size() int { return m.Size() @@ -2415,7 +2889,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{32} + return fileDescriptor_types_45ae25b95d0ccb51, []int{32} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2425,15 +2899,15 @@ func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Version.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Version) XXX_Merge(src proto.Message) { 
- xxx_messageInfo_Version.Merge(m, src) +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) } func (m *Version) XXX_Size() int { return m.Size() @@ -2460,7 +2934,7 @@ func (m *Version) GetApp() uint64 { type BlockID struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - PartsHeader PartSetHeader `protobuf:"bytes,2,opt,name=parts_header,json=partsHeader,proto3" json:"parts_header"` + PartsHeader PartSetHeader `protobuf:"bytes,2,opt,name=parts_header,json=partsHeader" json:"parts_header"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2470,7 +2944,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{33} + return fileDescriptor_types_45ae25b95d0ccb51, []int{33} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2480,15 +2954,15 @@ func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *BlockID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockID.Merge(m, src) +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) } func (m *BlockID) XXX_Size() int { return m.Size() @@ -2525,7 +2999,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{34} + return fileDescriptor_types_45ae25b95d0ccb51, []int{34} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2535,15 +3009,15 @@ func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *PartSetHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartSetHeader.Merge(m, src) +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) } func (m *PartSetHeader) XXX_Size() int { return m.Size() @@ -2582,7 +3056,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{35} + return fileDescriptor_types_45ae25b95d0ccb51, []int{35} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2592,15 +3066,15 @@ func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Validator.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Validator) XXX_Merge(src proto.Message) { - xxx_messageInfo_Validator.Merge(m, src) +func (dst *Validator) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Validator.Merge(dst, src) } func (m *Validator) XXX_Size() int { return m.Size() @@ -2627,7 +3101,7 @@ func (m *Validator) GetPower() int64 { // ValidatorUpdate type ValidatorUpdate struct { - PubKey PubKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + PubKey PubKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey" json:"pub_key"` Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2638,7 +3112,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{36} + return fileDescriptor_types_45ae25b95d0ccb51, []int{36} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2648,15 +3122,15 @@ func (m *ValidatorUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_ValidatorUpdate.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ValidatorUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorUpdate.Merge(m, src) +func (dst *ValidatorUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorUpdate.Merge(dst, src) } func (m *ValidatorUpdate) XXX_Size() int { return m.Size() @@ -2683,7 +3157,7 @@ func (m *ValidatorUpdate) GetPower() int64 { // VoteInfo type VoteInfo struct { - Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + Validator Validator `protobuf:"bytes,1,opt,name=validator" json:"validator"` SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2694,7 +3168,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{37} + return fileDescriptor_types_45ae25b95d0ccb51, []int{37} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2704,15 +3178,15 @@ func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *VoteInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteInfo.Merge(m, src) +func (dst *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(dst, src) } func (m *VoteInfo) XXX_Size() int { return m.Size() @@ -2749,7 +3223,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{38} + return fileDescriptor_types_45ae25b95d0ccb51, []int{38} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2759,15 +3233,15 @@ func (m *PubKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PubKey.Marshal(b, 
m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *PubKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_PubKey.Merge(m, src) +func (dst *PubKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKey.Merge(dst, src) } func (m *PubKey) XXX_Size() int { return m.Size() @@ -2794,9 +3268,9 @@ func (m *PubKey) GetData() []byte { type Evidence struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` + Validator Validator `protobuf:"bytes,2,opt,name=validator" json:"validator"` Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + Time time.Time `protobuf:"bytes,4,opt,name=time,stdtime" json:"time"` TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2807,7 +3281,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{39} + return fileDescriptor_types_45ae25b95d0ccb51, []int{39} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2817,15 +3291,15 @@ func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Evidence) XXX_Merge(src proto.Message) { - xxx_messageInfo_Evidence.Merge(m, src) +func (dst *Evidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Evidence.Merge(dst, src) } func (m *Evidence) XXX_Size() int { return m.Size() @@ -2872,8 +3346,6 @@ func (m *Evidence) GetTotalVotingPower() int64 { } func init() { - proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) - golang_proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) proto.RegisterType((*Request)(nil), "tendermint.abci.types.Request") golang_proto.RegisterType((*Request)(nil), "tendermint.abci.types.Request") proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.types.RequestEcho") @@ -2954,164 +3426,9 @@ func init() { golang_proto.RegisterType((*PubKey)(nil), "tendermint.abci.types.PubKey") proto.RegisterType((*Evidence)(nil), "tendermint.abci.types.Evidence") golang_proto.RegisterType((*Evidence)(nil), "tendermint.abci.types.Evidence") + proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) + golang_proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) } - -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } -func init() { golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } - -var fileDescriptor_9f1eaa49c51fa1ac = []byte{ - // 2370 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x4d, 0x90, 0x1b, 0x47, - 0x15, 0xde, 0xd1, 0x6a, 0x57, 0xd2, 0xd3, 0xee, 0x4a, 0x69, 0x3b, 
0x89, 0x22, 0x92, 0x5d, 0xd7, - 0xf8, 0x6f, 0x9d, 0x04, 0x6d, 0x58, 0x2a, 0x54, 0x8c, 0x5d, 0xa1, 0x56, 0x6b, 0x07, 0xa9, 0x62, - 0x3b, 0x9b, 0xb1, 0xbd, 0x18, 0xa8, 0xca, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x56, 0x9a, 0x99, 0xcc, - 0xb4, 0x64, 0x89, 0xe2, 0x4e, 0x51, 0xc5, 0x81, 0x0b, 0x55, 0x5c, 0xb8, 0x73, 0xe4, 0xc0, 0x21, - 0x47, 0x8e, 0x39, 0x70, 0xe0, 0xc0, 0xd9, 0xc0, 0xc2, 0x89, 0xca, 0x91, 0xa2, 0x38, 0x52, 0xfd, - 0xba, 0xe7, 0x4f, 0x2b, 0xad, 0xc6, 0xc1, 0x37, 0x2e, 0xd2, 0x74, 0xf7, 0x7b, 0xaf, 0xbb, 0x5f, - 0xbf, 0x7e, 0xdf, 0x7b, 0xaf, 0xe1, 0x35, 0xda, 0xe9, 0xda, 0x7b, 0x7c, 0xea, 0xb1, 0x40, 0xfe, - 0x36, 0x3c, 0xdf, 0xe5, 0x2e, 0x79, 0x95, 0x33, 0xc7, 0x62, 0xfe, 0xd0, 0x76, 0x78, 0x43, 0x90, - 0x34, 0x70, 0xb0, 0x7e, 0x8d, 0xf7, 0x6d, 0xdf, 0x32, 0x3d, 0xea, 0xf3, 0xe9, 0x1e, 0x52, 0xee, - 0xf5, 0xdc, 0x9e, 0x1b, 0x7f, 0x49, 0xf6, 0x7a, 0xbd, 0xeb, 0x4f, 0x3d, 0xee, 0xee, 0x0d, 0x99, - 0x7f, 0x32, 0x60, 0xea, 0x4f, 0x8d, 0x5d, 0x18, 0xd8, 0x9d, 0x60, 0xef, 0x64, 0x9c, 0x9c, 0xaf, - 0xbe, 0xd3, 0x73, 0xdd, 0xde, 0x80, 0x49, 0x99, 0x9d, 0xd1, 0xd3, 0x3d, 0x6e, 0x0f, 0x59, 0xc0, - 0xe9, 0xd0, 0x53, 0x04, 0xdb, 0xb3, 0x04, 0xd6, 0xc8, 0xa7, 0xdc, 0x76, 0x1d, 0x39, 0xae, 0xff, - 0x7b, 0x0d, 0x0a, 0x06, 0xfb, 0x7c, 0xc4, 0x02, 0x4e, 0x3e, 0x80, 0x3c, 0xeb, 0xf6, 0xdd, 0x5a, - 0xee, 0x92, 0xb6, 0x5b, 0xde, 0xd7, 0x1b, 0x73, 0xf7, 0xd2, 0x50, 0xd4, 0x77, 0xbb, 0x7d, 0xb7, - 0xb5, 0x62, 0x20, 0x07, 0xb9, 0x05, 0x6b, 0x4f, 0x07, 0xa3, 0xa0, 0x5f, 0x5b, 0x45, 0xd6, 0xcb, - 0xe7, 0xb3, 0x7e, 0x24, 0x48, 0x5b, 0x2b, 0x86, 0xe4, 0x11, 0xd3, 0xda, 0xce, 0x53, 0xb7, 0x96, - 0xcf, 0x32, 0x6d, 0xdb, 0x79, 0x8a, 0xd3, 0x0a, 0x0e, 0xd2, 0x02, 0x08, 0x18, 0x37, 0x5d, 0x4f, - 0x6c, 0xa8, 0xb6, 0x86, 0xfc, 0xd7, 0xcf, 0xe7, 0x7f, 0xc8, 0xf8, 0x27, 0x48, 0xde, 0x5a, 0x31, - 0x4a, 0x41, 0xd8, 0x10, 0x92, 0x6c, 0xc7, 0xe6, 0x66, 0xb7, 0x4f, 0x6d, 0xa7, 0xb6, 0x9e, 0x45, - 0x52, 0xdb, 0xb1, 0xf9, 0xa1, 0x20, 0x17, 0x92, 0xec, 0xb0, 0x21, 0x54, 0xf1, 0xf9, 0x88, 0xf9, - 0xd3, 0x5a, 0x21, 0x8b, 0x2a, 0x3e, 0x15, 0xa4, 0x42, 0x15, 0xc8, 0x43, 0x3e, 0x86, 0x72, 0x87, - 0xf5, 0x6c, 0xc7, 0xec, 0x0c, 0xdc, 0xee, 0x49, 0xad, 0x88, 0x22, 0x76, 0xcf, 0x17, 0xd1, 0x14, - 0x0c, 0x4d, 0x41, 0xdf, 0x5a, 0x31, 0xa0, 0x13, 0xb5, 0x48, 0x13, 0x8a, 0xdd, 0x3e, 0xeb, 0x9e, - 0x98, 0x7c, 0x52, 0x2b, 0xa1, 0xa4, 0xab, 0xe7, 0x4b, 0x3a, 0x14, 0xd4, 0x8f, 0x26, 0xad, 0x15, - 0xa3, 0xd0, 0x95, 0x9f, 0x42, 0x2f, 0x16, 0x1b, 0xd8, 0x63, 0xe6, 0x0b, 0x29, 0x17, 0xb2, 0xe8, - 0xe5, 0x8e, 0xa4, 0x47, 0x39, 0x25, 0x2b, 0x6c, 0x90, 0xbb, 0x50, 0x62, 0x8e, 0xa5, 0x36, 0x56, - 0x46, 0x41, 0xd7, 0x96, 0x58, 0x98, 0x63, 0x85, 0xdb, 0x2a, 0x32, 0xf5, 0x4d, 0x3e, 0x84, 0xf5, - 0xae, 0x3b, 0x1c, 0xda, 0xbc, 0xb6, 0x81, 0x32, 0xae, 0x2c, 0xd9, 0x12, 0xd2, 0xb6, 0x56, 0x0c, - 0xc5, 0xd5, 0x2c, 0xc0, 0xda, 0x98, 0x0e, 0x46, 0x4c, 0xbf, 0x0e, 0xe5, 0x84, 0x25, 0x93, 0x1a, - 0x14, 0x86, 0x2c, 0x08, 0x68, 0x8f, 0xd5, 0xb4, 0x4b, 0xda, 0x6e, 0xc9, 0x08, 0x9b, 0xfa, 0x16, - 0x6c, 0x24, 0xed, 0x56, 0x1f, 0x46, 0x8c, 0xc2, 0x16, 0x05, 0xe3, 0x98, 0xf9, 0x81, 0x30, 0x40, - 0xc5, 0xa8, 0x9a, 0xe4, 0x32, 0x6c, 0xe2, 0x6e, 0xcd, 0x70, 0x5c, 0xdc, 0xab, 0xbc, 0xb1, 0x81, - 0x9d, 0xc7, 0x8a, 0x68, 0x07, 0xca, 0xde, 0xbe, 0x17, 0x91, 0xac, 0x22, 0x09, 0x78, 0xfb, 0x9e, - 0x22, 0xd0, 0xbf, 0x0b, 0xd5, 0x59, 0xd3, 0x25, 0x55, 0x58, 0x3d, 0x61, 0x53, 0x35, 0x9f, 0xf8, - 0x24, 0x17, 0xd5, 0xb6, 0x70, 0x8e, 0x92, 0xa1, 0xf6, 0xf8, 0xbb, 0x5c, 0xc4, 0x1c, 0x59, 0xab, - 0xb8, 0x6e, 0xc2, 0x49, 0x20, 0x77, 0x79, 0xbf, 0xde, 0x90, 0x0e, 0xa2, 0x11, 0x3a, 0x88, 
0xc6, - 0xa3, 0xd0, 0x83, 0x34, 0x8b, 0x5f, 0x3e, 0xdf, 0x59, 0xf9, 0xe5, 0x5f, 0x76, 0x34, 0x03, 0x39, - 0xc8, 0x1b, 0xc2, 0xa0, 0xa8, 0xed, 0x98, 0xb6, 0xa5, 0xe6, 0x29, 0x60, 0xbb, 0x6d, 0x91, 0x4f, - 0xa1, 0xda, 0x75, 0x9d, 0x80, 0x39, 0xc1, 0x28, 0x10, 0x6e, 0x8e, 0x0e, 0x03, 0xe5, 0x0b, 0x16, - 0x1d, 0xf2, 0x61, 0x48, 0x7e, 0x84, 0xd4, 0x46, 0xa5, 0x9b, 0xee, 0x20, 0xf7, 0x00, 0xc6, 0x74, - 0x60, 0x5b, 0x94, 0xbb, 0x7e, 0x50, 0xcb, 0x5f, 0x5a, 0x3d, 0x47, 0xd8, 0x71, 0x48, 0xf8, 0xd8, - 0xb3, 0x28, 0x67, 0xcd, 0xbc, 0x58, 0xb9, 0x91, 0xe0, 0x27, 0xd7, 0xa0, 0x42, 0x3d, 0xcf, 0x0c, - 0x38, 0xe5, 0xcc, 0xec, 0x4c, 0x39, 0x0b, 0xd0, 0x5f, 0x6c, 0x18, 0x9b, 0xd4, 0xf3, 0x1e, 0x8a, - 0xde, 0xa6, 0xe8, 0xd4, 0xad, 0xe8, 0xb4, 0xf1, 0x6a, 0x12, 0x02, 0x79, 0x8b, 0x72, 0x8a, 0xda, - 0xda, 0x30, 0xf0, 0x5b, 0xf4, 0x79, 0x94, 0xf7, 0x95, 0x0e, 0xf0, 0x9b, 0xbc, 0x06, 0xeb, 0x7d, - 0x66, 0xf7, 0xfa, 0x1c, 0xb7, 0xbd, 0x6a, 0xa8, 0x96, 0x38, 0x18, 0xcf, 0x77, 0xc7, 0x0c, 0xbd, - 0x5b, 0xd1, 0x90, 0x0d, 0xfd, 0x57, 0x39, 0x78, 0xe5, 0xcc, 0xf5, 0x15, 0x72, 0xfb, 0x34, 0xe8, - 0x87, 0x73, 0x89, 0x6f, 0x72, 0x4b, 0xc8, 0xa5, 0x16, 0xf3, 0x95, 0x57, 0x7e, 0x6b, 0x81, 0x06, - 0x5a, 0x48, 0xa4, 0x36, 0xae, 0x58, 0xc8, 0x63, 0xa8, 0x0e, 0x68, 0xc0, 0x4d, 0x69, 0xfb, 0x26, - 0x7a, 0xd9, 0xd5, 0x73, 0x3d, 0xc1, 0x3d, 0x1a, 0xde, 0x19, 0x61, 0xdc, 0x4a, 0xdc, 0xd6, 0x20, - 0xd5, 0x4b, 0x9e, 0xc0, 0xc5, 0xce, 0xf4, 0x27, 0xd4, 0xe1, 0xb6, 0xc3, 0xcc, 0x33, 0x67, 0xb4, - 0xb3, 0x40, 0xf4, 0xdd, 0xb1, 0x6d, 0x31, 0xa7, 0x1b, 0x1e, 0xce, 0x85, 0x48, 0x44, 0x74, 0x78, - 0x81, 0xfe, 0x04, 0xb6, 0xd2, 0xbe, 0x88, 0x6c, 0x41, 0x8e, 0x4f, 0x94, 0x46, 0x72, 0x7c, 0x42, - 0xbe, 0x03, 0x79, 0x21, 0x0e, 0xb5, 0xb1, 0xb5, 0x10, 0x2c, 0x14, 0xf7, 0xa3, 0xa9, 0xc7, 0x0c, - 0xa4, 0xd7, 0xf5, 0xe8, 0x26, 0x44, 0xfe, 0x69, 0x56, 0xb6, 0x7e, 0x03, 0x2a, 0x33, 0xae, 0x27, - 0x71, 0xac, 0x5a, 0xf2, 0x58, 0xf5, 0x0a, 0x6c, 0xa6, 0x3c, 0x8c, 0xfe, 0xc7, 0x75, 0x28, 0x1a, - 0x2c, 0xf0, 0x84, 0x11, 0x93, 0x16, 0x94, 0xd8, 0xa4, 0xcb, 0x24, 0x2c, 0x69, 0x4b, 0x9c, 0xb8, - 0xe4, 0xb9, 0x1b, 0xd2, 0x0b, 0xaf, 0x19, 0x31, 0x93, 0x9b, 0x29, 0x48, 0xbe, 0xbc, 0x4c, 0x48, - 0x12, 0x93, 0x6f, 0xa7, 0x31, 0xf9, 0xca, 0x12, 0xde, 0x19, 0x50, 0xbe, 0x99, 0x02, 0xe5, 0x65, - 0x13, 0xa7, 0x50, 0xb9, 0x3d, 0x07, 0x95, 0x97, 0x6d, 0x7f, 0x01, 0x2c, 0xb7, 0xe7, 0xc0, 0xf2, - 0xee, 0xd2, 0xb5, 0xcc, 0xc5, 0xe5, 0xdb, 0x69, 0x5c, 0x5e, 0xa6, 0x8e, 0x19, 0x60, 0xbe, 0x37, - 0x0f, 0x98, 0x6f, 0x2c, 0x91, 0xb1, 0x10, 0x99, 0x0f, 0xcf, 0x20, 0xf3, 0xb5, 0x25, 0xa2, 0xe6, - 0x40, 0x73, 0x3b, 0x05, 0xcd, 0x90, 0x49, 0x37, 0x0b, 0xb0, 0xf9, 0xa3, 0xb3, 0xd8, 0x7c, 0x7d, - 0x99, 0xa9, 0xcd, 0x03, 0xe7, 0xef, 0xcd, 0x80, 0xf3, 0xd5, 0x65, 0xbb, 0x5a, 0x88, 0xce, 0x37, - 0x84, 0x7f, 0x9c, 0xb9, 0x19, 0xc2, 0x97, 0x32, 0xdf, 0x77, 0x7d, 0x05, 0x7c, 0xb2, 0xa1, 0xef, - 0x0a, 0x8f, 0x1d, 0xdb, 0xff, 0x39, 0x48, 0x8e, 0x97, 0x36, 0x61, 0xed, 0xfa, 0x17, 0x5a, 0xcc, - 0x8b, 0x9e, 0x2d, 0xe9, 0xed, 0x4b, 0xca, 0xdb, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x1d, 0x28, - 0x0b, 0x4c, 0x99, 0xc1, 0x6e, 0xea, 0x85, 0xd8, 0x4d, 0xde, 0x86, 0x57, 0xd0, 0xff, 0xca, 0x30, - 0x40, 0x39, 0x92, 0x3c, 0x3a, 0x92, 0x8a, 0x18, 0x90, 0x1a, 0x94, 0x40, 0xf1, 0x4d, 0xb8, 0x90, - 0xa0, 0x15, 0x72, 0x11, 0x0b, 0x24, 0x48, 0x55, 0x23, 0xea, 0x03, 0xcf, 0x6b, 0xd1, 0xa0, 0xaf, - 0xdf, 0x8f, 0x15, 0x14, 0xc7, 0x05, 0x04, 0xf2, 0x5d, 0xd7, 0x92, 0xfb, 0xde, 0x34, 0xf0, 0x5b, - 0xc4, 0x0a, 0x03, 0xb7, 0x87, 0x8b, 0x2b, 0x19, 0xe2, 0x53, 0x50, 0x45, 0x57, 0xbb, 0x24, 0xef, - 0xac, 0xfe, 0x7b, 
0x2d, 0x96, 0x17, 0x87, 0x0a, 0xf3, 0x50, 0x5d, 0x7b, 0x99, 0xa8, 0x9e, 0xfb, - 0xdf, 0x50, 0x5d, 0xff, 0x97, 0x16, 0x1f, 0x69, 0x84, 0xd7, 0x5f, 0x4f, 0x05, 0xc2, 0xba, 0x6c, - 0xc7, 0x62, 0x13, 0x54, 0xf9, 0xaa, 0x21, 0x1b, 0x61, 0xa8, 0xb5, 0x8e, 0xc7, 0x90, 0x0e, 0xb5, - 0x0a, 0xd8, 0x27, 0x1b, 0xe4, 0x7d, 0xc4, 0x79, 0xf7, 0xa9, 0x72, 0x0d, 0x29, 0x10, 0x94, 0x49, - 0x5d, 0x43, 0x65, 0x73, 0x47, 0x82, 0xcc, 0x90, 0xd4, 0x09, 0x7c, 0x29, 0xa5, 0xc2, 0x86, 0x37, - 0xa1, 0x24, 0x96, 0x1e, 0x78, 0xb4, 0xcb, 0xf0, 0x6e, 0x97, 0x8c, 0xb8, 0x43, 0xb7, 0x80, 0x9c, - 0xf5, 0x31, 0xe4, 0x01, 0xac, 0xb3, 0x31, 0x73, 0xb8, 0x38, 0x23, 0xa1, 0xd6, 0x37, 0x17, 0x02, - 0x31, 0x73, 0x78, 0xb3, 0x26, 0x94, 0xf9, 0xcf, 0xe7, 0x3b, 0x55, 0xc9, 0xf3, 0xae, 0x3b, 0xb4, - 0x39, 0x1b, 0x7a, 0x7c, 0x6a, 0x28, 0x29, 0xfa, 0xcf, 0x72, 0x02, 0x0f, 0x53, 0xfe, 0x67, 0xae, - 0x7a, 0xc3, 0x4b, 0x93, 0x4b, 0x84, 0x48, 0xd9, 0x54, 0xfe, 0x16, 0x40, 0x8f, 0x06, 0xe6, 0x33, - 0xea, 0x70, 0x66, 0x29, 0xbd, 0x97, 0x7a, 0x34, 0xf8, 0x01, 0x76, 0x88, 0x78, 0x53, 0x0c, 0x8f, - 0x02, 0x66, 0xe1, 0x01, 0xac, 0x1a, 0x85, 0x1e, 0x0d, 0x1e, 0x07, 0xcc, 0x4a, 0xec, 0xb5, 0xf0, - 0x32, 0xf6, 0x9a, 0xd6, 0x77, 0x71, 0x56, 0xdf, 0x3f, 0xcf, 0xc5, 0xb7, 0x23, 0x0e, 0x1f, 0xfe, - 0x3f, 0x75, 0xf1, 0x1b, 0xcc, 0x29, 0xd2, 0x20, 0x40, 0x7e, 0x08, 0xaf, 0x44, 0xb7, 0xd2, 0x1c, - 0xe1, 0x6d, 0x0d, 0xad, 0xf0, 0xc5, 0x2e, 0x77, 0x75, 0x9c, 0xee, 0x0e, 0xc8, 0x67, 0xf0, 0xfa, - 0x8c, 0x0f, 0x8a, 0x26, 0xc8, 0xbd, 0x90, 0x2b, 0x7a, 0x35, 0xed, 0x8a, 0x42, 0xf9, 0xb1, 0xf6, - 0x56, 0x5f, 0xca, 0xad, 0xb9, 0x22, 0x42, 0xd8, 0x24, 0xbc, 0xcd, 0xb3, 0x09, 0xfd, 0xcf, 0x1a, - 0x54, 0x66, 0x16, 0x48, 0x3e, 0x80, 0x35, 0x89, 0xc0, 0xda, 0xb9, 0x85, 0x10, 0xd4, 0xb8, 0xda, - 0x93, 0x64, 0x20, 0x07, 0x50, 0x64, 0x2a, 0xba, 0x56, 0x4a, 0xb9, 0xba, 0x24, 0x08, 0x57, 0xfc, - 0x11, 0x1b, 0xb9, 0x03, 0xa5, 0x48, 0xf5, 0x4b, 0x32, 0xb7, 0xe8, 0xe4, 0x94, 0x90, 0x98, 0x51, - 0x3f, 0x84, 0x72, 0x62, 0x79, 0xe4, 0x1b, 0x50, 0x1a, 0xd2, 0x89, 0x4a, 0xb7, 0x64, 0x00, 0x5d, - 0x1c, 0xd2, 0x09, 0x66, 0x5a, 0xe4, 0x75, 0x28, 0x88, 0xc1, 0x1e, 0x95, 0x07, 0xb9, 0x6a, 0xac, - 0x0f, 0xe9, 0xe4, 0xfb, 0x34, 0xd0, 0x7f, 0xa1, 0xc1, 0x56, 0x7a, 0x9d, 0xe4, 0x1d, 0x20, 0x82, - 0x96, 0xf6, 0x98, 0xe9, 0x8c, 0x86, 0x12, 0x23, 0x43, 0x89, 0x95, 0x21, 0x9d, 0x1c, 0xf4, 0xd8, - 0x83, 0xd1, 0x10, 0xa7, 0x0e, 0xc8, 0x7d, 0xa8, 0x86, 0xc4, 0x61, 0xb1, 0x4b, 0x69, 0xe5, 0x8d, - 0x33, 0xc9, 0xee, 0x1d, 0x45, 0x20, 0x73, 0xdd, 0x5f, 0x8b, 0x5c, 0x77, 0x4b, 0xca, 0x0b, 0x47, - 0xf4, 0xf7, 0xa1, 0x32, 0xb3, 0x63, 0xa2, 0xc3, 0xa6, 0x37, 0xea, 0x98, 0x27, 0x6c, 0x6a, 0xa2, - 0x4a, 0xd0, 0xd4, 0x4b, 0x46, 0xd9, 0x1b, 0x75, 0x3e, 0x66, 0x53, 0x91, 0x75, 0x04, 0x7a, 0x17, - 0xb6, 0xd2, 0xc9, 0x94, 0x00, 0x0e, 0xdf, 0x1d, 0x39, 0x16, 0xae, 0x7b, 0xcd, 0x90, 0x0d, 0x72, - 0x0b, 0xd6, 0xc6, 0xae, 0xb4, 0xe6, 0xf3, 0xb2, 0xa7, 0x63, 0x97, 0xb3, 0x44, 0x4a, 0x26, 0x79, - 0xf4, 0x00, 0xd6, 0xd0, 0x2e, 0x85, 0x8d, 0x61, 0x5a, 0xa4, 0x02, 0x17, 0xf1, 0x4d, 0x8e, 0x01, - 0x28, 0xe7, 0xbe, 0xdd, 0x19, 0xc5, 0xe2, 0x6b, 0x49, 0xf1, 0x03, 0xbb, 0x13, 0x34, 0x4e, 0xc6, - 0x8d, 0x23, 0x6a, 0xfb, 0xcd, 0x37, 0x95, 0x65, 0x5f, 0x8c, 0x79, 0x12, 0xd6, 0x9d, 0x90, 0xa4, - 0x7f, 0x95, 0x87, 0x75, 0x99, 0x6e, 0x92, 0x0f, 0xd3, 0xc5, 0x8f, 0xf2, 0xfe, 0xf6, 0xa2, 0xe5, - 0x4b, 0x2a, 0xb5, 0xfa, 0x28, 0x82, 0xba, 0x36, 0x5b, 0x51, 0x68, 0x96, 0x4f, 0x9f, 0xef, 0x14, - 0x30, 0xfa, 0x68, 0xdf, 0x89, 0xcb, 0x0b, 0x8b, 0xb2, 0xeb, 0xb0, 0x96, 0x91, 0x7f, 0xe1, 0x5a, - 0x46, 0x0b, 0x36, 0x13, 0xe1, 0x96, 0x6d, 
0xa9, 0x3c, 0x65, 0xfb, 0xbc, 0x4b, 0xd7, 0xbe, 0xa3, - 0xd6, 0x5f, 0x8e, 0xc2, 0xb1, 0xb6, 0x45, 0x76, 0xd3, 0x49, 0x36, 0x46, 0x6d, 0x32, 0x5c, 0x48, - 0xe4, 0xcd, 0x22, 0x66, 0x13, 0xd7, 0x41, 0x5c, 0x7e, 0x49, 0x22, 0xa3, 0x87, 0xa2, 0xe8, 0xc0, - 0xc1, 0xeb, 0x50, 0x89, 0x03, 0x1b, 0x49, 0x52, 0x94, 0x52, 0xe2, 0x6e, 0x24, 0x7c, 0x0f, 0x2e, - 0x3a, 0x6c, 0xc2, 0xcd, 0x59, 0xea, 0x12, 0x52, 0x13, 0x31, 0x76, 0x9c, 0xe6, 0xb8, 0x0a, 0x5b, - 0xb1, 0x0b, 0x45, 0x5a, 0x90, 0xa5, 0x8f, 0xa8, 0x17, 0xc9, 0xde, 0x80, 0x62, 0x14, 0x76, 0x96, - 0x91, 0xa0, 0x40, 0x65, 0xb4, 0x19, 0x05, 0xb2, 0x3e, 0x0b, 0x46, 0x03, 0xae, 0x84, 0x6c, 0x20, - 0x0d, 0x06, 0xb2, 0x86, 0xec, 0x47, 0xda, 0xcb, 0xb0, 0x19, 0x7a, 0x15, 0x49, 0xb7, 0x89, 0x74, - 0x1b, 0x61, 0x27, 0x12, 0xdd, 0x80, 0xaa, 0xe7, 0xbb, 0x9e, 0x1b, 0x30, 0xdf, 0xa4, 0x96, 0xe5, - 0xb3, 0x20, 0xa8, 0x6d, 0x49, 0x79, 0x61, 0xff, 0x81, 0xec, 0xd6, 0xbf, 0x05, 0x85, 0x30, 0x9e, - 0xbe, 0x08, 0x6b, 0xcd, 0xc8, 0x43, 0xe6, 0x0d, 0xd9, 0x10, 0xf8, 0x7a, 0xe0, 0x79, 0xaa, 0xba, - 0x26, 0x3e, 0xf5, 0x01, 0x14, 0xd4, 0x81, 0xcd, 0xad, 0xa9, 0xdc, 0x87, 0x0d, 0x8f, 0xfa, 0x62, - 0x1b, 0xc9, 0xca, 0xca, 0xa2, 0x8c, 0xf0, 0x88, 0xfa, 0xfc, 0x21, 0xe3, 0xa9, 0x02, 0x4b, 0x19, - 0xf9, 0x65, 0x97, 0x7e, 0x13, 0x36, 0x53, 0x34, 0x62, 0x99, 0xdc, 0xe5, 0x74, 0x10, 0x5e, 0x74, - 0x6c, 0x44, 0x2b, 0xc9, 0xc5, 0x2b, 0xd1, 0x6f, 0x41, 0x29, 0x3a, 0x2b, 0x91, 0x68, 0x84, 0xaa, - 0xd0, 0x94, 0xfa, 0x65, 0x13, 0x8b, 0x48, 0xee, 0x33, 0xe6, 0x2b, 0xeb, 0x97, 0x0d, 0x9d, 0x25, - 0x1c, 0x93, 0x44, 0x33, 0x72, 0x1b, 0x0a, 0xca, 0x31, 0xa9, 0xfb, 0xb8, 0xa8, 0x5c, 0x74, 0x84, - 0x9e, 0x2a, 0x2c, 0x17, 0x49, 0xbf, 0x15, 0x4f, 0x93, 0x4b, 0x4e, 0xf3, 0x53, 0x28, 0x86, 0xce, - 0x27, 0x8d, 0x12, 0x72, 0x86, 0x4b, 0xcb, 0x50, 0x42, 0x4d, 0x12, 0x33, 0x0a, 0x6b, 0x0a, 0xec, - 0x9e, 0xc3, 0x2c, 0x33, 0xbe, 0x82, 0x38, 0x67, 0xd1, 0xa8, 0xc8, 0x81, 0x7b, 0xe1, 0xfd, 0xd2, - 0xdf, 0x83, 0x75, 0xb9, 0xd6, 0xb9, 0x2e, 0x6e, 0x1e, 0xb4, 0xfe, 0x43, 0x83, 0x62, 0x08, 0x1f, - 0x73, 0x99, 0x52, 0x9b, 0xc8, 0x7d, 0xdd, 0x4d, 0xbc, 0x7c, 0x97, 0xf4, 0x2e, 0x10, 0xb4, 0x14, - 0x73, 0xec, 0x72, 0xdb, 0xe9, 0x99, 0xf2, 0x2c, 0x64, 0x24, 0x58, 0xc5, 0x91, 0x63, 0x1c, 0x38, - 0x12, 0xfd, 0x6f, 0x5f, 0x86, 0x72, 0xa2, 0xca, 0x45, 0x0a, 0xb0, 0xfa, 0x80, 0x3d, 0xab, 0xae, - 0x90, 0x32, 0x14, 0x0c, 0x86, 0x35, 0x82, 0xaa, 0xb6, 0xff, 0x55, 0x01, 0x2a, 0x07, 0xcd, 0xc3, - 0xf6, 0x81, 0xe7, 0x0d, 0xec, 0x2e, 0xe2, 0x19, 0xf9, 0x04, 0xf2, 0x98, 0x27, 0x67, 0x78, 0xdf, - 0xa9, 0x67, 0x29, 0x38, 0x11, 0x03, 0xd6, 0x30, 0x9d, 0x26, 0x59, 0x9e, 0x7d, 0xea, 0x99, 0xea, - 0x50, 0x62, 0x91, 0x68, 0x70, 0x19, 0x5e, 0x83, 0xea, 0x59, 0x8a, 0x53, 0xe4, 0x33, 0x28, 0xc5, - 0x79, 0x72, 0xd6, 0x37, 0xa2, 0x7a, 0xe6, 0xb2, 0x95, 0x90, 0x1f, 0x67, 0x06, 0x59, 0x5f, 0x48, - 0xea, 0x99, 0xeb, 0x35, 0xe4, 0x09, 0x14, 0xc2, 0x1c, 0x2c, 0xdb, 0x2b, 0x4e, 0x3d, 0x63, 0x49, - 0x49, 0x1c, 0x9f, 0x4c, 0x9d, 0xb3, 0x3c, 0x55, 0xd5, 0x33, 0xd5, 0xcd, 0xc8, 0x63, 0x58, 0x57, - 0xc1, 0x6f, 0xa6, 0xf7, 0x99, 0x7a, 0xb6, 0x42, 0x91, 0x50, 0x72, 0x5c, 0x9c, 0xc8, 0xfa, 0x3c, - 0x57, 0xcf, 0x5c, 0x30, 0x24, 0x14, 0x20, 0x91, 0x4f, 0x67, 0x7e, 0x77, 0xab, 0x67, 0x2f, 0x04, - 0x92, 0x1f, 0x43, 0x31, 0xca, 0x9a, 0x32, 0xbe, 0x7f, 0xd5, 0xb3, 0xd6, 0xe2, 0x9a, 0xed, 0xff, - 0xfc, 0x6d, 0x5b, 0xfb, 0xed, 0xe9, 0xb6, 0xf6, 0xc5, 0xe9, 0xb6, 0xf6, 0xe5, 0xe9, 0xb6, 0xf6, - 0xa7, 0xd3, 0x6d, 0xed, 0xaf, 0xa7, 0xdb, 0xda, 0x1f, 0xfe, 0xbe, 0xad, 0xfd, 0xe8, 0x9d, 0x9e, - 0xcd, 0xfb, 0xa3, 0x4e, 0xa3, 0xeb, 0x0e, 0xf7, 0x62, 0x81, 0xc9, 
0xcf, 0xf8, 0x51, 0xbb, 0xb3, - 0x8e, 0x0e, 0xeb, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x64, 0xb9, 0xe4, 0xe9, 0x1e, - 0x00, 0x00, -} - func (this *Request) Equal(that interface{}) bool { if that == nil { return this == nil @@ -5175,44 +5492,6 @@ type ABCIApplicationServer interface { EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) } -// UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. -type UnimplementedABCIApplicationServer struct { -} - -func (*UnimplementedABCIApplicationServer) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { - return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") -} -func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { - return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") -} -func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { - return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") -} -func (*UnimplementedABCIApplicationServer) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetOption not implemented") -} -func (*UnimplementedABCIApplicationServer) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeliverTx not implemented") -} -func (*UnimplementedABCIApplicationServer) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") -} -func (*UnimplementedABCIApplicationServer) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { - return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") -} -func (*UnimplementedABCIApplicationServer) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { - return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") -} -func (*UnimplementedABCIApplicationServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { - return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") -} -func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method BeginBlock not implemented") -} -func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") -} - func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { s.RegisterService(&_ABCIApplication_serviceDesc, srv) } @@ -5471,7 +5750,7 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ func (m *Request) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5479,268 +5758,183 @@ func (m *Request) Marshal() (dAtA []byte, err error) { } func (m *Request) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ 
= l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn1, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Echo != nil { - { - size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Echo.Size())) + n2, err := m.Echo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Flush != nil { - { - size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Flush.Size())) + n3, err := m.Flush.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Info.Size())) + n4, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_SetOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.SetOption != nil { - { - size, err := m.SetOption.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SetOption.Size())) + n5, err := m.SetOption.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.InitChain != nil { - { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.InitChain.Size())) + n6, err := 
m.InitChain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Query != nil { - { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Query.Size())) + n7, err := m.Query.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BeginBlock.Size())) + n8, err := m.BeginBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x4a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CheckTx.Size())) + n9, err := m.CheckTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.EndBlock.Size())) + n10, err := m.EndBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Commit != nil { - { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x62 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Commit.Size())) + n11, err := m.Commit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 } - return len(dAtA) - i, nil + return i, nil } func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) + i := 0 if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DeliverTx.Size())) + n12, err := m.DeliverTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestEcho) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5748,33 +5942,26 @@ func (m *RequestEcho) Marshal() (dAtA []byte, err error) { } func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestFlush) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5782,26 +5969,20 @@ func (m *RequestFlush) Marshal() (dAtA []byte, err error) { } func (m *RequestFlush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5809,43 +5990,36 @@ func (m *RequestInfo) Marshal() (dAtA []byte, err error) { } func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.P2PVersion != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) - i-- - dAtA[i] = 0x18 + if len(m.Version) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) } if m.BlockVersion != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0xa + if m.P2PVersion != 0 { + dAtA[i] = 0x18 + i++ + i = 
encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestSetOption) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5853,40 +6027,32 @@ func (m *RequestSetOption) Marshal() (dAtA []byte, err error) { } func (m *RequestSetOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestSetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5894,74 +6060,62 @@ func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { } func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) + n13, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + if err != nil { + return 0, err } - if len(m.AppStateBytes) > 0 { - i -= len(m.AppStateBytes) - copy(dAtA[i:], m.AppStateBytes) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) - i-- - dAtA[i] = 0x2a + i += n13 + if len(m.ChainId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i += copy(dAtA[i:], m.ChainId) + } + if m.ConsensusParams != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParams.Size())) + n14, err := m.ConsensusParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } if len(m.Validators) > 0 { - for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Validators { dAtA[i] = 0x22 - } - } - if m.ConsensusParams != nil { - { - size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i 
+= n } - i-- - dAtA[i] = 0x1a } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0x12 + if len(m.AppStateBytes) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) + i += copy(dAtA[i:], m.AppStateBytes) } - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err13 != nil { - return 0, err13 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i -= n13 - i = encodeVarintTypes(dAtA, i, uint64(n13)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RequestQuery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -5969,55 +6123,47 @@ func (m *RequestQuery) Marshal() (dAtA []byte, err error) { } func (m *RequestQuery) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Path) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Height != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } if m.Prove { - i-- + dAtA[i] = 0x20 + i++ if m.Prove { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 + i++ } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6025,67 +6171,54 @@ func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { } func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } + if len(m.Hash) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) } - { - size, err := 
m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Header.Size())) + n15, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n15 dAtA[i] = 0x1a - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastCommitInfo.Size())) + n16, err := m.LastCommitInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + if len(m.ByzantineValidators) > 0 { + for _, msg := range m.ByzantineValidators { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6093,38 +6226,31 @@ func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { } func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Tx) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i += copy(dAtA[i:], m.Tx) } if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) } - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6132,33 +6258,26 @@ func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { } func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i += copy(dAtA[i:], m.Tx) } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6166,31 +6285,25 @@ func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { } func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6198,26 +6311,20 @@ func (m *RequestCommit) Marshal() (dAtA []byte, err error) { } func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Response) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6225,287 +6332,195 @@ func (m *Response) Marshal() (dAtA []byte, err error) { } func (m *Response) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn17, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn17 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Exception != nil { - { - size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Exception.Size())) + n18, err := m.Exception.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Echo != nil { - { - size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Echo.Size())) + n19, err := m.Echo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Flush != nil { - { - size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Flush.Size())) + n20, err := m.Flush.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Info.Size())) + n21, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_SetOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.SetOption != nil { - { - size, err := m.SetOption.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SetOption.Size())) + n22, err := m.SetOption.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.InitChain != nil { - { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.InitChain.Size())) + n23, err := m.InitChain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Query != nil { - { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Query.Size())) + n24, err := m.Query.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 } - return len(dAtA) - i, nil + return i, nil } func (m 
*Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BeginBlock.Size())) + n25, err := m.BeginBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x4a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CheckTx.Size())) + n26, err := m.CheckTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DeliverTx.Size())) + n27, err := m.DeliverTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.EndBlock.Size())) + n28, err := m.EndBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 } - return len(dAtA) - i, nil + return i, nil } func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Commit != nil { - { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x62 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Commit.Size())) + n29, err := m.Commit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseException) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6513,33 +6528,26 @@ func (m *ResponseException) Marshal() (dAtA []byte, 
err error) { } func (m *ResponseException) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6547,33 +6555,26 @@ func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { } func (m *ResponseEcho) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6581,26 +6582,20 @@ func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { } func (m *ResponseFlush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6608,57 +6603,48 @@ func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { } func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.LastBlockAppHash) > 0 { - i -= len(m.LastBlockAppHash) - copy(dAtA[i:], m.LastBlockAppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) - i-- - dAtA[i] = 0x2a + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - if m.LastBlockHeight != 0 { - i = 
encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) - i-- - dAtA[i] = 0x20 + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) } if m.AppVersion != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 + if m.LastBlockHeight != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa + if len(m.LastBlockAppHash) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) + i += copy(dAtA[i:], m.LastBlockAppHash) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseSetOption) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6666,45 +6652,37 @@ func (m *ResponseSetOption) Marshal() (dAtA []byte, err error) { } func (m *ResponseSetOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseSetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) } if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6712,52 +6690,42 @@ func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { } func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ConsensusParams != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParams.Size())) + n30, err := m.ConsensusParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 } if len(m.Validators) > 0 { - for iNdEx := len(m.Validators) - 1; iNdEx >= 0; 
iNdEx-- { - { - size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Validators { dAtA[i] = 0x12 - } - } - if m.ConsensusParams != nil { - { - size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6765,88 +6733,75 @@ func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { } func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x52 + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x48 + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) } - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x3a + if m.Index != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x28 + if len(m.Value) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + if m.Proof != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Proof.Size())) + n31, err := m.Proof.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a + if m.Height != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + if 
len(m.Codespace) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6854,40 +6809,32 @@ func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { } func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Events { dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6895,83 +6842,71 @@ func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { } func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) } - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x42 + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) } if m.GasWanted != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) - i-- dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + if m.GasUsed != 0 { + dAtA[i] = 0x30 + 
i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -6979,83 +6914,71 @@ func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { } func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) } - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x42 + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) } if m.GasWanted != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) - i-- dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + if m.GasUsed != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 + if len(m.Codespace) > 
0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7063,66 +6986,54 @@ func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { } func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if len(m.ValidatorUpdates) > 0 { + for _, msg := range m.ValidatorUpdates { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n } } if m.ConsensusParamUpdates != nil { - { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParamUpdates.Size())) + n32, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 } - if len(m.ValidatorUpdates) > 0 { - for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7130,33 +7041,26 @@ func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { } func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return 
len(dAtA) - i, nil + return i, nil } func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7164,62 +7068,50 @@ func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { } func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Validator != nil { - { - size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.Block != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Block.Size())) + n33, err := m.Block.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n33 } if m.Evidence != nil { - { - size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Evidence.Size())) + n34, err := m.Evidence.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 } - if m.Block != nil { - { - size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.Validator != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n35, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n35 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *BlockParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7227,36 +7119,30 @@ func (m *BlockParams) Marshal() (dAtA []byte, err error) { } func (m *BlockParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.MaxBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) } if m.MaxGas != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) } - if m.MaxBytes != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) - i-- - dAtA[i] = 0x8 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *EvidenceParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7264,39 +7150,33 @@ func (m *EvidenceParams) Marshal() (dAtA []byte, err error) { } func (m *EvidenceParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n34, err34 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err34 != nil { - return 0, err34 - } - i -= n34 - i = encodeVarintTypes(dAtA, i, uint64(n34)) - i-- - dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.MaxAgeNumBlocks)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxAgeNumBlocks)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration))) + n36, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7304,35 +7184,35 @@ func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { } func (m *ValidatorParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatorParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.PubKeyTypes) > 0 { - for iNdEx := len(m.PubKeyTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PubKeyTypes[iNdEx]) - copy(dAtA[i:], m.PubKeyTypes[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKeyTypes[iNdEx]))) - i-- + for _, s := range m.PubKeyTypes { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7340,45 +7220,37 @@ func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { } func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Round != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) } if len(m.Votes) > 0 { - for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Votes { dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + 
} + i += n } } - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) - i-- - dAtA[i] = 0x8 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7386,47 +7258,38 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Attributes { dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Header) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7434,129 +7297,109 @@ func (m *Header) Marshal() (dAtA []byte, err error) { } func (m *Header) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Version.Size())) + n37, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.ProposerAddress) > 0 { - i -= len(m.ProposerAddress) - copy(dAtA[i:], m.ProposerAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) - i-- - dAtA[i] = 0x72 + i += n37 + if len(m.ChainID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i += copy(dAtA[i:], m.ChainID) } - if len(m.EvidenceHash) > 0 { - i -= len(m.EvidenceHash) - copy(dAtA[i:], m.EvidenceHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) - i-- - dAtA[i] = 0x6a + if m.Height != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } - if len(m.LastResultsHash) > 0 { - i -= len(m.LastResultsHash) - copy(dAtA[i:], m.LastResultsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) - i-- - dAtA[i] = 0x62 + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) + n38, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + if err != nil { + return 0, err } - if 
len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x5a + i += n38 + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) + n39, err := m.LastBlockId.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.ConsensusHash) > 0 { - i -= len(m.ConsensusHash) - copy(dAtA[i:], m.ConsensusHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) - i-- - dAtA[i] = 0x52 + i += n39 + if len(m.LastCommitHash) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + i += copy(dAtA[i:], m.LastCommitHash) } - if len(m.NextValidatorsHash) > 0 { - i -= len(m.NextValidatorsHash) - copy(dAtA[i:], m.NextValidatorsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) - i-- - dAtA[i] = 0x4a + if len(m.DataHash) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i += copy(dAtA[i:], m.DataHash) } if len(m.ValidatorsHash) > 0 { - i -= len(m.ValidatorsHash) - copy(dAtA[i:], m.ValidatorsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) - i-- dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) + i += copy(dAtA[i:], m.ValidatorsHash) } - if len(m.DataHash) > 0 { - i -= len(m.DataHash) - copy(dAtA[i:], m.DataHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) - i-- - dAtA[i] = 0x3a - } - if len(m.LastCommitHash) > 0 { - i -= len(m.LastCommitHash) - copy(dAtA[i:], m.LastCommitHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) - i-- - dAtA[i] = 0x32 + if len(m.NextValidatorsHash) > 0 { + dAtA[i] = 0x4a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i += copy(dAtA[i:], m.NextValidatorsHash) } - { - size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if len(m.ConsensusHash) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) + i += copy(dAtA[i:], m.ConsensusHash) } - i-- - dAtA[i] = 0x2a - n36, err36 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err36 != nil { - return 0, err36 + if len(m.AppHash) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i += copy(dAtA[i:], m.AppHash) } - i -= n36 - i = encodeVarintTypes(dAtA, i, uint64(n36)) - i-- - dAtA[i] = 0x22 - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 + if len(m.LastResultsHash) > 0 { + dAtA[i] = 0x62 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i += copy(dAtA[i:], m.LastResultsHash) } - if len(m.ChainID) > 0 { - i -= len(m.ChainID) - copy(dAtA[i:], m.ChainID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) - i-- - dAtA[i] = 0x12 + if len(m.EvidenceHash) > 0 { + dAtA[i] = 0x6a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) + i += copy(dAtA[i:], m.EvidenceHash) } - { - size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if len(m.ProposerAddress) > 0 { + dAtA[i] = 0x72 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i += copy(dAtA[i:], m.ProposerAddress) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil 
{ + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *Version) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7564,36 +7407,30 @@ func (m *Version) Marshal() (dAtA []byte, err error) { } func (m *Version) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Block != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Block)) } if m.App != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.App)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.App)) } - if m.Block != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Block)) - i-- - dAtA[i] = 0x8 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *BlockID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7601,43 +7438,34 @@ func (m *BlockID) Marshal() (dAtA []byte, err error) { } func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.PartsHeader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) + n40, err := m.PartsHeader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7645,38 +7473,31 @@ func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { } func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Total != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Total)) } if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) } - if m.Total != 0 { - i = encodeVarintTypes(dAtA, 
i, uint64(m.Total)) - i-- - dAtA[i] = 0x8 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Validator) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7684,38 +7505,31 @@ func (m *Validator) Marshal() (dAtA []byte, err error) { } func (m *Validator) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Address) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) } if m.Power != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Power)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7723,41 +7537,33 @@ func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { } func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatorUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) + n41, err := m.PubKey.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n41 if m.Power != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Power)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) } - { - size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *VoteInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7765,46 +7571,38 @@ func (m *VoteInfo) Marshal() (dAtA []byte, err error) { } func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n42, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n42 if m.SignedLastBlock { - i-- + dAtA[i] = 0x10 + i++ if m.SignedLastBlock { 
dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 + i++ } - { - size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PubKey) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7812,40 +7610,32 @@ func (m *PubKey) Marshal() (dAtA []byte, err error) { } func (m *PubKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PubKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Evidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7853,67 +7643,56 @@ func (m *Evidence) Marshal() (dAtA []byte, err error) { } func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TotalVotingPower != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) - i-- - dAtA[i] = 0x28 + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } - n41, err41 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err41 != nil { - return 0, err41 + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n43, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= n41 - i = encodeVarintTypes(dAtA, i, uint64(n41)) - i-- - dAtA[i] = 0x22 + i += n43 if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } - { - size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) + n44, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - if len(m.Type) > 0 { - i -= 
len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa + i += n44 + if m.TotalVotingPower != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func NewPopulatedRequest(r randyTypes, easy bool) *Request { this := &Request{} @@ -8046,10 +7825,10 @@ func NewPopulatedRequestInitChain(r randyTypes, easy bool) *RequestInitChain { v1 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) this.Time = *v1 this.ChainId = string(randStringTypes(r)) - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.ConsensusParams = NewPopulatedConsensusParams(r, easy) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v2 := r.Intn(5) this.Validators = make([]ValidatorUpdate, v2) for i := 0; i < v2; i++ { @@ -8098,7 +7877,7 @@ func NewPopulatedRequestBeginBlock(r randyTypes, easy bool) *RequestBeginBlock { this.Header = *v7 v8 := NewPopulatedLastCommitInfo(r, easy) this.LastCommitInfo = *v8 - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v9 := r.Intn(5) this.ByzantineValidators = make([]Evidence, v9) for i := 0; i < v9; i++ { @@ -8313,10 +8092,10 @@ func NewPopulatedResponseSetOption(r randyTypes, easy bool) *ResponseSetOption { func NewPopulatedResponseInitChain(r randyTypes, easy bool) *ResponseInitChain { this := &ResponseInitChain{} - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.ConsensusParams = NewPopulatedConsensusParams(r, easy) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v14 := r.Intn(5) this.Validators = make([]ValidatorUpdate, v14) for i := 0; i < v14; i++ { @@ -8349,7 +8128,7 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { for i := 0; i < v17; i++ { this.Value[i] = byte(r.Intn(256)) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.Proof = merkle.NewPopulatedProof(r, easy) } this.Height = int64(r.Int63()) @@ -8365,7 +8144,7 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { this := &ResponseBeginBlock{} - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v18 := r.Intn(5) this.Events = make([]Event, v18) for i := 0; i < v18; i++ { @@ -8397,7 +8176,7 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { if r.Intn(2) == 0 { this.GasUsed *= -1 } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v21 := r.Intn(5) this.Events = make([]Event, v21) for i := 0; i < v21; i++ { @@ -8430,7 +8209,7 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { if r.Intn(2) == 0 { this.GasUsed *= -1 } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v24 := r.Intn(5) this.Events = make([]Event, v24) for i := 0; i < v24; i++ { @@ -8447,7 +8226,7 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v26 := r.Intn(5) this.ValidatorUpdates = make([]ValidatorUpdate, v26) for i := 0; i < v26; i++ { @@ -8455,10 +8234,10 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this.ValidatorUpdates[i] = 
*v27 } } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v28 := r.Intn(5) this.Events = make([]Event, v28) for i := 0; i < v28; i++ { @@ -8487,13 +8266,13 @@ func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { this := &ConsensusParams{} - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.Block = NewPopulatedBlockParams(r, easy) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.Evidence = NewPopulatedEvidenceParams(r, easy) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.Validator = NewPopulatedValidatorParams(r, easy) } if !easy && r.Intn(10) != 0 { @@ -8551,7 +8330,7 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { if r.Intn(2) == 0 { this.Round *= -1 } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v33 := r.Intn(5) this.Votes = make([]VoteInfo, v33) for i := 0; i < v33; i++ { @@ -8568,7 +8347,7 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { func NewPopulatedEvent(r randyTypes, easy bool) *Event { this := &Event{} this.Type = string(randStringTypes(r)) - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v35 := r.Intn(5) this.Attributes = make([]kv.Pair, v35) for i := 0; i < v35; i++ { @@ -10007,7 +9786,14 @@ func (m *Evidence) Size() (n int) { } func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -10027,7 +9813,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10055,7 +9841,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10064,9 +9850,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10090,7 +9873,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10099,9 +9882,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10125,7 +9905,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10134,9 +9914,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10160,7 +9937,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10169,9 +9946,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -10195,7 +9969,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10204,9 +9978,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10230,7 +10001,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10239,9 +10010,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10265,7 +10033,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10274,9 +10042,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10300,7 +10065,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10309,9 +10074,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10335,7 +10097,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10344,9 +10106,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10370,7 +10129,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10379,9 +10138,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10405,7 +10161,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10414,9 +10170,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10435,9 +10188,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10466,7 +10216,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10494,7 +10244,7 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10504,9 +10254,6 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10521,9 +10268,6 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10552,7 +10296,7 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10575,9 +10319,6 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10606,7 +10347,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10634,7 +10375,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10644,9 +10385,6 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10666,7 +10404,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockVersion |= uint64(b&0x7F) << shift + m.BlockVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10685,7 +10423,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.P2PVersion |= uint64(b&0x7F) << shift + m.P2PVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10699,9 +10437,6 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10730,7 +10465,7 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10758,7 +10493,7 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10768,9 +10503,6 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10790,7 +10522,7 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10800,9 +10532,6 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10817,9 +10546,6 @@ func (m *RequestSetOption) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10848,7 +10574,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10876,7 +10602,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10885,9 +10611,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10909,7 +10632,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10919,9 +10642,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10941,7 +10661,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10950,9 +10670,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10977,7 +10694,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10986,9 +10703,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11011,7 +10725,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11020,9 +10734,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11040,9 +10751,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11071,7 +10779,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11099,7 +10807,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11108,9 +10816,6 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11133,7 
+10838,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11143,9 +10848,6 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11165,7 +10867,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Height |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11184,7 +10886,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11199,9 +10901,6 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11230,7 +10929,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11258,7 +10957,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11267,9 +10966,6 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11292,7 +10988,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11301,9 +10997,6 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11325,7 +11018,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11334,9 +11027,6 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11358,7 +11048,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11367,9 +11057,6 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11387,9 +11074,6 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11418,7 +11102,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11446,7 +11130,7 @@ 
func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11455,9 +11139,6 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11480,7 +11161,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Type |= (CheckTxType(b) & 0x7F) << shift if b < 0x80 { break } @@ -11494,9 +11175,6 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11525,7 +11203,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11553,7 +11231,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11562,9 +11240,6 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11582,9 +11257,6 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11613,7 +11285,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11641,7 +11313,7 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Height |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11655,9 +11327,6 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11686,7 +11355,7 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11709,9 +11378,6 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11740,7 +11406,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11768,7 +11434,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11777,9 +11443,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11803,7 +11466,7 @@ func (m *Response) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11812,9 +11475,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11838,7 +11498,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11847,9 +11507,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11873,7 +11530,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11882,9 +11539,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11908,7 +11562,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11917,9 +11571,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11943,7 +11594,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11952,9 +11603,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11978,7 +11626,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11987,9 +11635,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12013,7 +11658,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12022,9 +11667,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12048,7 +11690,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12057,9 +11699,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12083,7 +11722,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 
0x7F) << shift if b < 0x80 { break } @@ -12092,9 +11731,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12118,7 +11754,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12127,9 +11763,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12153,7 +11786,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12162,9 +11795,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12183,9 +11813,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12214,7 +11841,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12242,7 +11869,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12252,9 +11879,6 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12269,9 +11893,6 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12300,7 +11921,7 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12328,7 +11949,7 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12338,9 +11959,6 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12355,9 +11973,6 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12386,7 +12001,7 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12409,9 +12024,6 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return 
ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12440,7 +12052,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12468,7 +12080,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12478,9 +12090,6 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12500,7 +12109,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12510,9 +12119,6 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12532,7 +12138,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AppVersion |= uint64(b&0x7F) << shift + m.AppVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12551,7 +12157,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LastBlockHeight |= int64(b&0x7F) << shift + m.LastBlockHeight |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12570,7 +12176,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12579,9 +12185,6 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12599,9 +12202,6 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12630,7 +12230,7 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12658,7 +12258,7 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= uint32(b&0x7F) << shift + m.Code |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -12677,7 +12277,7 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12687,9 +12287,6 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12709,7 +12306,7 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12719,9 +12316,6 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen 
- if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12736,9 +12330,6 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12767,7 +12358,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12795,7 +12386,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12804,9 +12395,6 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12831,7 +12419,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12840,9 +12428,6 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12860,9 +12445,6 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12891,7 +12473,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12919,7 +12501,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= uint32(b&0x7F) << shift + m.Code |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -12938,7 +12520,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12948,9 +12530,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12970,7 +12549,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12980,9 +12559,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13002,7 +12578,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= int64(b&0x7F) << shift + m.Index |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13021,7 +12597,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13030,9 +12606,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + 
byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13055,7 +12628,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13064,9 +12637,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13089,7 +12659,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13098,9 +12668,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13125,7 +12692,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Height |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13144,7 +12711,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13154,9 +12721,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13171,9 +12735,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13202,7 +12763,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13230,7 +12791,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13239,9 +12800,6 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13259,9 +12817,6 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13290,7 +12845,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13318,7 +12873,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= uint32(b&0x7F) << shift + m.Code |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13337,7 +12892,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13346,9 +12901,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + 
byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13371,7 +12923,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13381,9 +12933,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13403,7 +12952,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13413,9 +12962,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13435,7 +12981,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift + m.GasWanted |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13454,7 +13000,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift + m.GasUsed |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13473,7 +13019,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13482,9 +13028,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13507,7 +13050,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13517,9 +13060,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13534,9 +13074,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13565,7 +13102,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13593,7 +13130,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= uint32(b&0x7F) << shift + m.Code |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13612,7 +13149,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13621,9 +13158,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13646,7 +13180,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13656,9 +13190,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13678,7 +13209,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13688,9 +13219,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13710,7 +13238,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift + m.GasWanted |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13729,7 +13257,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift + m.GasUsed |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13748,7 +13276,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13757,9 +13285,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13782,7 +13307,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13792,9 +13317,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13809,9 +13331,6 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13840,7 +13359,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13868,7 +13387,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13877,9 +13396,6 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13902,7 +13418,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13911,9 +13427,6 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13938,7 
+13451,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13947,9 +13460,6 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13967,9 +13477,6 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13998,7 +13505,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14026,7 +13533,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14035,9 +13542,6 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14055,9 +13559,6 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14086,7 +13587,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14114,7 +13615,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14123,9 +13624,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14150,7 +13648,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14159,9 +13657,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14186,7 +13681,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14195,9 +13690,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14217,9 +13709,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14248,7 +13737,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ 
-14276,7 +13765,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxBytes |= int64(b&0x7F) << shift + m.MaxBytes |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14295,7 +13784,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxGas |= int64(b&0x7F) << shift + m.MaxGas |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14309,9 +13798,6 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14340,7 +13826,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14368,7 +13854,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxAgeNumBlocks |= int64(b&0x7F) << shift + m.MaxAgeNumBlocks |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14387,7 +13873,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14396,9 +13882,6 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14415,9 +13898,6 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14446,7 +13926,7 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14474,7 +13954,7 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14484,9 +13964,6 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14501,9 +13978,6 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14532,7 +14006,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14560,7 +14034,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Round |= int32(b&0x7F) << shift + m.Round |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -14579,7 +14053,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14588,9 +14062,6 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14608,9 +14079,6 @@ func (m 
*LastCommitInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14639,7 +14107,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14667,7 +14135,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14677,9 +14145,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14699,7 +14164,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14708,9 +14173,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14728,9 +14190,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14759,7 +14218,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14787,7 +14246,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14796,9 +14255,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14820,7 +14276,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14830,9 +14286,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14852,7 +14305,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Height |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14871,7 +14324,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14880,9 +14333,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14904,7 +14354,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14913,9 +14363,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14937,7 +14384,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14946,9 +14393,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14971,7 +14415,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14980,9 +14424,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15005,7 +14446,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15014,9 +14455,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15039,7 +14477,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15048,9 +14486,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15073,7 +14508,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15082,9 +14517,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15107,7 +14539,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15116,9 +14548,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15141,7 +14570,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15150,9 +14579,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15175,7 +14601,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15184,9 +14610,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15209,7 +14632,7 @@ func (m *Header) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15218,9 +14641,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15238,9 +14658,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15269,7 +14686,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15297,7 +14714,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Block |= uint64(b&0x7F) << shift + m.Block |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15316,7 +14733,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.App |= uint64(b&0x7F) << shift + m.App |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15330,9 +14747,6 @@ func (m *Version) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15361,7 +14775,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15389,7 +14803,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15398,9 +14812,6 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15423,7 +14834,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15432,9 +14843,6 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15451,9 +14859,6 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15482,7 +14887,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15510,7 +14915,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Total |= int32(b&0x7F) << shift + m.Total |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -15529,7 +14934,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15538,9 +14943,6 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -15558,9 +14960,6 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15589,7 +14988,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15617,7 +15016,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15626,9 +15025,6 @@ func (m *Validator) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15651,7 +15047,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Power |= int64(b&0x7F) << shift + m.Power |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15665,9 +15061,6 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15696,7 +15089,7 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15724,7 +15117,7 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15733,9 +15126,6 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15757,7 +15147,7 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Power |= int64(b&0x7F) << shift + m.Power |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15771,9 +15161,6 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15802,7 +15189,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15830,7 +15217,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15839,9 +15226,6 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15863,7 +15247,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15878,9 +15262,6 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15909,7 +15290,7 @@ func (m *PubKey) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15937,7 +15318,7 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15947,9 +15328,6 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15969,7 +15347,7 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15978,9 +15356,6 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15998,9 +15373,6 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -16029,7 +15401,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16057,7 +15429,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16067,9 +15439,6 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16089,7 +15458,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16098,9 +15467,6 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16122,7 +15488,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Height |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16141,7 +15507,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16150,9 +15516,6 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16174,7 +15537,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TotalVotingPower |= int64(b&0x7F) << shift + m.TotalVotingPower |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16188,9 +15551,6 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -16207,7 +15567,6 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { func skipTypes(dAtA []byte) (n int, err error) { l := 
len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -16239,8 +15598,10 @@ func skipTypes(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -16257,34 +15618,211 @@ func skipTypes(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthTypes } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_45ae25b95d0ccb51) } +func init() { + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_45ae25b95d0ccb51) +} + +var fileDescriptor_types_45ae25b95d0ccb51 = []byte{ + // 2374 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x4d, 0x90, 0x1b, 0x47, + 0x15, 0xde, 0xd1, 0x6a, 0x57, 0xd2, 0xd3, 0xee, 0x4a, 0x69, 0x3b, 0x89, 0x22, 0x92, 0x5d, 0xd7, + 0xf8, 0x6f, 0x9d, 0x04, 0x6d, 0x58, 0x2a, 0x54, 0x8c, 0x5d, 0xa1, 0x56, 0x6b, 0x07, 0xa9, 0x62, + 0x3b, 0x9b, 0xb1, 0xbd, 0x18, 0xa8, 0xca, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x56, 0x9a, 0x99, 0xcc, + 0xb4, 0x64, 0x89, 0xe2, 0x4e, 0x51, 0xc5, 0x81, 0x0b, 0x55, 0x5c, 0xb8, 0x73, 0xe4, 0xc0, 0x21, + 0x47, 0x8e, 0x39, 0x70, 0xe0, 0xc0, 0xd9, 0xc0, 0xc2, 0x89, 0xca, 0x91, 0xa2, 0x38, 0x52, 0xfd, + 0xba, 0xe7, 0x4f, 0x2b, 0xad, 0xc6, 0xc1, 0x37, 0x2e, 0xd2, 0x74, 0xcf, 0x7b, 0xaf, 0xbb, 0x5f, + 0xbf, 0xf7, 0xbe, 0xf7, 0xde, 0xc0, 0x6b, 0xb4, 0xd3, 0xb5, 0xf7, 0xf8, 0xd4, 0x63, 0x81, 0xfc, + 0x6d, 0x78, 0xbe, 0xcb, 0x5d, 0xf2, 0x2a, 0x67, 0x8e, 0xc5, 0xfc, 0xa1, 0xed, 0xf0, 0x86, 0x20, + 0x69, 0xe0, 0xcb, 0xfa, 0x35, 0xde, 0xb7, 0x7d, 0xcb, 0xf4, 0xa8, 0xcf, 0xa7, 0x7b, 0x48, 0xb9, + 0xd7, 0x73, 0x7b, 0x6e, 0xfc, 0x24, 0xd9, 0xeb, 0xf5, 0xae, 0x3f, 0xf5, 0xb8, 0xbb, 0x37, 0x64, + 0xfe, 0xc9, 0x80, 0xa9, 0x3f, 0xf5, 0xee, 0xc2, 0xc0, 0xee, 0x04, 0x7b, 0x27, 0xe3, 0xe4, 0x7a, + 0xf5, 0x9d, 0x9e, 0xeb, 0xf6, 0x06, 0x4c, 0xca, 0xec, 0x8c, 0x9e, 0xee, 0x71, 0x7b, 0xc8, 0x02, + 0x4e, 0x87, 0x9e, 0x22, 0xd8, 0x9e, 0x25, 0xb0, 0x46, 0x3e, 0xe5, 0xb6, 0xeb, 0xc8, 0xf7, 0xfa, + 0xbf, 0xd7, 0xa0, 0x60, 0xb0, 0xcf, 0x47, 0x2c, 0xe0, 0xe4, 0x03, 0xc8, 
0xb3, 0x6e, 0xdf, 0xad, + 0xe5, 0x2e, 0x69, 0xbb, 0xe5, 0x7d, 0xbd, 0x31, 0xf7, 0x2c, 0x0d, 0x45, 0x7d, 0xb7, 0xdb, 0x77, + 0x5b, 0x2b, 0x06, 0x72, 0x90, 0x5b, 0xb0, 0xf6, 0x74, 0x30, 0x0a, 0xfa, 0xb5, 0x55, 0x64, 0xbd, + 0x7c, 0x3e, 0xeb, 0x47, 0x82, 0xb4, 0xb5, 0x62, 0x48, 0x1e, 0xb1, 0xac, 0xed, 0x3c, 0x75, 0x6b, + 0xf9, 0x2c, 0xcb, 0xb6, 0x9d, 0xa7, 0xb8, 0xac, 0xe0, 0x20, 0x2d, 0x80, 0x80, 0x71, 0xd3, 0xf5, + 0xc4, 0x81, 0x6a, 0x6b, 0xc8, 0x7f, 0xfd, 0x7c, 0xfe, 0x87, 0x8c, 0x7f, 0x82, 0xe4, 0xad, 0x15, + 0xa3, 0x14, 0x84, 0x03, 0x21, 0xc9, 0x76, 0x6c, 0x6e, 0x76, 0xfb, 0xd4, 0x76, 0x6a, 0xeb, 0x59, + 0x24, 0xb5, 0x1d, 0x9b, 0x1f, 0x0a, 0x72, 0x21, 0xc9, 0x0e, 0x07, 0x42, 0x15, 0x9f, 0x8f, 0x98, + 0x3f, 0xad, 0x15, 0xb2, 0xa8, 0xe2, 0x53, 0x41, 0x2a, 0x54, 0x81, 0x3c, 0xe4, 0x63, 0x28, 0x77, + 0x58, 0xcf, 0x76, 0xcc, 0xce, 0xc0, 0xed, 0x9e, 0xd4, 0x8a, 0x28, 0x62, 0xf7, 0x7c, 0x11, 0x4d, + 0xc1, 0xd0, 0x14, 0xf4, 0xad, 0x15, 0x03, 0x3a, 0xd1, 0x88, 0x34, 0xa1, 0xd8, 0xed, 0xb3, 0xee, + 0x89, 0xc9, 0x27, 0xb5, 0x12, 0x4a, 0xba, 0x7a, 0xbe, 0xa4, 0x43, 0x41, 0xfd, 0x68, 0xd2, 0x5a, + 0x31, 0x0a, 0x5d, 0xf9, 0x48, 0xee, 0x42, 0x89, 0x39, 0x96, 0xda, 0x4e, 0x19, 0x85, 0x5c, 0x5b, + 0x62, 0x17, 0x8e, 0x15, 0x6e, 0xa6, 0xc8, 0xd4, 0x33, 0xf9, 0x10, 0xd6, 0xbb, 0xee, 0x70, 0x68, + 0xf3, 0xda, 0x06, 0xca, 0xb8, 0xb2, 0x64, 0x23, 0x48, 0xdb, 0x5a, 0x31, 0x14, 0x97, 0xb8, 0x1e, + 0x8b, 0x0d, 0xec, 0x31, 0xf3, 0xc5, 0x61, 0x2e, 0x64, 0xb9, 0x9e, 0x3b, 0x92, 0x1e, 0x8f, 0x53, + 0xb2, 0xc2, 0x41, 0xb3, 0x00, 0x6b, 0x63, 0x3a, 0x18, 0x31, 0xfd, 0x3a, 0x94, 0x13, 0x96, 0x4c, + 0x6a, 0x50, 0x18, 0xb2, 0x20, 0xa0, 0x3d, 0x56, 0xd3, 0x2e, 0x69, 0xbb, 0x25, 0x23, 0x1c, 0xea, + 0x5b, 0xb0, 0x91, 0xb4, 0x5b, 0x7d, 0x18, 0x31, 0x0a, 0x5b, 0x14, 0x8c, 0x63, 0xe6, 0x07, 0xc2, + 0x00, 0x15, 0xa3, 0x1a, 0x92, 0xcb, 0xb0, 0x89, 0x7a, 0x33, 0xc3, 0xf7, 0xc2, 0xaf, 0xf2, 0xc6, + 0x06, 0x4e, 0x1e, 0x2b, 0xa2, 0x1d, 0x28, 0x7b, 0xfb, 0x5e, 0x44, 0xb2, 0x8a, 0x24, 0xe0, 0xed, + 0x7b, 0x8a, 0x40, 0xff, 0x2e, 0x54, 0x67, 0x4d, 0x97, 0x54, 0x61, 0xf5, 0x84, 0x4d, 0xd5, 0x7a, + 0xe2, 0x91, 0x5c, 0x54, 0xc7, 0xc2, 0x35, 0x4a, 0x86, 0x3a, 0xe3, 0xef, 0x72, 0x11, 0x73, 0x64, + 0xad, 0xc2, 0xdd, 0x44, 0x90, 0x40, 0xee, 0xf2, 0x7e, 0xbd, 0x21, 0x03, 0x44, 0x23, 0x0c, 0x10, + 0x8d, 0x47, 0x61, 0x04, 0x69, 0x16, 0xbf, 0x7c, 0xbe, 0xb3, 0xf2, 0xcb, 0xbf, 0xec, 0x68, 0x06, + 0x72, 0x90, 0x37, 0x84, 0x41, 0x51, 0xdb, 0x31, 0x6d, 0x4b, 0xad, 0x53, 0xc0, 0x71, 0xdb, 0x22, + 0x9f, 0x42, 0xb5, 0xeb, 0x3a, 0x01, 0x73, 0x82, 0x51, 0x20, 0xc2, 0x1c, 0x1d, 0x06, 0x2a, 0x16, + 0x2c, 0x32, 0x97, 0xc3, 0x90, 0xfc, 0x08, 0xa9, 0x8d, 0x4a, 0x37, 0x3d, 0x41, 0xee, 0x01, 0x8c, + 0xe9, 0xc0, 0xb6, 0x28, 0x77, 0xfd, 0xa0, 0x96, 0xbf, 0xb4, 0x7a, 0x8e, 0xb0, 0xe3, 0x90, 0xf0, + 0xb1, 0x67, 0x51, 0xce, 0x9a, 0x79, 0xb1, 0x73, 0x23, 0xc1, 0x4f, 0xae, 0x41, 0x85, 0x7a, 0x9e, + 0x19, 0x70, 0xca, 0x99, 0xd9, 0x99, 0x72, 0x16, 0x60, 0xbc, 0xd8, 0x30, 0x36, 0xa9, 0xe7, 0x3d, + 0x14, 0xb3, 0x4d, 0x31, 0xa9, 0x5b, 0xd1, 0x6d, 0xa3, 0x6b, 0x12, 0x02, 0x79, 0x8b, 0x72, 0x8a, + 0xda, 0xda, 0x30, 0xf0, 0x59, 0xcc, 0x79, 0x94, 0xf7, 0x95, 0x0e, 0xf0, 0x99, 0xbc, 0x06, 0xeb, + 0x7d, 0x66, 0xf7, 0xfa, 0x1c, 0x8f, 0xbd, 0x6a, 0xa8, 0x91, 0xb8, 0x18, 0xcf, 0x77, 0xc7, 0x0c, + 0xa3, 0x5b, 0xd1, 0x90, 0x03, 0xfd, 0x57, 0x39, 0x78, 0xe5, 0x8c, 0xfb, 0x0a, 0xb9, 0x7d, 0x1a, + 0xf4, 0xc3, 0xb5, 0xc4, 0x33, 0xb9, 0x25, 0xe4, 0x52, 0x8b, 0xf9, 0x2a, 0x2a, 0xbf, 0xb5, 0x40, + 0x03, 0x2d, 0x24, 0x52, 0x07, 0x57, 0x2c, 0xe4, 0x31, 0x54, 0x07, 0x34, 0xe0, 0xa6, 0xf4, 0x22, + 
0x13, 0xa3, 0xec, 0xea, 0xb9, 0x91, 0xe0, 0x1e, 0x0d, 0xbd, 0x4f, 0x18, 0xb7, 0x12, 0xb7, 0x35, + 0x48, 0xcd, 0x92, 0x27, 0x70, 0xb1, 0x33, 0xfd, 0x09, 0x75, 0xb8, 0xed, 0x30, 0xf3, 0xcc, 0x1d, + 0xed, 0x2c, 0x10, 0x7d, 0x77, 0x6c, 0x5b, 0xcc, 0xe9, 0x86, 0x97, 0x73, 0x21, 0x12, 0x11, 0x5d, + 0x5e, 0xa0, 0x3f, 0x81, 0xad, 0x74, 0x2c, 0x22, 0x5b, 0x90, 0xe3, 0x13, 0xa5, 0x91, 0x1c, 0x9f, + 0x90, 0xef, 0x40, 0x5e, 0x88, 0x43, 0x6d, 0x6c, 0x2d, 0x04, 0x0b, 0xc5, 0xfd, 0x68, 0xea, 0x31, + 0x03, 0xe9, 0x75, 0x3d, 0xf2, 0x84, 0x28, 0x30, 0xcc, 0xca, 0xd6, 0x6f, 0x40, 0x65, 0x26, 0x88, + 0x25, 0xae, 0x55, 0x4b, 0x5e, 0xab, 0x5e, 0x81, 0xcd, 0x54, 0xac, 0xd2, 0xff, 0xb8, 0x0e, 0x45, + 0x83, 0x05, 0x9e, 0x30, 0x62, 0xd2, 0x82, 0x12, 0x9b, 0x74, 0x99, 0x84, 0x25, 0x6d, 0x49, 0x10, + 0x97, 0x3c, 0x77, 0x43, 0x7a, 0x11, 0xae, 0x22, 0x66, 0x72, 0x33, 0x05, 0xc9, 0x97, 0x97, 0x09, + 0x49, 0x62, 0xf2, 0xed, 0x34, 0x26, 0x5f, 0x59, 0xc2, 0x3b, 0x03, 0xca, 0x37, 0x53, 0xa0, 0xbc, + 0x6c, 0xe1, 0x14, 0x2a, 0xb7, 0xe7, 0xa0, 0xf2, 0xb2, 0xe3, 0x2f, 0x80, 0xe5, 0xf6, 0x1c, 0x58, + 0xde, 0x5d, 0xba, 0x97, 0xb9, 0xb8, 0x7c, 0x3b, 0x8d, 0xcb, 0xcb, 0xd4, 0x31, 0x03, 0xcc, 0xf7, + 0xe6, 0x01, 0xf3, 0x8d, 0x25, 0x32, 0x16, 0x22, 0xf3, 0xe1, 0x19, 0x64, 0xbe, 0xb6, 0x44, 0xd4, + 0x1c, 0x68, 0x6e, 0xa7, 0x30, 0x11, 0x32, 0xe9, 0x66, 0x3e, 0x28, 0x92, 0x8f, 0xce, 0xa2, 0xfc, + 0xf5, 0x65, 0xa6, 0x36, 0x0f, 0xe6, 0xbf, 0x37, 0x03, 0xf3, 0x57, 0x97, 0x9d, 0x6a, 0x06, 0xe7, + 0x63, 0x74, 0xbe, 0x21, 0xe2, 0xe3, 0x8c, 0x67, 0x88, 0x58, 0xca, 0x7c, 0xdf, 0xf5, 0x15, 0xf0, + 0xc9, 0x81, 0xbe, 0x2b, 0x22, 0x76, 0x6c, 0xff, 0xe7, 0x20, 0x39, 0x3a, 0x6d, 0xc2, 0xda, 0xf5, + 0x2f, 0xb4, 0x98, 0x17, 0x23, 0x5b, 0x32, 0xda, 0x97, 0x54, 0xb4, 0x4f, 0x00, 0x7c, 0x2e, 0x0d, + 0xf0, 0x3b, 0x50, 0x16, 0x98, 0x32, 0x83, 0xdd, 0xd4, 0x0b, 0xb1, 0x9b, 0xbc, 0x0d, 0xaf, 0x60, + 0xfc, 0x95, 0x69, 0x80, 0x0a, 0x24, 0x79, 0x0c, 0x24, 0x15, 0xf1, 0x42, 0x6a, 0x50, 0x02, 0xc5, + 0x37, 0xe1, 0x42, 0x82, 0x56, 0xc8, 0x45, 0x2c, 0x90, 0x20, 0x55, 0x8d, 0xa8, 0x0f, 0x3c, 0xaf, + 0x45, 0x83, 0xbe, 0x7e, 0x3f, 0x56, 0x50, 0x9c, 0x17, 0x10, 0xc8, 0x77, 0x5d, 0x4b, 0x9e, 0x7b, + 0xd3, 0xc0, 0x67, 0x91, 0x2b, 0x0c, 0xdc, 0x1e, 0x6e, 0xae, 0x64, 0x88, 0x47, 0x41, 0x15, 0xb9, + 0x76, 0x49, 0xfa, 0xac, 0xfe, 0x7b, 0x2d, 0x96, 0x17, 0xa7, 0x0a, 0xf3, 0x50, 0x5d, 0x7b, 0x99, + 0xa8, 0x9e, 0xfb, 0xdf, 0x50, 0x5d, 0xff, 0x97, 0x16, 0x5f, 0x69, 0x84, 0xd7, 0x5f, 0x4f, 0x05, + 0xc2, 0xba, 0x6c, 0xc7, 0x62, 0x13, 0x54, 0xf9, 0xaa, 0x21, 0x07, 0x61, 0xaa, 0xb5, 0x8e, 0xd7, + 0x90, 0x4e, 0xb5, 0x0a, 0x38, 0x27, 0x07, 0xe4, 0x7d, 0xc4, 0x79, 0xf7, 0xa9, 0x0a, 0x0d, 0x29, + 0x10, 0x94, 0x45, 0x5d, 0x43, 0x55, 0x73, 0x47, 0x82, 0xcc, 0x90, 0xd4, 0x09, 0x7c, 0x29, 0xa5, + 0xd2, 0x86, 0x37, 0xa1, 0x24, 0xb6, 0x1e, 0x78, 0xb4, 0xcb, 0xd0, 0xb7, 0x4b, 0x46, 0x3c, 0xa1, + 0x5b, 0x40, 0xce, 0xc6, 0x18, 0xf2, 0x00, 0xd6, 0xd9, 0x98, 0x39, 0x5c, 0xdc, 0x91, 0x50, 0xeb, + 0x9b, 0x0b, 0x81, 0x98, 0x39, 0xbc, 0x59, 0x13, 0xca, 0xfc, 0xe7, 0xf3, 0x9d, 0xaa, 0xe4, 0x79, + 0xd7, 0x1d, 0xda, 0x9c, 0x0d, 0x3d, 0x3e, 0x35, 0x94, 0x14, 0xfd, 0x67, 0x39, 0x81, 0x87, 0xa9, + 0xf8, 0x33, 0x57, 0xbd, 0xa1, 0xd3, 0xe4, 0x12, 0x29, 0x52, 0x36, 0x95, 0xbf, 0x05, 0xd0, 0xa3, + 0x81, 0xf9, 0x8c, 0x3a, 0x9c, 0x59, 0x4a, 0xef, 0xa5, 0x1e, 0x0d, 0x7e, 0x80, 0x13, 0x22, 0xdf, + 0x14, 0xaf, 0x47, 0x01, 0xb3, 0xf0, 0x02, 0x56, 0x8d, 0x42, 0x8f, 0x06, 0x8f, 0x03, 0x66, 0x25, + 0xce, 0x5a, 0x78, 0x19, 0x67, 0x4d, 0xeb, 0xbb, 0x38, 0xab, 0xef, 0x9f, 0xe7, 0x62, 0xef, 0x88, + 0xd3, 0x87, 0xff, 0x4f, 
0x5d, 0xfc, 0x06, 0x6b, 0x8a, 0x34, 0x08, 0x90, 0x1f, 0xc2, 0x2b, 0x91, + 0x57, 0x9a, 0x23, 0xf4, 0xd6, 0xd0, 0x0a, 0x5f, 0xcc, 0xb9, 0xab, 0xe3, 0xf4, 0x74, 0x40, 0x3e, + 0x83, 0xd7, 0x67, 0x62, 0x50, 0xb4, 0x40, 0xee, 0x85, 0x42, 0xd1, 0xab, 0xe9, 0x50, 0x14, 0xca, + 0x8f, 0xb5, 0xb7, 0xfa, 0x52, 0xbc, 0xe6, 0x8a, 0x48, 0x61, 0x93, 0xf0, 0x36, 0xcf, 0x26, 0xf4, + 0x3f, 0x6b, 0x50, 0x99, 0xd9, 0x20, 0xf9, 0x00, 0xd6, 0x24, 0x02, 0x6b, 0xe7, 0x36, 0x42, 0x50, + 0xe3, 0xea, 0x4c, 0x92, 0x81, 0x1c, 0x40, 0x91, 0xa9, 0xec, 0x5a, 0x29, 0xe5, 0xea, 0x92, 0x24, + 0x5c, 0xf1, 0x47, 0x6c, 0xe4, 0x0e, 0x94, 0x22, 0xd5, 0x2f, 0xa9, 0xdc, 0xa2, 0x9b, 0x53, 0x42, + 0x62, 0x46, 0xfd, 0x10, 0xca, 0x89, 0xed, 0x91, 0x6f, 0x40, 0x69, 0x48, 0x27, 0xaa, 0xdc, 0x92, + 0x09, 0x74, 0x71, 0x48, 0x27, 0x58, 0x69, 0x91, 0xd7, 0xa1, 0x20, 0x5e, 0xf6, 0xa8, 0xbc, 0xc8, + 0x55, 0x63, 0x7d, 0x48, 0x27, 0xdf, 0xa7, 0x81, 0xfe, 0x0b, 0x0d, 0xb6, 0xd2, 0xfb, 0x24, 0xef, + 0x00, 0x11, 0xb4, 0xb4, 0xc7, 0x4c, 0x67, 0x34, 0x94, 0x18, 0x19, 0x4a, 0xac, 0x0c, 0xe9, 0xe4, + 0xa0, 0xc7, 0x1e, 0x8c, 0x86, 0xb8, 0x74, 0x40, 0xee, 0x43, 0x35, 0x24, 0x0e, 0x9b, 0x5d, 0x4a, + 0x2b, 0x6f, 0x9c, 0x29, 0x76, 0xef, 0x28, 0x02, 0x59, 0xeb, 0xfe, 0x5a, 0xd4, 0xba, 0x5b, 0x52, + 0x5e, 0xf8, 0x46, 0x7f, 0x1f, 0x2a, 0x33, 0x27, 0x26, 0x3a, 0x6c, 0x7a, 0xa3, 0x8e, 0x79, 0xc2, + 0xa6, 0x26, 0xaa, 0x04, 0x4d, 0xbd, 0x64, 0x94, 0xbd, 0x51, 0xe7, 0x63, 0x36, 0x15, 0x55, 0x47, + 0xa0, 0x77, 0x61, 0x2b, 0x5d, 0x4c, 0x09, 0xe0, 0xf0, 0xdd, 0x91, 0x63, 0xe1, 0xbe, 0xd7, 0x0c, + 0x39, 0x20, 0xb7, 0x60, 0x6d, 0xec, 0x4a, 0x6b, 0x3e, 0xaf, 0x7a, 0x3a, 0x76, 0x39, 0x4b, 0x94, + 0x64, 0x92, 0x47, 0x0f, 0x60, 0x0d, 0xed, 0x52, 0xd8, 0x18, 0x96, 0x45, 0x2a, 0x71, 0x11, 0xcf, + 0xe4, 0x18, 0x80, 0x72, 0xee, 0xdb, 0x9d, 0x51, 0x2c, 0xbe, 0x96, 0x14, 0x3f, 0xb0, 0x3b, 0x41, + 0xe3, 0x64, 0xdc, 0x38, 0xa2, 0xb6, 0xdf, 0x7c, 0x53, 0x59, 0xf6, 0xc5, 0x98, 0x27, 0x61, 0xdd, + 0x09, 0x49, 0xfa, 0x57, 0x79, 0x58, 0x97, 0xe5, 0x26, 0xf9, 0x30, 0xdd, 0xfc, 0x28, 0xef, 0x6f, + 0x2f, 0xda, 0xbe, 0xa4, 0x52, 0xbb, 0x8f, 0x32, 0xa8, 0x6b, 0xb3, 0x1d, 0x85, 0x66, 0xf9, 0xf4, + 0xf9, 0x4e, 0x01, 0xb3, 0x8f, 0xf6, 0x9d, 0xb8, 0xbd, 0xb0, 0xa8, 0xba, 0x0e, 0x7b, 0x19, 0xf9, + 0x17, 0xee, 0x65, 0xb4, 0x60, 0x33, 0x91, 0x6e, 0xd9, 0x96, 0xaa, 0x53, 0xb6, 0xcf, 0x73, 0xba, + 0xf6, 0x1d, 0xb5, 0xff, 0x72, 0x94, 0x8e, 0xb5, 0x2d, 0xb2, 0x9b, 0x2e, 0xb2, 0x31, 0x6b, 0x93, + 0xe9, 0x42, 0xa2, 0x6e, 0x16, 0x39, 0x9b, 0x70, 0x07, 0xe1, 0xfc, 0x92, 0x44, 0x66, 0x0f, 0x45, + 0x31, 0x81, 0x2f, 0xaf, 0x43, 0x25, 0x4e, 0x6c, 0x24, 0x49, 0x51, 0x4a, 0x89, 0xa7, 0x91, 0xf0, + 0x3d, 0xb8, 0xe8, 0xb0, 0x09, 0x37, 0x67, 0xa9, 0x4b, 0x48, 0x4d, 0xc4, 0xbb, 0xe3, 0x34, 0xc7, + 0x55, 0xd8, 0x8a, 0x43, 0x28, 0xd2, 0x82, 0x6c, 0x7d, 0x44, 0xb3, 0x48, 0xf6, 0x06, 0x14, 0xa3, + 0xb4, 0xb3, 0x8c, 0x04, 0x05, 0x2a, 0xb3, 0xcd, 0x28, 0x91, 0xf5, 0x59, 0x30, 0x1a, 0x70, 0x25, + 0x64, 0x03, 0x69, 0x30, 0x91, 0x35, 0xe4, 0x3c, 0xd2, 0x5e, 0x86, 0xcd, 0x30, 0xaa, 0x48, 0xba, + 0x4d, 0xa4, 0xdb, 0x08, 0x27, 0x91, 0xe8, 0x06, 0x54, 0x3d, 0xdf, 0xf5, 0xdc, 0x80, 0xf9, 0x26, + 0xb5, 0x2c, 0x9f, 0x05, 0x41, 0x6d, 0x4b, 0xca, 0x0b, 0xe7, 0x0f, 0xe4, 0xb4, 0xfe, 0x2d, 0x28, + 0x84, 0xf9, 0xf4, 0x45, 0x58, 0x6b, 0x46, 0x11, 0x32, 0x6f, 0xc8, 0x81, 0xc0, 0xd7, 0x03, 0xcf, + 0x53, 0xdd, 0x35, 0xf1, 0xa8, 0x0f, 0xa0, 0xa0, 0x2e, 0x6c, 0x6e, 0x4f, 0xe5, 0x3e, 0x6c, 0x78, + 0xd4, 0x17, 0xc7, 0x48, 0x76, 0x56, 0x16, 0x55, 0x84, 0x47, 0xd4, 0xe7, 0x0f, 0x19, 0x4f, 0x35, + 0x58, 0xca, 0xc8, 0x2f, 0xa7, 0xf4, 0x9b, 0xb0, 
0x99, 0xa2, 0x11, 0xdb, 0xe4, 0x2e, 0xa7, 0x83, + 0xd0, 0xd1, 0x71, 0x10, 0xed, 0x24, 0x17, 0xef, 0x44, 0xbf, 0x05, 0xa5, 0xe8, 0xae, 0x44, 0xa1, + 0x11, 0xaa, 0x42, 0x53, 0xea, 0x97, 0x43, 0x6c, 0x22, 0xb9, 0xcf, 0x98, 0xaf, 0xac, 0x5f, 0x0e, + 0x74, 0x96, 0x08, 0x4c, 0x12, 0xcd, 0xc8, 0x6d, 0x28, 0xa8, 0xc0, 0xa4, 0xfc, 0x71, 0x51, 0xbb, + 0xe8, 0x08, 0x23, 0x55, 0xd8, 0x2e, 0x92, 0x71, 0x2b, 0x5e, 0x26, 0x97, 0x5c, 0xe6, 0xa7, 0x50, + 0x0c, 0x83, 0x4f, 0x1a, 0x25, 0xe4, 0x0a, 0x97, 0x96, 0xa1, 0x84, 0x5a, 0x24, 0x66, 0x14, 0xd6, + 0x14, 0xd8, 0x3d, 0x87, 0x59, 0x66, 0xec, 0x82, 0xb8, 0x66, 0xd1, 0xa8, 0xc8, 0x17, 0xf7, 0x42, + 0xff, 0xd2, 0xdf, 0x83, 0x75, 0xb9, 0xd7, 0xb9, 0x21, 0x6e, 0x1e, 0xb4, 0xfe, 0x43, 0x83, 0x62, + 0x08, 0x1f, 0x73, 0x99, 0x52, 0x87, 0xc8, 0x7d, 0xdd, 0x43, 0xbc, 0xfc, 0x90, 0xf4, 0x2e, 0x10, + 0xb4, 0x14, 0x73, 0xec, 0x72, 0xdb, 0xe9, 0x99, 0xf2, 0x2e, 0x64, 0x26, 0x58, 0xc5, 0x37, 0xc7, + 0xf8, 0xe2, 0x48, 0xcc, 0xbf, 0x7d, 0x19, 0xca, 0x89, 0x2e, 0x17, 0x29, 0xc0, 0xea, 0x03, 0xf6, + 0xac, 0xba, 0x42, 0xca, 0x50, 0x30, 0x18, 0xf6, 0x08, 0xaa, 0xda, 0xfe, 0x57, 0x05, 0xa8, 0x1c, + 0x34, 0x0f, 0xdb, 0x07, 0x9e, 0x37, 0xb0, 0xbb, 0x88, 0x67, 0xe4, 0x13, 0xc8, 0x63, 0x9d, 0x9c, + 0xe1, 0xfb, 0x4e, 0x3d, 0x4b, 0xc3, 0x89, 0x18, 0xb0, 0x86, 0xe5, 0x34, 0xc9, 0xf2, 0xd9, 0xa7, + 0x9e, 0xa9, 0x0f, 0x25, 0x36, 0x89, 0x06, 0x97, 0xe1, 0x6b, 0x50, 0x3d, 0x4b, 0x73, 0x8a, 0x7c, + 0x06, 0xa5, 0xb8, 0x4e, 0xce, 0xfa, 0x8d, 0xa8, 0x9e, 0xb9, 0x6d, 0x25, 0xe4, 0xc7, 0x95, 0x41, + 0xd6, 0x4f, 0x13, 0xf5, 0xcc, 0xfd, 0x1a, 0xf2, 0x04, 0x0a, 0x61, 0x0d, 0x96, 0xed, 0x2b, 0x4e, + 0x3d, 0x63, 0x4b, 0x49, 0x5c, 0x9f, 0x2c, 0x9d, 0xb3, 0x7c, 0xaa, 0xaa, 0x67, 0xea, 0x9b, 0x91, + 0xc7, 0xb0, 0xae, 0x92, 0xdf, 0x4c, 0x5f, 0x7a, 0xea, 0xd9, 0x1a, 0x45, 0x42, 0xc9, 0x71, 0x73, + 0x22, 0xeb, 0xe7, 0xb9, 0x7a, 0xe6, 0x86, 0x21, 0xa1, 0x00, 0x89, 0x7a, 0x3a, 0xf3, 0x77, 0xb7, + 0x7a, 0xf6, 0x46, 0x20, 0xf9, 0x31, 0x14, 0xa3, 0xaa, 0x29, 0xe3, 0x97, 0xb4, 0x7a, 0xd6, 0x5e, + 0x5c, 0xb3, 0xfd, 0x9f, 0xbf, 0x6d, 0x6b, 0xbf, 0x3d, 0xdd, 0xd6, 0xbe, 0x38, 0xdd, 0xd6, 0xbe, + 0x3c, 0xdd, 0xd6, 0xfe, 0x74, 0xba, 0xad, 0xfd, 0xf5, 0x74, 0x5b, 0xfb, 0xc3, 0xdf, 0xb7, 0xb5, + 0x1f, 0xbd, 0xd3, 0xb3, 0x79, 0x7f, 0xd4, 0x69, 0x74, 0xdd, 0xe1, 0x5e, 0x2c, 0x30, 0xf9, 0x18, + 0x7f, 0xd4, 0xee, 0xac, 0x63, 0xc0, 0xfa, 0xf6, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xba, + 0xf5, 0xba, 0xe9, 0x1e, 0x00, 0x00, +} diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index eaa550054..d6816e1da 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -1,24 +1,22 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: abci/types/types.proto -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" - _ "github.com/tendermint/tendermint/crypto/merkle" - _ "github.com/tendermint/tendermint/libs/kv" - math "math" - math_rand "math/rand" - testing "testing" - time "time" -) +package types // import "github.com/tendermint/tendermint/abci/types" + +import testing "testing" +import math_rand "math/rand" +import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/golang/protobuf/ptypes/duration" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/tendermint/tendermint/crypto/merkle" +import _ "github.com/tendermint/tendermint/libs/kv" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index d47e892c2..8b0f89412 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -310,7 +310,7 @@ FOR_LOOP: // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := state.Validators.VerifyCommit( + err := state.Voters.VerifyCommit( chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("Error in validation", "err", err) diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index 334cdf942..4c5bb394a 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -351,7 +351,7 @@ func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCo message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, proof) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof) return block } diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index 1aba26b35..e427e12f2 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -420,7 +420,7 @@ func (bcR *BlockchainReactor) processBlock() error { // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. 
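The reactor hunks above move commit verification onto the new voter set and turn proposer selection into a ValidatorSet method. A minimal sketch of the resulting call sites, using only the signatures visible in this diff; the helper name and wrapper are illustrative, not part of the patch:

package sketch

import (
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// verifyThenPickProposer mirrors the changed call sites: commits are verified
// against state.Voters, while the proposer is still drawn from the full
// validator set via the new SelectProposer method (keyed by the last proof hash).
func verifyThenPickProposer(
	state sm.State,
	chainID string,
	blockID types.BlockID,
	height int64,
	round int,
	commit *types.Commit,
) (*types.Validator, error) {
	if err := state.Voters.VerifyCommit(chainID, blockID, height, commit); err != nil {
		return nil, err
	}
	return state.Validators.SelectProposer(state.LastProofHash, height, round), nil
}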
- err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit) + err = bcR.state.Voters.VerifyCommit(chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("error during commit verification", "err", err, "first", first.Height, "second", second.Height) diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index 2d1f5f130..da4d6064a 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -423,7 +423,7 @@ func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCo message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, proof) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof) return block } diff --git a/blockchain/v2/processor_context.go b/blockchain/v2/processor_context.go index 7e96a3a69..010022002 100644 --- a/blockchain/v2/processor_context.go +++ b/blockchain/v2/processor_context.go @@ -39,7 +39,7 @@ func (pc pContext) tmState() state.State { } func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommit(chainID, blockID, height, commit) + return pc.state.Voters.VerifyCommit(chainID, blockID, height, commit) } func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 081fcb4a5..88ca0e1de 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -428,7 +428,7 @@ func makeTxs(height int64) (txs []types.Tx) { func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCommit *types.Commit) *types.Block { message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) - proposerAddr := types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address + proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr, 0, proof) return block } diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index a1ddc743a..16fec9790 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -178,8 +178,8 @@ func TestByzantine(t *testing.T) { // find proposer of current height and round from State func findProposer(state *State) (int, *types.Validator) { - proposer := types.SelectProposer(state.Validators, state.state.LastProofHash, state.Height, state.Round) - return state.Validators.GetByAddress(proposer.PubKey.Address()) + proposer := state.Validators.SelectProposer(state.state.LastProofHash, state.Height, state.Round) + return state.Voters.GetByAddress(proposer.PubKey.Address()) } //------------------------------- diff --git a/consensus/common_test.go b/consensus/common_test.go index d99267704..9c079f178 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -422,7 +422,7 @@ func forceProposer(cs *State, vals []*validatorStub, index []int, height []int64 curVal = vals[theOthers(index[j])] mustBe = false } - if curVal.GetPubKey().Equals(types.SelectProposer(cs.Validators, currentHash, height[j], round[j]).PubKey) != + if curVal.GetPubKey().Equals(cs.Validators.SelectProposer(currentHash, height[j], round[j]).PubKey) != 
mustBe { allMatch = false break diff --git a/consensus/metrics.go b/consensus/metrics.go index 5fa27118a..b4bb7f652 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -19,28 +19,29 @@ type Metrics struct { // Height of the chain. Height metrics.Gauge - // ValidatorLastSignedHeight of a validator. - ValidatorLastSignedHeight metrics.Gauge + // VoterLastSignedHeight of a voter. + VoterLastSignedHeight metrics.Gauge // Number of rounds. Rounds metrics.Gauge - // Number of validators. - Validators metrics.Gauge - // Total power of all validators. - ValidatorsPower metrics.Gauge - // Power of a validator. - ValidatorPower metrics.Gauge - // Amount of blocks missed by a validator. - ValidatorMissedBlocks metrics.Gauge - // Number of validators who did not sign. - MissingValidators metrics.Gauge - // Total power of the missing validators. - MissingValidatorsPower metrics.Gauge - // Number of validators who tried to double sign. - ByzantineValidators metrics.Gauge - // Total power of the byzantine validators. - ByzantineValidatorsPower metrics.Gauge + // ValidatorOrVoter: voter + // Number of voters. + Voters metrics.Gauge + // Total power of all voters. + VotersPower metrics.Gauge + // Power of a voter. + VoterPower metrics.Gauge + // Amount of blocks missed by a voter. + VoterMissedBlocks metrics.Gauge + // Number of voters who did not sign. + MissingVoters metrics.Gauge + // Total power of the missing voters. + MissingVotersPower metrics.Gauge + // Number of voters who tried to double sign. + ByzantineVoters metrics.Gauge + // Total power of the byzantine voters. + ByzantineVotersPower metrics.Gauge // Time between this and the last block. BlockIntervalSeconds metrics.Gauge @@ -82,59 +83,59 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Number of rounds.", }, labels).With(labelsAndValues...), - Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Voters: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validators", - Help: "Number of validators.", + Name: "voters", + Help: "Number of voters.", }, labels).With(labelsAndValues...), - ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VoterLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validator_last_signed_height", - Help: "Last signed height for a validator", + Name: "voter_last_signed_height", + Help: "Last signed height for a voter", }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VoterMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validator_missed_blocks", - Help: "Total missed blocks for a validator", + Name: "voter_missed_blocks", + Help: "Total missed blocks for a voter", }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VotersPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validators_power", - Help: "Total power of all validators.", + Name: "voters_power", + Help: "Total power of all voters.", }, labels).With(labelsAndValues...), - ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VoterPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: 
MetricsSubsystem, - Name: "validator_power", - Help: "Power of a validator", + Name: "voter_power", + Help: "Power of a voter", }, append(labels, "validator_address")).With(labelsAndValues...), - MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + MissingVoters: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "missing_validators", - Help: "Number of validators who did not sign.", + Name: "missing_voters", + Help: "Number of voters who did not sign.", }, labels).With(labelsAndValues...), - MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + MissingVotersPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "missing_validators_power", - Help: "Total power of the missing validators.", + Name: "missing_voters_power", + Help: "Total power of the missing voters.", }, labels).With(labelsAndValues...), - ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + ByzantineVoters: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "byzantine_validators", - Help: "Number of validators who tried to double sign.", + Name: "byzantine_voters", + Help: "Number of voters who tried to double sign.", }, labels).With(labelsAndValues...), - ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + ByzantineVotersPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "byzantine_validators_power", - Help: "Total power of the byzantine validators.", + Name: "byzantine_voters_power", + Help: "Total power of the byzantine voters.", }, labels).With(labelsAndValues...), BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ @@ -188,18 +189,18 @@ func NopMetrics() *Metrics { return &Metrics{ Height: discard.NewGauge(), - ValidatorLastSignedHeight: discard.NewGauge(), + VoterLastSignedHeight: discard.NewGauge(), Rounds: discard.NewGauge(), - Validators: discard.NewGauge(), - ValidatorsPower: discard.NewGauge(), - ValidatorPower: discard.NewGauge(), - ValidatorMissedBlocks: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), + Voters: discard.NewGauge(), + VotersPower: discard.NewGauge(), + VoterPower: discard.NewGauge(), + VoterMissedBlocks: discard.NewGauge(), + MissingVoters: discard.NewGauge(), + MissingVotersPower: discard.NewGauge(), + ByzantineVoters: discard.NewGauge(), + ByzantineVotersPower: discard.NewGauge(), BlockIntervalSeconds: discard.NewGauge(), diff --git a/consensus/reactor.go b/consensus/reactor.go index 3711dd7cf..f42f0731c 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -310,9 +310,9 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *VoteMessage: cs := conR.conS cs.mtx.RLock() - height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() + height, voterSize, lastCommitSize := cs.Height, cs.Voters.Size(), cs.LastCommit.Size() cs.mtx.RUnlock() - ps.EnsureVoteBitArrays(height, valSize) + ps.EnsureVoteBitArrays(height, voterSize) ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(msg.Vote) @@ -1119,7 +1119,7 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.Si } // 'round': A round for which we have a +2/3 commit. 
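As the Receive hunk above shows, peer vote bit-arrays are now sized by the voter set rather than the validator set, because only voters can sign at a given height. A small sketch of that sizing rule, reusing only the VoterSet.Size and bits.NewBitArray calls already present in this diff (the wrapper function is illustrative):

package sketch

import (
	"github.com/tendermint/tendermint/libs/bits"
	"github.com/tendermint/tendermint/types"
)

// voteBitArraysFor allocates the per-round prevote/precommit bit-arrays for a
// height from the number of voters, matching EnsureVoteBitArrays above.
func voteBitArraysFor(voters *types.VoterSet) (prevotes, precommits *bits.BitArray) {
	n := voters.Size()
	return bits.NewBitArray(n), bits.NewBitArray(n)
}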
-func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) { +func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numVoters int) { if ps.PRS.Height != height { return } @@ -1143,37 +1143,37 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida if round == ps.PRS.Round { ps.PRS.CatchupCommit = ps.PRS.Precommits } else { - ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + ps.PRS.CatchupCommit = bits.NewBitArray(numVoters) } } // EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking // what votes this peer has received. -// NOTE: It's important to make sure that numValidators actually matches -// what the node sees as the number of validators for height. -func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { +// NOTE: It's important to make sure that numVoters actually matches +// what the node sees as the number of voters for height. +func (ps *PeerState) EnsureVoteBitArrays(height int64, numVoters int) { ps.mtx.Lock() defer ps.mtx.Unlock() - ps.ensureVoteBitArrays(height, numValidators) + ps.ensureVoteBitArrays(height, numVoters) } -func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { +func (ps *PeerState) ensureVoteBitArrays(height int64, numVoters int) { if ps.PRS.Height == height { if ps.PRS.Prevotes == nil { - ps.PRS.Prevotes = bits.NewBitArray(numValidators) + ps.PRS.Prevotes = bits.NewBitArray(numVoters) } if ps.PRS.Precommits == nil { - ps.PRS.Precommits = bits.NewBitArray(numValidators) + ps.PRS.Precommits = bits.NewBitArray(numVoters) } if ps.PRS.CatchupCommit == nil { - ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + ps.PRS.CatchupCommit = bits.NewBitArray(numVoters) } if ps.PRS.ProposalPOL == nil { - ps.PRS.ProposalPOL = bits.NewBitArray(numValidators) + ps.PRS.ProposalPOL = bits.NewBitArray(numVoters) } } else if ps.PRS.Height == height+1 { if ps.PRS.LastCommit == nil { - ps.PRS.LastCommit = bits.NewBitArray(numValidators) + ps.PRS.LastCommit = bits.NewBitArray(numVoters) } } } diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 50654c124..076d6ee0b 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -363,48 +363,48 @@ func TestReactorVotingPowerChange(t *testing.T) { val1PubKey := css[0].privValidator.GetPubKey() val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey) updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) - previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower := css[0].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[0].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Fatalf( "expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, - css[0].GetRoundState().LastValidators.TotalVotingPower()) + css[0].GetRoundState().LastVoters.TotalVotingPower()) } updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) - previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower = 
css[0].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[0].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Fatalf( "expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, - css[0].GetRoundState().LastValidators.TotalVotingPower()) + css[0].GetRoundState().LastVoters.TotalVotingPower()) } updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) - previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower = css[0].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[0].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Fatalf( "expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, - css[0].GetRoundState().LastValidators.TotalVotingPower()) + css[0].GetRoundState().LastVoters.TotalVotingPower()) } } @@ -469,18 +469,18 @@ func TestReactorValidatorSetChanges(t *testing.T) { updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower := css[nVals].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1) waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1) waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css) waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css) - if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[nVals].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Errorf( "expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, - css[nVals].GetRoundState().LastValidators.TotalVotingPower()) + css[nVals].GetRoundState().LastVoters.TotalVotingPower()) } //--------------------------------------------------------------------------- diff --git a/consensus/replay.go b/consensus/replay.go index 1453849cc..4ad89200e 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -312,7 +312,7 @@ func (h *Handshaker) ReplayBlocks( Time: h.genDoc.GenesisTime, ChainId: h.genDoc.ChainID, ConsensusParams: csParams, - Validators: nextVals, + Validators: nextVals, // ValidatorOrVoter: validator AppStateBytes: h.genDoc.AppState, } res, err := proxyApp.Consensus().InitChainSync(req) @@ -328,7 +328,10 @@ func (h *Handshaker) ReplayBlocks( return nil, err } state.Validators = types.NewValidatorSet(vals) + state.Voters = types.ToVoterAll(state.Validators) + // Should 
sync it with MakeGenesisState() state.NextValidators = types.NewValidatorSet(vals) + state.NextVoters = types.SelectVoter(state.NextValidators, h.genDoc.Hash()) } else if len(h.genDoc.Validators) == 0 { // If validator set is not set in genesis and still empty after InitChain, exit. return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") diff --git a/consensus/replay_file.go b/consensus/replay_file.go index b8b8c51da..8e301e13c 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -248,8 +248,8 @@ func (pb *playback) replayConsoleLoop() int { switch tokens[1] { case "short": fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step) - case "validators": - fmt.Println(rs.Validators) + case "voters": + fmt.Println(rs.Voters) case "proposal": fmt.Println(rs.Proposal) case "proposal_block": diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 6753731e0..973511ae2 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -310,8 +310,8 @@ var ( var modes = []uint{0, 1, 2} func getProposerIdx(state *State, height int64, round int) (int, *types.Validator) { - proposer := types.SelectProposer(state.Validators, state.state.LastProofHash, height, round) - return state.Validators.GetByAddress(proposer.PubKey.Address()) + proposer := state.Validators.SelectProposer(state.state.LastProofHash, height, round) + return state.Voters.GetByAddress(proposer.PubKey.Address()) } func createProposalBlock(cs *State, proposerState *State, round int) (*types.Block, *types.PartSet) { @@ -811,7 +811,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { const appVersion = 0x0 stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - state.LastValidators = state.Validators.Copy() + state.LastVoters = state.Voters.Copy() // mode = 0 for committing all the blocks blocks := makeBlocks(3, &state, privVal) store.chain = blocks @@ -899,7 +899,7 @@ func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.Bloc message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, proof) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof) } type badApp struct { diff --git a/consensus/state.go b/consensus/state.go index b1969efdd..23ba3e477 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -245,6 +245,7 @@ func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { } // GetValidators returns a copy of the current validators. +// ValidatorOrVoter: validator func (cs *State) GetValidators() (int64, []*types.Validator) { cs.mtx.RLock() defer cs.mtx.RUnlock() @@ -489,7 +490,7 @@ func (cs *State) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) + lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastVoters) if !lastPrecommits.HasTwoThirdsMajority() { panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") } @@ -527,7 +528,7 @@ func (cs *State) updateToState(state sm.State) { } // Reset fields based on state. 
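For reference, the ReplayBlocks hunk above initializes the new voter sets right after InitChain: the current voters are the full validator set (ToVoterAll), and the next voters are selected from the next validators using the genesis document hash. A condensed sketch of that initialization, using only helpers that appear in this diff; the function name and the []byte hash parameter are assumptions:

package sketch

import (
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// seedVotersFromInitChain mirrors the genesis path above: vals come from the
// ABCI InitChain response and genesisHash from the genesis document.
func seedVotersFromInitChain(state *sm.State, vals []*types.Validator, genesisHash []byte) {
	state.Validators = types.NewValidatorSet(vals)
	state.Voters = types.ToVoterAll(state.Validators) // every validator votes at genesis
	state.NextValidators = types.NewValidatorSet(vals)
	state.NextVoters = types.SelectVoter(state.NextValidators, genesisHash) // sampled for the next height
}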
- validators := state.Validators + voters := state.Voters lastPrecommits := (*types.VoteSet)(nil) if cs.CommitRound > -1 && cs.Votes != nil { if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() { @@ -553,7 +554,8 @@ func (cs *State) updateToState(state sm.State) { cs.StartTime = cs.config.Commit(cs.CommitTime) } - cs.Validators = validators + cs.Validators = state.Validators.Copy() + cs.Voters = state.Voters.Copy() cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil @@ -563,10 +565,10 @@ func (cs *State) updateToState(state sm.State) { cs.ValidRound = -1 cs.ValidBlock = nil cs.ValidBlockParts = nil - cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, voters) cs.CommitRound = -1 cs.LastCommit = lastPrecommits - cs.LastValidators = state.LastValidators + cs.LastVoters = state.LastVoters cs.TriggeredTimeoutPrecommit = false cs.state = state @@ -828,7 +830,7 @@ func (cs *State) enterNewRound(height int64, round int) { logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Select the current height and round Proposer - cs.Proposer = types.SelectProposer(cs.Validators, cs.state.LastProofHash, height, round) + cs.Proposer = cs.Validators.SelectProposer(cs.state.LastProofHash, height, round) // Setup new round // we don't fire newStep for this step, @@ -917,8 +919,8 @@ func (cs *State) enterPropose(height int64, round int) { // if not a validator, we're done address := cs.privValidator.GetPubKey().Address() - if !cs.Validators.HasAddress(address) { - logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators) + if !cs.Voters.HasAddress(address) { + logger.Debug("This node is not a validator", "addr", address, "vals", cs.Voters) return } logger.Debug("This node is a validator") @@ -1463,14 +1465,14 @@ func (cs *State) finalizeCommit(height int64) { } func (cs *State) recordMetrics(height int64, block *types.Block) { - cs.metrics.Validators.Set(float64(cs.Validators.Size())) - cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower())) + cs.metrics.Voters.Set(float64(cs.Voters.Size())) + cs.metrics.VotersPower.Set(float64(cs.Voters.TotalVotingPower())) var ( - missingValidators int - missingValidatorsPower int64 + missingVoters int + missingVotersPower int64 ) - // height=0 -> MissingValidators and MissingValidatorsPower are both 0. + // height=0 -> MissingVoters and MissingVotersPower are both 0. // Remember that the first LastCommit is intentionally empty, so it's not // fair to increment missing validators number. if height > 1 { @@ -1478,44 +1480,44 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { // after first block. 
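The updateToState and enterPropose hunks above make the proposer/voter distinction explicit: a node proposes when SelectProposer picks it from the full validator set, but it may only vote when its address is in the voter set. A minimal sketch of those two checks, with illustrative helper names and signatures as shown in this diff:

package sketch

import (
	"bytes"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/types"
)

// isRoundProposer reports whether addr is the validator selected by
// SelectProposer for this height and round, seeded by the last proof hash.
func isRoundProposer(vals *types.ValidatorSet, proofHash []byte, height int64, round int, addr crypto.Address) bool {
	return bytes.Equal(vals.SelectProposer(proofHash, height, round).Address, addr)
}

// mayVote reports whether addr belongs to the voter set for the current height.
func mayVote(voters *types.VoterSet, addr crypto.Address) bool {
	return voters.HasAddress(addr)
}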
var ( commitSize = block.LastCommit.Size() - valSetLen = len(cs.LastValidators.Validators) + valSetLen = len(cs.LastVoters.Voters) ) if commitSize != valSetLen { panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", - commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators)) + commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastVoters.Voters)) } - for i, val := range cs.LastValidators.Validators { + for i, val := range cs.LastVoters.Voters { commitSig := block.LastCommit.Signatures[i] if commitSig.Absent() { - missingValidators++ - missingValidatorsPower += val.VotingPower + missingVoters++ + missingVotersPower += val.VotingPower } if cs.privValidator != nil && bytes.Equal(val.Address, cs.privValidator.GetPubKey().Address()) { label := []string{ "validator_address", val.Address.String(), } - cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) + cs.metrics.VoterPower.With(label...).Set(float64(val.VotingPower)) if commitSig.ForBlock() { - cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) + cs.metrics.VoterLastSignedHeight.With(label...).Set(float64(height)) } else { - cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) + cs.metrics.VoterMissedBlocks.With(label...).Add(float64(1)) } } } } - cs.metrics.MissingValidators.Set(float64(missingValidators)) - cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower)) + cs.metrics.MissingVoters.Set(float64(missingVoters)) + cs.metrics.MissingVotersPower.Set(float64(missingVotersPower)) - cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence))) - byzantineValidatorsPower := int64(0) + cs.metrics.ByzantineVoters.Set(float64(len(block.Evidence.Evidence))) + byzantineVotersPower := int64(0) for _, ev := range block.Evidence.Evidence { - if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil { - byzantineValidatorsPower += val.VotingPower + if _, val := cs.Voters.GetByAddress(ev.Address()); val != nil { + byzantineVotersPower += val.VotingPower } } - cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower)) + cs.metrics.ByzantineVotersPower.Set(float64(byzantineVotersPower)) if height > 1 { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) @@ -1551,7 +1553,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { } // If consensus does not enterNewRound yet, cs.Proposer may be nil or prior proposer, so don't use cs.Proposer - proposer := types.SelectProposer(cs.Validators, cs.state.LastProofHash, proposal.Height, proposal.Round) + proposer := cs.Validators.SelectProposer(cs.state.LastProofHash, proposal.Height, proposal.Round) // Verify signature if !proposer.PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) { @@ -1852,7 +1854,7 @@ func (cs *State) signVote( cs.wal.FlushAndSync() addr := cs.privValidator.GetPubKey().Address() - valIndex, _ := cs.Validators.GetByAddress(addr) + valIndex, _ := cs.Voters.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, @@ -1889,7 +1891,7 @@ func (cs *State) voteTime() time.Time { // sign the vote and publish on internalMsgQueue func (cs *State) signAddVote(msgType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { // if we don't have a key or we're not in the validator set, do nothing - if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) { + if cs.privValidator == nil || 
!cs.Voters.HasAddress(cs.privValidator.GetPubKey().Address()) { return nil } vote, err := cs.signVote(msgType, hash, header) diff --git a/consensus/state_test.go b/consensus/state_test.go index af91e80aa..a639e5ce8 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -80,7 +80,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewRound(newRoundCh, height+1, 0) prop = cs1.GetRoundState().Proposer - addr := types.SelectProposer(cs1.Validators, cs1.state.LastProofHash, cs1.Height, cs1.Round).PubKey.Address() + addr := cs1.Validators.SelectProposer(cs1.state.LastProofHash, cs1.Height, cs1.Round).PubKey.Address() if !bytes.Equal(prop.Address, addr) { panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address)) } @@ -104,10 +104,10 @@ func TestStateProposerSelection2(t *testing.T) { // everyone just votes nil. we get a new proposer each round for i := 0; i < len(vss); i++ { prop := cs1.GetRoundState().Proposer - addr := types.SelectProposer(cs1.Validators, cs1.state.LastProofHash, height, i+round).PubKey.Address() + addr := cs1.Validators.SelectProposer(cs1.state.LastProofHash, height, i+round).PubKey.Address() correctProposer := addr if !bytes.Equal(prop.Address, correctProposer) { - idx, _ := cs1.Validators.GetByAddress(addr) + idx, _ := cs1.Voters.GetByAddress(addr) panic(fmt.Sprintf( "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", idx, diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 14cd04bf8..34a0d548f 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -36,9 +36,9 @@ We let each peer provide us with up to 2 unexpected "catchup" rounds. One for their LastCommit round, and another for the official commit round. 
*/ type HeightVoteSet struct { - chainID string - height int64 - valSet *types.ValidatorSet + chainID string + height int64 + voterSet *types.VoterSet mtx sync.Mutex round int // max tracked round @@ -46,20 +46,20 @@ type HeightVoteSet struct { peerCatchupRounds map[p2p.ID][]int // keys: peer.ID; values: at most 2 rounds } -func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { +func NewHeightVoteSet(chainID string, height int64, voterSet *types.VoterSet) *HeightVoteSet { hvs := &HeightVoteSet{ chainID: chainID, } - hvs.Reset(height, valSet) + hvs.Reset(height, voterSet) return hvs } -func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { +func (hvs *HeightVoteSet) Reset(height int64, voterSet *types.VoterSet) { hvs.mtx.Lock() defer hvs.mtx.Unlock() hvs.height = height - hvs.valSet = valSet + hvs.voterSet = voterSet hvs.roundVoteSets = make(map[int]RoundVoteSet) hvs.peerCatchupRounds = make(map[p2p.ID][]int) @@ -100,8 +100,8 @@ func (hvs *HeightVoteSet) addRound(round int) { panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.voterSet) + precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.voterSet) hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 93c73f1a1..c18a82b08 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -20,7 +20,7 @@ func TestMain(m *testing.M) { } func TestPeerCatchupRounds(t *testing.T) { - valSet, privVals := types.RandValidatorSet(10, 1) + _, valSet, privVals := types.RandVoterSet(10, 1) hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index 35c58f698..527556f7c 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -73,6 +73,7 @@ type RoundState struct { // Subjective time when +2/3 precommits for Block at Round were found CommitTime time.Time `json:"commit_time"` Validators *types.ValidatorSet `json:"validators"` + Voters *types.VoterSet `json:"voters"` Proposer *types.Validator `json:"proposer"` Proposal *types.Proposal `json:"proposal"` ProposalBlock *types.Block `json:"proposal_block"` @@ -86,12 +87,12 @@ type RoundState struct { ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. // Last known block parts of POL metnioned above. 
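HeightVoteSet, as reworked above, is now constructed from a VoterSet, so every per-round VoteSet it allocates tracks voters rather than validators. A brief sketch using the RandVoterSet test helper from this diff; its return order is inferred from the test hunk above and should be treated as an assumption:

package sketch

import (
	cstypes "github.com/tendermint/tendermint/consensus/types"
	"github.com/tendermint/tendermint/types"
)

// newTestHeightVotes builds a HeightVoteSet for tests. RandVoterSet is assumed
// to return (validator set, voter set, private validators), matching its use
// in height_vote_set_test.go above.
func newTestHeightVotes(chainID string, height int64) (*cstypes.HeightVoteSet, []types.PrivValidator) {
	_, voters, privVals := types.RandVoterSet(10, 1)
	return cstypes.NewHeightVoteSet(chainID, height, voters), privVals
}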
- ValidBlockParts *types.PartSet `json:"valid_block_parts"` - Votes *HeightVoteSet `json:"votes"` - CommitRound int `json:"commit_round"` // - LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 - LastValidators *types.ValidatorSet `json:"last_validators"` - TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` + ValidBlockParts *types.PartSet `json:"valid_block_parts"` + Votes *HeightVoteSet `json:"votes"` + CommitRound int `json:"commit_round"` // + LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 + LastVoters *types.VoterSet `json:"last_voters"` + TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` } // Compressed version of the RoundState for use in RPC @@ -113,7 +114,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { } addr := rs.Proposer.Address - idx, _ := rs.Validators.GetByAddress(addr) + idx, _ := rs.Voters.GetByAddress(addr) return RoundStateSimple{ HeightRoundStep: fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step), @@ -132,7 +133,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { // NewRoundEvent returns the RoundState with proposer information as an event. func (rs *RoundState) NewRoundEvent() types.EventDataNewRound { addr := rs.Proposer.Address - idx, _ := rs.Validators.GetByAddress(addr) + idx, _ := rs.Voters.GetByAddress(addr) return types.EventDataNewRound{ Height: rs.Height, @@ -182,7 +183,7 @@ func (rs *RoundState) StringIndented(indent string) string { %s H:%v R:%v S:%v %s StartTime: %v %s CommitTime: %v -%s Validators: %v +%s Voters: %v %s Proposer: %v %s Proposal: %v %s ProposalBlock: %v %v @@ -192,12 +193,12 @@ func (rs *RoundState) StringIndented(indent string) string { %s ValidBlock: %v %v %s Votes: %v %s LastCommit: %v -%s LastValidators:%v +%s LastVoters:%v %s}`, indent, rs.Height, rs.Round, rs.Step, indent, rs.StartTime, indent, rs.CommitTime, - indent, rs.Validators.StringIndented(indent+" "), + indent, rs.Voters.StringIndented(indent+" "), indent, rs.Proposer.String(), indent, rs.Proposal, indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(), @@ -207,7 +208,7 @@ func (rs *RoundState) StringIndented(indent string) string { indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(), indent, rs.Votes.StringIndented(indent+" "), indent, rs.LastCommit.StringShort(), - indent, rs.LastValidators.StringIndented(indent+" "), + indent, rs.LastVoters.StringIndented(indent+" "), indent) } diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index f5f5f72c0..67b9f0ebe 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -16,7 +16,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { // Random validators nval, ntxs := 100, 100 - vset, _ := types.RandValidatorSet(nval, 1) + _, vset, _ := types.RandVoterSet(nval, 1) commitSigs := make([]types.CommitSig, nval) blockID := types.BlockID{ Hash: tmrand.Bytes(tmhash.Size), @@ -46,7 +46,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { LastBlockID: blockID, LastCommitHash: tmrand.Bytes(20), DataHash: tmrand.Bytes(20), - ValidatorsHash: tmrand.Bytes(20), + VotersHash: tmrand.Bytes(20), ConsensusHash: tmrand.Bytes(20), AppHash: tmrand.Bytes(20), LastResultsHash: tmrand.Bytes(20), @@ -71,7 +71,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { rs := &RoundState{ StartTime: tmtime.Now(), CommitTime: tmtime.Now(), - Validators: vset, + Voters: vset, Proposal: proposal, ProposalBlock: block, 
ProposalBlockParts: parts, @@ -81,7 +81,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { ValidBlockParts: parts, Votes: nil, // TODO LastCommit: nil, // TODO - LastValidators: vset, + LastVoters: vset, } b.StartTimer() diff --git a/crypto/merkle/merkle.pb.go b/crypto/merkle/merkle.pb.go index 80823dd2b..6c061d62a 100644 --- a/crypto/merkle/merkle.pb.go +++ b/crypto/merkle/merkle.pb.go @@ -1,17 +1,16 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: crypto/merkle/merkle.proto -package merkle - -import ( - bytes "bytes" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) +package merkle // import "github.com/tendermint/tendermint/crypto/merkle" + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import bytes "bytes" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -22,7 +21,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // ProofOp defines an operation used for calculating Merkle root // The data could be arbitrary format, providing nessecary data @@ -40,7 +39,7 @@ func (m *ProofOp) Reset() { *m = ProofOp{} } func (m *ProofOp) String() string { return proto.CompactTextString(m) } func (*ProofOp) ProtoMessage() {} func (*ProofOp) Descriptor() ([]byte, []int) { - return fileDescriptor_9c1c2162d560d38e, []int{0} + return fileDescriptor_merkle_c5e19a3b0ab40da4, []int{0} } func (m *ProofOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -50,15 +49,15 @@ func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ProofOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProofOp.Merge(m, src) +func (dst *ProofOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofOp.Merge(dst, src) } func (m *ProofOp) XXX_Size() int { return m.Size() @@ -92,7 +91,7 @@ func (m *ProofOp) GetData() []byte { // Proof is Merkle proof defined by the list of ProofOps type Proof struct { - Ops []ProofOp `protobuf:"bytes,1,rep,name=ops,proto3" json:"ops"` + Ops []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -102,7 +101,7 @@ func (m *Proof) Reset() { *m = Proof{} } func (m *Proof) String() string { return proto.CompactTextString(m) } func (*Proof) ProtoMessage() {} func (*Proof) Descriptor() ([]byte, []int) { - return fileDescriptor_9c1c2162d560d38e, []int{1} + return fileDescriptor_merkle_c5e19a3b0ab40da4, []int{1} } func (m *Proof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -112,15 +111,15 @@ func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Proof.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Proof) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_Proof.Merge(m, src) +func (dst *Proof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proof.Merge(dst, src) } func (m *Proof) XXX_Size() int { return m.Size() @@ -142,28 +141,6 @@ func init() { proto.RegisterType((*ProofOp)(nil), "tendermint.crypto.merkle.ProofOp") proto.RegisterType((*Proof)(nil), "tendermint.crypto.merkle.Proof") } - -func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_9c1c2162d560d38e) } - -var fileDescriptor_9c1c2162d560d38e = []byte{ - // 230 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c, - 0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25, - 0xf9, 0x42, 0x12, 0x25, 0xa9, 0x79, 0x29, 0xa9, 0x45, 0xb9, 0x99, 0x79, 0x25, 0x7a, 0x10, 0x65, - 0x7a, 0x10, 0x79, 0x29, 0xb5, 0x92, 0x8c, 0xcc, 0xa2, 0x94, 0xf8, 0x82, 0xc4, 0xa2, 0x92, 0x4a, - 0x7d, 0xb0, 0x62, 0xfd, 0xf4, 0xfc, 0xf4, 0x7c, 0x04, 0x0b, 0x62, 0x82, 0x92, 0x33, 0x17, 0x7b, - 0x40, 0x51, 0x7e, 0x7e, 0x9a, 0x7f, 0x81, 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, - 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, 0x2d, 0x24, 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, - 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0x62, 0x82, 0x54, 0xa5, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x83, - 0x85, 0xc0, 0x6c, 0x25, 0x27, 0x2e, 0x56, 0xb0, 0x21, 0x42, 0x96, 0x5c, 0xcc, 0xf9, 0x05, 0xc5, - 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x8a, 0x7a, 0xb8, 0x5c, 0xa7, 0x07, 0xb5, 0xd2, 0x89, - 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0x90, 0x1e, 0x27, 0x97, 0x1f, 0x0f, 0xe5, 0x18, 0x57, 0x3c, - 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xa3, - 0xf4, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0xa6, 0x21, 0x33, - 0x51, 0x42, 0x27, 0x89, 0x0d, 0xec, 0x2b, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xcc, - 0x2c, 0x91, 0x35, 0x01, 0x00, 0x00, -} - func (this *ProofOp) Equal(that interface{}) bool { if that == nil { return this == nil @@ -232,7 +209,7 @@ func (this *Proof) Equal(that interface{}) bool { func (m *ProofOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -240,47 +217,38 @@ func (m *ProofOp) Marshal() (dAtA []byte, err error) { } func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProofOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data))) + i += 
copy(dAtA[i:], m.Data) } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *Proof) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -288,46 +256,36 @@ func (m *Proof) Marshal() (dAtA []byte, err error) { } func (m *Proof) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Proof) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Ops) > 0 { - for iNdEx := len(m.Ops) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ops[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMerkle(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Ops { dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int { - offset -= sovMerkle(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp { this := &ProofOp{} @@ -350,7 +308,7 @@ func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp { func NewPopulatedProof(r randyMerkle, easy bool) *Proof { this := &Proof{} - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { v3 := r.Intn(5) this.Ops = make([]ProofOp, v3) for i := 0; i < v3; i++ { @@ -479,7 +437,14 @@ func (m *Proof) Size() (n int) { } func sovMerkle(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozMerkle(x uint64) (n int) { return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -499,7 +464,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -527,7 +492,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -537,9 +502,6 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMerkle } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -559,7 +521,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -568,9 +530,6 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMerkle } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -593,7 +552,7 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -602,9 +561,6 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthMerkle } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -622,9 +578,6 @@ func (m *ProofOp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMerkle } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthMerkle - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -653,7 +606,7 @@ func (m *Proof) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -681,7 +634,7 @@ func (m *Proof) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -690,9 +643,6 @@ func (m *Proof) Unmarshal(dAtA []byte) error { return ErrInvalidLengthMerkle } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -710,9 +660,6 @@ func (m *Proof) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthMerkle } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthMerkle - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -729,7 +676,6 @@ func (m *Proof) Unmarshal(dAtA []byte) error { func skipMerkle(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -761,8 +707,10 @@ func skipMerkle(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -779,34 +727,74 @@ func skipMerkle(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthMerkle } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMerkle + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMerkle(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthMerkle - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMerkle = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_c5e19a3b0ab40da4) } + +var fileDescriptor_merkle_c5e19a3b0ab40da4 = []byte{ + // 230 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c, + 0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25, 
+ 0xf9, 0x42, 0x12, 0x25, 0xa9, 0x79, 0x29, 0xa9, 0x45, 0xb9, 0x99, 0x79, 0x25, 0x7a, 0x10, 0x65, + 0x7a, 0x10, 0x79, 0x29, 0xb5, 0x92, 0x8c, 0xcc, 0xa2, 0x94, 0xf8, 0x82, 0xc4, 0xa2, 0x92, 0x4a, + 0x7d, 0xb0, 0x62, 0xfd, 0xf4, 0xfc, 0xf4, 0x7c, 0x04, 0x0b, 0x62, 0x82, 0x92, 0x33, 0x17, 0x7b, + 0x40, 0x51, 0x7e, 0x7e, 0x9a, 0x7f, 0x81, 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, + 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, 0x2d, 0x24, 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, + 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0x62, 0x82, 0x54, 0xa5, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x83, + 0x85, 0xc0, 0x6c, 0x25, 0x27, 0x2e, 0x56, 0xb0, 0x21, 0x42, 0x96, 0x5c, 0xcc, 0xf9, 0x05, 0xc5, + 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x8a, 0x7a, 0xb8, 0x5c, 0xa7, 0x07, 0xb5, 0xd2, 0x89, + 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0x90, 0x1e, 0x27, 0x97, 0x1f, 0x0f, 0xe5, 0x18, 0x57, 0x3c, + 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xa3, + 0xf4, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0xa6, 0x21, 0x33, + 0x51, 0x42, 0x27, 0x89, 0x0d, 0xec, 0x2b, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xcc, + 0x2c, 0x91, 0x35, 0x01, 0x00, 0x00, +} diff --git a/evidence/pool.go b/evidence/pool.go index 62b0a3325..d2e998f10 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -106,8 +106,8 @@ func (evpool *Pool) AddEvidence(evidence types.Evidence) (err error) { // fetch the validator and return its voting power as its priority // TODO: something better ? - valset, _ := sm.LoadValidators(evpool.stateDB, evidence.Height()) - _, val := valset.GetByAddress(evidence.Address()) + valSet, _, _ := sm.LoadValidators(evpool.stateDB, evidence.Height()) + _, val := valSet.GetByAddress(evidence.Address()) priority := val.VotingPower added := evpool.store.AddNewEvidence(evidence, priority) diff --git a/libs/kv/kvpair.go b/libs/kv/kvpair.go index 8eebae606..2474b2e47 100644 --- a/libs/kv/kvpair.go +++ b/libs/kv/kvpair.go @@ -35,4 +35,3 @@ func (kvs Pairs) Less(i, j int) bool { } func (kvs Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } func (kvs Pairs) Sort() { sort.Sort(kvs) } - diff --git a/libs/kv/types.pb.go b/libs/kv/types.pb.go index 7a6e6e12e..9ed37d23b 100644 --- a/libs/kv/types.pb.go +++ b/libs/kv/types.pb.go @@ -1,18 +1,17 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: libs/kv/types.proto -package kv +package kv // import "github.com/tendermint/tendermint/libs/kv" -import ( - bytes "bytes" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import bytes "bytes" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -24,7 +23,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
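The merkle.pb.go rewrite above (and the analogous libs/kv/types.pb.go changes that follow) appears to come from regenerating the files with an older protoc-gen-gogo: the reverse-order MarshalToSizedBuffer is replaced by a front-to-back MarshalTo, and encodeVarintMerkle/encodeVarintTypes again write at the current offset and return the next free one. A standalone sketch of that forward varint writer, with a worked example; it is illustrative, not part of the patch:

package main

import "fmt"

// putUvarint mirrors the encodeVarint helpers in the regenerated files: it
// writes v as a protobuf varint starting at offset and returns the next offset.
func putUvarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v) // final 7-bit group, continuation bit clear
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 0, 300)
	fmt.Printf("% X\n", buf[:n]) // AC 02: 300 = 0b10_0101100 -> 0xAC, 0x02
}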
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Pair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -38,7 +37,7 @@ func (m *Pair) Reset() { *m = Pair{} } func (m *Pair) String() string { return proto.CompactTextString(m) } func (*Pair) ProtoMessage() {} func (*Pair) Descriptor() ([]byte, []int) { - return fileDescriptor_31432671d164f444, []int{0} + return fileDescriptor_types_2603e618194ed4c7, []int{0} } func (m *Pair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -48,15 +47,15 @@ func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Pair.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *Pair) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pair.Merge(m, src) +func (dst *Pair) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pair.Merge(dst, src) } func (m *Pair) XXX_Size() int { return m.Size() @@ -85,26 +84,6 @@ func init() { proto.RegisterType((*Pair)(nil), "tendermint.libs.kv.Pair") golang_proto.RegisterType((*Pair)(nil), "tendermint.libs.kv.Pair") } - -func init() { proto.RegisterFile("libs/kv/types.proto", fileDescriptor_31432671d164f444) } -func init() { golang_proto.RegisterFile("libs/kv/types.proto", fileDescriptor_31432671d164f444) } - -var fileDescriptor_31432671d164f444 = []byte{ - // 182 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xc9, 0x4c, 0x2a, - 0xd6, 0xcf, 0x2e, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, - 0x12, 0x2a, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x03, 0xc9, 0xeb, 0x65, - 0x97, 0x49, 0xa9, 0x95, 0x64, 0x64, 0x16, 0xa5, 0xc4, 0x17, 0x24, 0x16, 0x95, 0x54, 0xea, 0x83, - 0x95, 0xe9, 0xa7, 0xe7, 0xa7, 0xe7, 0x23, 0x58, 0x10, 0xbd, 0x4a, 0x7a, 0x5c, 0x2c, 0x01, 0x89, - 0x99, 0x45, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, - 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04, 0x13, 0x58, 0x0c, 0xc2, - 0x71, 0x72, 0xfb, 0xf1, 0x50, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x1d, 0x8f, 0xe4, 0x18, 0x4f, - 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x03, 0x8f, 0xe5, 0x18, - 0xa3, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0x8e, 0x42, - 0x66, 0x42, 0xdd, 0x9f, 0xc4, 0x06, 0xb6, 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x3a, - 0xdc, 0xba, 0xd1, 0x00, 0x00, 0x00, -} - func (this *Pair) Equal(that interface{}) bool { if that == nil { return this == nil @@ -138,7 +117,7 @@ func (this *Pair) Equal(that interface{}) bool { func (m *Pair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -146,46 +125,36 @@ func (m *Pair) Marshal() (dAtA []byte, err error) { } func (m *Pair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func NewPopulatedPair(r randyTypes, easy bool) *Pair { this := &Pair{} @@ -298,7 +267,14 @@ func (m *Pair) Size() (n int) { } func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -318,7 +294,7 @@ func (m *Pair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -346,7 +322,7 @@ func (m *Pair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -355,9 +331,6 @@ func (m *Pair) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -380,7 +353,7 @@ func (m *Pair) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -389,9 +362,6 @@ func (m *Pair) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -409,9 +379,6 @@ func (m *Pair) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -428,7 +395,6 @@ func (m *Pair) Unmarshal(dAtA []byte) error { func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -460,8 +426,10 @@ func skipTypes(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -478,34 +446,72 @@ func skipTypes(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthTypes } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if 
err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("libs/kv/types.proto", fileDescriptor_types_2603e618194ed4c7) } +func init() { golang_proto.RegisterFile("libs/kv/types.proto", fileDescriptor_types_2603e618194ed4c7) } + +var fileDescriptor_types_2603e618194ed4c7 = []byte{ + // 182 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xc9, 0x4c, 0x2a, + 0xd6, 0xcf, 0x2e, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x2a, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x03, 0xc9, 0xeb, 0x65, + 0x97, 0x49, 0xa9, 0x95, 0x64, 0x64, 0x16, 0xa5, 0xc4, 0x17, 0x24, 0x16, 0x95, 0x54, 0xea, 0x83, + 0x95, 0xe9, 0xa7, 0xe7, 0xa7, 0xe7, 0x23, 0x58, 0x10, 0xbd, 0x4a, 0x7a, 0x5c, 0x2c, 0x01, 0x89, + 0x99, 0x45, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, + 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04, 0x13, 0x58, 0x0c, 0xc2, + 0x71, 0x72, 0xfb, 0xf1, 0x50, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x1d, 0x8f, 0xe4, 0x18, 0x4f, + 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x03, 0x8f, 0xe5, 0x18, + 0xa3, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0x8e, 0x42, + 0x66, 0x42, 0xdd, 0x9f, 0xc4, 0x06, 0xb6, 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x3a, + 0xdc, 0xba, 0xd1, 0x00, 0x00, 0x00, +} diff --git a/libs/kv/typespb_test.go b/libs/kv/typespb_test.go index dc45bf7f2..026d81ffb 100644 --- a/libs/kv/typespb_test.go +++ b/libs/kv/typespb_test.go @@ -1,20 +1,18 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: libs/kv/types.proto -package kv +package kv // import "github.com/tendermint/tendermint/libs/kv" -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - math "math" - math_rand "math/rand" - testing "testing" - time "time" -) +import testing "testing" +import math_rand "math/rand" +import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" // Reference imports to suppress errors if they are not otherwise used. 
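In the same regeneration, the closed-form varint size based on math_bits.Len64 reverts to an explicit shift loop in sovMerkle and sovTypes. Both count the number of 7-bit groups needed to encode x, so the change is purely cosmetic; a quick self-contained check of that equivalence (illustrative, not part of the patch):

package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the loop form used by the regenerated files in this patch.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sovClosed is the closed form the loop replaces.
func sovClosed(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 1 << 21, 1<<64 - 1} {
		fmt.Println(x, sovLoop(x), sovClosed(x)) // the two sizes always agree
	}
}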
var _ = proto.Marshal diff --git a/libs/rand/sampling.go b/libs/rand/sampling.go index 8ba7b4518..8aaf411c7 100644 --- a/libs/rand/sampling.go +++ b/libs/rand/sampling.go @@ -9,6 +9,7 @@ import ( type Candidate interface { Priority() uint64 LessThan(other Candidate) bool + IncreaseWin() } const uint64Mask = uint64(0x7FFFFFFFFFFFFFFF) @@ -65,6 +66,55 @@ func RandomSamplingWithPriority( totalPriority, actualTotalPriority, seed, sampleSize, undrawn, undrawn, thresholds[undrawn], len(candidates))) } +const MaxSamplingLoopTry = 1000 + +// `RandomSamplingToMax` elects voters among candidates so it updates wins of candidates +// Voters can be elected by a maximum `limitCandidates`. +// However, if the likely candidates are less than the `limitCandidates`, +// the number of voters may be less than the `limitCandidates`. +// This is to prevent falling into an infinite loop. +func RandomSamplingToMax( + seed uint64, candidates []Candidate, limitCandidates int, totalPriority uint64) uint64 { + + if len(candidates) < limitCandidates { + panic("The number of candidates cannot be less limitCandidate") + } + + candidates = sort(candidates) + totalSampling := uint64(0) + winCandidates := make(map[Candidate]bool) + for len(winCandidates) < limitCandidates && totalSampling < MaxSamplingLoopTry { + threshold := uint64(float64(nextRandom(&seed)&uint64Mask) / float64(uint64Mask+1) * float64(totalPriority)) + cumulativePriority := uint64(0) + found := false + for _, candidate := range candidates { + if threshold < cumulativePriority+candidate.Priority() { + if !winCandidates[candidate] { + winCandidates[candidate] = true + } + candidate.IncreaseWin() + totalSampling++ + found = true + break + } + cumulativePriority += candidate.Priority() + } + + if !found { + panic(fmt.Sprintf("Cannot find random sample. 
totalPriority may be wrong: totalPriority=%d, "+ + "actualTotalPriority=%d, threshold=%d", totalPriority, sumTotalPriority(candidates), threshold)) + } + } + return totalSampling +} + +func sumTotalPriority(candidates []Candidate) (sum uint64) { + for _, candi := range candidates { + sum += candi.Priority() + } + return +} + // SplitMix64 // http://xoshiro.di.unimi.it/splitmix64.c // diff --git a/libs/rand/sampling_test.go b/libs/rand/sampling_test.go index b090f75ab..3ed78e0dc 100644 --- a/libs/rand/sampling_test.go +++ b/libs/rand/sampling_test.go @@ -9,6 +9,7 @@ import ( type Element struct { ID uint32 + Win uint64 Weight uint64 } @@ -24,6 +25,10 @@ func (e *Element) LessThan(other Candidate) bool { return e.ID < o.ID } +func (e *Element) IncreaseWin() { + e.Win++ +} + func TestRandomSamplingWithPriority(t *testing.T) { candidates := newCandidates(100, func(i int) uint64 { return uint64(i) }) @@ -84,10 +89,70 @@ func TestRandomSamplingPanicCase(t *testing.T) { } } +func numberOfWinnersAndWins(candidate []Candidate) (winners uint64, totalWins uint64) { + for _, c := range candidate { + if c.(*Element).Win > 0 { + winners++ + totalWins += c.(*Element).Win + } + } + return +} + +func TestRandomSamplingToMax(t *testing.T) { + candidates1 := newCandidates(100, func(i int) uint64 { return uint64(i) }) + voters1 := RandomSamplingToMax(0, candidates1, 10, sumTotalPriority(candidates1)) + winners, totalWins := numberOfWinnersAndWins(candidates1) + if winners != 10 { + t.Errorf(fmt.Sprintf("unexpected sample size: %d", winners)) + } + if voters1 != totalWins { + t.Errorf(fmt.Sprintf("unexpected totalWins: %d", voters1)) + } + + candidates2 := newCandidates(100, func(i int) uint64 { return uint64(i) }) + _ = RandomSamplingToMax(0, candidates2, 10, sumTotalPriority(candidates2)) + + if !sameCandidates(candidates1, candidates2) { + t.Error("The two voter sets elected by the same seed are different.") + } + + candidates3 := newCandidates(0, func(i int) uint64 { return uint64(i) }) + voters3 := RandomSamplingToMax(0, candidates3, 0, sumTotalPriority(candidates3)) + if voters3 != 0 { + t.Errorf(fmt.Sprintf("unexpected totalWins: %d", voters3)) + } +} + +func TestRandomSamplingToMaxPanic(t *testing.T) { + type Case struct { + Candidates []Candidate + TotalPriority uint64 + } + + cases := [...]*Case{ + // specified total priority is greater than actual one + {newCandidates(10, func(i int) uint64 { return 1 }), 50000}, + // limitCandidates is greater than the number of candidates + {newCandidates(5, func(i int) uint64 { return 10 }), 5}, + } + + for i, c := range cases { + func() { + defer func() { + if recover() == nil { + t.Errorf("expected panic didn't happen in case %d", i+1) + } + }() + RandomSamplingToMax(0, c.Candidates, 10, c.TotalPriority) + }() + } +} + func newCandidates(length int, prio func(int) uint64) (candidates []Candidate) { candidates = make([]Candidate, length) for i := 0; i < length; i++ { - candidates[i] = &Element{uint32(i), prio(i)} + candidates[i] = &Element{uint32(i), 0, prio(i)} } return } @@ -102,6 +167,9 @@ func sameCandidates(c1 []Candidate, c2 []Candidate) bool { if c1[i].(*Element).ID != c2[i].(*Element).ID { return false } + if c1[i].(*Element).Win != c2[i].(*Element).Win { + return false + } } return true } diff --git a/lite/base_verifier.go b/lite/base_verifier.go index 6a2a50ab5..b7fb4cb4d 100644 --- a/lite/base_verifier.go +++ b/lite/base_verifier.go @@ -12,26 +12,26 @@ import ( var _ Verifier = (*BaseVerifier)(nil) // BaseVerifier lets us check the validity of 
SignedHeaders at height or -// later, requiring sufficient votes (> 2/3) from the given valset. +// later, requiring sufficient votes (> 2/3) from the given voterSet. // To verify blocks produced by a blockchain with mutable validator sets, // use the DynamicVerifier. // TODO: Handle unbonding time. type BaseVerifier struct { - chainID string - height int64 - valset *types.ValidatorSet + chainID string + height int64 + voterSet *types.VoterSet } // NewBaseVerifier returns a new Verifier initialized with a validator set at // some height. -func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier { +func NewBaseVerifier(chainID string, height int64, valset *types.VoterSet) *BaseVerifier { if valset.IsNilOrEmpty() { - panic("NewBaseVerifier requires a valid valset") + panic("NewBaseVerifier requires a valid voterSet") } return &BaseVerifier{ - chainID: chainID, - height: height, - valset: valset, + chainID: chainID, + height: height, + voterSet: valset, } } @@ -56,9 +56,9 @@ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { } // We can't verify with the wrong validator set. - if !bytes.Equal(signedHeader.ValidatorsHash, - bv.valset.Hash()) { - return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bv.valset.Hash()) + if !bytes.Equal(signedHeader.VotersHash, + bv.voterSet.Hash()) { + return lerr.ErrUnexpectedValidators(signedHeader.VotersHash, bv.voterSet.Hash()) } // Do basic sanity checks. @@ -68,7 +68,7 @@ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { } // Check commit signatures. - err = bv.valset.VerifyCommit( + err = bv.voterSet.VerifyCommit( bv.chainID, signedHeader.Commit.BlockID, signedHeader.Height, signedHeader.Commit) if err != nil { diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go index 2ef1203fb..4a971dc2d 100644 --- a/lite/base_verifier_test.go +++ b/lite/base_verifier_test.go @@ -14,14 +14,14 @@ func TestBaseCert(t *testing.T) { keys := genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) + vals := types.ToVoterAll(keys.ToValidators(20, 10)) // and a Verifier based on our known set chainID := "test-static" cert := NewBaseVerifier(chainID, 2, vals) cases := []struct { keys privKeys - vals *types.ValidatorSet + vals *types.VoterSet height int64 first, last int // who actually signs proper bool // true -> expect no error @@ -37,7 +37,7 @@ func TestBaseCert(t *testing.T) { {keys, vals, 4, 0, len(keys) - 1, false, false}, // Changing the power a little bit breaks the static validator. // The sigs are enough, but the validator hash is unknown. - {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, + {keys, types.ToVoterAll(keys.ToValidators(20, 11)), 5, 0, len(keys), false, true}, } for _, tc := range cases { diff --git a/lite/client/provider.go b/lite/client/provider.go index e24dbe0e4..104b22723 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -97,11 +97,11 @@ func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes. } // Implements Provider. 
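Back in libs/rand/sampling.go a few hunks above, RandomSamplingToMax is the piece that actually elects voters: it repeatedly draws a priority-weighted threshold, increments the winning candidate's win count, and stops once limitCandidates distinct winners exist or MaxSamplingLoopTry (1000) draws have been made. A hedged usage sketch mirroring the Element type from sampling_test.go; the import path follows the module path used elsewhere in this patch, and names such as voter are illustrative:

package main

import (
	"fmt"

	tmrand "github.com/tendermint/tendermint/libs/rand"
)

// voter is a minimal Candidate implementation, analogous to Element in the tests.
type voter struct {
	id     uint32
	win    uint64
	weight uint64
}

func (v *voter) Priority() uint64                      { return v.weight }
func (v *voter) LessThan(other tmrand.Candidate) bool  { return v.id < other.(*voter).id }
func (v *voter) IncreaseWin()                          { v.win++ }

func main() {
	// 100 candidates with weights 1..100; elect at most 10 distinct voters.
	candidates := make([]tmrand.Candidate, 100)
	total := uint64(0)
	for i := range candidates {
		w := uint64(i + 1)
		candidates[i] = &voter{id: uint32(i), weight: w}
		total += w
	}

	// The return value is the number of draws, which equals the sum of all wins.
	draws := tmrand.RandomSamplingToMax(0, candidates, 10, total)
	fmt.Println("total wins distributed:", draws)
	for _, c := range candidates {
		if v := c.(*voter); v.win > 0 {
			fmt.Printf("voter %d: weight=%d wins=%d\n", v.id, v.weight, v.win)
		}
	}
}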
-func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - return p.getValidatorSet(chainID, height) +func (p *provider) VoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { + return p.getVoterSet(chainID, height) } -func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { +func (p *provider) getVoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { if chainID != p.chainID { err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) return @@ -110,12 +110,15 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. err = fmt.Errorf("expected height >= 1, got height %v", height) return } - res, err := p.client.Validators(&height, 0, 0) + + var res *ctypes.ResultVoters + res, err = p.client.Voters(&height, 0, 0) + if err != nil { // TODO pass through other types of errors. return nil, lerr.ErrUnknownValidators(chainID, height) } - valset = types.NewValidatorSet(res.Validators) + valset = types.NewVoterSet(res.Voters) return } @@ -123,13 +126,13 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { // Get the validators. - valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) + valset, err := p.getVoterSet(signedHeader.ChainID, signedHeader.Height) if err != nil { return lite.FullCommit{}, err } // Get the next validators. - nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) + nextValset, err := p.getVoterSet(signedHeader.ChainID, signedHeader.Height+1) if err != nil { return lite.FullCommit{}, err } diff --git a/lite/commit.go b/lite/commit.go index 6cd354173..e808a0d0f 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -14,48 +14,48 @@ import ( // revert to block-by-block updating of lite Verifier's latest validator set, // even in the face of arbitrarily large power changes. type FullCommit struct { - SignedHeader types.SignedHeader `json:"signed_header"` - Validators *types.ValidatorSet `json:"validator_set"` - NextValidators *types.ValidatorSet `json:"next_validator_set"` + SignedHeader types.SignedHeader `json:"signed_header"` + Voters *types.VoterSet `json:"voter_set"` + NextVoters *types.VoterSet `json:"next_validator_set"` } // NewFullCommit returns a new FullCommit. -func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, voterSet, nextVoterSet *types.VoterSet) FullCommit { return FullCommit{ - SignedHeader: signedHeader, - Validators: valset, - NextValidators: nextValset, + SignedHeader: signedHeader, + Voters: voterSet, + NextVoters: nextVoterSet, } } // Validate the components and check for consistency. -// This also checks to make sure that Validators actually +// This also checks to make sure that Voters actually // signed the SignedHeader.Commit. -// If > 2/3 did not sign the Commit from fc.Validators, it +// If > 2/3 did not sign the Commit from fc.Voters, it // is not a valid commit! func (fc FullCommit) ValidateFull(chainID string) error { // Ensure that Validators exists and matches the header. 
- if fc.Validators.Size() == 0 { - return errors.New("need FullCommit.Validators") + if fc.Voters.Size() == 0 { + return errors.New("need FullCommit.Voters") } if !bytes.Equal( - fc.SignedHeader.ValidatorsHash, - fc.Validators.Hash()) { - return fmt.Errorf("header has vhash %X but valset hash is %X", - fc.SignedHeader.ValidatorsHash, - fc.Validators.Hash(), + fc.SignedHeader.VotersHash, + fc.Voters.Hash()) { + return fmt.Errorf("header has vhash %X but voterSet hash is %X", + fc.SignedHeader.VotersHash, + fc.Voters.Hash(), ) } // Ensure that NextValidators exists and matches the header. - if fc.NextValidators.Size() == 0 { + if fc.NextVoters.Size() == 0 { return errors.New("need FullCommit.NextValidators") } if !bytes.Equal( - fc.SignedHeader.NextValidatorsHash, - fc.NextValidators.Hash()) { - return fmt.Errorf("header has next vhash %X but next valset hash is %X", - fc.SignedHeader.NextValidatorsHash, - fc.NextValidators.Hash(), + fc.SignedHeader.NextVotersHash, + fc.NextVoters.Hash()) { + return fmt.Errorf("header has next vhash %X but next voterSet hash is %X", + fc.SignedHeader.NextVotersHash, + fc.NextVoters.Hash(), ) } // Validate the header. @@ -65,7 +65,7 @@ func (fc FullCommit) ValidateFull(chainID string) error { } // Validate the signatures on the commit. hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit - return fc.Validators.VerifyCommit( + return fc.Voters.VerifyCommit( hdr.ChainID, cmt.BlockID, hdr.Height, cmt) } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 35f7270ae..6a12c3772 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -59,16 +59,16 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { // Save the fc.validators. // We might be overwriting what we already have, but // it makes the logic easier for now. - vsKey := validatorSetKey(fc.ChainID(), fc.Height()) - vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Validators) + vsKey := voterSetKey(fc.ChainID(), fc.Height()) + vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Voters) if err != nil { return err } batch.Set(vsKey, vsBz) // Save the fc.NextValidators. - nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) - nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextValidators) + nvsKey := voterSetKey(fc.ChainID(), fc.Height()+1) + nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextVoters) if err != nil { return err } @@ -148,12 +148,12 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int return FullCommit{}, lerr.ErrCommitNotFound() } -func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - return dbp.getValidatorSet(chainID, height) +func (dbp *DBProvider) VoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { + return dbp.getVoterSet(chainID, height) } -func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - vsBz, err := dbp.db.Get(validatorSetKey(chainID, height)) +func (dbp *DBProvider) getVoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { + vsBz, err := dbp.db.Get(voterSetKey(chainID, height)) if err != nil { return nil, err } @@ -166,7 +166,7 @@ func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *ty return } - // To test deep equality. This makes it easier to test for e.g. valset + // To test deep equality. This makes it easier to test for e.g. 
voterSet // equivalence using assert.Equal (tests for deep equality) in our tests, // which also tests for unexported/private field equivalence. valset.TotalVotingPower() @@ -177,22 +177,22 @@ func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *ty func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { var chainID = sh.ChainID var height = sh.Height - var valset, nextValset *types.ValidatorSet + var valset, nextValset *types.VoterSet // Load the validator set. - valset, err := dbp.getValidatorSet(chainID, height) + valset, err := dbp.getVoterSet(chainID, height) if err != nil { return FullCommit{}, err } // Load the next validator set. - nextValset, err = dbp.getValidatorSet(chainID, height+1) + nextValset, err = dbp.getVoterSet(chainID, height+1) if err != nil { return FullCommit{}, err } // Return filled FullCommit. return FullCommit{ - SignedHeader: sh, - Validators: valset, - NextValidators: nextValset, + SignedHeader: sh, + Voters: valset, + NextVoters: nextValset, }, nil } @@ -243,7 +243,7 @@ func signedHeaderKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) } -func validatorSetKey(chainID string, height int64) []byte { +func voterSetKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) } diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go index d4efdcbeb..7c27d7635 100644 --- a/lite/dynamic_verifier.go +++ b/lite/dynamic_verifier.go @@ -107,7 +107,7 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { } // Get the latest known full commit <= h-1 from our trusted providers. - // The full commit at h-1 contains the valset to sign for h. + // The full commit at h-1 contains the voterSet to sign for h. prevHeight := shdr.Height - 1 trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, prevHeight) if err != nil { @@ -115,38 +115,38 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { } // sync up to the prevHeight and assert our latest NextValidatorSet - // is the ValidatorSet for the SignedHeader + // is the VoterSet for the SignedHeader if trustedFC.Height() == prevHeight { - // Return error if valset doesn't match. + // Return error if voterSet doesn't match. if !bytes.Equal( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) { + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) { return lerr.ErrUnexpectedValidators( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) } } else { - // If valset doesn't match, try to update + // If voterSet doesn't match, try to update if !bytes.Equal( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) { + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) { // ... update. trustedFC, err = dv.updateToHeight(prevHeight) if err != nil { return err } - // Return error if valset _still_ doesn't match. - if !bytes.Equal(trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) { + // Return error if voterSet _still_ doesn't match. + if !bytes.Equal(trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) { return lerr.ErrUnexpectedValidators( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) } } } - // Verify the signed header using the matching valset. 
- cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextValidators) + // Verify the signed header using the matching voterSet. + cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextVoters) err = cert.Verify(shdr) if err != nil { return err @@ -160,7 +160,7 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { // See https://github.com/tendermint/tendermint/issues/3174. // Get the next validator set. - nextValset, err := dv.source.ValidatorSet(dv.chainID, shdr.Height+1) + nextValset, err := dv.source.VoterSet(dv.chainID, shdr.Height+1) if lerr.IsErrUnknownValidators(err) { // Ignore this error. return nil @@ -170,9 +170,9 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { // Create filled FullCommit. nfc := FullCommit{ - SignedHeader: shdr, - Validators: trustedFC.NextValidators, - NextValidators: nextValset, + SignedHeader: shdr, + Voters: trustedFC.NextVoters, + NextVoters: nextValset, } // Validate the full commit. This checks the cryptographic // signatures of Commit against Validators. @@ -191,8 +191,8 @@ func (dv *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error { if trustedFC.Height() >= sourceFC.Height() { panic("should not happen") } - err := trustedFC.NextValidators.VerifyFutureCommit( - sourceFC.Validators, + err := trustedFC.NextVoters.VerifyFutureCommit( + sourceFC.Voters, dv.chainID, sourceFC.SignedHeader.Commit.BlockID, sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit, ) diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 441010efb..424b7ea86 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -32,8 +32,8 @@ func TestInquirerValidPath(t *testing.T) { count := 50 fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) + vals := types.ToVoterAll(keys.ToValidators(vote, 0)) + nextVals := types.ToVoterAll(nkeys.ToValidators(vote, 0)) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) fcz[i] = keys.GenFullCommit( @@ -77,7 +77,7 @@ func TestDynamicVerify(t *testing.T) { trust := NewDBProvider("trust", dbm.NewMemDB()) source := NewDBProvider("source", dbm.NewMemDB()) - // 10 commits with one valset, 1 to change, + // 10 commits with one voterSet, 1 to change, // 10 commits with the next one n1, n2 := 10, 10 nCommits := n1 + n2 + 1 @@ -88,9 +88,9 @@ func TestDynamicVerify(t *testing.T) { chainID := "dynamic-verifier" power := int64(10) keys1 := genPrivKeys(5) - vals1 := keys1.ToValidators(power, 0) + vals1 := types.ToVoterAll(keys1.ToValidators(power, 0)) keys2 := genPrivKeys(5) - vals2 := keys2.ToValidators(power, 0) + vals2 := types.ToVoterAll(keys2.ToValidators(power, 0)) // make some commits with the first for i := 0; i < n1; i++ { @@ -126,7 +126,7 @@ func TestDynamicVerify(t *testing.T) { } -func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit { +func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.VoterSet, chainID string) FullCommit { height++ consHash := []byte("special-params") appHash := []byte(fmt.Sprintf("h=%d", height)) @@ -153,8 +153,8 @@ func TestInquirerVerifyHistorical(t *testing.T) { consHash := []byte("special-params") fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) + vals := types.ToVoterAll(keys.ToValidators(vote, 0)) + nextVals := 
types.ToVoterAll(nkeys.ToValidators(vote, 0)) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) @@ -236,8 +236,8 @@ func TestConcurrencyInquirerVerify(t *testing.T) { consHash := []byte("special-params") fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) + vals := types.ToVoterAll(keys.ToValidators(vote, 0)) + nextVals := types.ToVoterAll(nkeys.ToValidators(vote, 0)) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) diff --git a/lite/helpers.go b/lite/helpers.go index 29dd50b5b..5665d7250 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -56,7 +56,7 @@ func (pkz privKeys) ExtendSecp(n int) privKeys { return append(pkz, extra...) } -// ToValidators produces a valset from the set of keys. +// ToValidators produces a voterSet from the set of keys. // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). @@ -117,7 +117,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivK } func genHeader(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, @@ -125,18 +125,18 @@ func genHeader(chainID string, height int64, txs types.Txs, Time: tmtime.Now(), // LastBlockID // LastCommitHash - ValidatorsHash: valset.Hash(), - NextValidatorsHash: nextValset.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + VotersHash: valset.Hash(), + NextVotersHash: nextValset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) check := types.SignedHeader{ @@ -148,7 +148,7 @@ func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, // GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. 
func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int) FullCommit { header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) commit := types.SignedHeader{ diff --git a/lite/multiprovider.go b/lite/multiprovider.go index 364647a40..704dc5e60 100644 --- a/lite/multiprovider.go +++ b/lite/multiprovider.go @@ -71,11 +71,11 @@ func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight i return } -// ValidatorSet returns validator set at height as provided by the first +// VoterSet returns validator set at height as provided by the first // provider which has it, or an error otherwise. -func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { +func (mc *multiProvider) VoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { for _, p := range mc.providers { - valset, err = p.ValidatorSet(chainID, height) + valset, err = p.VoterSet(chainID, height) if err == nil { // TODO Log unexpected types of errors. return valset, nil diff --git a/lite/provider.go b/lite/provider.go index ebab16264..571fe9d93 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -14,9 +14,9 @@ type Provider interface { // If maxHeight is zero, returns the latest where minHeight <= height. LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) - // Get the valset that corresponds to chainID and height and return. + // Get the voterSet that corresponds to chainID and height and return. // Height must be >= 1. - ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) + VoterSet(chainID string, height int64) (*types.VoterSet, error) // Set a logger. SetLogger(logger log.Logger) diff --git a/lite/provider_test.go b/lite/provider_test.go index 98fff8cb4..ca79c8483 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -26,8 +26,8 @@ func (missingProvider) SaveFullCommit(FullCommit) error { return nil } func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { return FullCommit{}, lerr.ErrCommitNotFound() } -func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { - return nil, errors.New("missing validator set") +func (missingProvider) VoterSet(chainID string, height int64) (*types.VoterSet, error) { + return nil, errors.New("missing voter set") } func (missingProvider) SetLogger(_ log.Logger) {} @@ -54,7 +54,7 @@ func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { // Make a bunch of full commits. 
fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) + vals := types.ToVoterAll(keys.ToValidators(10, int64(count/2))) h := int64(20 + 10*i) fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) } @@ -72,8 +72,8 @@ func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) assert.Nil(err) assert.Equal(fc.SignedHeader, fc2.SignedHeader) - assert.Equal(fc.Validators, fc2.Validators) - assert.Equal(fc.NextValidators, fc2.NextValidators) + assert.Equal(fc.Voters, fc2.Voters) + assert.Equal(fc.NextVoters, fc2.NextVoters) } // Make sure we get the last hash if we overstep. @@ -118,7 +118,7 @@ func TestMultiLatestFullCommit(t *testing.T) { // Set a bunch of full commits. for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) + vals := types.ToVoterAll(keys.ToValidators(10, int64(count/2))) h := int64(10 * (i + 1)) fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) err := p2.SaveFullCommit(fc) diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 5fb51f0b3..53a28a3f1 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -132,9 +132,9 @@ func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, pro func makeValidatorsFunc(c rpcclient.Client) func( ctx *rpctypes.Context, height *int64, -) (*ctypes.ResultValidators, error) { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) { - return c.Validators(height, 0, 0) +) (*ctypes.ResultVoters, error) { + return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultVoters, error) { + return c.Voters(height, 0, 0) } } diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 34d9d1d4c..409704ef9 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -54,7 +54,7 @@ func _TestAppProofs(t *testing.T) { source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, 1, 1) require.NoError(err, "%#v", err) - cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Voters) // Wait for tx confirmation. done := make(chan int64) @@ -139,7 +139,7 @@ func TestTxProofs(t *testing.T) { source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%#v", err) - cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Voters) // First let's make sure a bogus transaction hash returns a valid non-existence proof. 
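The recurring pattern in these test hunks is to build a validator set from the private keys and then widen it to a voter set with types.ToVoterAll before generating headers and commits. A minimal sketch of that pattern as it would sit inside the lite test package; chainID, keys and the generator methods are the package's existing fixtures, and the weights and byte values are arbitrary.

// keys is the package's privKeys test fixture.
vals := types.ToVoterAll(keys.ToValidators(10, 0)) // equal weight, every validator becomes a voter
fc := keys.GenFullCommit(
	chainID, 1, nil, // height 1, no txs
	vals, vals, // voters at this height and the next
	[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"),
	0, len(keys), // all keys sign
)
// FullCommit now exposes Voters / NextVoters instead of Validators / NextValidators.
_ = fc.Voters
_ = fc.NextVoters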
key := types.Tx([]byte("bogus")).Hash() diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index cf9a0de6b..a47741848 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -18,9 +18,9 @@ var ( ) var hdrHeight11 = types.Header{ - Height: 11, - Time: testTime1, - ValidatorsHash: []byte("Tendermint"), + Height: 11, + Time: testTime1, + VotersHash: []byte("Tendermint"), } func TestValidateBlock(t *testing.T) { @@ -143,8 +143,8 @@ func TestValidateBlockMeta(t *testing.T) { { meta: &types.BlockMeta{ Header: types.Header{ - Height: 11, - ValidatorsHash: []byte("lite-test"), + Height: 11, + VotersHash: []byte("lite-test"), // TODO: should be able to use empty time after Amino upgrade Time: testTime1, }, @@ -159,15 +159,15 @@ func TestValidateBlockMeta(t *testing.T) { meta: &types.BlockMeta{ Header: types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime1, + VotersHash: []byte("Tendermint"), + Time: testTime1, }, }, signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, + VotersHash: []byte("Tendermint"), + Time: testTime2, }, Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, @@ -178,15 +178,15 @@ func TestValidateBlockMeta(t *testing.T) { meta: &types.BlockMeta{ Header: types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, + VotersHash: []byte("Tendermint"), + Time: testTime2, }, }, signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint-x"), - Time: testTime2, + VotersHash: []byte("Tendermint-x"), + Time: testTime2, }, Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, diff --git a/lite2/client.go b/lite2/client.go index b5ffca78c..83e64891e 100644 --- a/lite2/client.go +++ b/lite2/client.go @@ -113,7 +113,7 @@ type Client struct { // Highest trusted header from the store (height=H). latestTrustedHeader *types.SignedHeader // Highest validator set from the store (height=H). - latestTrustedVals *types.ValidatorSet + latestTrustedVals *types.VoterSet // See RemoveNoLongerTrustedHeadersPeriod option pruningSize uint16 @@ -239,7 +239,7 @@ func (c *Client) restoreTrustedHeaderAndVals() error { return errors.Wrap(err, "can't get last trusted header") } - trustedVals, err := c.trustedStore.ValidatorSet(lastHeight) + trustedVals, err := c.trustedStore.VoterSet(lastHeight) if err != nil { return errors.Wrap(err, "can't get last trusted validators") } @@ -360,9 +360,9 @@ func (c *Client) initializeWithTrustOptions(options TrustOptions) error { return err } - if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { + if !bytes.Equal(h.VotersHash, vals.Hash()) { return errors.Errorf("expected header's validators (%X) to match those that were supplied (%X)", - h.ValidatorsHash, + h.VotersHash, vals.Hash(), ) } @@ -417,12 +417,12 @@ func (c *Client) TrustedHeader(height int64) (*types.SignedHeader, error) { // - header signed by that validator set has not been verified yet // // Safe for concurrent use by multiple goroutines. 
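For reference, header fixtures like the validate_test ones above now set VotersHash where they previously set ValidatorsHash; nothing else about the literal changes. A minimal example with the same placeholder values as the test (testTime1 is the test's own fixture):

hdr := types.Header{
	Height:     11,
	Time:       testTime1,
	VotersHash: []byte("Tendermint"), // was ValidatorsHash
}
_ = hdr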
-func (c *Client) TrustedValidatorSet(height int64) (valSet *types.ValidatorSet, heightUsed int64, err error) { +func (c *Client) TrustedValidatorSet(height int64) (valSet *types.VoterSet, heightUsed int64, err error) { heightUsed, err = c.compareWithLatestHeight(height) if err != nil { return nil, heightUsed, err } - valSet, err = c.trustedStore.ValidatorSet(heightUsed) + valSet, err = c.trustedStore.VoterSet(heightUsed) if err != nil { return nil, heightUsed, err } @@ -522,10 +522,10 @@ func (c *Client) VerifyHeaderAtHeight(height int64, now time.Time) (*types.Signe // If the primary provides an invalid header (ErrInvalidHeader), it is rejected // and replaced by another provider until all are exhausted. // -// If, at any moment, SignedHeader or ValidatorSet are not found by the primary +// If, at any moment, SignedHeader or VoterSet are not found by the primary // provider, provider.ErrSignedHeaderNotFound / // provider.ErrValidatorSetNotFound error is returned. -func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error { +func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.VoterSet, now time.Time) error { if newHeader.Height <= 0 { return errors.New("negative or zero height") } @@ -545,7 +545,7 @@ func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.Vali return c.verifyHeader(newHeader, newVals, now) } -func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error { +func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.VoterSet, now time.Time) error { c.logger.Info("VerifyHeader", "height", newHeader.Height, "hash", hash2str(newHeader.Hash()), "vals", hash2str(newVals.Hash())) @@ -648,14 +648,14 @@ func (c *Client) cleanupAfter(height int64) error { func (c *Client) sequence( initiallyTrustedHeader *types.SignedHeader, newHeader *types.SignedHeader, - newVals *types.ValidatorSet, + newVals *types.VoterSet, now time.Time) error { var ( trustedHeader = initiallyTrustedHeader interimHeader *types.SignedHeader - interimVals *types.ValidatorSet + interimVals *types.VoterSet err error ) @@ -710,9 +710,9 @@ func (c *Client) sequence( // see VerifyHeader func (c *Client) bisection( initiallyTrustedHeader *types.SignedHeader, - initiallyTrustedVals *types.ValidatorSet, + initiallyTrustedVals *types.VoterSet, newHeader *types.SignedHeader, - newVals *types.ValidatorSet, + newVals *types.VoterSet, now time.Time) error { var ( @@ -769,9 +769,9 @@ func (c *Client) bisection( } } -func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.ValidatorSet) error { - if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return errors.Errorf("expected validator's hash %X, but got %X", h.ValidatorsHash, vals.Hash()) +func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.VoterSet) error { + if !bytes.Equal(h.VotersHash, vals.Hash()) { + return errors.Errorf("expected validator's hash %X, but got %X", h.VotersHash, vals.Hash()) } if err := c.trustedStore.SaveSignedHeaderAndValidatorSet(h, vals); err != nil { @@ -794,7 +794,7 @@ func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.V // fetch header and validators for the given height (0 - latest) from primary // provider. 
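A minimal sketch of how a lite2 Client caller reads trusted state after this change, mirroring the assertions in client_test.go further down: TrustedValidatorSet keeps its name but now returns a *types.VoterSet, whose hash is checked against the header's VotersHash. The wrapper function, package name and import alias are illustrative.

package example

import (
	"bytes"
	"fmt"

	lite "github.com/tendermint/tendermint/lite2"
)

// trustedVotersMatch is a hypothetical check that the trusted header and the
// trusted voter set stored at height agree with each other.
func trustedVotersMatch(c *lite.Client, height int64) error {
	h, err := c.TrustedHeader(height)
	if err != nil {
		return err
	}
	voters, usedHeight, err := c.TrustedValidatorSet(height) // returns *types.VoterSet now
	if err != nil {
		return err
	}
	if !bytes.Equal(h.VotersHash, voters.Hash()) {
		return fmt.Errorf("voter set at height %d does not match header VotersHash", usedHeight)
	}
	return nil
}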
-func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.ValidatorSet, error) { +func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.VoterSet, error) { h, err := c.signedHeaderFromPrimary(height) if err != nil { return nil, nil, errors.Wrapf(err, "failed to obtain the header #%d", height) @@ -1002,13 +1002,13 @@ func (c *Client) signedHeaderFromPrimary(height int64) (*types.SignedHeader, err return c.signedHeaderFromPrimary(height) } -// validatorSetFromPrimary retrieves the ValidatorSet from the primary provider +// validatorSetFromPrimary retrieves the VoterSet from the primary provider // at the specified height. Handles dropout by the primary provider after 5 // attempts by replacing it with an alternative provider. -func (c *Client) validatorSetFromPrimary(height int64) (*types.ValidatorSet, error) { +func (c *Client) validatorSetFromPrimary(height int64) (*types.VoterSet, error) { for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { c.providerMutex.Lock() - vals, err := c.primary.ValidatorSet(height) + vals, err := c.primary.VoterSet(height) c.providerMutex.Unlock() if err == nil || err == provider.ErrValidatorSetNotFound { return vals, err diff --git a/lite2/client_test.go b/lite2/client_test.go index 65ea55122..55fb24187 100644 --- a/lite2/client_test.go +++ b/lite2/client_test.go @@ -23,7 +23,7 @@ const ( var ( keys = genPrivKeys(4) - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10)) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) @@ -39,7 +39,7 @@ var ( Height: 1, Hash: h1.Hash(), } - valSet = map[int64]*types.ValidatorSet{ + valSet = map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: vals, @@ -62,12 +62,12 @@ var ( func TestClient_SequentialVerification(t *testing.T) { newKeys := genPrivKeys(4) - newVals := newKeys.ToValidators(10, 1) + newVals := types.ToVoterAll(newKeys.ToValidators(10, 1)) testCases := []struct { name string otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet + vals map[int64]*types.VoterSet initErr bool verifyErr bool }{ @@ -85,7 +85,7 @@ func TestClient_SequentialVerification(t *testing.T) { 1: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, }, true, @@ -126,7 +126,7 @@ func TestClient_SequentialVerification(t *testing.T) { { "bad: different validator set at height 3", headerSet, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: newVals, @@ -176,16 +176,16 @@ func TestClient_SequentialVerification(t *testing.T) { func TestClient_SkippingVerification(t *testing.T) { // required for 2nd test case newKeys := genPrivKeys(4) - newVals := newKeys.ToValidators(10, 1) + newVals := types.ToVoterAll(newKeys.ToValidators(10, 1)) // 1/3+ of vals, 2/3- of newVals transitKeys := keys.Extend(3) - transitVals := transitKeys.ToValidators(10, 1) + transitVals := types.ToVoterAll(transitKeys.ToValidators(10, 1)) testCases := []struct { name string otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet + vals map[int64]*types.VoterSet initErr bool verifyErr bool }{ @@ -209,7 +209,7 @@ func 
TestClient_SkippingVerification(t *testing.T) { 3: transitKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, transitVals, transitVals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(transitKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: transitVals, @@ -229,7 +229,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: newVals, @@ -249,7 +249,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: newVals, @@ -348,7 +348,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } @@ -395,7 +395,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } } @@ -432,7 +432,7 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } @@ -517,7 +517,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } // Check we no longer have 2nd header (+header2+). @@ -578,7 +578,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } // Check we no longer have invalid 2nd header (+header2+). 
@@ -613,7 +613,7 @@ func TestClient_Update(t *testing.T) { valSet, _, err := c.TrustedValidatorSet(3) assert.NoError(t, err) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } @@ -810,7 +810,7 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } @@ -839,7 +839,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { []byte("app_hash2"), []byte("cons_hash"), []byte("results_hash"), len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, }, @@ -852,7 +852,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { 2: h2, 3: {Header: nil, Commit: nil}, }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, }, diff --git a/lite2/provider/http/http.go b/lite2/provider/http/http.go index 130bf0a24..536591258 100644 --- a/lite2/provider/http/http.go +++ b/lite2/provider/http/http.go @@ -81,16 +81,16 @@ func (p *http) SignedHeader(height int64) (*types.SignedHeader, error) { return &commit.SignedHeader, nil } -// ValidatorSet fetches a ValidatorSet at the given height. Multiple HTTP +// VoterSet fetches a VoterSet at the given height. Multiple HTTP // requests might be required if the validator set size is over 100. -func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (p *http) VoterSet(height int64) (*types.VoterSet, error) { h, err := validateHeight(height) if err != nil { return nil, err } const maxPerPage = 100 - res, err := p.client.Validators(h, 0, maxPerPage) + res, err := p.client.Voters(h, 0, maxPerPage) if err != nil { // TODO: standartise errors on the RPC side if strings.Contains(err.Error(), "height must be less than or equal") { @@ -100,23 +100,23 @@ func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) { } var ( - vals = res.Validators + vals = res.Voters page = 1 ) // Check if there are more validators. - for len(res.Validators) == maxPerPage { - res, err = p.client.Validators(h, page, maxPerPage) + for len(res.Voters) == maxPerPage { + res, err = p.client.Voters(h, page, maxPerPage) if err != nil { return nil, err } - if len(res.Validators) > 0 { - vals = append(vals, res.Validators...) + if len(res.Voters) > 0 { + vals = append(vals, res.Voters...) 
} page++ } - return types.NewValidatorSet(vals), nil + return types.NewVoterSet(vals), nil } func validateHeight(height int64) (*int64, error) { diff --git a/lite2/provider/mock/deadmock.go b/lite2/provider/mock/deadmock.go index 77c474411..55bfa44b9 100644 --- a/lite2/provider/mock/deadmock.go +++ b/lite2/provider/mock/deadmock.go @@ -28,6 +28,6 @@ func (p *deadMock) SignedHeader(height int64) (*types.SignedHeader, error) { return nil, errors.New("no response from provider") } -func (p *deadMock) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (p *deadMock) VoterSet(height int64) (*types.VoterSet, error) { return nil, errors.New("no response from provider") } diff --git a/lite2/provider/mock/mock.go b/lite2/provider/mock/mock.go index 7ff7bc9a1..4ffc9c37f 100644 --- a/lite2/provider/mock/mock.go +++ b/lite2/provider/mock/mock.go @@ -11,12 +11,12 @@ import ( type mock struct { chainID string headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet + vals map[int64]*types.VoterSet } // New creates a mock provider with the given set of headers and validator // sets. -func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) provider.Provider { +func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.VoterSet) provider.Provider { return &mock{ chainID: chainID, headers: headers, @@ -53,7 +53,7 @@ func (p *mock) SignedHeader(height int64) (*types.SignedHeader, error) { return nil, provider.ErrSignedHeaderNotFound } -func (p *mock) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (p *mock) VoterSet(height int64) (*types.VoterSet, error) { if height == 0 && len(p.vals) > 0 { return p.vals[int64(len(p.vals))], nil } diff --git a/lite2/provider/provider.go b/lite2/provider/provider.go index 773e17e32..ba4d0cdad 100644 --- a/lite2/provider/provider.go +++ b/lite2/provider/provider.go @@ -22,14 +22,14 @@ type Provider interface { // error is returned. SignedHeader(height int64) (*types.SignedHeader, error) - // ValidatorSet returns the ValidatorSet that corresponds to height. + // VoterSet returns the VoterSet that corresponds to height. // // 0 - the latest. // height must be >= 0. // - // If the provider fails to fetch the ValidatorSet due to the IO or other + // If the provider fails to fetch the VoterSet due to the IO or other // issues, an error will be returned. - // If there's no ValidatorSet for the given height, ErrValidatorSetNotFound + // If there's no VoterSet for the given height, ErrValidatorSetNotFound // error is returned. 
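The mock provider constructor now takes a map of voter sets, which is how the lite2 client tests wire their fixtures. A minimal sketch of building such a mock inside the lite2 test package; chainID and bTime are the tests' existing fixtures, and the heights and byte values are arbitrary.

keys := genPrivKeys(4)
voters := types.ToVoterAll(keys.ToValidators(20, 10))
headers := map[int64]*types.SignedHeader{
	1: keys.GenSignedHeader(chainID, 1, bTime, nil, voters, voters,
		[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)),
}
voterSets := map[int64]*types.VoterSet{1: voters} // was map[int64]*types.ValidatorSet
primary := mock.New(chainID, headers, voterSets)
_ = primary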
- ValidatorSet(height int64) (*types.ValidatorSet, error) + VoterSet(height int64) (*types.VoterSet, error) } diff --git a/lite2/proxy/routes.go b/lite2/proxy/routes.go index f7d5cd25b..45c9ad41c 100644 --- a/lite2/proxy/routes.go +++ b/lite2/proxy/routes.go @@ -132,10 +132,10 @@ func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { } type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage int) (*ctypes.ResultValidators, error) + page, perPage int) (*ctypes.ResultVoters, error) func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage int) (*ctypes.ResultValidators, error) { + return func(ctx *rpctypes.Context, height *int64, page, perPage int) (*ctypes.ResultVoters, error) { return c.Validators(height, page, perPage) } } diff --git a/lite2/rpc/client.go b/lite2/rpc/client.go index abd15adc2..8c18cd709 100644 --- a/lite2/rpc/client.go +++ b/lite2/rpc/client.go @@ -300,8 +300,12 @@ func (c *Client) TxSearch(query string, prove bool, page, perPage int, orderBy s return c.next.TxSearch(query, prove, page, perPage, orderBy) } -func (c *Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return c.next.Validators(height, page, perPage) +func (c *Client) Validators(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return c.next.Voters(height, page, perPage) +} + +func (c *Client) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return c.next.Voters(height, page, perPage) } func (c *Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { diff --git a/lite2/store/db/db.go b/lite2/store/db/db.go index d405b9865..67da3c691 100644 --- a/lite2/store/db/db.go +++ b/lite2/store/db/db.go @@ -47,11 +47,11 @@ func New(db dbm.DB, prefix string) store.Store { return &dbs{db: db, prefix: prefix, cdc: cdc, size: size} } -// SaveSignedHeaderAndValidatorSet persists SignedHeader and ValidatorSet to +// SaveSignedHeaderAndValidatorSet persists SignedHeader and VoterSet to // the db. // // Safe for concurrent use by multiple goroutines. -func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.ValidatorSet) error { +func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.VoterSet) error { if sh.Height <= 0 { panic("negative or zero height") } @@ -84,7 +84,7 @@ func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *ty return err } -// DeleteSignedHeaderAndValidatorSet deletes SignedHeader and ValidatorSet from +// DeleteSignedHeaderAndValidatorSet deletes SignedHeader and VoterSet from // the db. // // Safe for concurrent use by multiple goroutines. @@ -132,10 +132,10 @@ func (s *dbs) SignedHeader(height int64) (*types.SignedHeader, error) { return signedHeader, err } -// ValidatorSet loads ValidatorSet at the given height. +// VoterSet loads VoterSet at the given height. // // Safe for concurrent use by multiple goroutines. 
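With the wrapper above, the verifying RPC client keeps Validators for compatibility, but both it and the new Voters method return a *ctypes.ResultVoters produced by the underlying client's Voters call. A minimal usage sketch, assuming c is that wrapper client, the snippet sits inside a function returning an error, and the page/per-page values are arbitrary.

height := int64(10)
res, err := c.Voters(&height, 1, 30) // c.Validators(&height, 1, 30) yields the same result
if err != nil {
	return err
}
for _, v := range res.Voters { // ResultVoters.Voters is []*types.Validator
	_ = v.VotingPower
}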
-func (s *dbs) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (s *dbs) VoterSet(height int64) (*types.VoterSet, error) { if height <= 0 { panic("negative or zero height") } @@ -148,7 +148,7 @@ func (s *dbs) ValidatorSet(height int64) (*types.ValidatorSet, error) { return nil, store.ErrValidatorSetNotFound } - var valSet *types.ValidatorSet + var valSet *types.VoterSet err = s.cdc.UnmarshalBinaryLengthPrefixed(bz, &valSet) return valSet, err } diff --git a/lite2/store/db/db_test.go b/lite2/store/db/db_test.go index 2b82de8f3..2a25800c3 100644 --- a/lite2/store/db/db_test.go +++ b/lite2/store/db/db_test.go @@ -26,7 +26,7 @@ func TestLast_FirstSignedHeaderHeight(t *testing.T) { // 1 key err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.VoterSet{}) require.NoError(t, err) height, err = dbStore.LastSignedHeaderHeight() @@ -46,20 +46,20 @@ func Test_SaveSignedHeaderAndValidatorSet(t *testing.T) { require.Error(t, err) assert.Nil(t, h) - valSet, err := dbStore.ValidatorSet(1) + valSet, err := dbStore.VoterSet(1) require.Error(t, err) assert.Nil(t, valSet) // 1 key err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.VoterSet{}) require.NoError(t, err) h, err = dbStore.SignedHeader(1) require.NoError(t, err) assert.NotNil(t, h) - valSet, err = dbStore.ValidatorSet(1) + valSet, err = dbStore.VoterSet(1) require.NoError(t, err) assert.NotNil(t, valSet) @@ -71,7 +71,7 @@ func Test_SaveSignedHeaderAndValidatorSet(t *testing.T) { require.Error(t, err) assert.Nil(t, h) - valSet, err = dbStore.ValidatorSet(1) + valSet, err = dbStore.VoterSet(1) require.Error(t, err) assert.Nil(t, valSet) } @@ -85,7 +85,7 @@ func Test_SignedHeaderAfter(t *testing.T) { }) err := dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.VoterSet{}) require.NoError(t, err) h, err := dbStore.SignedHeaderAfter(1) @@ -105,7 +105,7 @@ func Test_Prune(t *testing.T) { // One header err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.VoterSet{}) require.NoError(t, err) assert.EqualValues(t, 1, dbStore.Size()) @@ -121,7 +121,7 @@ func Test_Prune(t *testing.T) { // Multiple headers for i := 1; i <= 10; i++ { err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: int64(i)}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: int64(i)}}, &types.VoterSet{}) require.NoError(t, err) } @@ -144,10 +144,10 @@ func Test_Concurrency(t *testing.T) { defer wg.Done() dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: i}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: i}}, &types.VoterSet{}) dbStore.SignedHeader(i) - dbStore.ValidatorSet(i) + dbStore.VoterSet(i) dbStore.LastSignedHeaderHeight() dbStore.FirstSignedHeaderHeight() diff --git a/lite2/store/store.go b/lite2/store/store.go index 7ea6b9c6b..af6821051 100644 --- a/lite2/store/store.go +++ b/lite2/store/store.go @@ -5,13 +5,13 @@ import "github.com/tendermint/tendermint/types" // Store is anything that 
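A minimal sketch of the store round trip the tests above exercise: save a signed header together with its voter set, then read the voter set back by height. The in-memory DB, package alias and prefix string are illustrative assumptions; the method names come from this diff.

package example

import (
	dbm "github.com/tendermint/tm-db"

	dbstore "github.com/tendermint/tendermint/lite2/store/db"
	"github.com/tendermint/tendermint/types"
)

// roundTrip is a hypothetical helper: SaveSignedHeaderAndValidatorSet keeps
// its name, but its second argument, like the VoterSet(height) accessor,
// now uses *types.VoterSet.
func roundTrip(sh *types.SignedHeader, voters *types.VoterSet) (*types.VoterSet, error) {
	s := dbstore.New(dbm.NewMemDB(), "test")
	if err := s.SaveSignedHeaderAndValidatorSet(sh, voters); err != nil {
		return nil, err
	}
	return s.VoterSet(sh.Height)
}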
can persistenly store headers. type Store interface { // SaveSignedHeaderAndValidatorSet saves a SignedHeader (h: sh.Height) and a - // ValidatorSet (h: sh.Height). + // VoterSet (h: sh.Height). // // height must be > 0. - SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.ValidatorSet) error + SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.VoterSet) error // DeleteSignedHeaderAndValidatorSet deletes SignedHeader (h: height) and - // ValidatorSet (h: height). + // VoterSet (h: height). // // height must be > 0. DeleteSignedHeaderAndValidatorSet(height int64) error @@ -24,12 +24,12 @@ type Store interface { // If SignedHeader is not found, ErrSignedHeaderNotFound is returned. SignedHeader(height int64) (*types.SignedHeader, error) - // ValidatorSet returns the ValidatorSet that corresponds to height. + // VoterSet returns the VoterSet that corresponds to height. // // height must be > 0. // - // If ValidatorSet is not found, ErrValidatorSetNotFound is returned. - ValidatorSet(height int64) (*types.ValidatorSet, error) + // If VoterSet is not found, ErrValidatorSetNotFound is returned. + VoterSet(height int64) (*types.VoterSet, error) // LastSignedHeaderHeight returns the last (newest) SignedHeader height. // diff --git a/lite2/test_helpers.go b/lite2/test_helpers.go index cc1bf4eb9..9ea261f50 100644 --- a/lite2/test_helpers.go +++ b/lite2/test_helpers.go @@ -120,7 +120,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, } func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, @@ -128,18 +128,18 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, Time: bTime, // LastBlockID // LastCommitHash - ValidatorsHash: valset.Hash(), - NextValidatorsHash: nextValset.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + VotersHash: valset.Hash(), + NextVotersHash: nextValset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) return &types.SignedHeader{ @@ -150,7 +150,7 @@ func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Tim // GenSignedHeaderLastBlockID calls genHeader and signHeader and combines them into a SignedHeader. 
func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int, lastBlockID types.BlockID) *types.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) diff --git a/lite2/verifier.go b/lite2/verifier.go index 6d8459ab6..b5c8c2721 100644 --- a/lite2/verifier.go +++ b/lite2/verifier.go @@ -32,10 +32,10 @@ var ( // e) headers are non-adjacent. func VerifyNonAdjacent( chainID string, - trustedHeader *types.SignedHeader, // height=X - trustedVals *types.ValidatorSet, // height=X or height=X+1 - untrustedHeader *types.SignedHeader, // height=Y - untrustedVals *types.ValidatorSet, // height=Y + trustedHeader *types.SignedHeader, + trustedVals *types.VoterSet, + untrustedHeader *types.SignedHeader, + untrustedVals *types.VoterSet, trustingPeriod time.Duration, now time.Time, trustLevel tmmath.Fraction) error { @@ -88,9 +88,9 @@ func VerifyNonAdjacent( // e) headers are adjacent. func VerifyAdjacent( chainID string, - trustedHeader *types.SignedHeader, // height=X - untrustedHeader *types.SignedHeader, // height=X+1 - untrustedVals *types.ValidatorSet, // height=X+1 + trustedHeader *types.SignedHeader, + untrustedHeader *types.SignedHeader, + untrustedVals *types.VoterSet, trustingPeriod time.Duration, now time.Time) error { @@ -107,10 +107,10 @@ func VerifyAdjacent( } // Check the validator hashes are the same - if !bytes.Equal(untrustedHeader.ValidatorsHash, trustedHeader.NextValidatorsHash) { + if !bytes.Equal(untrustedHeader.VotersHash, trustedHeader.NextVotersHash) { err := errors.Errorf("expected old header next validators (%X) to match those from new header (%X)", - trustedHeader.NextValidatorsHash, - untrustedHeader.ValidatorsHash, + trustedHeader.NextVotersHash, + untrustedHeader.VotersHash, ) return err } @@ -127,10 +127,10 @@ func VerifyAdjacent( // Verify combines both VerifyAdjacent and VerifyNonAdjacent functions. 
func Verify( chainID string, - trustedHeader *types.SignedHeader, // height=X - trustedVals *types.ValidatorSet, // height=X or height=X+1 - untrustedHeader *types.SignedHeader, // height=Y - untrustedVals *types.ValidatorSet, // height=Y + trustedHeader *types.SignedHeader, + trustedVals *types.VoterSet, + untrustedHeader *types.SignedHeader, + untrustedVals *types.VoterSet, trustingPeriod time.Duration, now time.Time, trustLevel tmmath.Fraction) error { @@ -146,7 +146,7 @@ func Verify( func verifyNewHeaderAndVals( chainID string, untrustedHeader *types.SignedHeader, - untrustedVals *types.ValidatorSet, + untrustedVals *types.VoterSet, trustedHeader *types.SignedHeader, now time.Time) error { @@ -173,9 +173,9 @@ func verifyNewHeaderAndVals( maxClockDrift) } - if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { - return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X)", - untrustedHeader.ValidatorsHash, + if !bytes.Equal(untrustedHeader.VotersHash, untrustedVals.Hash()) { + return errors.Errorf("expected new header voters (%X) to match those that were supplied (%X)", + untrustedHeader.VotersHash, untrustedVals.Hash(), ) } diff --git a/lite2/verifier_test.go b/lite2/verifier_test.go index adc671516..383896010 100644 --- a/lite2/verifier_test.go +++ b/lite2/verifier_test.go @@ -21,7 +21,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { var ( keys = genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10)) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) @@ -29,7 +29,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { testCases := []struct { newHeader *types.SignedHeader - newVals *types.ValidatorSet + newVals *types.VoterSet trustingPeriod time.Duration now time.Time expErr error @@ -118,9 +118,10 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // vals does not match with what we have -> error 8: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), - keys.ToValidators(10, 1), + keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, + types.ToVoterAll(keys.ToValidators(10, 1)), vals, []byte("app_hash"), []byte("cons_hash"), + []byte("results_hash"), 0, len(keys)), + types.ToVoterAll(keys.ToValidators(10, 1)), 3 * time.Hour, bTime.Add(2 * time.Hour), nil, @@ -130,7 +131,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { 9: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), - keys.ToValidators(10, 1), + types.ToVoterAll(keys.ToValidators(10, 1)), 3 * time.Hour, bTime.Add(2 * time.Hour), nil, @@ -140,7 +141,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { 10: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), - keys.ToValidators(10, 1), + types.ToVoterAll(keys.ToValidators(10, 1)), 1 * time.Hour, bTime.Add(1 * time.Hour), nil, @@ -174,27 +175,27 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { var ( keys = genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 
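To spell out the verifier signature changes, here is a minimal sketch of calling VerifyAdjacent with the new voter-set argument, written as it would appear inside the lite2 package tests. trustedHeader, untrustedHeader, untrustedVoters and bTime are assumed to come from the GenSignedHeader/ToVoterAll fixtures shown in the tests; the durations are arbitrary.

// untrustedVoters is the *types.VoterSet expected to have signed
// untrustedHeader (height = trustedHeader height + 1).
err := VerifyAdjacent(
	chainID,
	trustedHeader,          // *types.SignedHeader, height X
	untrustedHeader,        // *types.SignedHeader, height X+1
	untrustedVoters,        // *types.VoterSet, was *types.ValidatorSet
	3*time.Hour,            // trusting period
	bTime.Add(2*time.Hour), // "now"
)
if err != nil {
	// untrustedHeader could not be verified against the trusted state
}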
do! - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10)) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) // 30, 40, 50 twoThirds = keys[1:] - twoThirdsVals = twoThirds.ToValidators(30, 10) + twoThirdsVals = types.ToVoterAll(twoThirds.ToValidators(30, 10)) // 50 oneThird = keys[len(keys)-1:] - oneThirdVals = oneThird.ToValidators(50, 10) + oneThirdVals = types.ToVoterAll(oneThird.ToValidators(50, 10)) // 20 lessThanOneThird = keys[0:1] - lessThanOneThirdVals = lessThanOneThird.ToValidators(20, 10) + lessThanOneThirdVals = types.ToVoterAll(lessThanOneThird.ToValidators(20, 10)) ) testCases := []struct { newHeader *types.SignedHeader - newVals *types.ValidatorSet + newVals *types.VoterSet trustingPeriod time.Duration now time.Time expErr error @@ -289,7 +290,7 @@ func TestVerifyReturnsErrorIfTrustLevelIsInvalid(t *testing.T) { var ( keys = genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10)) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) diff --git a/node/node_test.go b/node/node_test.go index 4a65849a1..ba7178fdc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -355,7 +355,7 @@ func state(nVals int, height int64) (sm.State, dbm.DB, types.PrivValidator) { for i := 1; i < int(height); i++ { s.LastBlockHeight++ - s.LastValidators = s.Validators.Copy() + s.LastVoters = s.Voters.Copy() sm.SaveState(stateDB, s) } return s, stateDB, privVal diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 98875c91e..b1a0b53b1 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -381,15 +381,15 @@ func (c *baseRPCClient) TxSearch(query string, prove bool, page, perPage int, or return result, nil } -func (c *baseRPCClient) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - result := new(ctypes.ResultValidators) - _, err := c.caller.Call("validators", map[string]interface{}{ +func (c *baseRPCClient) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + result := new(ctypes.ResultVoters) + _, err := c.caller.Call("voters", map[string]interface{}{ "height": height, "page": page, "per_page": perPage, }, result) if err != nil { - return nil, errors.Wrap(err, "Validators") + return nil, errors.Wrap(err, "Voters") } return result, nil } diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 408d803c8..a443b6026 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -67,7 +67,7 @@ type SignClient interface { Block(height *int64) (*ctypes.ResultBlock, error) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) Commit(height *int64) (*ctypes.ResultCommit, error) - Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) + Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) TxSearch(query string, prove bool, page, perPage int, orderBy string) (*ctypes.ResultTxSearch, error) } diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index e6b0eb937..c430ce00e 100644 --- a/rpc/client/localclient.go +++ 
b/rpc/client/localclient.go @@ -152,8 +152,8 @@ func (c *Local) Commit(height *int64) (*ctypes.ResultCommit, error) { return core.Commit(c.ctx, height) } -func (c *Local) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return core.Validators(c.ctx, height, page, perPage) +func (c *Local) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return core.Voters(c.ctx, height, page, perPage) } func (c *Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 869d7b3e9..6fc895a23 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -154,8 +154,8 @@ func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { return core.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return core.Validators(&rpctypes.Context{}, height, page, perPage) +func (c Client) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return core.Voters(&rpctypes.Context{}, height, page, perPage) } func (c Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 5e83675e3..6b506fbb5 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -171,10 +171,10 @@ func TestGenesisAndValidators(t *testing.T) { gval := gen.Genesis.Validators[0] // get the current validators - vals, err := c.Validators(nil, 0, 0) + vals, err := c.Voters(nil, 0, 0) require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - val := vals.Validators[0] + require.Equal(t, 1, len(vals.Voters)) + val := vals.Voters[0] // make sure the current set is also the genesis set assert.Equal(t, gval.Power, val.VotingPower) diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index a2a619ea5..bc843e89d 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -7,14 +7,20 @@ import ( rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tm-db" ) // Validators gets the validator set at the given block height. // If no height is provided, it will fetch the current validator set. -// Note the validators are sorted by their address - this is the canonical -// order for the validators in the set as used in computing their Merkle root. +// Note the voters are sorted by their address - this is the canonical +// order for the voters in the set as used in computing their Merkle root. // More: https://docs.tendermint.com/master/rpc/#/Info/validators -func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ctypes.ResultValidators, error) { +func Voters(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return voters(ctx, heightPtr, page, perPage, sm.LoadValidators) +} + +func voters(ctx *rpctypes.Context, heightPtr *int64, page, perPage int, + loadFunc func(db dbm.DB, height int64) (*types.ValidatorSet, *types.VoterSet, error)) (*ctypes.ResultVoters, error) { // The latest validator that we know is the // NextValidator of the last block. 
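The SignClient change surfaces in tests such as TestGenesisAndValidators above: clients expose Voters, and the result carries a Voters slice. A minimal sketch against any client implementing the interface; the helper name is illustrative, a nil height means the latest block, and zero page/per-page values rely on the server defaults as in the test.

package example

import (
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// printVoters is a hypothetical helper over the renamed SignClient method.
func printVoters(c rpcclient.Client) error {
	res, err := c.Voters(nil, 0, 0) // was c.Validators(nil, 0, 0)
	if err != nil {
		return err
	}
	for _, v := range res.Voters { // []*types.Validator, JSON field "voters"
		fmt.Printf("%X has voting power %d\n", v.Address, v.VotingPower)
	}
	return nil
}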
height := consensusState.GetState().LastBlockHeight + 1 @@ -23,12 +29,12 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ct return nil, err } - validators, err := sm.LoadValidators(stateDB, height) + _, voters, err := loadFunc(stateDB, height) if err != nil { return nil, err } - totalCount := len(validators.Validators) + totalCount := len(voters.Voters) perPage = validatePerPage(perPage) page, err = validatePage(page, perPage, totalCount) if err != nil { @@ -37,11 +43,11 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ct skipCount := validateSkipCount(page, perPage) - v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + v := voters.Voters[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - return &ctypes.ResultValidators{ + return &ctypes.ResultVoters{ BlockHeight: height, - Validators: v}, nil + Voters: v}, nil } // DumpConsensusState dumps consensus state. diff --git a/rpc/core/doc.go b/rpc/core/doc.go index a51a02982..c1971b855 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -93,7 +93,7 @@ Available endpoints: /unconfirmed_txs /unsafe_flush_mempool /unsafe_stop_cpu_profiler -/validators +/voters Endpoints that require arguments: /abci_query?path=_&data=_&prove=_ diff --git a/rpc/core/routes.go b/rpc/core/routes.go index aa0403f87..bc7b9b8c1 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -25,7 +25,7 @@ var Routes = map[string]*rpc.RPCFunc{ "commit": rpc.NewRPCFunc(Commit, "height"), "tx": rpc.NewRPCFunc(Tx, "hash,prove"), "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"), - "validators": rpc.NewRPCFunc(Validators, "height,page,per_page"), + "voters": rpc.NewRPCFunc(Voters, "height,page,per_page"), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"), diff --git a/rpc/core/status.go b/rpc/core/status.go index e6438009a..9d4fb201e 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -66,6 +66,7 @@ func validatorAtHeight(h int64) *types.Validator { privValAddress := pubKey.Address() // If we're still at height h, search in the current validator set. + // ValidatorOrVoter: validator lastBlockHeight, vals := consensusState.GetValidators() if lastBlockHeight == h { for _, val := range vals { @@ -77,7 +78,8 @@ func validatorAtHeight(h int64) *types.Validator { // If we've moved to the next height, retrieve the validator set from DB. if lastBlockHeight > h { - vals, err := sm.LoadValidators(stateDB, h) + // ValidatorOrVoter: validator + vals, _, err := sm.LoadValidators(stateDB, h) if err != nil { return nil // should not happen } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 9aee485e9..5d6201a25 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -117,9 +117,9 @@ type Peer struct { } // Validators for a height -type ResultValidators struct { +type ResultVoters struct { BlockHeight int64 `json:"block_height"` - Validators []*types.Validator `json:"validators"` + Voters []*types.Validator `json:"voters"` } // ConsensusParams for given height diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index f7fdf6b53..f164316d5 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -1,24 +1,24 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
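Two consequences of the hunks above are worth spelling out: the route is now registered under "voters" with the same height/page/per_page parameters, and sm.LoadValidators returns both the validator set and the voter set, so call sites keep whichever half they need. A minimal sketch of the latter, as a fragment with stateDB and height assumed in scope.

// LoadValidators now returns (validators, voters, error); status.go keeps the
// validators, while the voters RPC handler paginates voters.Voters.
validators, voters, err := sm.LoadValidators(stateDB, height)
if err != nil {
	return nil, err
}
_ = validators                   // *types.ValidatorSet
totalCount := len(voters.Voters) // *types.VoterSet
_ = totalCount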
// source: rpc/grpc/types.proto -package coregrpc +package coregrpc // import "github.com/tendermint/tendermint/rpc/grpc" + +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import types "github.com/tendermint/tendermint/abci/types" + +import bytes "bytes" import ( - bytes "bytes" - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - types "github.com/tendermint/tendermint/abci/types" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" ) +import io "io" + // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = golang_proto.Marshal @@ -29,7 +29,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type RequestPing struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -41,7 +41,7 @@ func (m *RequestPing) Reset() { *m = RequestPing{} } func (m *RequestPing) String() string { return proto.CompactTextString(m) } func (*RequestPing) ProtoMessage() {} func (*RequestPing) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{0} + return fileDescriptor_types_56fbaabb1671b357, []int{0} } func (m *RequestPing) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -51,15 +51,15 @@ func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestPing) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPing.Merge(m, src) +func (dst *RequestPing) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPing.Merge(dst, src) } func (m *RequestPing) XXX_Size() int { return m.Size() @@ -81,7 +81,7 @@ func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } func (*RequestBroadcastTx) ProtoMessage() {} func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{1} + return fileDescriptor_types_56fbaabb1671b357, []int{1} } func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -91,15 +91,15 @@ func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *RequestBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBroadcastTx.Merge(m, src) +func (dst *RequestBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBroadcastTx.Merge(dst, src) } func (m *RequestBroadcastTx) XXX_Size() int { return m.Size() @@ -127,7 +127,7 @@ func (m *ResponsePing) Reset() { *m = ResponsePing{} } func 
(m *ResponsePing) String() string { return proto.CompactTextString(m) } func (*ResponsePing) ProtoMessage() {} func (*ResponsePing) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{2} + return fileDescriptor_types_56fbaabb1671b357, []int{2} } func (m *ResponsePing) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -137,15 +137,15 @@ func (m *ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponsePing) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePing.Merge(m, src) +func (dst *ResponsePing) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePing.Merge(dst, src) } func (m *ResponsePing) XXX_Size() int { return m.Size() @@ -157,8 +157,8 @@ func (m *ResponsePing) XXX_DiscardUnknown() { var xxx_messageInfo_ResponsePing proto.InternalMessageInfo type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` + CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` + DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -168,7 +168,7 @@ func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } func (*ResponseBroadcastTx) ProtoMessage() {} func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{3} + return fileDescriptor_types_56fbaabb1671b357, []int{3} } func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -178,15 +178,15 @@ func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) +func (dst *ResponseBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBroadcastTx.Merge(dst, src) } func (m *ResponseBroadcastTx) XXX_Size() int { return m.Size() @@ -221,36 +221,6 @@ func init() { proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") golang_proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") } - -func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_15f63baabf91876a) } -func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_15f63baabf91876a) } - -var fileDescriptor_15f63baabf91876a = []byte{ - // 344 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x4e, 0xf3, 0x30, - 0x14, 0x85, 0xe5, 0xea, 0xd7, 0x0f, 0xdc, 0x96, 0x0e, 0x2e, 0x42, 0x28, 0x83, 0x55, 0x2a, 0x54, - 0x3a, 0x39, 0x52, 0x19, 0x99, 0x5a, 0x90, 0x10, 0x62, 0xa9, 0xa2, 0x4e, 0x2c, 
0x25, 0x75, 0xac, - 0x34, 0x82, 0xc6, 0xc6, 0x71, 0x51, 0xfa, 0x38, 0x6c, 0x3c, 0x02, 0x0b, 0x12, 0x23, 0x23, 0x8f, - 0x00, 0xe1, 0x25, 0x18, 0x91, 0x93, 0x86, 0x78, 0x80, 0xb2, 0x44, 0x27, 0xd6, 0x39, 0x9f, 0xce, - 0xbd, 0xba, 0xb0, 0xa3, 0x24, 0x73, 0x43, 0xf3, 0xd1, 0x4b, 0xc9, 0x13, 0x2a, 0x95, 0xd0, 0x02, - 0xb7, 0x34, 0x8f, 0x03, 0xae, 0xe6, 0x51, 0xac, 0xa9, 0x92, 0x8c, 0x1a, 0x83, 0xd3, 0xd5, 0xb3, - 0x48, 0x05, 0x13, 0xe9, 0x2b, 0xbd, 0x74, 0x73, 0x9f, 0x1b, 0x8a, 0x50, 0x54, 0xaa, 0x08, 0x3b, - 0xbb, 0xfe, 0x94, 0x45, 0x05, 0xce, 0x86, 0x76, 0xb6, 0xa1, 0xee, 0xf1, 0xdb, 0x05, 0x4f, 0xf4, - 0x28, 0x8a, 0xc3, 0xce, 0x01, 0xe0, 0xd5, 0xef, 0x50, 0x09, 0x3f, 0x60, 0x7e, 0xa2, 0xc7, 0x29, - 0x6e, 0x42, 0x4d, 0xa7, 0x7b, 0xa8, 0x8d, 0x7a, 0x0d, 0xaf, 0xa6, 0xd3, 0x4e, 0x13, 0x1a, 0x1e, - 0x4f, 0xa4, 0x88, 0x13, 0x9e, 0xa7, 0xee, 0x11, 0xb4, 0xca, 0x07, 0x3b, 0x37, 0x80, 0x4d, 0x36, - 0xe3, 0xec, 0x7a, 0xb2, 0x4a, 0xd7, 0xfb, 0x5d, 0x6a, 0x0d, 0x61, 0x2a, 0xd1, 0xa2, 0x4c, 0x99, - 0x3e, 0x31, 0xf6, 0x71, 0xea, 0x6d, 0xb0, 0x42, 0xe0, 0x33, 0x80, 0x80, 0xdf, 0x44, 0x77, 0x5c, - 0x19, 0x48, 0x2d, 0x87, 0xf4, 0xfe, 0x80, 0x9c, 0x16, 0x81, 0x71, 0xea, 0x6d, 0x05, 0xa5, 0xec, - 0x3f, 0x21, 0x68, 0x7c, 0x77, 0x1b, 0x8c, 0xce, 0xf1, 0x05, 0xfc, 0x33, 0xe5, 0x71, 0x9b, 0xfe, - 0xb0, 0x57, 0x6a, 0x2d, 0xc5, 0xd9, 0xff, 0xc5, 0x51, 0x6d, 0x00, 0x5f, 0x41, 0xdd, 0x1e, 0xfc, - 0x70, 0x1d, 0xd3, 0x32, 0x3a, 0xbd, 0xb5, 0x68, 0xcb, 0x39, 0x1c, 0x7d, 0xbe, 0x13, 0xf4, 0x90, - 0x11, 0xf4, 0x98, 0x11, 0xf4, 0x92, 0x11, 0xf4, 0x9a, 0x11, 0xf4, 0x96, 0x11, 0xf4, 0xfc, 0x41, - 0xd0, 0x65, 0x3f, 0x8c, 0xf4, 0x6c, 0x31, 0xa5, 0x4c, 0xcc, 0xdd, 0x8a, 0x68, 0xcb, 0xf2, 0xa4, - 0x8e, 0x99, 0x50, 0xdc, 0x88, 0xe9, 0xff, 0xfc, 0x02, 0x8e, 0xbe, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x30, 0xfd, 0xaa, 0xac, 0x6e, 0x02, 0x00, 0x00, -} - func (this *RequestPing) Equal(that interface{}) bool { if that == nil { return this == nil @@ -405,17 +375,6 @@ type BroadcastAPIServer interface { BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) } -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
-type UnimplementedBroadcastAPIServer struct { -} - -func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") -} -func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") -} - func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { s.RegisterService(&_BroadcastAPI_serviceDesc, srv) } @@ -476,7 +435,7 @@ var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ func (m *RequestPing) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -484,26 +443,20 @@ func (m *RequestPing) Marshal() (dAtA []byte, err error) { } func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -511,33 +464,26 @@ func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { } func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i += copy(dAtA[i:], m.Tx) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponsePing) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -545,26 +491,20 @@ func (m *ResponsePing) Marshal() (dAtA []byte, err error) { } func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -572,56 +512,44 @@ func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { } func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - 
size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.CheckTx != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CheckTx.Size())) + n1, err := m.CheckTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DeliverTx.Size())) + n2, err := m.DeliverTx.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n2 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func NewPopulatedRequestPing(r randyTypes, easy bool) *RequestPing { this := &RequestPing{} @@ -654,10 +582,10 @@ func NewPopulatedResponsePing(r randyTypes, easy bool) *ResponsePing { func NewPopulatedResponseBroadcastTx(r randyTypes, easy bool) *ResponseBroadcastTx { this := &ResponseBroadcastTx{} - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.CheckTx = types.NewPopulatedResponseCheckTx(r, easy) } - if r.Intn(5) != 0 { + if r.Intn(10) != 0 { this.DeliverTx = types.NewPopulatedResponseDeliverTx(r, easy) } if !easy && r.Intn(10) != 0 { @@ -799,7 +727,14 @@ func (m *ResponseBroadcastTx) Size() (n int) { } func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -819,7 +754,7 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -842,9 +777,6 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -873,7 +805,7 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -901,7 +833,7 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -910,9 +842,6 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -930,9 +859,6 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -961,7 +887,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -984,9 +910,6 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1015,7 +938,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1043,7 +966,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1052,9 +975,6 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1079,7 +999,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1088,9 +1008,6 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { return ErrInvalidLengthTypes } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1110,9 +1027,6 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1129,7 +1043,6 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1161,8 +1074,10 @@ func skipTypes(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1179,34 +1094,82 @@ func skipTypes(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthTypes } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = 
fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_types_56fbaabb1671b357) } +func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_types_56fbaabb1671b357) } + +var fileDescriptor_types_56fbaabb1671b357 = []byte{ + // 344 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x4e, 0xf3, 0x30, + 0x14, 0x85, 0xe5, 0xea, 0xd7, 0x0f, 0xdc, 0x96, 0x0e, 0x2e, 0x42, 0x28, 0x83, 0x55, 0x2a, 0x54, + 0x3a, 0x39, 0x52, 0x19, 0x99, 0x5a, 0x90, 0x10, 0x62, 0xa9, 0xa2, 0x4e, 0x2c, 0x25, 0x75, 0xac, + 0x34, 0x82, 0xc6, 0xc6, 0x71, 0x51, 0xfa, 0x38, 0x6c, 0x3c, 0x02, 0x0b, 0x12, 0x23, 0x23, 0x8f, + 0x00, 0xe1, 0x25, 0x18, 0x91, 0x93, 0x86, 0x78, 0x80, 0xb2, 0x44, 0x27, 0xd6, 0x39, 0x9f, 0xce, + 0xbd, 0xba, 0xb0, 0xa3, 0x24, 0x73, 0x43, 0xf3, 0xd1, 0x4b, 0xc9, 0x13, 0x2a, 0x95, 0xd0, 0x02, + 0xb7, 0x34, 0x8f, 0x03, 0xae, 0xe6, 0x51, 0xac, 0xa9, 0x92, 0x8c, 0x1a, 0x83, 0xd3, 0xd5, 0xb3, + 0x48, 0x05, 0x13, 0xe9, 0x2b, 0xbd, 0x74, 0x73, 0x9f, 0x1b, 0x8a, 0x50, 0x54, 0xaa, 0x08, 0x3b, + 0xbb, 0xfe, 0x94, 0x45, 0x05, 0xce, 0x86, 0x76, 0xb6, 0xa1, 0xee, 0xf1, 0xdb, 0x05, 0x4f, 0xf4, + 0x28, 0x8a, 0xc3, 0xce, 0x01, 0xe0, 0xd5, 0xef, 0x50, 0x09, 0x3f, 0x60, 0x7e, 0xa2, 0xc7, 0x29, + 0x6e, 0x42, 0x4d, 0xa7, 0x7b, 0xa8, 0x8d, 0x7a, 0x0d, 0xaf, 0xa6, 0xd3, 0x4e, 0x13, 0x1a, 0x1e, + 0x4f, 0xa4, 0x88, 0x13, 0x9e, 0xa7, 0xee, 0x11, 0xb4, 0xca, 0x07, 0x3b, 0x37, 0x80, 0x4d, 0x36, + 0xe3, 0xec, 0x7a, 0xb2, 0x4a, 0xd7, 0xfb, 0x5d, 0x6a, 0x0d, 0x61, 0x2a, 0xd1, 0xa2, 0x4c, 0x99, + 0x3e, 0x31, 0xf6, 0x71, 0xea, 0x6d, 0xb0, 0x42, 0xe0, 0x33, 0x80, 0x80, 0xdf, 0x44, 0x77, 0x5c, + 0x19, 0x48, 0x2d, 0x87, 0xf4, 0xfe, 0x80, 0x9c, 0x16, 0x81, 0x71, 0xea, 0x6d, 0x05, 0xa5, 0xec, + 0x3f, 0x21, 0x68, 0x7c, 0x77, 0x1b, 0x8c, 0xce, 0xf1, 0x05, 0xfc, 0x33, 0xe5, 0x71, 0x9b, 0xfe, + 0xb0, 0x57, 0x6a, 0x2d, 0xc5, 0xd9, 0xff, 0xc5, 0x51, 0x6d, 0x00, 0x5f, 0x41, 0xdd, 0x1e, 0xfc, + 0x70, 0x1d, 0xd3, 0x32, 0x3a, 0xbd, 0xb5, 0x68, 0xcb, 0x39, 0x1c, 0x7d, 0xbe, 0x13, 0xf4, 0x90, + 0x11, 0xf4, 0x98, 0x11, 0xf4, 0x92, 0x11, 0xf4, 0x9a, 0x11, 0xf4, 0x96, 0x11, 0xf4, 0xfc, 0x41, + 0xd0, 0x65, 0x3f, 0x8c, 0xf4, 0x6c, 0x31, 0xa5, 0x4c, 0xcc, 0xdd, 0x8a, 0x68, 0xcb, 0xf2, 0xa4, + 0x8e, 0x99, 0x50, 0xdc, 0x88, 0xe9, 0xff, 0xfc, 0x02, 0x8e, 0xbe, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x30, 0xfd, 0xaa, 0xac, 0x6e, 0x02, 0x00, 0x00, +} diff --git a/rpc/grpc/typespb_test.go b/rpc/grpc/typespb_test.go index d0a6c8654..58100e13b 100644 --- a/rpc/grpc/typespb_test.go +++ b/rpc/grpc/typespb_test.go @@ -1,21 +1,19 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: rpc/grpc/types.proto -package coregrpc +package coregrpc // import "github.com/tendermint/tendermint/rpc/grpc" -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - _ "github.com/tendermint/tendermint/abci/types" - math "math" - math_rand "math/rand" - testing "testing" - time "time" -) +import testing "testing" +import math_rand "math/rand" +import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" +import proto "github.com/gogo/protobuf/proto" +import golang_proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/tendermint/tendermint/abci/types" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/rpc/swagger/swagger.yaml b/rpc/swagger/swagger.yaml index 6906bdcb3..855cf2e1b 100644 --- a/rpc/swagger/swagger.yaml +++ b/rpc/swagger/swagger.yaml @@ -637,10 +637,10 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" - /validators: + /voters: get: - summary: Get validator set at a specified height - operationId: validators + summary: Get voter set at a specified height + operationId: voters parameters: - in: query name: height @@ -668,14 +668,14 @@ paths: tags: - Info description: | - Get Validators. + Get Voters. responses: 200: description: Commit results. content: application/json: schema: - $ref: "#/components/schemas/ValidatorsResponse" + $ref: "#/components/schemas/VotersResponse" 500: description: Error content: @@ -1297,8 +1297,8 @@ components: - "last_block_id" - "last_commit_hash" - "data_hash" - - "validators_hash" - - "next_validators_hash" + - "voters_hash" + - "next_voters_hash" - "consensus_hash" - "app_hash" - "last_results_hash" @@ -1334,10 +1334,10 @@ components: data_hash: type: string example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" - validators_hash: + voters_hash: type: string example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - next_validators_hash: + next_voters_hash: type: string example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" consensus_hash: @@ -1727,8 +1727,8 @@ components: - "last_block_id" - "last_commit_hash" - "data_hash" - - "validators_hash" - - "next_validators_hash" + - "voters_hash" + - "next_voters_hash" - "consensus_hash" - "app_hash" - "last_results_hash" @@ -1783,10 +1783,10 @@ components: data_hash: type: "string" example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" - validators_hash: + voters_hash: type: "string" example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - next_validators_hash: + next_voters_hash: type: "string" example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" consensus_hash: @@ -1862,7 +1862,7 @@ components: type: "boolean" example: true type: "object" - ValidatorsResponse: + VotersResponse: type: object required: - "jsonrpc" @@ -1878,12 +1878,12 @@ components: result: required: - "block_height" - - "validators" + - "voters" properties: block_height: type: "string" example: "55" - validators: + voters: type: "array" items: type: "object" @@ -2047,7 +2047,7 @@ components: 
- "step" - "start_time" - "commit_time" - - "validators" + - "voters" - "proposer" - "proposal" - "proposal_block" @@ -2061,7 +2061,7 @@ components: - "votes" - "commit_round" - "last_commit" - - "last_validators" + - "last_voters" - "triggered_timeout_precommit" properties: height: @@ -2079,11 +2079,11 @@ components: commit_time: type: "string" example: "2019-08-05T11:28:44.064658805Z" - validators: + voters: required: - - "validators" + - "voters" properties: - validators: + voters: type: "array" items: type: "object" @@ -2197,11 +2197,11 @@ components: properties: {} type: "object" type: "object" - last_validators: + last_voters: required: - - "validators" + - "voters" properties: - validators: + voters: type: "array" items: type: "object" diff --git a/state/execution.go b/state/execution.go index 170beaa7f..49cfbaed3 100644 --- a/state/execution.go +++ b/state/execution.go @@ -105,7 +105,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( evidence := blockExec.evpool.PendingEvidence(maxNumEvidence) // Fetch a limited amount of valid txs - maxDataBytes := types.MaxDataBytes(maxBytes, state.Validators.Size(), len(evidence)) + maxDataBytes := types.MaxDataBytes(maxBytes, state.Voters.Size(), len(evidence)) txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) return state.MakeBlock(height, txs, commit, evidence, proposerAddr, round, proof) @@ -319,7 +319,7 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCo // Remember that the first LastCommit is intentionally empty, so it makes // sense for LastCommitInfo.Votes to also be empty. if block.Height > 1 { - lastValSet, err := LoadValidators(stateDB, block.Height-1) + _, lastVoterSet, err := LoadValidators(stateDB, block.Height-1) if err != nil { panic(err) } @@ -327,15 +327,16 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCo // Sanity check that commit size matches validator set size - only applies // after first block. var ( - commitSize = block.LastCommit.Size() - valSetLen = len(lastValSet.Validators) + commitSize = block.LastCommit.Size() + voterSetLen = lastVoterSet.Size() ) - if commitSize != valSetLen { - panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", - commitSize, valSetLen, block.Height, block.LastCommit.Signatures, lastValSet.Validators)) + + if commitSize != voterSetLen { + panic(fmt.Sprintf("commit size (%d) doesn't match voterset length (%d) at height %d\n\n%v\n\n%v", + commitSize, voterSetLen, block.Height, block.LastCommit.Signatures, lastVoterSet.Voters)) } - for i, val := range lastValSet.Validators { + for i, val := range lastVoterSet.Voters { commitSig := block.LastCommit.Signatures[i] voteInfos[i] = abci.VoteInfo{ Validator: types.TM2PB.Validator(val), @@ -349,11 +350,11 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCo // We need the validator set. We already did this in validateBlock. // TODO: Should we instead cache the valset in the evidence itself and add // `SetValidatorSet()` and `ToABCI` methods ? - valset, err := LoadValidators(stateDB, ev.Height()) + _, voterSet, err := LoadValidators(stateDB, ev.Height()) if err != nil { panic(err) } - byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) + byzVals[i] = types.TM2PB.Evidence(ev, voterSet, block.Time) } return abci.LastCommitInfo{ @@ -393,7 +394,7 @@ func updateState( ) (State, error) { // Copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators. 
+ // and update s.LastVoters and s.Validators. nValSet := state.NextValidators.Copy() // Update the validator set with the latest abciResponses. @@ -433,6 +434,8 @@ func updateState( return state, fmt.Errorf("error get proof of hash: %v", err) } + nextVoters := types.SelectVoter(nValSet, proofHash) + // NOTE: the AppHash has not been populated. // It will be filled on state.Save. return State{ @@ -443,8 +446,10 @@ func updateState( LastBlockTime: header.Time, LastProofHash: proofHash, NextValidators: nValSet, + NextVoters: nextVoters, Validators: state.NextValidators.Copy(), - LastValidators: state.Validators.Copy(), + Voters: state.NextVoters.Copy(), + LastVoters: state.Voters.Copy(), LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, LastHeightConsensusParamsChanged: lastHeightParamsChanged, diff --git a/state/execution_test.go b/state/execution_test.go index b9d6ab1bb..9f8bfd504 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -88,7 +88,7 @@ func TestBeginBlockValidators(t *testing.T) { for _, tc := range testCases { lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) - proposer := types.SelectProposer(state.Validators, state.LastProofHash, 1, 0) + proposer := state.Validators.SelectProposer(state.LastProofHash, 1, 0) message := state.MakeHashMessage(0) proof, _ := privVals[proposer.Address.String()].GenerateVRFProof(message) @@ -134,7 +134,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { ev2 := types.NewMockEvidence(height2, time.Now(), idx2, val2) now := tmtime.Now() - valSet := state.Validators + valSet := state.Voters testCases := []struct { desc string evidence []types.Evidence @@ -161,7 +161,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { lastCommit := types.NewCommit(9, 0, prevBlockID, commitSigs) for _, tc := range testCases { message := state.MakeHashMessage(0) - proposer := types.SelectProposer(state.Validators, state.LastProofHash, 1, 0) + proposer := state.Validators.SelectProposer(state.LastProofHash, 1, 0) proof, _ := privVals[proposer.Address.String()].GenerateVRFProof(message) block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, proposer.Address, 0, proof) block.Time = now diff --git a/state/export_test.go b/state/export_test.go index 1f3990bbd..1170721da 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -45,6 +45,6 @@ func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params t // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. 
-func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { - saveValidatorsInfo(db, height, lastHeightChanged, valSet) +func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, proofHash []byte, valSet *types.ValidatorSet) { + saveValidatorsInfo(db, height, lastHeightChanged, proofHash, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index 8bfe803c5..e38038954 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -46,7 +46,7 @@ func makeAndCommitGoodBlock( evidence []types.Evidence) (sm.State, types.BlockID, *types.Commit, error) { // A good block passes state, blockID, err := makeAndApplyGoodBlock(state, - privVals[types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address.String()], + privVals[state.Validators.SelectProposer(state.LastProofHash, height, 0).Address.String()], height, lastCommit, proposerAddr, blockExec, evidence) if err != nil { return state, types.BlockID{}, nil, err @@ -134,7 +134,7 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida for i := 1; i < height; i++ { s.LastBlockHeight++ - s.LastValidators = s.Validators.Copy() + s.LastVoters = s.Voters.Copy() sm.SaveState(stateDB, s) } return s, stateDB, privVals diff --git a/state/state.go b/state/state.go index e1e748419..d4315c40f 100644 --- a/state/state.go +++ b/state/state.go @@ -63,7 +63,7 @@ type State struct { // vrf hash from proof LastProofHash []byte - // LastValidators is used to validate block.LastCommit. + // LastVoters is used to validate block.LastCommit. // Validators are persisted to the database separately every time they change, // so we can query for historical validator sets. // Note that if s.LastBlockHeight causes a valset change, @@ -71,7 +71,9 @@ type State struct { // Extra +1 due to nextValSet delay. NextValidators *types.ValidatorSet Validators *types.ValidatorSet - LastValidators *types.ValidatorSet + NextVoters *types.VoterSet + Voters *types.VoterSet + LastVoters *types.VoterSet LastHeightValidatorsChanged int64 // Consensus parameters used for validating blocks. @@ -103,8 +105,10 @@ func (state State) Copy() State { LastProofHash: state.LastProofHash, NextValidators: state.NextValidators.Copy(), + NextVoters: state.NextVoters.Copy(), Validators: state.Validators.Copy(), - LastValidators: state.LastValidators.Copy(), + Voters: state.Voters.Copy(), + LastVoters: state.LastVoters.Copy(), LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, ConsensusParams: state.ConsensusParams, @@ -156,14 +160,14 @@ func (state State) MakeBlock( if height == 1 { timestamp = state.LastBlockTime // genesis time } else { - timestamp = MedianTime(commit, state.LastValidators) + timestamp = MedianTime(commit, state.LastVoters) } // Fill rest of header with state data. block.Header.Populate( state.Version.Consensus, state.ChainID, timestamp, state.LastBlockID, - state.Validators.Hash(), state.NextValidators.Hash(), + state.Voters.Hash(), state.NextVoters.Hash(), state.ConsensusParams.Hash(), state.AppHash, state.LastResultsHash, proposerAddress, round, @@ -177,7 +181,7 @@ func (state State) MakeBlock( // corresponding validator set. The computed time is always between timestamps of // the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the // computed value. 
-func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { +func MedianTime(commit *types.Commit, voters *types.VoterSet) time.Time { weightedTimes := make([]*tmtime.WeightedTime, len(commit.Signatures)) totalVotingPower := int64(0) @@ -185,7 +189,7 @@ func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time if commitSig.Absent() { continue } - _, validator := validators.GetByAddress(commitSig.ValidatorAddress) + _, validator := voters.GetByAddress(commitSig.ValidatorAddress) // If there's no condition, TestValidateBlockCommit panics; not needed normally. if validator != nil { totalVotingPower += validator.VotingPower @@ -256,8 +260,10 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { LastProofHash: genDoc.Hash(), NextValidators: nextValidatorSet, + NextVoters: types.SelectVoter(nextValidatorSet, genDoc.Hash()), Validators: validatorSet, - LastValidators: types.NewValidatorSet(nil), + Voters: types.ToVoterAll(validatorSet), + LastVoters: &types.VoterSet{}, LastHeightValidatorsChanged: 1, ConsensusParams: *genDoc.ConsensusParams, diff --git a/state/state_test.go b/state/state_test.go index d9ce994b1..f3b180b2f 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -184,26 +184,27 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { assert := assert.New(t) // Can't load anything for height 0. - _, err := sm.LoadValidators(stateDB, 0) + _, _, err := sm.LoadValidators(stateDB, 0) assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. - v, err := sm.LoadValidators(stateDB, 1) + _, v, err := sm.LoadValidators(stateDB, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. - v, err = sm.LoadValidators(stateDB, 2) + _, v, err = sm.LoadValidators(stateDB, 2) assert.Nil(err, "expected no err at height 2") assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - vp0, err := sm.LoadValidators(stateDB, nextHeight+0) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, + state.LastProofHash, state.NextValidators) + _, vp0, err := sm.LoadValidators(stateDB, nextHeight+0) assert.Nil(err, "expected no err") - vp1, err := sm.LoadValidators(stateDB, nextHeight+1) + _, vp1, err := sm.LoadValidators(stateDB, nextHeight+1) assert.Nil(err, "expected no err") assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") @@ -238,7 +239,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, + state.LastProofHash, state.NextValidators) } // On each height change, increment the power by one. 
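The state changes above settle on one pattern for deriving the new voter sets: every validator is a voter at genesis (ToVoterAll), and for later heights voters are selected from the validator set using the VRF proof hash (SelectVoter). A compact sketch of that pattern follows; the package name and the deriveVoters helper are invented for illustration and are not part of the patch.

package votersketch

import "github.com/tendermint/tendermint/types"

// deriveVoters summarizes how this diff turns validator sets into voter sets:
// ToVoterAll at genesis, SelectVoter (driven by the proof hash) afterwards.
func deriveVoters(genesisVals, nextVals *types.ValidatorSet, proofHash []byte) (*types.VoterSet, *types.VoterSet) {
	voters := types.ToVoterAll(genesisVals)               // genesis: every validator is a voter
	nextVoters := types.SelectVoter(nextVals, proofHash)  // later heights: selection keyed on the VRF proof hash
	return voters, nextVoters
}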
@@ -256,7 +258,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. + _, v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -357,15 +359,16 @@ func genValSetWithPowers(powers []int64) *types.ValidatorSet { // test a proposer appears as frequently as expected func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { - N := valSet.Size() - totalPower := valSet.TotalVotingPower() + voterSet := types.ToVoterAll(valSet) + N := voterSet.Size() + totalPower := voterSet.TotalVotingPower() // run the proposer selection and track frequencies runMult := 1 runs := int(totalPower) * runMult freqs := make([]int, N) for i := 0; i < runs; i++ { - prop := types.SelectProposer(valSet, []byte{}, 1, i) + prop := valSet.SelectProposer([]byte{}, 1, i) idx, _ := valSet.GetByAddress(prop.Address) freqs[idx]++ valSet.IncrementProposerPriority(1) @@ -516,7 +519,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { state.NextValidators = state.Validators // we only have one validator: assert.Equal(t, val1PubKey.Address(), - types.SelectProposer(state.Validators, []byte{}, state.LastBlockHeight+1, 0).Address) + state.Validators.SelectProposer([]byte{}, state.LastBlockHeight+1, 0).Address) block := makeBlock(state, state.LastBlockHeight+1) blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} @@ -710,7 +713,7 @@ func TestLargeGenesisValidator(t *testing.T) { // -> no change in ProposerPrio (stays zero): assert.EqualValues(t, oldState.NextValidators, updatedState.NextValidators) assert.EqualValues(t, 0, - types.SelectProposer(updatedState.NextValidators, []byte{}, block.Height, 0).ProposerPriority) + updatedState.NextValidators.SelectProposer([]byte{}, block.Height, 0).ProposerPriority) oldState = updatedState } @@ -808,8 +811,8 @@ func TestLargeGenesisValidator(t *testing.T) { blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) - if !bytes.Equal(types.SelectProposer(curState.Validators, []byte{}, int64(count), 0).Address, - types.SelectProposer(curState.NextValidators, []byte{}, int64(count+1), 0).Address) { + if !bytes.Equal(curState.Validators.SelectProposer([]byte{}, int64(count), 0).Address, + curState.NextValidators.SelectProposer([]byte{}, int64(count+1), 0).Address) { isProposerUnchanged = false } count++ @@ -849,18 +852,18 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) state.Validators = genValSet(valSetSize) - types.SelectProposer(state.Validators, []byte{}, 1, 0) + state.Validators.SelectProposer([]byte{}, 1, 0) state.NextValidators = state.Validators.Copy() - types.SelectProposer(state.NextValidators, []byte{}, 2, 0) + state.NextValidators.SelectProposer([]byte{}, 2, 0) sm.SaveState(stateDB, state) nextHeight := state.LastBlockHeight + 1 - v0, err := sm.LoadValidators(stateDB, nextHeight) + v0, _, err := sm.LoadValidators(stateDB, nextHeight) assert.Nil(t, err) acc0 := v0.Validators[0].ProposerPriority - v1, err := 
sm.LoadValidators(stateDB, nextHeight+1) + v1, _, err := sm.LoadValidators(stateDB, nextHeight+1) assert.Nil(t, err) acc1 := v1.Validators[0].ProposerPriority @@ -875,9 +878,9 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { defer tearDown(t) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) - types.SelectProposer(state.Validators, []byte{}, 1, 0) + state.Validators.SelectProposer([]byte{}, 1, 0) state.NextValidators = state.Validators.Copy() - types.SelectProposer(state.NextValidators, []byte{}, 2, 0) + state.NextValidators.SelectProposer([]byte{}, 2, 0) sm.SaveState(stateDB, state) _, valOld := state.Validators.GetByIndex(0) @@ -895,10 +898,11 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, + state.LastProofHash, state.NextValidators) // Load nextheight, it should be the oldpubkey. - v0, err := sm.LoadValidators(stateDB, nextHeight) + v0, _, err := sm.LoadValidators(stateDB, nextHeight) assert.Nil(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByAddress(pubkeyOld.Address()) @@ -908,7 +912,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { } // Load nextheight+1, it should be the new pubkey. - v1, err := sm.LoadValidators(stateDB, nextHeight+1) + v1, _, err := sm.LoadValidators(stateDB, nextHeight+1) assert.Nil(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByAddress(pubkey.Address()) diff --git a/state/store.go b/state/store.go index e49e289f0..77b82bcf3 100644 --- a/state/store.go +++ b/state/store.go @@ -104,10 +104,10 @@ func saveState(db dbm.DB, state State, key []byte) { // This extra logic due to Tendermint validator set changes being delayed 1 block. // It may get overwritten due to InitChain validator updates. lastHeightVoteChanged := int64(1) - saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, state.Validators) + saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, []byte{}, state.Validators) } // Save next validators. - saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.LastProofHash, state.NextValidators) // Save next consensus params. saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) db.SetSync(key, state.Bytes()) @@ -186,6 +186,7 @@ func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { type ValidatorsInfo struct { ValidatorSet *types.ValidatorSet LastHeightChanged int64 + ProofHash []byte } // Bytes serializes the ValidatorsInfo using go-amino. @@ -193,16 +194,17 @@ func (valInfo *ValidatorsInfo) Bytes() []byte { return cdc.MustMarshalBinaryBare(valInfo) } -// LoadValidators loads the ValidatorSet for a given height. +// LoadValidators loads the VoterSet for a given height. // Returns ErrNoValSetForHeight if the validator set can't be found for this height. 
-func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { - valInfo := loadValidatorsInfo(db, height) +func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, *types.VoterSet, error) { + valInfo := loadValidatorsInfo(db, calcValidatorsKey(height)) if valInfo == nil { - return nil, ErrNoValSetForHeight{height} + return nil, nil, ErrNoValSetForHeight{height} } if valInfo.ValidatorSet == nil { + proofHash := valInfo.ProofHash // store proof hash of the height lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) - valInfo2 := loadValidatorsInfo(db, lastStoredHeight) + valInfo2 := loadValidatorsInfo(db, calcValidatorsKey(lastStoredHeight)) if valInfo2 == nil || valInfo2.ValidatorSet == nil { panic( fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", @@ -213,9 +215,10 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { } valInfo2.ValidatorSet.IncrementProposerPriority(int(height - lastStoredHeight)) // mutate valInfo = valInfo2 + valInfo.ProofHash = proofHash // reload proof again } - return valInfo.ValidatorSet, nil + return valInfo.ValidatorSet, types.SelectVoter(valInfo.ValidatorSet, valInfo.ProofHash), nil } func lastStoredHeightFor(height, lastHeightChanged int64) int64 { @@ -224,8 +227,8 @@ func lastStoredHeightFor(height, lastHeightChanged int64) int64 { } // CONTRACT: Returned ValidatorsInfo can be mutated. -func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { - buf, err := db.Get(calcValidatorsKey(height)) +func loadValidatorsInfo(db dbm.DB, valKey []byte) *ValidatorsInfo { + buf, err := db.Get(valKey) if err != nil { panic(err) } @@ -250,12 +253,13 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { // `height` is the effective height for which the validator is responsible for // signing. It should be called from s.Save(), right before the state itself is // persisted. -func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { +func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, proofHash []byte, valSet *types.ValidatorSet) { if lastHeightChanged > height { panic("LastHeightChanged cannot be greater than ValidatorsInfo height") } valInfo := &ValidatorsInfo{ LastHeightChanged: lastHeightChanged, + ProofHash: proofHash, } // Only persist validator set if it was updated or checkpoint height (see // valSetCheckpointInterval) is reached. 
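To see the reshaped store API in one place: saveValidatorsInfo now persists the VRF proof hash alongside the validator set, and LoadValidators returns the voter set derived from the stored set and that hash. Below is a minimal test-style sketch of the new call shapes, assuming a stateDB that already holds validator info for the heights involved (as the tests below arrange via SaveState); the helper name, the tm-db import path, and the assertions are illustrative, not part of the patch.

package state_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	sm "github.com/tendermint/tendermint/state"
)

// saveAndLoadVoters mirrors the call sites updated above: SaveValidatorsInfo
// now records the proof hash next to the validator set, and LoadValidators
// returns (validator set, voter set, error).
func saveAndLoadVoters(t *testing.T, stateDB dbm.DB, state sm.State, nextHeight int64) {
	sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged,
		state.LastProofHash, state.NextValidators)

	valSet, voterSet, err := sm.LoadValidators(stateDB, nextHeight+1)
	require.NoError(t, err)
	require.NotNil(t, valSet)
	require.NotNil(t, voterSet)
}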
diff --git a/state/store_test.go b/state/store_test.go index 596f479ed..a65aed979 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -20,17 +20,17 @@ func TestStoreLoadValidators(t *testing.T) { vals := types.NewValidatorSet([]*types.Validator{val}) // 1) LoadValidators loads validators using a height where they were last changed - sm.SaveValidatorsInfo(stateDB, 1, 1, vals) - sm.SaveValidatorsInfo(stateDB, 2, 1, vals) - loadedVals, err := sm.LoadValidators(stateDB, 2) + sm.SaveValidatorsInfo(stateDB, 1, 1, []byte{}, vals) + sm.SaveValidatorsInfo(stateDB, 2, 1, []byte{}, vals) + loadedVals, _, err := sm.LoadValidators(stateDB, 2) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) // 2) LoadValidators loads validators using a checkpoint height - sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, []byte{}, vals) - loadedVals, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval) + loadedVals, _, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) } @@ -47,18 +47,18 @@ func BenchmarkLoadValidators(b *testing.B) { b.Fatal(err) } state.Validators = genValSet(valSetSize) - types.SelectProposer(state.Validators, []byte{}, 1, 0) + state.Validators.SelectProposer([]byte{}, 1, 0) state.NextValidators = state.Validators.Copy() - types.SelectProposer(state.NextValidators, []byte{}, 2, 0) + state.Validators.SelectProposer([]byte{}, 2, 0) sm.SaveState(stateDB, state) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... i := i - sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, []byte{}, state.NextValidators) b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { for n := 0; n < b.N; n++ { - _, err := sm.LoadValidators(stateDB, int64(i)) + _, _, err := sm.LoadValidators(stateDB, int64(i)) if err != nil { b.Fatal(err) } diff --git a/state/validation.go b/state/validation.go index a2760d532..7cea77579 100644 --- a/state/validation.go +++ b/state/validation.go @@ -68,16 +68,16 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round block.LastResultsHash, ) } - if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { - return fmt.Errorf("wrong Block.Header.ValidatorsHash. Expected %X, got %v", - state.Validators.Hash(), - block.ValidatorsHash, + if !bytes.Equal(block.VotersHash, state.Voters.Hash()) { + return fmt.Errorf("wrong Block.Header.VotersHash. Expected %X, got %v", + state.Voters.Hash(), + block.VotersHash, ) } - if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) { - return fmt.Errorf("wrong Block.Header.NextValidatorsHash. Expected %X, got %v", - state.NextValidators.Hash(), - block.NextValidatorsHash, + if !bytes.Equal(block.NextVotersHash, state.NextVoters.Hash()) { + return fmt.Errorf("wrong Block.Header.NextVotersHash. 
Expected %X, got %v", + state.NextVoters.Hash(), + block.NextVotersHash, ) } @@ -87,10 +87,10 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round return errors.New("block at height 1 can't have LastCommit signatures") } } else { - if len(block.LastCommit.Signatures) != state.LastValidators.Size() { - return types.NewErrInvalidCommitSignatures(state.LastValidators.Size(), len(block.LastCommit.Signatures)) + if len(block.LastCommit.Signatures) != state.LastVoters.Size() { + return types.NewErrInvalidCommitSignatures(state.LastVoters.Size(), len(block.LastCommit.Signatures)) } - err := state.LastValidators.VerifyCommit( + err := state.LastVoters.VerifyCommit( state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) if err != nil { return err @@ -106,7 +106,7 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round ) } - medianTime := MedianTime(block.LastCommit, state.LastValidators) + medianTime := MedianTime(block.LastCommit, state.LastVoters) if !block.Time.Equal(medianTime) { return fmt.Errorf("invalid block time. Expected %v, got %v", medianTime, @@ -153,10 +153,10 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round // validate proposer if !bytes.Equal(block.ProposerAddress.Bytes(), - types.SelectProposer(state.Validators, state.LastProofHash, block.Height, block.Round).Address.Bytes()) { + state.Validators.SelectProposer(state.LastProofHash, block.Height, block.Round).Address.Bytes()) { return fmt.Errorf("block.ProposerAddress, %X, is not the proposer %X", block.ProposerAddress, - types.SelectProposer(state.Validators, state.LastProofHash, block.Height, block.Round).Address, + state.Validators.SelectProposer(state.LastProofHash, block.Height, block.Round).Address, ) } @@ -206,23 +206,23 @@ func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error evidence.Time(), state.LastBlockTime.Add(evidenceParams.MaxAgeDuration)) } - valset, err := LoadValidators(stateDB, evidence.Height()) + _, voterSet, err := LoadValidators(stateDB, evidence.Height()) if err != nil { // TODO: if err is just that we cant find it cuz we pruned, ignore. // TODO: if its actually bad evidence, punish peer return err } - // The address must have been an active validator at the height. - // NOTE: we will ignore evidence from H if the key was not a validator + // The address must have been an active voter at the height. 
+ // NOTE: we will ignore evidence from H if the key was not a voter // at H, even if it is a validator at some nearby H' // XXX: this makes lite-client bisection as is unsafe // See https://github.com/tendermint/tendermint/issues/3244 ev := evidence height, addr := ev.Height(), ev.Address() - _, val := valset.GetByAddress(addr) + _, val := voterSet.GetByAddress(addr) if val == nil { - return fmt.Errorf("address %X was not a validator at height %d", addr, height) + return fmt.Errorf("address %X was not a voter at height %d", addr, height) } if err := evidence.Verify(state.ChainID, val.PubKey); err != nil { diff --git a/state/validation_test.go b/state/validation_test.go index eae63943d..d86e8bee3 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -54,8 +54,8 @@ func TestValidateBlockHeader(t *testing.T) { {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, {"DataHash wrong", func(block *types.Block) { block.DataHash = wrongHash }}, - {"ValidatorsHash wrong", func(block *types.Block) { block.ValidatorsHash = wrongHash }}, - {"NextValidatorsHash wrong", func(block *types.Block) { block.NextValidatorsHash = wrongHash }}, + {"VotersHash wrong", func(block *types.Block) { block.VotersHash = wrongHash }}, + {"NextVotersHash wrong", func(block *types.Block) { block.NextVotersHash = wrongHash }}, {"ConsensusHash wrong", func(block *types.Block) { block.ConsensusHash = wrongHash }}, {"AppHash wrong", func(block *types.Block) { block.AppHash = wrongHash }}, {"LastResultsHash wrong", func(block *types.Block) { block.LastResultsHash = wrongHash }}, @@ -67,7 +67,7 @@ func TestValidateBlockHeader(t *testing.T) { // Build up state for multiple heights for height := int64(1); height < validationTestsStopHeight; height++ { - proposerAddr := types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address + proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address /* Invalid blocks don't pass */ @@ -107,10 +107,10 @@ func TestValidateBlockCommit(t *testing.T) { badPrivVal := types.NewMockPV() for height := int64(1); height < validationTestsStopHeight; height++ { - proposerAddr := types.SelectProposer(state.Validators, []byte{}, height, 0).Address + proposerAddr := state.Validators.SelectProposer([]byte{}, height, 0).Address if height > 1 { /* - #2589: ensure state.LastValidators.VerifyCommit fails here + #2589: ensure state.LastVoters.VerifyCommit fails here */ // should be height-1 instead of height wrongHeightVote, err := types.MakeVote( @@ -136,7 +136,7 @@ func TestValidateBlockCommit(t *testing.T) { require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) /* - #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() + #2589: test len(block.LastCommit.Signatures) == state.LastVoters.Size() */ block, _ = state.MakeBlock(height, makeTxs(height), wrongSigsCommit, nil, proposerAddr, 0, proof) err = blockExec.ValidateBlock(state, 0, block) @@ -210,7 +210,7 @@ func TestValidateBlockEvidence(t *testing.T) { lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) for height := int64(1); height < validationTestsStopHeight; height++ { - proposerAddr := types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address + proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address proposerIdx, _ := state.Validators.GetByAddress(proposerAddr) goodEvidence := types.NewMockEvidence(height, time.Now(), 
proposerIdx, proposerAddr) if height > 1 { diff --git a/store/store_test.go b/store/store_test.go index 970628e58..89f4c551f 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -47,7 +47,7 @@ func makeTxs(height int64) (txs []types.Tx) { func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, nil) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, nil) return block } diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index fb5458e82..9ba010942 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,7 +1,4 @@ -FROM golang:1.13 - -# Add testing deps for curl -RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list +FROM golang:1.14 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/types/block.go b/types/block.go index b06730fb0..dfbbf461b 100644 --- a/types/block.go +++ b/types/block.go @@ -105,11 +105,11 @@ func (b *Block) ValidateBasic() error { // Basic validation of hashes related to application data. // Will validate fully against state in state#ValidateBlock. - if err := ValidateHash(b.ValidatorsHash); err != nil { - return fmt.Errorf("wrong Header.ValidatorsHash: %v", err) + if err := ValidateHash(b.VotersHash); err != nil { + return fmt.Errorf("wrong Header.VotersHash: %v", err) } - if err := ValidateHash(b.NextValidatorsHash); err != nil { - return fmt.Errorf("wrong Header.NextValidatorsHash: %v", err) + if err := ValidateHash(b.NextVotersHash); err != nil { + return fmt.Errorf("wrong Header.NextVotersHash: %v", err) } if err := ValidateHash(b.ConsensusHash); err != nil { return fmt.Errorf("wrong Header.ConsensusHash: %v", err) @@ -336,10 +336,10 @@ type Header struct { DataHash tmbytes.HexBytes `json:"data_hash"` // transactions // hashes from the app output from the prev block - ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` // validators for the current block - NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"` // validators for the next block - ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` // consensus params for current block - AppHash tmbytes.HexBytes `json:"app_hash"` // state after txs from the previous block + VotersHash tmbytes.HexBytes `json:"voters_hash"` // voters for the current block + NextVotersHash tmbytes.HexBytes `json:"next_voters_hash"` // voters for the next block + ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` // consensus params for current block + AppHash tmbytes.HexBytes `json:"app_hash"` // state after txs from the previous block // root hash of all results from the txs from the previous block LastResultsHash tmbytes.HexBytes `json:"last_results_hash"` @@ -357,7 +357,7 @@ type Header struct { func (h *Header) Populate( version version.Consensus, chainID string, timestamp time.Time, lastBlockID BlockID, - valHash, nextValHash []byte, + votersHash, nextVotersHash []byte, consensusHash, appHash, lastResultsHash []byte, proposerAddress Address, round int, @@ -367,8 +367,8 @@ func (h *Header) Populate( h.ChainID = chainID h.Time = timestamp h.LastBlockID = lastBlockID - h.ValidatorsHash = valHash - h.NextValidatorsHash = nextValHash + h.VotersHash = votersHash + h.NextVotersHash = nextVotersHash h.ConsensusHash = consensusHash h.AppHash = appHash h.LastResultsHash = lastResultsHash @@ -384,7 +384,7 @@ 
func (h *Header) Populate( // since a Header is not valid unless there is // a ValidatorsHash (corresponding to the validator set). func (h *Header) Hash() tmbytes.HexBytes { - if h == nil || len(h.ValidatorsHash) == 0 { + if h == nil || len(h.VotersHash) == 0 { return nil } return merkle.SimpleHashFromByteSlices([][]byte{ @@ -395,8 +395,8 @@ func (h *Header) Hash() tmbytes.HexBytes { cdcEncode(h.LastBlockID), cdcEncode(h.LastCommitHash), cdcEncode(h.DataHash), - cdcEncode(h.ValidatorsHash), - cdcEncode(h.NextValidatorsHash), + cdcEncode(h.VotersHash), + cdcEncode(h.NextVotersHash), cdcEncode(h.ConsensusHash), cdcEncode(h.AppHash), cdcEncode(h.LastResultsHash), @@ -438,8 +438,8 @@ func (h *Header) StringIndented(indent string) string { indent, h.LastBlockID, indent, h.LastCommitHash, indent, h.DataHash, - indent, h.ValidatorsHash, - indent, h.NextValidatorsHash, + indent, h.VotersHash, + indent, h.NextVotersHash, indent, h.AppHash, indent, h.ConsensusHash, indent, h.LastResultsHash, @@ -571,9 +571,9 @@ func (cs CommitSig) ValidateBasic() error { // NOTE: Commit is empty for height 1, but never nil. type Commit struct { // NOTE: The signatures are in order of address to preserve the bonded - // ValidatorSet order. + // VoterSet order. // Any peer with a block can gossip signatures by index with a peer without - // recalculating the active ValidatorSet. + // recalculating the active VoterSet. Height int64 `json:"height"` Round int `json:"round"` BlockID BlockID `json:"block_id"` @@ -599,8 +599,8 @@ func NewCommit(height int64, round int, blockID BlockID, commitSigs []CommitSig) // CommitToVoteSet constructs a VoteSet from the Commit and validator set. // Panics if signatures from the commit can't be added to the voteset. // Inverse of VoteSet.MakeCommit(). -func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, commit.Height, commit.Round, PrecommitType, vals) +func CommitToVoteSet(chainID string, commit *Commit, voters *VoterSet) *VoteSet { + voteSet := NewVoteSet(chainID, commit.Height, commit.Round, PrecommitType, voters) for idx, commitSig := range commit.Signatures { if commitSig.Absent() { continue // OK, some precommits can be missing. 
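With ValidatorsHash / NextValidatorsHash renamed to VotersHash / NextVotersHash, Header.Hash now keys off the voter-set hash. A short sketch of that behaviour under stated assumptions: the test name is invented, the ed25519 and version import paths are assumed, and the single-validator voter set exists only to produce a non-empty hash.

package types_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
)

// A header is only hashable once VotersHash is set, mirroring the
// len(h.VotersHash) == 0 guard in Header.Hash above.
func TestHeaderHashKeysOffVotersHash(t *testing.T) {
	h := &types.Header{
		Version: version.Consensus{Block: 1, App: 1},
		ChainID: "test-chain",
		Height:  1,
		Time:    time.Now(),
	}
	assert.Nil(t, h.Hash(), "no VotersHash yet, so Hash() is nil")

	val := types.NewValidator(ed25519.GenPrivKey().PubKey(), 10)
	voters := types.ToVoterAll(types.NewValidatorSet([]*types.Validator{val}))
	h.VotersHash = voters.Hash()
	assert.NotNil(t, h.Hash(), "VotersHash set, so the header hashes")
}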
diff --git a/types/block_test.go b/types/block_test.go index 11725673a..f07d17459 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -35,11 +35,11 @@ func TestBlockAddEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, valSet.Voters[0].Address) evList := []Evidence{ev} block := MakeBlock(h, txs, commit, evList) @@ -55,11 +55,11 @@ func TestBlockValidateBasic(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, valSet, voterSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, voterSet.Voters[0].Address) evList := []Evidence{ev} testCases := []struct { @@ -69,7 +69,7 @@ func TestBlockValidateBasic(t *testing.T) { }{ {"Make Block", func(blk *Block) {}, false}, {"Make Block w/ proposer Addr", func(blk *Block) { - blk.ProposerAddress = SelectProposer(valSet, []byte{}, blk.Height, 0).Address + blk.ProposerAddress = valSet.SelectProposer([]byte{}, blk.Height, 0).Address }, false}, {"Negative Height", func(blk *Block) { blk.Height = -1 }, true}, {"Remove 1/2 the commits", func(blk *Block) { @@ -93,7 +93,7 @@ func TestBlockValidateBasic(t *testing.T) { i := i t.Run(tc.testName, func(t *testing.T) { block := MakeBlock(h, txs, commit, evList) - block.ProposerAddress = SelectProposer(valSet, []byte{}, block.Height, 0).Address + block.ProposerAddress = valSet.SelectProposer([]byte{}, block.Height, 0).Address tc.malleateBlock(block) err = block.ValidateBasic() assert.Equal(t, tc.expErr, err != nil, "#%d: %v", i, err) @@ -120,11 +120,11 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, voterSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, voterSet.Voters[0].Address) evList := []Evidence{ev} partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512) @@ -137,15 +137,15 @@ func TestBlockHashesTo(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, voterSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, voterSet.Voters[0].Address) evList := []Evidence{ev} block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList) - block.ValidatorsHash = valSet.Hash() + block.VotersHash = voterSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, block.HashesTo([]byte("something else"))) assert.True(t, block.HashesTo(block.Hash())) @@ -210,7 +210,7 @@ func TestNilDataHashDoesntCrash(t 
*testing.T) { func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) @@ -256,39 +256,39 @@ func TestHeaderHash(t *testing.T) { expectHash bytes.HexBytes }{ {"Generates expected hash", &Header{ - Version: version.Consensus{Block: 1, App: 2}, - ChainID: "chainId", - Height: 3, - Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), - LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: tmhash.Sum([]byte("validators_hash")), - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), - Round: 1, - Proof: tmhash.Sum([]byte("proof")), - }, hexBytesFromString("A607E71253D996B2D75CC98AEC7FE6363598F6ED37A501B427DBD3A7781FBE15")}, + Version: version.Consensus{Block: 1, App: 2}, + ChainID: "chainId", + Height: 3, + Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), + LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + VotersHash: tmhash.Sum([]byte("voters_hash")), + NextVotersHash: tmhash.Sum([]byte("next_voters_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Round: 1, + Proof: tmhash.Sum([]byte("proof")), + }, hexBytesFromString("0ECEA9AA5613ECD1673C223FA92A4651727C3DD7AF61E2C5FA979EEDBCC05F37")}, {"nil header yields nil", nil, nil}, - {"nil ValidatorsHash yields nil", &Header{ - Version: version.Consensus{Block: 1, App: 2}, - ChainID: "chainId", - Height: 3, - Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), - LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: nil, - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + {"nil VotersHash yields nil", &Header{ + Version: version.Consensus{Block: 1, App: 2}, + ChainID: "chainId", + Height: 3, + Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), + LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + VotersHash: nil, + NextVotersHash: tmhash.Sum([]byte("next_voters_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + 
EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), }, nil}, } for _, tc := range testCases { @@ -329,20 +329,20 @@ func TestMaxHeaderBytes(t *testing.T) { timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, - ChainID: maxChainID, - Height: math.MaxInt64, - Time: timestamp, - LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: tmhash.Sum([]byte("validators_hash")), - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: maxChainID, + Height: math.MaxInt64, + Time: timestamp, + LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + VotersHash: tmhash.Sum([]byte("voters_hash")), + NextVotersHash: tmhash.Sum([]byte("next_voters_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), } bz, err := cdc.MarshalBinaryLengthPrefixed(h) @@ -354,7 +354,7 @@ func TestMaxHeaderBytes(t *testing.T) { func randCommit(now time.Time) *Commit { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, now) if err != nil { panic(err) @@ -433,7 +433,7 @@ func TestCommitToVoteSet(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) assert.NoError(t, err) @@ -473,7 +473,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } for _, tc := range testCases { - voteSet, valSet, vals := randVoteSet(height-1, round, PrecommitType, tc.numValidators, 1) + voteSet, _, valSet, vals := randVoteSet(height-1, round, PrecommitType, tc.numValidators, 1) vi := 0 for n := range tc.blockIDs { @@ -513,20 +513,20 @@ func TestSignedHeaderValidateBasic(t *testing.T) { chainID := "𠜎" timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, - ChainID: chainID, - Height: commit.Height, - Time: timestamp, - LastBlockID: commit.BlockID, - LastCommitHash: commit.Hash(), - DataHash: commit.Hash(), - ValidatorsHash: commit.Hash(), - NextValidatorsHash: commit.Hash(), - ConsensusHash: commit.Hash(), - AppHash: commit.Hash(), - LastResultsHash: commit.Hash(), - EvidenceHash: commit.Hash(), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Version: 
version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: chainID, + Height: commit.Height, + Time: timestamp, + LastBlockID: commit.BlockID, + LastCommitHash: commit.Hash(), + DataHash: commit.Hash(), + VotersHash: commit.Hash(), + NextVotersHash: commit.Hash(), + ConsensusHash: commit.Hash(), + AppHash: commit.Hash(), + LastResultsHash: commit.Hash(), + EvidenceHash: commit.Hash(), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), } validSignedHeader := SignedHeader{Header: &h, Commit: commit} diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index af3d5faf5..6829fb93d 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: types/proto3/block.proto -package proto3 +package proto3 // import "github.com/tendermint/tendermint/types/proto3" -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type PartSetHeader struct { Total int32 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` @@ -32,7 +30,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{0} + return fileDescriptor_block_84e32a3b9446dafc, []int{0} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PartSetHeader.Unmarshal(m, b) @@ -40,8 +38,8 @@ func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) } -func (m *PartSetHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartSetHeader.Merge(m, src) +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) } func (m *PartSetHeader) XXX_Size() int { return xxx_messageInfo_PartSetHeader.Size(m) @@ -68,7 +66,7 @@ func (m *PartSetHeader) GetHash() []byte { type BlockID struct { Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` - PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader,proto3" json:"PartsHeader,omitempty"` + PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -78,7 +76,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{1} + return fileDescriptor_block_84e32a3b9446dafc, []int{1} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockID.Unmarshal(m, b) @@ -86,8 +84,8 @@ func (m *BlockID) 
XXX_Unmarshal(b []byte) error { func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) } -func (m *BlockID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockID.Merge(m, src) +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) } func (m *BlockID) XXX_Size() int { return xxx_messageInfo_BlockID.Size(m) @@ -114,21 +112,21 @@ func (m *BlockID) GetPartsHeader() *PartSetHeader { type Header struct { // basic block info - Version *Version `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"` + Version *Version `protobuf:"bytes,1,opt,name=Version" json:"Version,omitempty"` ChainID string `protobuf:"bytes,2,opt,name=ChainID,proto3" json:"ChainID,omitempty"` Height int64 `protobuf:"varint,3,opt,name=Height,proto3" json:"Height,omitempty"` - Time *Timestamp `protobuf:"bytes,4,opt,name=Time,proto3" json:"Time,omitempty"` + Time *Timestamp `protobuf:"bytes,4,opt,name=Time" json:"Time,omitempty"` // prev block info - LastBlockID *BlockID `protobuf:"bytes,5,opt,name=LastBlockID,proto3" json:"LastBlockID,omitempty"` + LastBlockID *BlockID `protobuf:"bytes,5,opt,name=LastBlockID" json:"LastBlockID,omitempty"` // hashes of block data LastCommitHash []byte `protobuf:"bytes,6,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` DataHash []byte `protobuf:"bytes,7,opt,name=DataHash,proto3" json:"DataHash,omitempty"` // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,8,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` - NextValidatorsHash []byte `protobuf:"bytes,9,opt,name=NextValidatorsHash,proto3" json:"NextValidatorsHash,omitempty"` - ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` - AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` + VotersHash []byte `protobuf:"bytes,8,opt,name=VotersHash,proto3" json:"VotersHash,omitempty"` + NextVotersHash []byte `protobuf:"bytes,9,opt,name=NextVotersHash,proto3" json:"NextVotersHash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` + AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` // consensus info EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` ProposerAddress []byte `protobuf:"bytes,14,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` @@ -141,7 +139,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{2} + return fileDescriptor_block_84e32a3b9446dafc, []int{2} } func (m *Header) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Header.Unmarshal(m, b) @@ -149,8 +147,8 @@ func (m *Header) XXX_Unmarshal(b []byte) error { func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Header.Marshal(b, m, deterministic) } -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) +func (dst *Header) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Header.Merge(dst, src) } func (m *Header) XXX_Size() int { return xxx_messageInfo_Header.Size(m) @@ -210,16 +208,16 @@ func (m *Header) GetDataHash() []byte { return nil } -func (m *Header) GetValidatorsHash() []byte { +func (m *Header) GetVotersHash() []byte { if m != nil { - return m.ValidatorsHash + return m.VotersHash } return nil } -func (m *Header) GetNextValidatorsHash() []byte { +func (m *Header) GetNextVotersHash() []byte { if m != nil { - return m.NextValidatorsHash + return m.NextVotersHash } return nil } @@ -271,7 +269,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{3} + return fileDescriptor_block_84e32a3b9446dafc, []int{3} } func (m *Version) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Version.Unmarshal(m, b) @@ -279,8 +277,8 @@ func (m *Version) XXX_Unmarshal(b []byte) error { func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Version.Marshal(b, m, deterministic) } -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) } func (m *Version) XXX_Size() int { return xxx_messageInfo_Version.Size(m) @@ -322,7 +320,7 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{4} + return fileDescriptor_block_84e32a3b9446dafc, []int{4} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timestamp.Unmarshal(m, b) @@ -330,8 +328,8 @@ func (m *Timestamp) XXX_Unmarshal(b []byte) error { func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) } -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) } func (m *Timestamp) XXX_Size() int { return xxx_messageInfo_Timestamp.Size(m) @@ -364,38 +362,37 @@ func init() { proto.RegisterType((*Timestamp)(nil), "tendermint.types.proto3.Timestamp") } -func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_760f4d5ceb2a11f0) } - -var fileDescriptor_760f4d5ceb2a11f0 = []byte{ - // 468 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdf, 0x8b, 0x13, 0x31, - 0x10, 0xc7, 0x59, 0xbb, 0x6d, 0xaf, 0xb3, 0xed, 0x29, 0x83, 0xe8, 0xe2, 0x53, 0x59, 0xe4, 0xe8, - 0x8b, 0x5b, 0xbc, 0x03, 0x41, 0x7d, 0xea, 0x0f, 0xa1, 0x07, 0x22, 0x47, 0x3c, 0xee, 0xc1, 0xb7, - 0xb4, 0x1b, 0xda, 0x60, 0x37, 0x59, 0x92, 0x54, 0xf4, 0x1f, 0xf4, 0xef, 0x92, 0x4c, 0xb6, 0xbd, - 0x6e, 0xb1, 0xdc, 0x53, 0xf3, 0x9d, 0xf9, 0xcc, 0x37, 0xb3, 0x93, 0x29, 0xa4, 0xee, 0x4f, 0x25, - 0xec, 0xb8, 0x32, 0xda, 0xe9, 0x9b, 0xf1, 0x72, 0xab, 0x57, 0x3f, 0x73, 0x12, 0xf8, 0xda, 0x09, - 0x55, 0x08, 0x53, 0x4a, 0xe5, 0x72, 0x82, 0x42, 0xfc, 0x26, 0xfb, 0x08, 0x83, 0x3b, 0x6e, 0xdc, - 0x77, 0xe1, 0x16, 0x82, 0x17, 0xc2, 0xe0, 0x4b, 0x68, 0xdf, 0x6b, 0xc7, 0xb7, 0x69, 0x34, 0x8c, - 0x46, 0x6d, 0x16, 0x04, 0x22, 0xc4, 0x0b, 0x6e, 0x37, 0xe9, 0xb3, 0x61, 0x34, 
0xea, 0x33, 0x3a, - 0x67, 0x6b, 0xe8, 0x4e, 0xfd, 0x15, 0xb7, 0xf3, 0x43, 0x3a, 0x7a, 0x4c, 0xe3, 0x02, 0x12, 0xef, - 0x6c, 0x83, 0x2f, 0x55, 0x26, 0xd7, 0x57, 0xf9, 0x99, 0x46, 0xf2, 0x46, 0x17, 0xec, 0xb8, 0x34, - 0xfb, 0x1b, 0x43, 0xa7, 0xee, 0xee, 0x13, 0x74, 0x1f, 0x84, 0xb1, 0x52, 0x2b, 0xba, 0x2b, 0xb9, - 0x1e, 0x9e, 0x35, 0xac, 0x39, 0xb6, 0x2f, 0xc0, 0x14, 0xba, 0xb3, 0x0d, 0x97, 0xea, 0x76, 0x4e, - 0xcd, 0xf4, 0xd8, 0x5e, 0xe2, 0x2b, 0xef, 0x2f, 0xd7, 0x1b, 0x97, 0xb6, 0x86, 0xd1, 0xa8, 0xc5, - 0x6a, 0x85, 0x1f, 0x20, 0xbe, 0x97, 0xa5, 0x48, 0x63, 0xba, 0x2a, 0x3b, 0x7b, 0x95, 0x87, 0xac, - 0xe3, 0x65, 0xc5, 0x88, 0xc7, 0x29, 0x24, 0x5f, 0xb9, 0x75, 0xf5, 0x74, 0xd2, 0xf6, 0x13, 0x9d, - 0xd6, 0x1c, 0x3b, 0x2e, 0xc2, 0x2b, 0xb8, 0xf4, 0x72, 0xa6, 0xcb, 0x52, 0x3a, 0x1a, 0x6e, 0x87, - 0x86, 0x7b, 0x12, 0xc5, 0x37, 0x70, 0x31, 0xe7, 0x8e, 0x13, 0xd1, 0x25, 0xe2, 0xa0, 0xbd, 0xc7, - 0x03, 0xdf, 0xca, 0x82, 0x3b, 0x6d, 0x2c, 0x11, 0x17, 0xc1, 0xa3, 0x19, 0xc5, 0x1c, 0xf0, 0x9b, - 0xf8, 0xed, 0x4e, 0xd8, 0x1e, 0xb1, 0xff, 0xc9, 0xe0, 0x5b, 0x18, 0xcc, 0xb4, 0xb2, 0x42, 0xd9, - 0x5d, 0x40, 0x81, 0xd0, 0x66, 0xd0, 0xcf, 0x7b, 0x52, 0x55, 0x94, 0x4f, 0x28, 0xbf, 0x97, 0x38, - 0x82, 0xe7, 0xfe, 0x2b, 0x98, 0xb0, 0xbb, 0xad, 0x0b, 0x0e, 0x7d, 0x22, 0x4e, 0xc3, 0x98, 0x41, - 0xff, 0xcb, 0x2f, 0x59, 0x08, 0xb5, 0x12, 0x84, 0x0d, 0x08, 0x6b, 0xc4, 0xbc, 0xdb, 0x9d, 0xd1, - 0x95, 0xb6, 0xc2, 0x4c, 0x8a, 0xc2, 0x08, 0x6b, 0xd3, 0xcb, 0xe0, 0x76, 0x12, 0xce, 0xde, 0x1f, - 0xb6, 0xc7, 0xaf, 0x39, 0x4d, 0x9a, 0xd6, 0x28, 0x66, 0x41, 0xe0, 0x0b, 0x68, 0x4d, 0xaa, 0x8a, - 0xd6, 0x23, 0x66, 0xfe, 0x98, 0x7d, 0x86, 0xde, 0xe1, 0x75, 0xfd, 0x17, 0x59, 0xb1, 0xd2, 0xaa, - 0xb0, 0x54, 0xd6, 0x62, 0x7b, 0xe9, 0xed, 0x14, 0x57, 0xda, 0x52, 0x69, 0x9b, 0x05, 0x31, 0x1d, - 0xff, 0x78, 0xb7, 0x96, 0x6e, 0xb3, 0x5b, 0xe6, 0x2b, 0x5d, 0x8e, 0x1f, 0x9f, 0xbf, 0x71, 0x3c, - 0xfa, 0xcb, 0x2e, 0x3b, 0xe1, 0xf7, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x0b, 0x4e, 0x15, - 0xc9, 0x03, 0x00, 0x00, +func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_block_84e32a3b9446dafc) } + +var fileDescriptor_block_84e32a3b9446dafc = []byte{ + // 464 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xdf, 0x8b, 0xd3, 0x40, + 0x10, 0x26, 0x36, 0x6d, 0xaf, 0x93, 0xf6, 0x94, 0x45, 0x34, 0xf8, 0x20, 0x25, 0xc8, 0xd1, 0x17, + 0x53, 0xbc, 0x03, 0x41, 0x7d, 0xea, 0x0f, 0xa1, 0x07, 0x22, 0xc7, 0x7a, 0xdc, 0x83, 0x6f, 0xdb, + 0x66, 0x68, 0x83, 0xcd, 0x6e, 0xd8, 0xdd, 0x8a, 0xfe, 0x63, 0xfe, 0x7d, 0xb2, 0xb3, 0x69, 0x2e, + 0x29, 0x94, 0x7b, 0xea, 0x7c, 0xdf, 0x7c, 0xf3, 0xcd, 0x74, 0x76, 0x02, 0xb1, 0xfd, 0x5b, 0xa2, + 0x99, 0x96, 0x5a, 0x59, 0x75, 0x33, 0x5d, 0xef, 0xd5, 0xe6, 0x57, 0x4a, 0x80, 0xbd, 0xb6, 0x28, + 0x33, 0xd4, 0x45, 0x2e, 0x6d, 0x4a, 0x22, 0xcf, 0xdf, 0x24, 0x9f, 0x60, 0x74, 0x27, 0xb4, 0xfd, + 0x81, 0x76, 0x85, 0x22, 0x43, 0xcd, 0x5e, 0x42, 0xf7, 0x5e, 0x59, 0xb1, 0x8f, 0x83, 0x71, 0x30, + 0xe9, 0x72, 0x0f, 0x18, 0x83, 0x70, 0x25, 0xcc, 0x2e, 0x7e, 0x36, 0x0e, 0x26, 0x43, 0x4e, 0x71, + 0xb2, 0x85, 0xfe, 0xdc, 0xb5, 0xb8, 0x5d, 0xd6, 0xe9, 0xe0, 0x31, 0xcd, 0x56, 0x10, 0x39, 0x67, + 0xe3, 0x7d, 0xa9, 0x32, 0xba, 0xbe, 0x4a, 0xcf, 0x0c, 0x92, 0xb6, 0xa6, 0xe0, 0xcd, 0xd2, 0xe4, + 0x5f, 0x08, 0xbd, 0x6a, 0xba, 0xcf, 0xd0, 0x7f, 0x40, 0x6d, 0x72, 0x25, 0xa9, 0x57, 0x74, 0x3d, + 0x3e, 0x6b, 0x58, 0xe9, 0xf8, 0xb1, 0x80, 0xc5, 0xd0, 0x5f, 0xec, 0x44, 0x2e, 0x6f, 0x97, 0x34, + 0xcc, 0x80, 0x1f, 0x21, 0x7b, 0xe5, 0xfc, 0xf3, 0xed, 0xce, 0xc6, 
0x9d, 0x71, 0x30, 0xe9, 0xf0, + 0x0a, 0xb1, 0x8f, 0x10, 0xde, 0xe7, 0x05, 0xc6, 0x21, 0xb5, 0x4a, 0xce, 0xb6, 0x72, 0x22, 0x63, + 0x45, 0x51, 0x72, 0xd2, 0xb3, 0x39, 0x44, 0xdf, 0x84, 0xb1, 0xd5, 0x76, 0xe2, 0xee, 0x13, 0x93, + 0x56, 0x3a, 0xde, 0x2c, 0x62, 0x57, 0x70, 0xe9, 0xe0, 0x42, 0x15, 0x45, 0x6e, 0x69, 0xb9, 0x3d, + 0x5a, 0xee, 0x09, 0xcb, 0xde, 0xc0, 0xc5, 0x52, 0x58, 0x41, 0x8a, 0x3e, 0x29, 0x6a, 0xcc, 0xde, + 0x02, 0x3c, 0x28, 0x8b, 0xda, 0x50, 0xf6, 0x82, 0xb2, 0x0d, 0xc6, 0xf5, 0xf8, 0x8e, 0x7f, 0x6c, + 0x43, 0x33, 0xf0, 0x3d, 0xda, 0x2c, 0x7b, 0x07, 0xa3, 0x85, 0x92, 0x06, 0xa5, 0x39, 0x78, 0x19, + 0x90, 0xac, 0x4d, 0xba, 0xfd, 0xce, 0xca, 0x92, 0xf2, 0x11, 0xe5, 0x8f, 0x90, 0x4d, 0xe0, 0xb9, + 0x9b, 0x9a, 0xa3, 0x39, 0xec, 0xad, 0x77, 0x18, 0x92, 0xe2, 0x94, 0x66, 0x09, 0x0c, 0xbf, 0xfe, + 0xce, 0x33, 0x94, 0x1b, 0x24, 0xd9, 0x88, 0x64, 0x2d, 0xce, 0xb9, 0xdd, 0x69, 0x55, 0x2a, 0x83, + 0x7a, 0x96, 0x65, 0x1a, 0x8d, 0x89, 0x2f, 0xbd, 0xdb, 0x09, 0x9d, 0x7c, 0xa8, 0xaf, 0xc5, 0x9d, + 0x35, 0x6d, 0x96, 0xce, 0x26, 0xe4, 0x1e, 0xb0, 0x17, 0xd0, 0x99, 0x95, 0x25, 0x9d, 0x43, 0xc8, + 0x5d, 0x98, 0x7c, 0x81, 0x41, 0xfd, 0x9a, 0xee, 0x1f, 0x19, 0xdc, 0x28, 0x99, 0x19, 0x2a, 0xeb, + 0xf0, 0x23, 0x74, 0x76, 0x52, 0x48, 0x65, 0xa8, 0xb4, 0xcb, 0x3d, 0x98, 0x4f, 0x7f, 0xbe, 0xdf, + 0xe6, 0x76, 0x77, 0x58, 0xa7, 0x1b, 0x55, 0x4c, 0x1f, 0x9f, 0xbb, 0x15, 0x36, 0x3e, 0xd1, 0x75, + 0xcf, 0xff, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xe5, 0x35, 0x61, 0xb9, 0x03, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index adaa0a00d..eb198b7ba 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -24,19 +24,19 @@ message Header { BlockID LastBlockID = 5; // hashes of block data - bytes LastCommitHash = 6; // commit from validators from the last block - bytes DataHash = 7; // transactions + bytes LastCommitHash = 6; // commit from validators from the last block + bytes DataHash = 7; // transactions // hashes from the app output from the prev block - bytes ValidatorsHash = 8; // validators for the current block - bytes NextValidatorsHash = 9; // validators for the next block - bytes ConsensusHash = 10; // consensus params for current block - bytes AppHash = 11; // state after txs from the previous block - bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block + bytes VotersHash = 8; // voters for the current block + bytes NextVotersHash = 9; // voters for the next block + bytes ConsensusHash = 10; // consensus params for current block + bytes AppHash = 11; // state after txs from the previous block + bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block // consensus info - bytes EvidenceHash = 13; // evidence included in the block - bytes ProposerAddress = 14; // original proposer of the block + bytes EvidenceHash = 13; // evidence included in the block + bytes ProposerAddress = 14; // original proposer of the block } message Version { diff --git a/types/proto3_test.go b/types/proto3_test.go index f969be128..f50c483ea 100644 --- a/types/proto3_test.go +++ b/types/proto3_test.go @@ -33,7 +33,7 @@ func TestProto3Compatibility(t *testing.T) { }, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } aminoHeader := Header{ ChainID: "cosmos", @@ -48,7 +48,7 @@ func TestProto3Compatibility(t *testing.T) { }, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - 
ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } ab, err := cdc.MarshalBinaryBare(aminoHeader) assert.NoError(t, err, "unexpected error") @@ -64,7 +64,7 @@ func TestProto3Compatibility(t *testing.T) { Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } emptyLastBlockAm := Header{ ChainID: "cosmos", @@ -72,7 +72,7 @@ func TestProto3Compatibility(t *testing.T) { Time: tm, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } ab, err = cdc.MarshalBinaryBare(emptyLastBlockAm) diff --git a/types/protobuf.go b/types/protobuf.go index 52815593f..5ff70936f 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -57,8 +57,8 @@ func (tm2pb) Header(header *Header) abci.Header { LastCommitHash: header.LastCommitHash, DataHash: header.DataHash, - ValidatorsHash: header.ValidatorsHash, - NextValidatorsHash: header.NextValidatorsHash, + ValidatorsHash: header.VotersHash, + NextValidatorsHash: header.NextVotersHash, ConsensusHash: header.ConsensusHash, AppHash: header.AppHash, LastResultsHash: header.LastResultsHash, @@ -149,7 +149,7 @@ func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { // ABCI Evidence includes information from the past that's not included in the evidence itself // so Evidence types stays compact. // XXX: panics on nil or unknown pubkey type -func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence { +func (tm2pb) Evidence(ev Evidence, valSet *VoterSet, evTime time.Time) abci.Evidence { _, val := valSet.GetByAddress(ev.Address()) if val == nil { // should already have checked this diff --git a/types/protobuf_test.go b/types/protobuf_test.go index b688716b5..049eaa48e 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -139,7 +139,7 @@ func TestABCIEvidence(t *testing.T) { } abciEv := TM2PB.Evidence( ev, - NewValidatorSet([]*Validator{NewValidator(pubKey, 10)}), + ToVoterAll(NewValidatorSet([]*Validator{NewValidator(pubKey, 10)})), time.Now(), ) diff --git a/types/validator_set.go b/types/validator_set.go index 6d04424a6..a902348fa 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -2,7 +2,6 @@ package types import ( "bytes" - "encoding/binary" "fmt" "math" "math/big" @@ -11,8 +10,6 @@ import ( "github.com/pkg/errors" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" - tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" ) @@ -31,7 +28,7 @@ const ( PriorityWindowSizeFactor = 2 ) -// ValidatorSet represent a set of *Validator at a given height. +// VoterSet represent a set of *Validator at a given height. // The validators can be fetched by address or index. // The index is in order of .Address, so the indices are fixed // for all rounds of a given blockchain height - ie. the validators @@ -49,9 +46,9 @@ type ValidatorSet struct { totalVotingPower int64 } -// NewValidatorSet initializes a ValidatorSet by copying over the +// NewValidatorSet initializes a VoterSet by copying over the // values from `valz`, a list of Validators. If valz is nil or empty, -// the new ValidatorSet will have an empty list of Validators. +// the new VoterSet will have an empty list of Validators. 
// The addresses of validators in `valz` must be unique otherwise the // function panics. // Note the validator set size has an implied limit equal to that of the MaxVotesCount - @@ -211,7 +208,7 @@ func validatorListCopy(valsList []*Validator) []*Validator { return valsCopy } -// Copy each validator into a new ValidatorSet. +// Copy each validator into a new VoterSet. func (vals *ValidatorSet) Copy() *ValidatorSet { return &ValidatorSet{ Validators: validatorListCopy(vals.Validators), @@ -242,7 +239,7 @@ func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validato // GetByIndex returns the validator's address and validator itself by index. // It returns nil values if index is less than 0 or greater or equal to -// len(ValidatorSet.Validators). +// len(VoterSet.Validators). func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { if index < 0 || index >= len(vals.Validators) { return nil, nil @@ -599,223 +596,18 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { return vals.updateWithChangeSet(changes, true) } -// VerifyCommit verifies +2/3 of the set had signed the given commit. -func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, - height int64, commit *Commit) error { - - if vals.Size() != len(commit.Signatures) { - return NewErrInvalidCommitSignatures(vals.Size(), len(commit.Signatures)) - } - if err := verifyCommitBasic(commit, height, blockID); err != nil { - return err - } - - talliedVotingPower := int64(0) - votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. - } - - // The vals and commit have a 1-to-1 correspondance. - // This means we don't need the validator address or to do any lookup. - val := vals.Validators[idx] - - // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) - } - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - talliedVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. - // } - - // return as soon as +2/3 of the signatures are verified - if talliedVotingPower > votingPowerNeeded { - return nil - } - } - - // talliedVotingPower <= needed, thus return error - return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} -} - -// VerifyFutureCommit will check to see if the set would be valid with a different -// validator set. -// -// vals is the old validator set that we know. Over 2/3 of the power in old -// signed this block. -// -// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 -// can't make arbitrary state transitions. You still need > 2/3 Byzantine to -// make arbitrary state transitions. -// -// To preserve this property in the light client, we also require > 2/3 of the -// old vals to sign the future commit at H, that way we preserve the property -// that if they weren't being truthful about the validator set at H (block hash -// -> vals hash) or about the app state (block hash -> app hash) we can slash -// > 2/3. Otherwise, the lite client isn't providing the same security -// guarantees. 
-// -// Even if we added a slashing condition that if you sign a block header with -// the wrong validator set, then we would only need > 1/3 of signatures from -// the old vals on the new commit, it wouldn't be sufficient because the new -// vals can be arbitrary and commit some arbitrary app hash. -// -// newSet is the validator set that signed this block. Only votes from new are -// sufficient for 2/3 majority in the new set as well, for it to be a valid -// commit. -// -// NOTE: This doesn't check whether the commit is a future commit, because the -// current height isn't part of the ValidatorSet. Caller must check that the -// commit height is greater than the height for this validator set. -func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, - blockID BlockID, height int64, commit *Commit) error { - oldVals := vals - - // Commit must be a valid commit for newSet. - err := newSet.VerifyCommit(chainID, blockID, height, commit) - if err != nil { - return err - } - - // Check old voting power. - oldVotingPower := int64(0) - seen := map[int]bool{} - - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. - } - - // See if this validator is in oldVals. - oldIdx, val := oldVals.GetByAddress(commitSig.ValidatorAddress) - if val == nil || seen[oldIdx] { - continue // missing or double vote... - } - seen[oldIdx] = true - - // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) - } - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - oldVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. - // } - } - - if got, needed := oldVotingPower, oldVals.TotalVotingPower()*2/3; got <= needed { - return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} - } - return nil -} - -// VerifyCommitTrusting verifies that trustLevel ([1/3, 1]) of the validator -// set signed this commit. -// NOTE the given validators do not necessarily correspond to the validator set -// for this commit, but there may be some intersection. -func (vals *ValidatorSet) VerifyCommitTrusting(chainID string, blockID BlockID, - height int64, commit *Commit, trustLevel tmmath.Fraction) error { - - if trustLevel.Numerator*3 < trustLevel.Denominator || // < 1/3 - trustLevel.Numerator > trustLevel.Denominator { // > 1 - panic(fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", trustLevel)) - } - - if err := verifyCommitBasic(commit, height, blockID); err != nil { - return err - } - - var ( - talliedVotingPower int64 - seenVals = make(map[int]int, len(commit.Signatures)) // validator index -> commit index - votingPowerNeeded = (vals.TotalVotingPower() * trustLevel.Numerator) / trustLevel.Denominator - ) - - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. - } - - // We don't know the validators that committed this block, so we have to - // check for each vote if its validator is already known. 
- valIdx, val := vals.GetByAddress(commitSig.ValidatorAddress) - - if firstIndex, ok := seenVals[valIdx]; ok { // double vote - secondIndex := idx - return errors.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) - } - - if val != nil { - seenVals[valIdx] = idx - - // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) - } - - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - talliedVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. - // } - - if talliedVotingPower > votingPowerNeeded { - return nil - } - } - } - - return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} -} - -func verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error { - if err := commit.ValidateBasic(); err != nil { - return err - } - if height != commit.Height { - return NewErrInvalidCommitHeight(height, commit.Height) +func (vals *ValidatorSet) SelectProposer(proofHash []byte, height int64, round int) *Validator { + if vals.IsNilOrEmpty() { + panic("empty validator set") } - if !blockID.Equals(commit.BlockID) { - return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", - blockID, commit.BlockID) + seed := hashToSeed(MakeRoundHash(proofHash, height, round)) + candidates := make([]tmrand.Candidate, len(vals.Validators)) + for i, val := range vals.Validators { + candidates[i] = &candidate{idx: i, val: val} } - return nil -} - -//----------------- - -// IsErrNotEnoughVotingPowerSigned returns true if err is -// ErrNotEnoughVotingPowerSigned. -func IsErrNotEnoughVotingPowerSigned(err error) bool { - _, ok := errors.Cause(err).(ErrNotEnoughVotingPowerSigned) - return ok -} - -// ErrNotEnoughVotingPowerSigned is returned when not enough validators signed -// a commit. -type ErrNotEnoughVotingPowerSigned struct { - Got int64 - Needed int64 -} - -func (e ErrNotEnoughVotingPowerSigned) Error() string { - return fmt.Sprintf("invalid commit -- insufficient voting power: got %d, needed more than %d", e.Got, e.Needed) + samples := tmrand.RandomSamplingWithPriority(seed, candidates, 1, uint64(vals.TotalVotingPower())) + proposerIdx := samples[0].(*candidate).idx + return vals.Validators[proposerIdx] } //---------------- @@ -824,17 +616,17 @@ func (vals *ValidatorSet) String() string { return vals.StringIndented("") } -// StringIndented returns an intended string representation of ValidatorSet. +// StringIndented returns an intended string representation of VoterSet. func (vals *ValidatorSet) StringIndented(indent string) string { if vals == nil { - return "nil-ValidatorSet" + return "nil-VoterSet" } var valStrings []string vals.Iterate(func(index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) - return fmt.Sprintf(`ValidatorSet{ + return fmt.Sprintf(`VoterSet{ %s Validators: %s %v %s}`, @@ -866,22 +658,6 @@ func (valz ValidatorsByAddress) Swap(i, j int) { //---------------------------------------- // for testing -// RandValidatorSet returns a randomized validator set, useful for testing. -// NOTE: PrivValidator are in order. 
-// UNSTABLE -func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { - valz := make([]*Validator, numValidators) - privValidators := make([]PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privValidator := RandValidator(false, votingPower) - valz[i] = val - privValidators[i] = privValidator - } - vals := NewValidatorSet(valz) - sort.Sort(PrivValidatorsByAddress(privValidators)) - return vals, privValidators -} - /////////////////////////////////////////////////////////////////////////////// // safe addition/subtraction @@ -924,60 +700,3 @@ func safeSubClip(a, b int64) int64 { } return c } - -// candidate save simple validator data for selecting proposer -type candidate struct { - idx int - address Address - votingPower int64 -} - -func (c *candidate) Priority() uint64 { - // TODO Is it possible to have a negative VotingPower? - if c.votingPower < 0 { - return 0 - } - return uint64(c.votingPower) -} - -func (c *candidate) LessThan(other tmrand.Candidate) bool { - o, ok := other.(*candidate) - if !ok { - panic("incompatible type") - } - return bytes.Compare(c.address, o.address) < 0 -} - -func SelectProposer(validators *ValidatorSet, proofHash []byte, height int64, round int) *Validator { - if validators.IsNilOrEmpty() { - panic("empty validator set") - } - seed := hashToSeed(MakeRoundHash(proofHash, height, round)) - candidates := make([]tmrand.Candidate, len(validators.Validators)) - for i, val := range validators.Validators { - candidates[i] = &candidate{idx: i, address: val.Address, votingPower: val.VotingPower} - } - vals := tmrand.RandomSamplingWithPriority(seed, candidates, 1, uint64(validators.TotalVotingPower())) - proposerIdx := vals[0].(*candidate).idx - return validators.Validators[proposerIdx] -} - -func hashToSeed(hash []byte) uint64 { - for len(hash) < 8 { - hash = append(hash, byte(0)) - } - return binary.LittleEndian.Uint64(hash[:8]) -} - -// MakeRoundHash combines the VRF hash, block height, and round to create a hash value for each round. This value is -// used for random sampling of the Proposer. -func MakeRoundHash(proofHash []byte, height int64, round int) []byte { - b := make([]byte, 16) - binary.LittleEndian.PutUint64(b, uint64(height)) - binary.LittleEndian.PutUint64(b[8:], uint64(round)) - hash := tmhash.New() - hash.Write(proofHash) - hash.Write(b[:8]) - hash.Write(b[8:16]) - return hash.Sum(nil) -} diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 468d33f59..d868fce41 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -24,11 +24,11 @@ func TestValidatorSetBasic(t *testing.T) { // but attempting to IncrementProposerPriority on them will panic. 
vset := NewValidatorSet([]*Validator{}) assert.Panics(t, func() { vset.IncrementProposerPriority(1) }) - assert.Panics(t, func() { SelectProposer(vset, []byte{}, 1, 0) }) + assert.Panics(t, func() { vset.SelectProposer([]byte{}, 1, 0) }) vset = NewValidatorSet(nil) assert.Panics(t, func() { vset.IncrementProposerPriority(1) }) - assert.Panics(t, func() { SelectProposer(vset, []byte{}, 1, 0) }) + assert.Panics(t, func() { vset.SelectProposer([]byte{}, 1, 0) }) assert.EqualValues(t, vset, vset.Copy()) assert.False(t, vset.HasAddress([]byte("some val"))) @@ -61,7 +61,8 @@ func TestValidatorSetBasic(t *testing.T) { assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) assert.NotNil(t, vset.Hash()) assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) }) - assert.Equal(t, val.Address, SelectProposer(vset, []byte{}, 1, 0).Address) + assert.Equal(t, val.Address, + vset.SelectProposer([]byte{}, 1, 0).Address) // update val = randValidator(vset.TotalVotingPower()) @@ -81,14 +82,14 @@ func TestCopy(t *testing.T) { vset := randValidatorSet(10) vsetHash := vset.Hash() if len(vsetHash) == 0 { - t.Fatalf("ValidatorSet had unexpected zero hash") + t.Fatalf("VoterSet had unexpected zero hash") } vsetCopy := vset.Copy() vsetCopyHash := vsetCopy.Hash() if !bytes.Equal(vsetHash, vsetCopyHash) { - t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash) + t.Fatalf("VoterSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash) } } @@ -142,7 +143,7 @@ func bytesToInt(b []byte) int { func verifyWinningRate(t *testing.T, vals *ValidatorSet, tries int, error float64) { selected := make([]int, len(vals.Validators)) for i := 0; i < tries; i++ { - prop := SelectProposer(vals, []byte{}, int64(i), 0) + prop := vals.SelectProposer([]byte{}, int64(i), 0) for j := 0; j < len(vals.Validators); j++ { if bytes.Equal(prop.Address, vals.Validators[j].Address) { selected[j]++ @@ -172,7 +173,7 @@ func TestProposerSelection1(t *testing.T) { }) var proposers []string for i := 0; i < 99; i++ { - val := SelectProposer(vset, []byte{}, int64(i), 0) + val := vset.SelectProposer([]byte{}, int64(i), 0) proposers = append(proposers, string(val.Address)) } expected := `foo foo foo foo bar bar foo bar foo baz bar foo baz baz baz foo foo bar foo bar baz bar foo baz foo ` + @@ -195,7 +196,7 @@ func TestProposerSelection2(t *testing.T) { vals := NewValidatorSet(valList) expected := []int{0, 1, 0, 0, 2, 2, 0, 2, 1, 2, 2, 1, 2, 2, 2} for i := 0; i < len(valList)*5; i++ { - prop := SelectProposer(vals, []byte{}, int64(i), 0) + prop := vals.SelectProposer([]byte{}, int64(i), 0) if bytesToInt(prop.Address) != expected[i] { t.Fatalf("(%d): Expected %d. 
Got %d", i, expected[i], bytesToInt(prop.Address)) } @@ -219,7 +220,7 @@ func TestProposerSelection2(t *testing.T) { vals = NewValidatorSet(valList) N := 4 + 5 + 3 for i := 0; i < 10000*N; i++ { - prop := SelectProposer(vals, []byte{}, int64(i), 0) + prop := vals.SelectProposer([]byte{}, int64(i), 0) propCount[bytesToInt(prop.Address)]++ } fmt.Printf("%v", propCount) @@ -263,10 +264,17 @@ func randPubKey() crypto.PubKey { return ed25519.PubKeyEd25519(pubKey) } +func max(a, b int64) int64 { + if a >= b { + return a + } + return b +} + func randValidator(totalVotingPower int64) *Validator { // this modulo limits the ProposerPriority/VotingPower to stay in the // bounds of MaxTotalVotingPower minus the already existing voting power: - val := NewValidator(randPubKey(), int64(tmrand.Uint64()%uint64((MaxTotalVotingPower-totalVotingPower)))) + val := NewValidator(randPubKey(), max(int64(tmrand.Uint64()%uint64((MaxTotalVotingPower-totalVotingPower))), 1)) val.ProposerPriority = tmrand.Int64() % (MaxTotalVotingPower - totalVotingPower) return val } @@ -281,6 +289,24 @@ func randValidatorSet(numValidators int) *ValidatorSet { return NewValidatorSet(validators) } +func randValidatorWithMinMax(min, max int64) (*Validator, PrivValidator) { + privVal := NewMockPV() + val := NewValidator(privVal.GetPubKey(), min+int64(tmrand.Uint64()%uint64(1+max-min))) + val.ProposerPriority = min + tmrand.Int64()%max + return val, privVal +} + +func randValidatorSetWithMinMax(numValidators int, min, max int64) (*ValidatorSet, map[string]PrivValidator) { + validators := make([]*Validator, numValidators) + privMap := make(map[string]PrivValidator) + var privVal PrivValidator + for i := 0; i < numValidators; i++ { + validators[i], privVal = randValidatorWithMinMax(min, max) + privMap[validators[i].Address.String()] = privVal + } + return NewValidatorSet(validators), privMap +} + func (vals *ValidatorSet) toBytes() []byte { bz, err := cdc.MarshalBinaryLengthPrefixed(vals) if err != nil { @@ -503,7 +529,8 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { for i, tc := range tcs { tc.vals.IncrementProposerPriority(tc.times) - assert.Equal(t, tc.wantProposer.Address, SelectProposer(tc.vals, []byte{}, int64(i), 0).Address, + assert.Equal(t, tc.wantProposer.Address, + tc.vals.SelectProposer([]byte{}, int64(i), 0).Address, "test case: %v", i) for valIdx, val := range tc.vals.Validators { @@ -546,7 +573,7 @@ func TestValidatorSetVerifyCommit(t *testing.T) { privKey := ed25519.GenPrivKey() pubKey := privKey.PubKey() v1 := NewValidator(pubKey, 1000) - vset := NewValidatorSet([]*Validator{v1}) + vset := NewVoterSet([]*Validator{v1}) // good var ( diff --git a/types/vote_set.go b/types/vote_set.go index 82698fe51..121dad601 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -63,7 +63,7 @@ type VoteSet struct { height int64 round int signedMsgType SignedMsgType - valSet *ValidatorSet + valSet *VoterSet mtx sync.Mutex votesBitArray *bits.BitArray @@ -75,7 +75,7 @@ type VoteSet struct { } // Constructs a new VoteSet struct used to accumulate votes for given height/round. 
-func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsgType, valSet *ValidatorSet) *VoteSet { +func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsgType, voterSet *VoterSet) *VoteSet { if height == 0 { panic("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -84,12 +84,12 @@ func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsg height: height, round: round, signedMsgType: signedMsgType, - valSet: valSet, - votesBitArray: bits.NewBitArray(valSet.Size()), - votes: make([]*Vote, valSet.Size()), + valSet: voterSet, + votesBitArray: bits.NewBitArray(voterSet.Size()), + votes: make([]*Vote, voterSet.Size()), sum: 0, maj23: nil, - votesByBlock: make(map[string]*blockVotes, valSet.Size()), + votesByBlock: make(map[string]*blockVotes, voterSet.Size()), peerMaj23s: make(map[P2PID]BlockID), } } @@ -178,7 +178,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { lookupAddr, val := voteSet.valSet.GetByIndex(valIndex) if val == nil { return false, errors.Wrapf(ErrVoteInvalidValidatorIndex, - "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) + "Cannot find voter %d in valSet of size %d", valIndex, voteSet.valSet.Size()) } // Ensure that the signer has the right address. @@ -586,11 +586,11 @@ type blockVotes struct { sum int64 // vote sum } -func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes { +func newBlockVotes(peerMaj23 bool, numVoters int) *blockVotes { return &blockVotes{ peerMaj23: peerMaj23, - bitArray: bits.NewBitArray(numValidators), - votes: make([]*Vote, numValidators), + bitArray: bits.NewBitArray(numVoters), + votes: make([]*Vote, numVoters), sum: 0, } } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index ab4433a39..d69a824c1 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -18,9 +18,9 @@ func randVoteSet( signedMsgType SignedMsgType, numValidators int, votingPower int64, -) (*VoteSet, *ValidatorSet, []PrivValidator) { - valSet, privValidators := RandValidatorSet(numValidators, votingPower) - return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators +) (*VoteSet, *ValidatorSet, *VoterSet, []PrivValidator) { + valSet, voterSet, privValidators := RandVoterSet(numValidators, votingPower) + return NewVoteSet("test_chain_id", height, round, signedMsgType, voterSet), valSet, voterSet, privValidators } // Convenience: Return new vote with different validator address/index @@ -68,7 +68,7 @@ func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { func TestAddVote(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) val0 := privValidators[0] // t.Logf(">> %v", voteSet) @@ -113,7 +113,7 @@ func TestAddVote(t *testing.T) { func Test2_3Majority(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in @@ -169,7 +169,7 @@ func Test2_3Majority(t *testing.T) { func Test2_3MajorityRedux(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1) 
blockHash := crypto.CRandBytes(32) blockPartsTotal := 123 @@ -274,7 +274,7 @@ func Test2_3MajorityRedux(t *testing.T) { func TestBadVotes(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, @@ -339,7 +339,7 @@ func TestBadVotes(t *testing.T) { func TestConflicts(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1) blockHash1 := tmrand.Bytes(32) blockHash2 := tmrand.Bytes(32) @@ -473,7 +473,7 @@ func TestConflicts(t *testing.T) { func TestMakeCommit(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1) blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} voteProto := &Vote{ diff --git a/types/voter_set.go b/types/voter_set.go new file mode 100644 index 000000000..9898321be --- /dev/null +++ b/types/voter_set.go @@ -0,0 +1,488 @@ +package types + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "sort" + "strings" + + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + tmmath "github.com/tendermint/tendermint/libs/math" + tmrand "github.com/tendermint/tendermint/libs/rand" +) + +var MaxVoters = 20 + +// VoterSet represent a set of *Validator at a given height. +type VoterSet struct { + // NOTE: persisted via reflect, must be exported. + Voters []*Validator `json:"voters"` + + // cached (unexported) + totalVotingPower int64 +} + +func NewVoterSet(valz []*Validator) *VoterSet { + sort.Sort(ValidatorsByAddress(valz)) + vals := &VoterSet{Voters: copyValidatorListShallow(valz), totalVotingPower: 0} + vals.updateTotalVotingPower() + return vals +} + +// IsNilOrEmpty returns true if validator set is nil or empty. +func (voters *VoterSet) IsNilOrEmpty() bool { + return voters == nil || len(voters.Voters) == 0 +} + +// HasAddress returns true if address given is in the validator set, false - +// otherwise. +func (voters *VoterSet) HasAddress(address []byte) bool { + idx := sort.Search(len(voters.Voters), func(i int) bool { + return bytes.Compare(address, voters.Voters[i].Address) <= 0 + }) + return idx < len(voters.Voters) && bytes.Equal(voters.Voters[idx].Address, address) +} + +// GetByAddress returns an index of the validator with address and validator +// itself if found. Otherwise, -1 and nil are returned. +func (voters *VoterSet) GetByAddress(address []byte) (index int, val *Validator) { + idx := sort.Search(len(voters.Voters), func(i int) bool { + return bytes.Compare(address, voters.Voters[i].Address) <= 0 + }) + if idx < len(voters.Voters) && bytes.Equal(voters.Voters[idx].Address, address) { + return idx, voters.Voters[idx].Copy() + } + return -1, nil +} + +// GetByIndex returns the validator's address and validator itself by index. +// It returns nil values if index is less than 0 or greater or equal to +// len(VoterSet.Validators). 
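+// The returned validator is a copy; mutating it does not affect the set.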
+func (voters *VoterSet) GetByIndex(index int) (address []byte, val *Validator) { + if index < 0 || index >= len(voters.Voters) { + return nil, nil + } + val = voters.Voters[index] + return val.Address, val.Copy() +} + +// Size returns the number of voters in the set. +func (voters *VoterSet) Size() int { + return len(voters.Voters) +} + +func copyValidatorListShallow(vals []*Validator) []*Validator { + result := make([]*Validator, len(vals)) + copy(result, vals) + return result +} + +// Copy returns a shallow copy of the VoterSet: the voter list is copied, but the validators themselves are shared. +func (voters *VoterSet) Copy() *VoterSet { + return &VoterSet{ + Voters: copyValidatorListShallow(voters.Voters), + totalVotingPower: voters.totalVotingPower, + } +} + +// Forces recalculation of the set's total voting power. +// Panics if total voting power is bigger than MaxTotalVotingPower. +func (voters *VoterSet) updateTotalVotingPower() { + sum := int64(0) + for _, val := range voters.Voters { + // mind overflow + sum = safeAddClip(sum, val.VotingPower) + if sum > MaxTotalVotingPower { + panic(fmt.Sprintf( + "Total voting power should be guarded to not exceed %v; got: %v", + MaxTotalVotingPower, + sum)) + } + } + + voters.totalVotingPower = sum +} + +func (voters *VoterSet) TotalVotingPower() int64 { + if voters.totalVotingPower == 0 { + voters.updateTotalVotingPower() + } + return voters.totalVotingPower +} + +// Hash returns the Merkle root hash built using validators (as leaves) in the +// set. +func (voters *VoterSet) Hash() []byte { + if len(voters.Voters) == 0 { + return nil + } + bzs := make([][]byte, len(voters.Voters)) + for i, val := range voters.Voters { + bzs[i] = val.Bytes() + } + return merkle.SimpleHashFromByteSlices(bzs) +} + +// VerifyCommit verifies +2/3 of the set had signed the given commit. +func (voters *VoterSet) VerifyCommit(chainID string, blockID BlockID, + height int64, commit *Commit) error { + + if voters.Size() != len(commit.Signatures) { + return NewErrInvalidCommitSignatures(voters.Size(), len(commit.Signatures)) + } + if err := verifyCommitBasic(commit, height, blockID); err != nil { + return err + } + + talliedVotingPower := int64(0) + votingPowerNeeded := voters.TotalVotingPower() * 2 / 3 + for idx, commitSig := range commit.Signatures { + if commitSig.Absent() { + continue // OK, some signatures can be absent. + } + + // The voters and commit signatures have a 1-to-1 correspondence. + // This means we don't need the validator address or to do any lookup. + val := voters.Voters[idx] + + // Validate signature. + voteSignBytes := commit.VoteSignBytes(chainID, idx) + if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + // Good! + if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + talliedVotingPower += val.VotingPower + } + // else { + // It's OK that the BlockID doesn't match. We include stray + // signatures (~votes for nil) to measure validator availability. + // } + + // return as soon as +2/3 of the signatures are verified + if talliedVotingPower > votingPowerNeeded { + return nil + } + } + + // talliedVotingPower <= needed, thus return error + return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} +} + +// VerifyFutureCommit will check to see if the set would be valid with a different +// validator set. +// +// vals is the old validator set that we know. Over 2/3 of the power in old +// signed this block.
+// +// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 +// can't make arbitrary state transitions. You still need > 2/3 Byzantine to +// make arbitrary state transitions. +// +// To preserve this property in the light client, we also require > 2/3 of the +// old vals to sign the future commit at H, that way we preserve the property +// that if they weren't being truthful about the validator set at H (block hash +// -> vals hash) or about the app state (block hash -> app hash) we can slash +// > 2/3. Otherwise, the lite client isn't providing the same security +// guarantees. +// +// Even if we added a slashing condition that if you sign a block header with +// the wrong validator set, then we would only need > 1/3 of signatures from +// the old vals on the new commit, it wouldn't be sufficient because the new +// vals can be arbitrary and commit some arbitrary app hash. +// +// newSet is the validator set that signed this block. Only votes from new are +// sufficient for 2/3 majority in the new set as well, for it to be a valid +// commit. +// +// NOTE: This doesn't check whether the commit is a future commit, because the +// current height isn't part of the VoterSet. Caller must check that the +// commit height is greater than the height for this validator set. +func (voters *VoterSet) VerifyFutureCommit(newSet *VoterSet, chainID string, + blockID BlockID, height int64, commit *Commit) error { + oldVoters := voters + + // Commit must be a valid commit for newSet. + err := newSet.VerifyCommit(chainID, blockID, height, commit) + if err != nil { + return err + } + + // Check old voting power. + oldVotingPower := int64(0) + seen := map[int]bool{} + + for idx, commitSig := range commit.Signatures { + if commitSig.Absent() { + continue // OK, some signatures can be absent. + } + + // See if this validator is in oldVals. + oldIdx, val := oldVoters.GetByAddress(commitSig.ValidatorAddress) + if val == nil || seen[oldIdx] { + continue // missing or double vote... + } + seen[oldIdx] = true + + // Validate signature. + voteSignBytes := commit.VoteSignBytes(chainID, idx) + if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + // Good! + if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + oldVotingPower += val.VotingPower + } + // else { + // It's OK that the BlockID doesn't match. We include stray + // signatures (~votes for nil) to measure validator availability. + // } + } + + if got, needed := oldVotingPower, oldVoters.TotalVotingPower()*2/3; got <= needed { + return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} + } + return nil +} + +// VerifyCommitTrusting verifies that trustLevel ([1/3, 1]) of the validator +// set signed this commit. +// NOTE the given validators do not necessarily correspond to the validator set +// for this commit, but there may be some intersection. 
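+// Panics if trustLevel is outside the range [1/3, 1].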
+func (voters *VoterSet) VerifyCommitTrusting(chainID string, blockID BlockID, + height int64, commit *Commit, trustLevel tmmath.Fraction) error { + + if trustLevel.Numerator*3 < trustLevel.Denominator || // < 1/3 + trustLevel.Numerator > trustLevel.Denominator { // > 1 + panic(fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", trustLevel)) + } + + if err := verifyCommitBasic(commit, height, blockID); err != nil { + return err + } + + var ( + talliedVotingPower int64 + seenVals = make(map[int]int, len(commit.Signatures)) // validator index -> commit index + votingPowerNeeded = (voters.TotalVotingPower() * trustLevel.Numerator) / trustLevel.Denominator + ) + + for idx, commitSig := range commit.Signatures { + if commitSig.Absent() { + continue // OK, some signatures can be absent. + } + + // We don't know the validators that committed this block, so we have to + // check for each vote if its validator is already known. + valIdx, val := voters.GetByAddress(commitSig.ValidatorAddress) + + if firstIndex, ok := seenVals[valIdx]; ok { // double vote + secondIndex := idx + return errors.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) + } + + if val != nil { + seenVals[valIdx] = idx + + // Validate signature. + voteSignBytes := commit.VoteSignBytes(chainID, idx) + if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + + // Good! + if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + talliedVotingPower += val.VotingPower + } + // else { + // It's OK that the BlockID doesn't match. We include stray + // signatures (~votes for nil) to measure validator availability. + // } + + if talliedVotingPower > votingPowerNeeded { + return nil + } + } + } + + return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} +} + +func verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error { + if err := commit.ValidateBasic(); err != nil { + return err + } + if height != commit.Height { + return NewErrInvalidCommitHeight(height, commit.Height) + } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", + blockID, commit.BlockID) + } + return nil +} + +//----------------- + +// IsErrNotEnoughVotingPowerSigned returns true if err is +// ErrNotEnoughVotingPowerSigned. +func IsErrNotEnoughVotingPowerSigned(err error) bool { + _, ok := errors.Cause(err).(ErrNotEnoughVotingPowerSigned) + return ok +} + +// ErrNotEnoughVotingPowerSigned is returned when not enough validators signed +// a commit. +type ErrNotEnoughVotingPowerSigned struct { + Got int64 + Needed int64 +} + +func (e ErrNotEnoughVotingPowerSigned) Error() string { + return fmt.Sprintf("invalid commit -- insufficient voting power: got %d, needed more than %d", e.Got, e.Needed) +} + +//---------------- + +// Iterate will run the given function over the set. +func (voters *VoterSet) Iterate(fn func(index int, val *Validator) bool) { + for i, val := range voters.Voters { + stop := fn(i, val) + if stop { + break + } + } +} + +func (voters *VoterSet) String() string { + return voters.StringIndented("") +} + +// StringIndented returns an intended string representation of VoterSet. 
+// StringIndented returns an indented string representation of VoterSet.
+func (voters *VoterSet) StringIndented(indent string) string {
+	if voters == nil {
+		return "nil-VoterSet"
+	}
+	var valStrings []string
+	voters.Iterate(func(index int, val *Validator) bool {
+		valStrings = append(valStrings, val.String())
+		return false
+	})
+	return fmt.Sprintf(`VoterSet{
+%s  Validators:
+%s    %v
+%s}`,
+		indent, indent, strings.Join(valStrings, "\n"+indent+"    "),
+		indent)
+
+}
+
+func SelectVoter(validators *ValidatorSet, proofHash []byte) *VoterSet {
+	// TODO: decide MaxVoters; make it configurable
+	if len(proofHash) == 0 || validators.Size() <= MaxVoters {
+		// Height 1 has a voter set that is the same as the validator set.
+		result := &VoterSet{Voters: copyValidatorListShallow(validators.Validators), totalVotingPower: 0}
+		result.updateTotalVotingPower()
+		return result
+	}
+
+	seed := hashToSeed(proofHash)
+	candidates := make([]tmrand.Candidate, len(validators.Validators))
+	for i, val := range validators.Validators {
+		candidates[i] = &candidate{idx: i, win: 0, val: val}
+	}
+	totalSampling := tmrand.RandomSamplingToMax(seed, candidates, MaxVoters, uint64(validators.TotalVotingPower()))
+	voters := 0
+	for _, candi := range candidates {
+		if candi.(*candidate).win > 0 {
+			voters++
+		}
+	}
+
+	vals := make([]*Validator, voters)
+	index := 0
+	for _, candi := range candidates {
+		if candi.(*candidate).win > 0 {
+			vals[index] = &Validator{Address: candi.(*candidate).val.Address,
+				PubKey: candi.(*candidate).val.PubKey,
+				// VotingPower = TotalVotingPower * win / totalSampling; computed in
+				// two parts because the direct product can overflow int64.
+				VotingPower: validators.TotalVotingPower()/int64(totalSampling)*int64(candi.(*candidate).win) +
+					int64(math.Ceil(float64(validators.TotalVotingPower()%int64(totalSampling))/float64(int64(totalSampling))*
+						float64(candi.(*candidate).win)))}
+			index++
+		}
+	}
+	return NewVoterSet(vals)
+}
+
+// ToVoterAll should only be used in tests.
+func ToVoterAll(validators *ValidatorSet) *VoterSet {
+	return NewVoterSet(validators.Validators)
+}
+
+// candidate holds simple validator data for selecting the proposer.
+type candidate struct {
+	idx int
+	win uint64
+	val *Validator
+}
+
+func (c *candidate) Priority() uint64 {
+	// TODO: Is it possible to have a negative VotingPower?
+	if c.val.VotingPower < 0 {
+		return 0
+	}
+	return uint64(c.val.VotingPower)
+}
+
+func (c *candidate) LessThan(other tmrand.Candidate) bool {
+	o, ok := other.(*candidate)
+	if !ok {
+		panic("incompatible type")
+	}
+	return bytes.Compare(c.val.Address, o.val.Address) < 0
+}
+
+func (c *candidate) IncreaseWin() {
+	c.win++
+}
+
+func hashToSeed(hash []byte) uint64 {
+	for len(hash) < 8 {
+		hash = append(hash, byte(0))
+	}
+	return binary.LittleEndian.Uint64(hash[:8])
+}
+
+// MakeRoundHash combines the VRF hash, block height, and round to create a
+// hash value for each round. This value is used for random sampling of the
+// Proposer.
+func MakeRoundHash(proofHash []byte, height int64, round int) []byte {
+	b := make([]byte, 16)
+	binary.LittleEndian.PutUint64(b, uint64(height))
+	binary.LittleEndian.PutUint64(b[8:], uint64(round))
+	hash := tmhash.New()
+	hash.Write(proofHash)
+	hash.Write(b[:8])
+	hash.Write(b[8:16])
+	return hash.Sum(nil)
+}
+
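Illustrative only, not part of this patch: the VotingPower assigned inside SelectVoter is conceptually TotalVotingPower * win / totalSampling, but that product can overflow int64, so it is split into an integer quotient part and a rounded remainder part. The hypothetical helper below restates the same computation, using the math import already present in this file; since win <= totalSampling (win counts samples out of totalSampling draws), the quotient part never exceeds TotalVotingPower.

func scaledVotingPower(totalVotingPower int64, win, totalSampling uint64) int64 {
	// Quotient part: (T / S) * w, safe because w <= S implies the result <= T.
	quotient := totalVotingPower / int64(totalSampling) * int64(win)
	// Remainder part: ceil((T mod S) / S * w), which is at most w.
	remainder := int64(math.Ceil(
		float64(totalVotingPower%int64(totalSampling)) / float64(totalSampling) * float64(win)))
	return quotient + remainder
}

For example, totalVotingPower=1000, win=3, totalSampling=7 gives 426 + 3 = 429, close to the exact 3000/7 ≈ 428.6.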
+// RandVoterSet returns a randomized validator set along with the voter set
+// selected from it, useful for testing.
+// NOTE: PrivValidators are in order.
+// UNSTABLE
+func RandVoterSet(numVoters int, votingPower int64) (*ValidatorSet, *VoterSet, []PrivValidator) {
+	valz := make([]*Validator, numVoters)
+	privValidators := make([]PrivValidator, numVoters)
+	for i := 0; i < numVoters; i++ {
+		val, privValidator := RandValidator(false, votingPower)
+		valz[i] = val
+		privValidators[i] = privValidator
+	}
+	vals := NewValidatorSet(valz)
+	sort.Sort(PrivValidatorsByAddress(privValidators))
+	return vals, SelectVoter(vals, []byte{}), privValidators
+}
diff --git a/types/voter_set_test.go b/types/voter_set_test.go
new file mode 100644
index 000000000..0359540a6
--- /dev/null
+++ b/types/voter_set_test.go
@@ -0,0 +1,112 @@
+package types
+
+import (
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/tendermint/tendermint/crypto/vrf"
+	tmtime "github.com/tendermint/tendermint/types/time"
+)
+
+func TestSelectVoter(t *testing.T) {
+	MaxVoters = 29
+	valSet := randValidatorSet(30)
+	for i := 0; i < 10000; i++ {
+		voterSet := SelectVoter(valSet, []byte{byte(i)})
+		assert.True(t, math.Abs(float64(valSet.TotalVotingPower()-voterSet.TotalVotingPower())) <= 10)
+	}
+}
+
+func toGenesisValidators(vals []*Validator) []GenesisValidator {
+	genVals := make([]GenesisValidator, len(vals))
+	for i, val := range vals {
+		genVals[i] = GenesisValidator{Address: val.Address, PubKey: val.PubKey, Power: val.VotingPower, Name: "name"}
+	}
+	return genVals
+}
+
+/**
+The results below were obtained with LoopCount set to 10000:
+	<< min power=100, max power=100, actual average voters=10, max voters=10 >> largest gap: 0.040000
+	<< min power=100, max power=100, actual average voters=20, max voters=20 >> largest gap: 0.030000
+	<< min power=100, max power=100, actual average voters=29, max voters=29 >> largest gap: 0.010000
+	<< min power=100, max power=10000, actual average voters=10, max voters=10 >> largest gap: 0.183673
+	<< min power=100, max power=10000, actual average voters=20, max voters=20 >> largest gap: 0.128788
+	<< min power=100, max power=10000, actual average voters=28, max voters=29 >> largest gap: 0.304348
+	<< min power=100, max power=1000000, actual average voters=10, max voters=10 >> largest gap: 0.093158
+	<< min power=100, max power=1000000, actual average voters=20, max voters=20 >> largest gap: 0.094404
+	<< min power=100, max power=1000000, actual average voters=28, max voters=29 >> largest gap: 0.194133
+	<< min power=100, max power=100000000, actual average voters=10, max voters=10 >> largest gap: 0.076536
+	<< min power=100, max power=100000000, actual average voters=20, max voters=20 >> largest gap: 0.076547
+	<< min power=100, max power=100000000, actual average voters=29, max voters=29 >> largest gap: 0.147867
+*/
+func TestSelectVoterReasonableVotingPower(t *testing.T) {
+	// Raise LoopCount (e.g. to 10000) to get a smaller gap, but note that a
+	// large LoopCount takes a long time to run.
+	const LoopCount = 100
+	for minMaxRate := 1; minMaxRate <= 1000000; minMaxRate *= 100 {
+		findLargestVotingPowerGap(t, LoopCount, minMaxRate, 10)
+		findLargestVotingPowerGap(t, LoopCount, minMaxRate, 20)
+		findLargestVotingPowerGap(t, LoopCount, minMaxRate, 29)
+	}
+}
+
+func findLargestVotingPowerGap(t *testing.T, loopCount int, minMaxRate int, maxVoters int) {
+	valSet, privMap := randValidatorSetWithMinMax(30, 100, 100*int64(minMaxRate))
+	genDoc := &GenesisDoc{
+		GenesisTime: tmtime.Now(),
+		ChainID:     "tendermint-test",
+		Validators:  toGenesisValidators(valSet.Validators),
+	}
+	hash := genDoc.Hash()
+	MaxVoters = maxVoters
+	accumulation := make(map[string]int64)
+	totalVoters := 0
+	for i := 0; i < loopCount; i++ {
+		voterSet := SelectVoter(valSet, hash)
+		for _, voter := range voterSet.Voters {
+			accumulation[voter.Address.String()] += voter.VotingPower
+		}
+		proposer := valSet.SelectProposer(hash, int64(i), 0)
+		message := MakeRoundHash(hash, int64(i), 0)
+		proof, _ := privMap[proposer.Address.String()].GenerateVRFProof(message)
+		hash, _ = vrf.ProofToHash(proof)
+		totalVoters += voterSet.Size()
+	}
+	largestGap := float64(0)
+	for _, val := range valSet.Validators {
+		acc := accumulation[val.Address.String()] / int64(loopCount)
+		if math.Abs(float64(val.VotingPower-acc))/float64(val.VotingPower) > largestGap {
+			largestGap = math.Abs(float64(val.VotingPower-acc)) / float64(val.VotingPower)
+		}
+	}
+	t.Logf("<< min power=100, max power=%d, actual average voters=%d, max voters=%d >> largest gap: %f",
+		100*minMaxRate, totalVoters/loopCount, maxVoters, largestGap)
+}
+
+/**
+  This test examines the difference between MaxVoters and the actual number of elected voters.
+  With MaxSamplingLoopTry fixed, it identifies the smallest MaxVoters for which the sampling can no longer
+  elect MaxVoters voters. If MaxSamplingLoopTry were very large, the number of elected voters would reach
+  MaxVoters, but a large MaxSamplingLoopTry takes too much time.
+*/
+func TestSelectVoterMaxVarious(t *testing.T) {
+	hash := 0
+	for minMaxRate := 1; minMaxRate <= 100000000; minMaxRate *= 10000 {
+		t.Logf("<<< min: 100, max: %d >>>", 100*minMaxRate)
+		for validators := 16; validators <= 256; validators *= 4 {
+			for voters := 1; voters <= validators; voters += 10 {
+				MaxVoters = voters
+				valSet, _ := randValidatorSetWithMinMax(validators, 100, 100*int64(minMaxRate))
+				voterSet := SelectVoter(valSet, []byte{byte(hash)})
+				assert.True(t, int(math.Abs(float64(valSet.TotalVotingPower()-voterSet.TotalVotingPower()))) <= voters)
+				if voterSet.Size() < MaxVoters {
+					t.Logf("Cannot elect voters up to MaxVoters: validators=%d, MaxVoters=%d, actual voters=%d",
+						validators, voters, voterSet.Size())
+					break
+				}
+				hash++
+			}
+		}
+	}
+}
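The sketch below is illustrative only and not part of this patch: a minimal additional test showing that SelectVoter falls back to the full validator set when no proof hash is available (as RandVoterSet does by passing an empty hash), regardless of MaxVoters. The test name is hypothetical; everything it calls is defined in the files above.

func TestSelectVoterEmptyProofHashSketch(t *testing.T) {
	MaxVoters = 10
	_, voterSet, privVals := RandVoterSet(30, 100)
	// RandVoterSet passes an empty proof hash, so SelectVoter returns a voter
	// set identical to the validator set even though MaxVoters is smaller.
	assert.Equal(t, 30, voterSet.Size())
	assert.Equal(t, 30, len(privVals))
}

Since all of the tests above mutate the package-level MaxVoters, something like go test ./types -run TestSelectVoter -v (assuming the usual repository layout) runs them serially within the package and is a convenient way to exercise just the voter-selection logic.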