Merge branch 'main' into evan/bump-celestia-app-v1.0.0-rc9
adlerjohn authored Jul 12, 2023
2 parents 51c6e14 + c8fa853 commit be28920
Showing 7 changed files with 96 additions and 62 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/docker-build-publish.yml
@@ -18,6 +18,6 @@ jobs:
permissions:
contents: write
packages: write
uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.2.0 # yamllint disable-line rule:line-length
uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.2.2 # yamllint disable-line rule:line-length
with:
dockerfile: Dockerfile
8 changes: 0 additions & 8 deletions header/headertest/verify_test.go
@@ -37,14 +37,6 @@ func TestVerify(t *testing.T) {
},
err: true,
},
{
prepare: func() libhead.Header {
untrusted := *untrustedNonAdj
untrusted.Commit = NewTestSuite(t, 2).Commit(RandRawHeader(t))
return &untrusted
},
err: true,
},
{
prepare: func() libhead.Header {
untrusted := *untrustedAdj
8 changes: 0 additions & 8 deletions header/verify.go
@@ -5,8 +5,6 @@ import (
"fmt"
"time"

"github.com/tendermint/tendermint/light"

libhead "github.com/celestiaorg/go-header"
)

@@ -47,12 +45,6 @@ func (eh *ExtendedHeader) Verify(untrusted libhead.Header) error {
return nil
}

// Ensure that untrusted commit has enough of trusted commit's power.
err := eh.ValidatorSet.VerifyCommitLightTrusting(eh.ChainID(), untrst.Commit, light.DefaultTrustLevel)
if err != nil {
return &libhead.VerifyError{Reason: err}
}

return nil
}

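The removed lines returned a `*libhead.VerifyError` wrapping the underlying cause. Below is a minimal, self-contained sketch of that error-wrapping pattern; the local `VerifyError` type, its methods, and the example cause string are stand-ins rather than the real go-header implementation.

```go
package main

import (
	"errors"
	"fmt"
)

// VerifyError is a local stand-in for libhead.VerifyError from go-header.
// Assumption: the real type also carries the underlying cause in a Reason
// field, as the removed `&libhead.VerifyError{Reason: err}` line suggests.
type VerifyError struct {
	Reason error
}

func (e *VerifyError) Error() string {
	return fmt.Sprintf("header verification failed: %s", e.Reason)
}

func (e *VerifyError) Unwrap() error { return e.Reason }

func main() {
	// Hypothetical cause, similar to what VerifyCommitLightTrusting used to return.
	cause := errors.New("untrusted commit has insufficient voting power")
	var err error = &VerifyError{Reason: cause}

	// Callers can recover the cause with the standard errors helpers.
	var verr *VerifyError
	if errors.As(err, &verr) {
		fmt.Println("reason:", verr.Reason)
	}
}
```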
2 changes: 1 addition & 1 deletion nodebuilder/p2p/genesis.go
@@ -24,7 +24,7 @@ func GenesisFor(net Network) (string, error) {
// NOTE: Every time we add a new long-running network, its genesis hash has to be added here.
var genesisList = map[Network]string{
Arabica: "7A5FABB19713D732D967B1DA84FA0DF5E87A7B62302D783F78743E216C1A3550",
Mocha: "831B81ADDC5CE999EBB0C150B778F76DAAD9E09DF75FACF164B1F11DCE93E2E1",
Mocha: "79A97034D569C4199A867439B1B7B77D4E1E1D9697212755E1CE6D920CDBB541",
BlockspaceRace: "1A8491A72F73929680DAA6C93E3B593579261B2E76536BFA4F5B97D6FE76E088",
Private: "",
}
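For illustration, a standalone sketch of the lookup pattern used by `GenesisFor`, assuming a plain map lookup that rejects unknown or empty entries; the local `Network` type, `genesisFor` helper, and error text are simplified stand-ins rather than the actual `nodebuilder/p2p` code.

```go
package main

import "fmt"

// Network and genesisFor are simplified local stand-ins for the types in
// nodebuilder/p2p; the hashes are copied from the map above.
type Network string

const (
	Arabica Network = "arabica"
	Mocha   Network = "mocha"
)

var genesisList = map[Network]string{
	Arabica: "7A5FABB19713D732D967B1DA84FA0DF5E87A7B62302D783F78743E216C1A3550",
	Mocha:   "79A97034D569C4199A867439B1B7B77D4E1E1D9697212755E1CE6D920CDBB541",
}

// genesisFor mirrors the lookup that GenesisFor performs: return the
// hard-coded genesis hash for a known network, or an error otherwise.
func genesisFor(net Network) (string, error) {
	hash, ok := genesisList[net]
	if !ok || hash == "" {
		return "", fmt.Errorf("genesis hash not found for network %q", net)
	}
	return hash, nil
}

func main() {
	hash, err := genesisFor(Mocha)
	if err != nil {
		panic(err)
	}
	fmt.Println("Mocha genesis hash:", hash)
}
```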
58 changes: 35 additions & 23 deletions share/eds/byzantine/bad_encoding.go
@@ -2,7 +2,6 @@ package byzantine

import (
"bytes"
"errors"
"fmt"

"github.com/celestiaorg/celestia-app/pkg/wrapper"
@@ -113,50 +112,58 @@ func (p *BadEncodingProof) UnmarshalBinary(data []byte) error {
func (p *BadEncodingProof) Validate(hdr libhead.Header) error {
header, ok := hdr.(*header.ExtendedHeader)
if !ok {
panic(fmt.Sprintf("invalid header type: expected %T, got %T", header, hdr))
panic(fmt.Sprintf("invalid header type received during BEFP validation: expected %T, got %T", header, hdr))
}
if header.Height() != int64(p.BlockHeight) {
return errors.New("fraud: incorrect block height")
return fmt.Errorf("incorrect block height during BEFP validation: expected %d, got %d",
p.BlockHeight, header.Height(),
)
}
merkleRowRoots := header.DAH.RowRoots
merkleColRoots := header.DAH.ColumnRoots
if len(merkleRowRoots) != len(merkleColRoots) {

if len(header.DAH.RowRoots) != len(header.DAH.ColumnRoots) {
// NOTE: This should never happen as callers of this method should not feed it with a
// malformed extended header.
panic(fmt.Sprintf(
"fraud: invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)",
len(merkleRowRoots),
len(merkleColRoots)),
"invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)",
len(header.DAH.RowRoots),
len(header.DAH.ColumnRoots)),
)
}
if int(p.Index) >= len(merkleRowRoots) {
return fmt.Errorf("fraud: invalid proof: index out of bounds (%d >= %d)", int(p.Index), len(merkleRowRoots))

// merkleRoots are the roots against which we are going to check the inclusion of the received
// shares. Changing the order of the roots to prove the shares relative to the orthogonal axis,
// because inside the rsmt2d library rsmt2d.Row = 0 and rsmt2d.Col = 1
merkleRoots := header.DAH.RowRoots
if p.Axis == rsmt2d.Row {
merkleRoots = header.DAH.ColumnRoots
}
if len(merkleRowRoots) != len(p.Shares) {
return fmt.Errorf("fraud: invalid proof: incorrect number of shares %d != %d", len(p.Shares), len(merkleRowRoots))
if int(p.Index) >= len(merkleRoots) {
return fmt.Errorf("invalid %s proof: index out of bounds (%d >= %d)",
BadEncoding, int(p.Index), len(merkleRoots),
)
}

root := merkleRowRoots[p.Index]
if p.Axis == rsmt2d.Col {
root = merkleColRoots[p.Index]
if len(p.Shares) != len(merkleRoots) {
return fmt.Errorf("invalid %s proof: incorrect number of shares %d != %d",
BadEncoding, len(p.Shares), len(merkleRoots),
)
}

// verify that Merkle proofs correspond to particular shares.
shares := make([][]byte, len(merkleRowRoots))
shares := make([][]byte, len(merkleRoots))
for index, shr := range p.Shares {
if shr == nil {
continue
}
// validate inclusion of the share into one of the DAHeader roots
if ok := shr.Validate(ipld.MustCidFromNamespacedSha256(root)); !ok {
return fmt.Errorf("fraud: invalid proof: incorrect share received at index %d", index)
if ok := shr.Validate(ipld.MustCidFromNamespacedSha256(merkleRoots[index])); !ok {
return fmt.Errorf("invalid %s proof: incorrect share received at index %d", BadEncoding, index)
}
// NMTree commits an additional namespace that rsmt2d does not know about, so we trim it
// this is ugliness from NMTWrapper that we have to embrace ¯\_(ツ)_/¯
shares[index] = share.GetData(shr.Share)
}

odsWidth := uint64(len(merkleRowRoots) / 2)
odsWidth := uint64(len(merkleRoots) / 2)
codec := share.DefaultRSMT2DCodec()

// rebuild a row or col.
@@ -183,10 +190,15 @@ func (p *BadEncodingProof) Validate(hdr libhead.Header) error {
return err
}

// root is a merkle root of the row/col where ErrByzantine occurred
root := header.DAH.RowRoots[p.Index]
if p.Axis == rsmt2d.Col {
root = header.DAH.ColumnRoots[p.Index]
}

// comparing rebuilt Merkle Root of bad row/col with respective Merkle Root of row/col from block.
if bytes.Equal(expectedRoot, root) {
return errors.New("fraud: invalid proof: recomputed Merkle root matches the DAH's row/column root")
return fmt.Errorf("invalid %s proof: recomputed Merkle root matches the DAH's row/column root", BadEncoding)
}

return nil
}
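The new `merkleRoots` comment captures the core idea of this change: shares from a bad row are proven against the column roots, and shares from a bad column against the row roots. Below is a self-contained sketch of that selection, using local constants that mirror the stated `rsmt2d.Row = 0` and `rsmt2d.Col = 1` values and placeholder roots instead of real `DataAvailabilityHeader` values.

```go
package main

import "fmt"

// axis mirrors the rsmt2d axis constants (Row = 0, Col = 1) locally; it is
// not imported from the library.
type axis int

const (
	row axis = iota // rsmt2d.Row
	col             // rsmt2d.Col
)

// proofRoots returns the roots of the axis orthogonal to the one the bad
// encoding was detected on: shares of a faulty row are proven against column
// roots, and shares of a faulty column against row roots.
func proofRoots(a axis, rowRoots, colRoots [][]byte) [][]byte {
	if a == row {
		return colRoots
	}
	return rowRoots
}

func main() {
	rowRoots := [][]byte{[]byte("row-root-0"), []byte("row-root-1")}
	colRoots := [][]byte{[]byte("col-root-0"), []byte("col-root-1")}

	fmt.Printf("bad row    -> prove against: %s\n", proofRoots(row, rowRoots, colRoots))
	fmt.Printf("bad column -> prove against: %s\n", proofRoots(col, rowRoots, colRoots))
}
```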
46 changes: 35 additions & 11 deletions share/eds/byzantine/byzantine.go
@@ -5,6 +5,7 @@ import (
"fmt"

"github.com/ipfs/go-blockservice"
"golang.org/x/sync/errgroup"

"github.com/celestiaorg/celestia-app/pkg/da"
"github.com/celestiaorg/rsmt2d"
@@ -35,25 +36,48 @@ func NewErrByzantine(
dah *da.DataAvailabilityHeader,
errByz *rsmt2d.ErrByzantineData,
) *ErrByzantine {
root := [][][]byte{
dah.RowRoots,
// changing the order to collect proofs against an orthogonal axis
roots := [][][]byte{
dah.ColumnRoots,
}[errByz.Axis][errByz.Index]
sharesWithProof, err := GetProofsForShares(
ctx,
bGetter,
ipld.MustCidFromNamespacedSha256(root),
errByz.Shares,
)
if err != nil {
dah.RowRoots,
}[errByz.Axis]

sharesWithProof := make([]*ShareWithProof, len(errByz.Shares))
sharesAmount := 0

errGr, ctx := errgroup.WithContext(ctx)
for index, share := range errByz.Shares {
// skip further shares if we already requested half of them, which is enough to recompute the row
// or col
if sharesAmount == len(dah.RowRoots)/2 {
break
}

if share == nil {
continue
}
sharesAmount++

index := index
errGr.Go(func() error {
share, err := getProofsAt(
ctx, bGetter,
ipld.MustCidFromNamespacedSha256(roots[index]),
int(errByz.Index), len(errByz.Shares),
)
sharesWithProof[index] = share
return err
})
}

if err := errGr.Wait(); err != nil {
// Fatal as rsmt2d proved that error is byzantine,
// but we cannot properly collect the proof,
// so verification will fail and thus services won't be stopped
// while we still have to stop them.
// TODO(@Wondertan): Find a better way to handle
log.Fatalw("getting proof for ErrByzantine", "err", err)
}

return &ErrByzantine{
Index: uint32(errByz.Index),
Shares: sharesWithProof,
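The rewritten `NewErrByzantine` collects proofs concurrently with an `errgroup`, scheduling at most half of the shares since that is enough to recompute the row or column. Below is a self-contained sketch of the same pattern, with a hypothetical `fetchProof` standing in for `getProofsAt` and placeholder share data.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// fetchProof is a hypothetical stand-in for getProofsAt: it pretends to fetch
// a share's inclusion proof and returns a placeholder string.
func fetchProof(ctx context.Context, index int) (string, error) {
	_ = ctx // a real implementation would pass ctx to the block getter
	return fmt.Sprintf("proof-%d", index), nil
}

func main() {
	s := "share"
	shares := []*string{&s, nil, &s, &s}
	proofs := make([]string, len(shares))

	errGr, ctx := errgroup.WithContext(context.Background())
	scheduled := 0
	for index, share := range shares {
		// Half of the shares is enough to recompute the erasure-coded row/column,
		// so stop scheduling once that many fetches are in flight.
		if scheduled == len(shares)/2 {
			break
		}
		if share == nil {
			continue
		}
		scheduled++

		index := index // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		errGr.Go(func() error {
			proof, err := fetchProof(ctx, index)
			proofs[index] = proof
			return err
		})
	}

	if err := errGr.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(proofs) // e.g. [proof-0  proof-2 ]
}
```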
34 changes: 24 additions & 10 deletions share/eds/byzantine/share_proof.go
@@ -78,24 +78,38 @@ func GetProofsForShares(
proofs := make([]*ShareWithProof, len(shares))
for index, share := range shares {
if share != nil {
proof := make([]cid.Cid, 0)
// TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as they are traversing the same
// tree. Add options that will control what data will be fetched.
s, err := ipld.GetLeaf(ctx, bGetter, root, index, len(shares))
proof, err := getProofsAt(ctx, bGetter, root, index, len(shares))
if err != nil {
return nil, err
}
proof, err = ipld.GetProof(ctx, bGetter, root, proof, index, len(shares))
if err != nil {
return nil, err
}
proofs[index] = NewShareWithProof(index, s.RawData(), proof)
proofs[index] = proof
}
}

return proofs, nil
}

func getProofsAt(
ctx context.Context,
bGetter blockservice.BlockGetter,
root cid.Cid,
index,
total int,
) (*ShareWithProof, error) {
proof := make([]cid.Cid, 0)
// TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as they are traversing the same
// tree. Add options that will control what data will be fetched.
node, err := ipld.GetLeaf(ctx, bGetter, root, index, total)
if err != nil {
return nil, err
}

proof, err = ipld.GetProof(ctx, bGetter, root, proof, index, total)
if err != nil {
return nil, err
}
return NewShareWithProof(index, node.RawData(), proof), nil
}

func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof {
shares := make([]*ShareWithProof, len(protoShares))
for i, share := range protoShares {
