Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: reject blobs that are too large #2201

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 7 additions & 16 deletions app/test/check_tx_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,6 @@ import (
"bytes"
"testing"

tmrand "github.com/tendermint/tendermint/libs/rand"

"github.com/celestiaorg/celestia-app/app"
"github.com/celestiaorg/celestia-app/app/encoding"
appns "github.com/celestiaorg/celestia-app/pkg/namespace"
Expand All @@ -15,6 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
coretypes "github.com/tendermint/tendermint/types"
)

Expand Down Expand Up @@ -111,49 +110,41 @@ func TestCheckTx(t *testing.T) {
name: "1,000 byte blob",
checkType: abci.CheckTxType_New,
getTx: func() []byte {
tx := blobfactory.RandBlobTxsWithAccounts(encCfg.TxConfig.TxEncoder(), tmrand.NewRand(), kr, nil, 1_000, 1, false, testutil.ChainID, accs[4:5])[0]
return tx
return blobfactory.BlobTxWithSize(t, encCfg.TxConfig.TxEncoder(), kr, testutil.ChainID, accs[4], 1_000)
},
expectedABCICode: abci.CodeTypeOK,
},
{
name: "10,000 byte blob",
checkType: abci.CheckTxType_New,
getTx: func() []byte {
tx := blobfactory.RandBlobTxsWithAccounts(encCfg.TxConfig.TxEncoder(), tmrand.NewRand(), kr, nil, 10_000, 1, false, testutil.ChainID, accs[5:6])[0]
return tx
return blobfactory.BlobTxWithSize(t, encCfg.TxConfig.TxEncoder(), kr, testutil.ChainID, accs[5], 10_000)
},
expectedABCICode: abci.CodeTypeOK,
},
{
name: "100,000 byte blob",
checkType: abci.CheckTxType_New,
getTx: func() []byte {
tx := blobfactory.RandBlobTxsWithAccounts(encCfg.TxConfig.TxEncoder(), tmrand.NewRand(), kr, nil, 100_000, 1, false, testutil.ChainID, accs[6:7])[0]
return tx
return blobfactory.BlobTxWithSize(t, encCfg.TxConfig.TxEncoder(), kr, testutil.ChainID, accs[6], 100_000)
},
expectedABCICode: abci.CodeTypeOK,
},
{
name: "1,000,000 byte blob",
checkType: abci.CheckTxType_New,
getTx: func() []byte {
tx := blobfactory.RandBlobTxsWithAccounts(encCfg.TxConfig.TxEncoder(), tmrand.NewRand(), kr, nil, 1_000_000, 1, false, testutil.ChainID, accs[7:8])[0]
return tx
return blobfactory.BlobTxWithSize(t, encCfg.TxConfig.TxEncoder(), kr, testutil.ChainID, accs[7], 1_000_000)
},
expectedABCICode: abci.CodeTypeOK,
},
{
name: "10,000,000 byte blob",
checkType: abci.CheckTxType_New,
getTx: func() []byte {
tx := blobfactory.RandBlobTxsWithAccounts(encCfg.TxConfig.TxEncoder(), tmrand.NewRand(), kr, nil, 10_000_000, 1, false, testutil.ChainID, accs[8:9])[0]
return tx
return blobfactory.BlobTxWithSize(t, encCfg.TxConfig.TxEncoder(), kr, testutil.ChainID, accs[8], 10_000_000)
},
// TODO: consider modifying CheckTx to return an error for this case
// so that consensus nodes do not propagate blobs that are too
// large.
expectedABCICode: abci.CodeTypeOK,
expectedABCICode: blobtypes.ErrBlobSizeTooLarge.ABCICode(),
},
}

Expand Down
45 changes: 45 additions & 0 deletions test/util/blobfactory/payforblob_factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,51 @@ func RandBlobTxs(enc sdk.TxEncoder, rand *tmrand.Rand, count, blobsPerTx, size i
return txs
}

// BlobTxWithSize returns a blob transaction that contains a single blob of
// the given size (in bytes) signed by account. It intentionally does not use
// NewMsgPayForBlobs in order to bypass the ValidateBlobs() check which would
// fail for large blobs.
func BlobTxWithSize(t *testing.T, encoder sdk.TxEncoder, kr keyring.Keyring, chainID string, account string, size int) coretypes.Tx {
	signer := blobtypes.NewKeyringSigner(kr, account, chainID)
	address, err := signer.GetSignerInfo().GetAddress()
	require.NoError(t, err)

	// The fee and gas limit are arbitrary; this helper is only used in tests.
	coin := sdk.Coin{
		Denom:  bondDenom,
		Amount: sdk.NewInt(10),
	}
	options := []blobtypes.TxBuilderOption{
		blobtypes.SetFeeAmount(sdk.NewCoins(coin)),
		blobtypes.SetGasLimit(10_000_000),
	}
	builder := signer.NewTxBuilder(options...)

	namespace := appns.MustNewV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize))
	blob, err := blobtypes.NewBlob(namespace, tmrand.Bytes(size), appconsts.ShareVersionZero)
	require.NoError(t, err)

	commitments, err := blobtypes.CreateCommitments([]*tmproto.Blob{blob})
	require.NoError(t, err)

	// Construct MsgPayForBlobs directly (instead of via NewMsgPayForBlobs) so
	// that blob validation is skipped and over-sized blobs can be produced.
	msg := &blobtypes.MsgPayForBlobs{
		Signer:           address.String(),
		Namespaces:       [][]byte{namespace.Bytes()},
		ShareCommitments: commitments,
		BlobSizes:        []uint32{uint32(size)},
		ShareVersions:    []uint32{uint32(appconsts.ShareVersionZero)},
	}

	signedTx, err := signer.BuildSignedTx(builder, msg)
	require.NoError(t, err)

	rawTx, err := encoder(signedTx)
	require.NoError(t, err)

	blobTx, err := coretypes.MarshalBlobTx(rawTx, blob)
	require.NoError(t, err)
	return blobTx
}

func RandBlobTxsWithNamespaces(enc sdk.TxEncoder, namespaces []appns.Namespace, sizes []int) []coretypes.Tx {
const acc = "signer"
kr := testfactory.GenerateKeyring(acc)
Expand Down
1 change: 1 addition & 0 deletions x/blob/types/errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,4 +33,5 @@ var (
ErrNoShareCommitments = errors.Register(ModuleName, 11135, "no share commitments provided")
ErrInvalidNamespace = errors.Register(ModuleName, 11136, "invalid namespace")
ErrInvalidNamespaceVersion = errors.Register(ModuleName, 11137, "invalid namespace version")
ErrBlobSizeTooLarge = errors.Register(ModuleName, 11138, "blob size too large")
)
50 changes: 48 additions & 2 deletions x/blob/types/payforblob.go
Original file line number Diff line number Diff line change
Expand Up @@ -319,8 +319,9 @@ func ValidateBlobs(blobs ...*Blob) error {
return err
}

if len(blob.Data) == 0 {
return ErrZeroBlobSize
err = validateBlobData(blob.Data)
if err != nil {
return err
}

if !slices.Contains(appconsts.SupportedShareVersions, uint8(blob.ShareVersion)) {
Expand All @@ -331,6 +332,51 @@ func ValidateBlobs(blobs ...*Blob) error {
return nil
}

// validateBlobData returns nil if data has a valid size. It returns
// ErrZeroBlobSize for empty data and ErrBlobSizeTooLarge for data that
// exceeds the blob size upper bound.
func validateBlobData(data []byte) error {
	if len(data) == 0 {
		return ErrZeroBlobSize
	}
	if upperBound := blobSizeUpperBound(); len(data) > upperBound {
		return ErrBlobSizeTooLarge.Wrapf("max blob size is %d bytes", upperBound)
	}
	return nil
}

// blobSizeUpperBound returns an upper bound for the max valid blob size based
// on the upper bounds for square size and block bytes. Note it is possible
// that blobs of this size do not fit in a block because the limiting factor
// may be a parameter in application state (i.e. GovMaxSquareSize).
// Additionally, even if the application state parameters are at their upper
// bounds, the number of shares available for blob bytes may be less than
// estimated here (i.e. if the PFB tx shares occupy more than one share). As a
// result, this upper bound may over-estimate the max valid blob size but it
// should not under-estimate it. Consequently, it may be used to immediately
// reject blobs that are too large, while blobs smaller than this upper bound
// may still fail to be included in a block.
func blobSizeUpperBound() int {
	squareSize := appconsts.SquareSizeUpperBound(appconsts.LatestVersion)

	// Reserve one share out of the full square because at least one share
	// must be occupied by the PFB tx associated with this blob.
	blobShares := squareSize*squareSize - 1
	blobBytes := blobShares * appconsts.ContinuationSparseShareContentSize

	return min(blobBytes, coretypes.MaxBlockSizeBytes)
}

// min returns the minimum of two ints. This function can be removed once we
// upgrade to Go 1.21, which provides a built-in min.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// extractBlobComponents separates and returns the components of a slice of
// blobs.
func extractBlobComponents(pblobs []*tmproto.Blob) (namespaceVersions []uint32, namespaceIds [][]byte, sizes []uint32, shareVersions []uint32) {
Expand Down
20 changes: 19 additions & 1 deletion x/blob/types/payforblob_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -469,7 +469,7 @@ func TestValidateBlobs(t *testing.T) {
expectError: true,
},
{
name: "empty blob",
name: "empty blob data",
blob: &Blob{
Data: []byte{},
NamespaceId: appns.RandomBlobNamespace().ID,
Expand All @@ -478,6 +478,16 @@ func TestValidateBlobs(t *testing.T) {
},
expectError: true,
},
{
name: "blob data too large",
blob: &Blob{
Data: bytes.Repeat([]byte{1}, 10_000_000), // 10 MB
NamespaceId: appns.RandomBlobNamespace().ID,
ShareVersion: uint32(appconsts.DefaultShareVersion),
NamespaceVersion: uint32(appns.NamespaceVersionZero),
},
expectError: true,
},
{
name: "invalid namespace",
blob: &Blob{
Expand All @@ -499,3 +509,11 @@ func TestValidateBlobs(t *testing.T) {
}
}
}

// Test_blobSizeUpperBound is contrived but it verifies that the blob size
// upper bound is a constant value.
func Test_blobSizeUpperBound(t *testing.T) {
	const want = 7_896_606
	if got := blobSizeUpperBound(); got != want {
		t.Errorf("blobSizeUpperBound() = %d, want %d", got, want)
	}
}
21 changes: 21 additions & 0 deletions x/blob/types/test/blob_tx_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,27 @@ func TestValidateBlobTx(t *testing.T) {
},
expectedErr: nil,
},
{
name: "invalid blob tx because blob size is zero",
getTx: func() tmproto.BlobTx {
rawBtx := validRawBtx()
btx, _ := coretypes.UnmarshalBlobTx(rawBtx)
btx.Blobs[0].Data = []byte{}
return btx
},
expectedErr: types.ErrZeroBlobSize,
},
{
name: "invalid blob tx because blob size is too large",
getTx: func() tmproto.BlobTx {
rawBtx := validRawBtx()
btx, isBlob := coretypes.UnmarshalBlobTx(rawBtx)
require.True(t, isBlob)
btx.Blobs[0].Data = bytes.Repeat([]byte{0}, 10_000_000) // 10 MB
return btx
},
expectedErr: types.ErrBlobSizeTooLarge,
},
}

for _, tt := range tests {
Expand Down
Loading