Celestia daProvider
Ferret-san committed Jun 7, 2024
1 parent 5694a9d commit 5c0a511
Showing 21 changed files with 636 additions and 300 deletions.
3 changes: 1 addition & 2 deletions .gitmodules
@@ -1,7 +1,6 @@
[submodule "go-ethereum"]
path = go-ethereum
url = https://github.com/celestiaorg/go-ethereum.git
branch = celestia-v2.3.1
url = https://github.com/OffchainLabs/go-ethereum.git
[submodule "fastcache"]
path = fastcache
url = https://github.com/OffchainLabs/fastcache.git
52 changes: 15 additions & 37 deletions arbnode/batch_poster.go
@@ -42,8 +42,6 @@ import (
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/genericconf"
"github.com/offchainlabs/nitro/das"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/util"
@@ -101,7 +99,6 @@ type BatchPoster struct {
gasRefunderAddr common.Address
building *buildingBatch
dapWriter daprovider.Writer
celestiaWriter celestiaTypes.DataAvailabilityWriter
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
@@ -284,8 +281,8 @@ type BatchPosterOpts struct {
Config BatchPosterConfigFetcher
DeployInfo *chaininfo.RollupAddresses
TransactOpts *bind.TransactOpts
// Todo (change to support multiple writers)
DAPWriter daprovider.Writer
CelestiaWriter celestiaTypes.DataAvailabilityWriter
ParentChainID *big.Int
}

@@ -332,7 +329,6 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e
gasRefunderAddr: opts.Config().gasRefunder,
bridgeAddr: opts.DeployInfo.Bridge,
dapWriter: opts.DAPWriter,
celestiaWriter: opts.CelestiaWriter,
redisLock: redisLock,
}
b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20)
@@ -1255,39 +1251,21 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
return false, nil
}

if b.celestiaWriter != nil {
celestiaMsg, err := b.celestiaWriter.Store(ctx, sequencerMsg)
if err != nil {
if config.DisableCelestiaFallbackStoreDataOnChain && config.DisableCelestiaFallbackStoreDataOnDAS {
return false, errors.New("unable to post batch to Celestia and fallback storing data on chain and das is disabled")
}
if config.DisableCelestiaFallbackStoreDataOnDAS {
log.Warn("Falling back to storing data on chain ", "err", err)
} else {
log.Warn("Falling back to storing data on DAC ", "err", err)

}

// We nest the anytrust logic here for now as using this fork likely means your primary DA is Celestia
// and the Anytrust DAC is instead used as a fallback
if b.dapWriter != nil {
if !b.redisLock.AttemptLock(ctx) {
return false, errAttemptLockFailed
}
if b.dapWriter != nil {
if !b.redisLock.AttemptLock(ctx) {
return false, errAttemptLockFailed
}

gotNonce, gotMeta, err := b.dataPoster.GetNextNonceAndMeta(ctx)
if err != nil {
return false, err
}
if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) {
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}
sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain)
if err != nil {
return false, err
}
} else {
sequencerMsg = celestiaMsg
gotNonce, gotMeta, err := b.dataPoster.GetNextNonceAndMeta(ctx)
if err != nil {
return false, err
}
if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) {
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}
sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain)
if err != nil {
return false, err
}
}

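With this change the Celestia-specific branch is gone from `maybePostSequencerBatch`: the batch poster only ever calls `b.dapWriter.Store(...)`, and Celestia is plugged in behind the generic `daprovider.Writer` interface (see `celestiaTypes.NewWriterForCelestia` in the node.go diff below). The following is a minimal sketch of what such an adapter could look like; the `CelestiaWriter` method set, the on-chain fallback behaviour, and the exact `Store` signature are assumptions for illustration, not the committed implementation.

```go
package celestiatypes

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/log"
)

// CelestiaWriter is the narrow surface the adapter needs from the Celestia
// client: persist a batch as a blob and return the serialized blob pointer
// (already prefixed with the Celestia header byte) to be posted on chain.
// This method set is an assumption for illustration.
type CelestiaWriter interface {
	Store(ctx context.Context, message []byte) ([]byte, error)
}

// writerForCelestia adapts a CelestiaWriter to the daprovider.Writer shape
// the batch poster calls above (message, retention timeout, signature,
// fallback switch).
type writerForCelestia struct {
	celestiaWriter CelestiaWriter
}

func NewWriterForCelestia(w CelestiaWriter) *writerForCelestia {
	return &writerForCelestia{celestiaWriter: w}
}

func (w *writerForCelestia) Store(ctx context.Context, message []byte, timeout uint64, sig []byte, disableFallbackStoreDataOnChain bool) ([]byte, error) {
	msg, err := w.celestiaWriter.Store(ctx, message)
	if err != nil {
		if disableFallbackStoreDataOnChain {
			return nil, errors.New("unable to post batch to Celestia and fallback storing data on chain is disabled")
		}
		// Fall back to posting the raw batch as calldata on the parent chain.
		log.Warn("Falling back to storing data on chain", "err", err)
		return message, nil
	}
	return msg, nil
}
```

The call site stays exactly as shown in the diff above: `sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain)`.
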
20 changes: 18 additions & 2 deletions arbnode/node.go
@@ -527,8 +527,8 @@ func createNodeImpl(
var daWriter das.DataAvailabilityServiceWriter
var daReader das.DataAvailabilityServiceReader
var dasLifecycleManager *das.LifecycleManager
var celestiaReader celestiaTypes.DataAvailabilityReader
var celestiaWriter celestiaTypes.DataAvailabilityWriter
var celestiaReader celestiaTypes.CelestiaReader
var celestiaWriter celestiaTypes.CelestiaWriter
if config.DataAvailability.Enable {
if config.BatchPoster.Enable {
daWriter, daReader, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox)
@@ -554,6 +554,16 @@
return nil, errors.New("a data availability service is required for this chain, but it was not configured")
}

if config.Celestia.Enable {
celestiaService, err := celestia.NewCelestiaDA(&config.Celestia, nil)
if err != nil {
return nil, err
}

celestiaReader = celestiaService
celestiaWriter = celestiaService
}

// We support a nil txStreamer for the pruning code
if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil {
return nil, errors.New("data availability service required but unconfigured")
@@ -565,6 +575,9 @@
if blobReader != nil {
dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader))
}
if celestiaReader != nil {
dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(celestiaReader))
}
inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders, config.SnapSyncTest)
if err != nil {
return nil, err
@@ -695,6 +708,9 @@
if daWriter != nil {
dapWriter = daprovider.NewWriterForDAS(daWriter)
}
if celestiaWriter != nil {
dapWriter = celestiaTypes.NewWriterForCelestia(celestiaWriter)
}
batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{
DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
L1Reader: l1Reader,
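`createNodeImpl` now appends a Celestia entry to the same `dapReaders` slice that already carries the DAS and blob readers. A rough sketch of the adapter behind `celestiaTypes.NewReaderForCelestia` follows; the method names mirror the commented-out `dAProviderForCelestia` this commit removes from `arbstate/inbox.go`, but the exact `daprovider.Reader` signature and the `CelestiaReader` method set are assumptions, not the committed code.

```go
// Fragment — assumed to live in das/celestia/types alongside BlobPointer,
// SquareData, CelestiaMessageHeaderFlag, and RecoverPayloadFromCelestiaBatch.

// CelestiaReader's assumed shape, based on the call
// celestiaReader.Read(ctx, &blobPointer) in the removed inbox.go code.
type CelestiaReader interface {
	Read(ctx context.Context, blobPointer *BlobPointer) ([]byte, *SquareData, error)
}

type readerForCelestia struct {
	celestiaReader CelestiaReader
}

func NewReaderForCelestia(celestiaReader CelestiaReader) *readerForCelestia {
	return &readerForCelestia{celestiaReader: celestiaReader}
}

// IsValidHeaderByte routes sequencer messages whose first byte carries the
// Celestia header flag (0x63) to this reader.
func (c *readerForCelestia) IsValidHeaderByte(headerByte byte) bool {
	return headerByte&CelestiaMessageHeaderFlag == CelestiaMessageHeaderFlag
}

// RecoverPayloadFromBatch skips the 40-byte sequencer header, deserializes
// the blob pointer, fetches the blob from Celestia, and records SHA-256
// preimages so the replay binary can re-derive the batch.
func (c *readerForCelestia) RecoverPayloadFromBatch(
	ctx context.Context,
	batchNum uint64,
	batchBlockHash common.Hash,
	sequencerMsg []byte,
	preimages map[arbutil.PreimageType]map[common.Hash][]byte,
	validateSeqMsg bool,
) ([]byte, error) {
	return RecoverPayloadFromCelestiaBatch(ctx, batchNum, sequencerMsg, c.celestiaReader, preimages)
}
```
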
5 changes: 0 additions & 5 deletions arbstate/daprovider/util.go
@@ -20,7 +20,6 @@ import (
"github.com/offchainlabs/nitro/arbos/util"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/blsSignatures"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/das/dastree"
)

@@ -62,10 +61,6 @@ func RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte
}
}

type CelestiaDataAvailabilityReader interface {
celestiaTypes.DataAvailabilityReader
}

// DASMessageHeaderFlag indicates that this data is a certificate for the data availability service,
// which will retrieve the full batch data.
const DASMessageHeaderFlag byte = 0x80
126 changes: 2 additions & 124 deletions arbstate/inbox.go
@@ -105,15 +105,11 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
if !foundDA {
if daprovider.IsDASMessageHeaderByte(payload[0]) {
log.Error("No DAS Reader configured, but sequencer message found with DAS header")
} else if daprovider.IsCelestiaMessageHeaderByte(payload[0]) {
log.Error("No Celestia Reader configured, but sequencer message found with Celestia header")
} else if daprovider.IsBlobHashesHeaderByte(payload[0]) {
return nil, daprovider.ErrNoBlobReader
}
// TODO (Diego)
// else if IsBlobHashesHeaderByte(payload[0]) {
// return nil, ErrNoBlobReader
// } else if IsCelestiaMessageHeaderByte(payload[0]) {
// log.Error("No Celestia Reader configured, but sequencer message found with Celestia header")
// }
}
}

@@ -167,124 +163,6 @@
return parsedMsg, nil
}

// TODO (Diego):
// func NewDAProviderCelestia(celestia celestiaTypes.DataAvailabilityReader) *dAProviderForCelestia {
// return &dAProviderForCelestia{
// celestia: celestia,
// }
// }

// type dAProviderForCelestia struct {
// celestia celestiaTypes.DataAvailabilityReader
// }

// func (c *dAProviderForCelestia) IsValidHeaderByte(headerByte byte) bool {
// return IsCelestiaMessageHeaderByte(headerByte)
// }

// func (c *dAProviderForCelestia) RecoverPayloadFromBatch(
// ctx context.Context,
// batchNum uint64,
// batchBlockHash common.Hash,
// sequencerMsg []byte,
// preimages map[arbutil.PreimageType]map[common.Hash][]byte,
// keysetValidationMode KeysetValidationMode,
// ) ([]byte, error) {
// return RecoverPayloadFromCelestiaBatch(ctx, batchNum, sequencerMsg, c.celestia, preimages)
// }

// func RecoverPayloadFromCelestiaBatch(
// ctx context.Context,
// batchNum uint64,
// sequencerMsg []byte,
// celestiaReader celestiaTypes.DataAvailabilityReader,
// preimages map[arbutil.PreimageType]map[common.Hash][]byte,
// ) ([]byte, error) {
// var sha256Preimages map[common.Hash][]byte
// if preimages != nil {
// if preimages[arbutil.Sha2_256PreimageType] == nil {
// preimages[arbutil.Sha2_256PreimageType] = make(map[common.Hash][]byte)
// }
// sha256Preimages = preimages[arbutil.Sha2_256PreimageType]
// }

// buf := bytes.NewBuffer(sequencerMsg[40:])

// header, err := buf.ReadByte()
// if err != nil {
// log.Error("Couldn't deserialize Celestia header byte", "err", err)
// return nil, nil
// }
// if !IsCelestiaMessageHeaderByte(header) {
// log.Error("Couldn't deserialize Celestia header byte", "err", errors.New("tried to deserialize a message that doesn't have the Celestia header"))
// return nil, nil
// }

// recordPreimage := func(key common.Hash, value []byte) {
// sha256Preimages[key] = value
// }

// blobPointer := celestiaTypes.BlobPointer{}
// blobBytes := buf.Bytes()
// err = blobPointer.UnmarshalBinary(blobBytes)
// if err != nil {
// log.Error("Couldn't unmarshal Celestia blob pointer", "err", err)
// return nil, nil
// }

// payload, squareData, err := celestiaReader.Read(ctx, &blobPointer)
// if err != nil {
// log.Error("Failed to resolve blob pointer from celestia", "err", err)
// return nil, err
// }

// // we read a batch that is to be discarded, so we return the empty batch
// if len(payload) == 0 {
// return payload, nil
// }

// if sha256Preimages != nil {
// if squareData == nil {
// log.Error("squareData is nil, read from replay binary, but preimages are empty")
// return nil, err
// }

// odsSize := squareData.SquareSize / 2
// rowIndex := squareData.StartRow
// for _, row := range squareData.Rows {
// treeConstructor := tree.NewConstructor(recordPreimage, odsSize)
// root, err := tree.ComputeNmtRoot(treeConstructor, uint(rowIndex), row)
// if err != nil {
// log.Error("Failed to compute row root", "err", err)
// return nil, err
// }

// rowRootMatches := bytes.Equal(squareData.RowRoots[rowIndex], root)
// if !rowRootMatches {
// log.Error("Row roots do not match", "eds row root", squareData.RowRoots[rowIndex], "calculated", root)
// log.Error("Row roots", "row_roots", squareData.RowRoots)
// return nil, err
// }
// rowIndex += 1
// }

// rowsCount := len(squareData.RowRoots)
// slices := make([][]byte, rowsCount+rowsCount)
// copy(slices[0:rowsCount], squareData.RowRoots)
// copy(slices[rowsCount:], squareData.ColumnRoots)

// dataRoot := tree.HashFromByteSlices(recordPreimage, slices)

// dataRootMatches := bytes.Equal(dataRoot, blobPointer.DataRoot[:])
// if !dataRootMatches {
// log.Error("Data Root do not match", "blobPointer data root", blobPointer.DataRoot, "calculated", dataRoot)
// return nil, nil
// }
// }

// return payload, nil
// }

type inboxMultiplexer struct {
backend InboxBackend
delayedMessagesRead uint64
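For reference, the routing that replaces both the removed error branch and the commented-out Celestia provider above: `parseSequencerMessage` walks the configured `dapReaders` and hands the batch to the first reader that claims its header byte. The loop below is a self-contained sketch with a trimmed-down `Reader` interface; the real `daprovider.Reader` takes additional arguments (batch number, block hash, preimage map, keyset validation mode).

```go
package dispatch

import (
	"context"
	"fmt"
)

// Reader is a trimmed stand-in for daprovider.Reader, reduced to the two
// methods the dispatch relies on.
type Reader interface {
	IsValidHeaderByte(headerByte byte) bool
	RecoverPayloadFromBatch(ctx context.Context, sequencerMsg []byte) ([]byte, error)
}

// recoverPayload mirrors the routing in parseSequencerMessage: the first
// reader whose IsValidHeaderByte accepts the header byte recovers the batch;
// if none does, either the message is malformed or a reader is missing.
func recoverPayload(ctx context.Context, readers []Reader, sequencerMsg []byte) ([]byte, error) {
	if len(sequencerMsg) == 0 {
		return sequencerMsg, nil
	}
	for _, r := range readers {
		if r != nil && r.IsValidHeaderByte(sequencerMsg[0]) {
			return r.RecoverPayloadFromBatch(ctx, sequencerMsg)
		}
	}
	return nil, fmt.Errorf("no DA reader configured for header byte 0x%02x", sequencerMsg[0])
}
```
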
5 changes: 2 additions & 3 deletions cmd/replay/main.go
@@ -334,9 +334,8 @@ func main() {
}
// TODO (Diego): Add daProviders = append(daProviders, arbstate.NewDAProviderCelestia(&PreimageCelestiaReader{}))
var dapReaders []daprovider.Reader
if dasReader != nil {
dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader))
}
dapReaders = append(dapReaders, daprovider.NewReaderForDAS(&PreimageDASReader{}))
dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(&PreimageCelestiaReader{}))
dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{}))
inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode)
ctx := context.Background()
2 changes: 1 addition & 1 deletion contracts
Submodule contracts updated 67 files
+48 −0 .github/workflows/audit-ci.yml
+24 −0 .github/workflows/slither.yml
+0 −3 .gitmodules
+1 −0 .prettierignore
+52 −0 audit-ci.jsonc
+10 −4 foundry.toml
+4 −20 hardhat.config.ts
+0 −1 lib/blobstream-contracts
+12 −6 package.json
+0 −263 patches/@nomiclabs+hardhat-etherscan+3.1.0.patch
+0 −5 remappings.txt
+2 −1 scripts/config.ts.example
+11 −1 scripts/createERC20Rollup.ts
+10 −1 scripts/createEthRollup.ts
+6 −1 scripts/deployment.ts
+0 −102 scripts/deploymentCelestiaReuseExisting.ts
+108 −52 scripts/deploymentUtils.ts
+117 −0 scripts/local-deployment/deployCreatorAndCreateRollup.ts
+221 −48 scripts/rollupCreation.ts
+7 −0 slither.config.json
+13,099 −0 slither.db.json
+0 −6 src/bridge/ISequencerInbox.sol
+71 −49 src/bridge/SequencerInbox.sol
+0 −167 src/celestia/BlobstreamVerifier.sol
+0 −349 src/celestia/DAVerifier.sol
+0 −44 src/celestia/IBlobstreamX.sol
+208 −0 src/chain/CacheManager.sol
+0 −45 src/challenge/ChallengeLib.sol
+24 −9 src/challenge/ChallengeManager.sol
+13 −0 src/challenge/IChallengeManager.sol
+0 −3 src/libraries/Error.sol
+52 −0 src/mocks/Benchmarks.sol
+0 −83 src/mocks/MockBlobstream.sol
+117 −0 src/mocks/MultiCallTest.sol
+0 −488 src/mocks/OneStepProverHostIoCelestiaMock.sol
+126 −0 src/mocks/Program.sol
+176 −0 src/mocks/SdkStorage.sol
+22 −0 src/mocks/SimpleCacheManager.sol
+10 −0 src/osp/IOneStepProofEntry.sol
+81 −7 src/osp/OneStepProofEntry.sol
+64 −20 src/osp/OneStepProver0.sol
+214 −53 src/osp/OneStepProverHostIo.sol
+4 −33 src/osp/OneStepProverMemory.sol
+2 −0 src/precompiles/ArbDebug.sol
+51 −9 src/precompiles/ArbOwner.sol
+119 −0 src/precompiles/ArbWasm.sol
+33 −0 src/precompiles/ArbWasmCache.sol
+88 −47 src/state/Deserialize.sol
+38 −3 src/state/Instructions.sol
+92 −15 src/state/Machine.sol
+22 −4 src/state/MerkleProof.sol
+3 −1 src/state/Module.sol
+54 −0 src/state/ModuleMemory.sol
+58 −0 src/state/MultiStack.sol
+6 −1 src/state/StackFrame.sol
+13 −1 src/state/Value.sol
+6 −1 src/state/ValueStack.sol
+11 −13 test/contract/arbRollup.spec.ts
+4 −6 test/contract/sequencerInboxForceInclude.spec.ts
+12 −8 test/contract/validatorWallet.spec.ts
+167 −0 test/foundry/CacheManager.t.sol
+107 −10 test/foundry/ChallengeManager.t.sol
+3 −1 test/signatures/ChallengeManager
+9 −0 test/signatures/OneStepProofEntry
+1 −1 test/signatures/test-sigs.bash
+1 −0 test/storage/ChallengeManager
+971 −4,604 yarn.lock
16 changes: 4 additions & 12 deletions das/celestia/celestia.go
@@ -69,18 +69,6 @@ var (
ErrTxIncorrectAccountSequence = errors.New("incorrect account sequence")
)

// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer
// which will be used to retrieve data from Celestia
const CelestiaMessageHeaderFlag byte = 0x63

func hasBits(checking byte, bits byte) bool {
return (checking & bits) == bits
}

func IsCelestiaMessageHeaderByte(header byte) bool {
return hasBits(header, CelestiaMessageHeaderFlag)
}

type CelestiaDA struct {
Cfg *DAConfig
Client *openrpc.Client
@@ -107,6 +95,10 @@ func CelestiaDAConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.String(prefix+".validator-config"+".blobstream", "", "Blobstream address, only used for validation")
}

// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer
// which will be used to retrieve data from Celestia
const CelestiaMessageHeaderFlag byte = 0x63

func NewCelestiaDA(cfg *DAConfig, ethClient *ethclient.Client) (*CelestiaDA, error) {
if cfg == nil {
return nil, errors.New("celestia cfg cannot be blank")
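The `hasBits`/`IsCelestiaMessageHeaderByte` helpers removed above presumably move next to the `CelestiaMessageHeaderFlag` constant in `das/celestia/types`. For illustration, the flag check plus a hypothetical helper that tags a serialized blob pointer before it is returned from `Store`; the `tagCelestiaMessage` name and the existence of a `MarshalBinary` counterpart to the `UnmarshalBinary` used on the read path are my assumptions, not part of the commit.

```go
// hasBits and IsCelestiaMessageHeaderByte are copied from the removed hunk.
func hasBits(checking byte, bits byte) bool {
	return (checking & bits) == bits
}

func IsCelestiaMessageHeaderByte(header byte) bool {
	return hasBits(header, CelestiaMessageHeaderFlag)
}

// tagCelestiaMessage (hypothetical helper) prepends the Celestia header flag
// to a serialized blob pointer so parseSequencerMessage can route the batch
// to the Celestia reader.
func tagCelestiaMessage(serializedBlobPointer []byte) []byte {
	return append([]byte{CelestiaMessageHeaderFlag}, serializedBlobPointer...)
}
```
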
9 changes: 5 additions & 4 deletions das/celestia/tree/hash.go
@@ -1,6 +1,7 @@
package tree

import (
"github.com/offchainlabs/nitro/arbutil"
"github.com/tendermint/tendermint/crypto/tmhash"

"github.com/ethereum/go-ethereum/common"
@@ -18,19 +19,19 @@ func emptyHash() []byte {
}

// returns tmhash(0x00 || leaf)
func leafHash(record func(bytes32, []byte), leaf []byte) []byte {
func leafHash(record func(bytes32, []byte, arbutil.PreimageType), leaf []byte) []byte {
preimage := append(leafPrefix, leaf...)
hash := tmhash.Sum(preimage)

record(common.BytesToHash(hash), preimage)
record(common.BytesToHash(hash), preimage, arbutil.Sha2_256PreimageType)
return hash
}

// returns tmhash(0x01 || left || right)
func innerHash(record func(bytes32, []byte), left []byte, right []byte) []byte {
func innerHash(record func(bytes32, []byte, arbutil.PreimageType), left []byte, right []byte) []byte {
preimage := append(innerPrefix, append(left, right...)...)
hash := tmhash.Sum(preimage)

record(common.BytesToHash(hash), preimage)
record(common.BytesToHash(hash), preimage, arbutil.Sha2_256PreimageType)
return tmhash.Sum(append(innerPrefix, append(left, right...)...))
}