enable database pruning #125

Closed · wants to merge 1 commit
10 changes: 5 additions & 5 deletions cmd/utils/flags.go
@@ -1835,11 +1835,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	cfg.Genesis = core.DefaultMorphHoleskyGenesisBlock()
 	// forced for mainnet
 	// disable pruning
-	if ctx.GlobalString(GCModeFlag.Name) != GCModeArchive {
-		log.Crit("Must use --gcmode=archive")
-	}
-	log.Info("Pruning disabled")
-	cfg.NoPruning = true
+	// if ctx.GlobalString(GCModeFlag.Name) != GCModeArchive {
+	// 	log.Crit("Must use --gcmode=archive")
+	// }
+	// log.Info("Pruning disabled")
+	// cfg.NoPruning = true
 	// disable prefetch
 	log.Info("Prefetch disabled")
 	cfg.NoPrefetch = true
5 changes: 3 additions & 2 deletions core/state/statedb.go
@@ -1006,9 +1006,10 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	}
 	// The onleaf func is called _serially_, so we can reuse the same account
 	// for unmarshalling every time.
-	var account types.StateAccount
+	var account *types.StateAccount
 	root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
-		if err := rlp.DecodeBytes(leaf, &account); err != nil {
+		var err error
+		if account, err = types.UnmarshalStateAccount(leaf); err != nil {
 			return nil
 		}
 		if account.Root != s.db.TrieDB().EmptyRoot() {
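For context, the commit callback above now decodes leaves with types.UnmarshalStateAccount instead of rlp.DecodeBytes, presumably because zktrie account leaves are not RLP-encoded. The helper's definition is not part of this PR; the sketch below only illustrates the call shape, and its flat three-word layout is an assumption, not the fork's actual encoding.

```go
package sketch

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
)

// StateAccount mirrors only the fields this diff touches.
type StateAccount struct {
	Nonce   uint64
	Balance *big.Int
	Root    common.Hash // storage trie root, compared against EmptyRoot() in the diff
}

// unmarshalStateAccountSketch stands in for types.UnmarshalStateAccount.
// ASSUMPTION: a flat encoding of three 32-byte words (nonce, balance,
// storage root). The real encoding in this fork is not visible in the PR
// and may well differ.
func unmarshalStateAccountSketch(leaf []byte) (*StateAccount, error) {
	if len(leaf) < 3*32 {
		return nil, fmt.Errorf("account leaf too short: %d bytes", len(leaf))
	}
	return &StateAccount{
		Nonce:   new(big.Int).SetBytes(leaf[0:32]).Uint64(),
		Balance: new(big.Int).SetBytes(leaf[32:64]),
		Root:    common.BytesToHash(leaf[64:96]),
	}, nil
}
```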
5 changes: 3 additions & 2 deletions eth/ethconfig/config.go
@@ -18,14 +18,15 @@
 package ethconfig
 
 import (
-	"github.com/scroll-tech/go-ethereum/consensus/l2"
 	"math/big"
 	"os"
 	"os/user"
 	"path/filepath"
 	"runtime"
 	"time"
 
+	"github.com/scroll-tech/go-ethereum/consensus/l2"
+
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/consensus"
 	"github.com/scroll-tech/go-ethereum/consensus/clique"
@@ -79,7 +80,7 @@ var Defaults = Config{
 	TrieCleanCacheJournal:   "triecache",
 	TrieCleanCacheRejournal: 60 * time.Minute,
 	TrieDirtyCache:          256,
-	TrieTimeout:             60 * time.Minute,
+	TrieTimeout:             1 * time.Minute,
 	SnapshotCache:           102,
 	FilterLogCacheSize:      32,
 	Miner:                   miner.DefaultConfig,
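For context, this default feeds CacheConfig.TrieTimeLimit, which upstream go-ethereum uses to decide when to flush the in-memory trie to disk; cutting it from 60 minutes to 1 minute makes a pruning node persist state far more often. A simplified sketch of the upstream trigger (not code from this PR):

```go
package sketch

import "time"

// shouldFlushTrie paraphrases upstream go-ethereum's garbage-collection
// trigger in core/blockchain.go: gcproc accumulates block-processing time,
// and the in-memory trie is committed to disk once it exceeds the configured
// TrieTimeLimit (populated from the TrieTimeout default above). Sketch only.
func shouldFlushTrie(gcproc, trieTimeLimit time.Duration) bool {
	return gcproc > trieTimeLimit
}
```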
89 changes: 69 additions & 20 deletions trie/database.go
@@ -17,6 +17,7 @@
 package trie
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"io"
@@ -33,6 +34,8 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rlp"
zktrie "github.com/scroll-tech/zktrie/trie"
zkt "github.com/scroll-tech/zktrie/types"
)

var (
@@ -92,7 +95,8 @@ type Database struct {
 	childrenSize common.StorageSize // Storage size of the external children tracking
 	preimages    *preimageStore     // The store for caching preimages
 
-	lock sync.RWMutex
+	lock    sync.RWMutex
+	rawLock sync.RWMutex // For zk raw dirties
 }
 
 // rawNode is a simple binary blob used to differentiate between collapsed trie
@@ -140,6 +144,13 @@ type rawShortNode struct {
 func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
 func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }
 
+type rawZkNode struct {
+	n *zktrie.Node
+}
+
+func (n rawZkNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
+func (n rawZkNode) fstring(ind string) string { panic("this should never end up in a live trie") }
+
 // cachedNode is all the information we know about a single cached trie node
 // in the memory database write layer.
 type cachedNode struct {
Expand Down Expand Up @@ -168,6 +179,9 @@ func (n *cachedNode) rlp() []byte {
 	if node, ok := n.node.(rawNode); ok {
 		return node
 	}
+	if node, ok := n.node.(rawZkNode); ok {
+		return node.n.CanonicalValue()
+	}
 	blob, err := rlp.EncodeToBytes(n.node)
 	if err != nil {
 		panic(err)
@@ -181,6 +195,10 @@ func (n *cachedNode) obj(hash common.Hash) node {
 	if node, ok := n.node.(rawNode); ok {
 		return mustDecodeNode(hash[:], node)
 	}
+
+	if node, ok := n.node.(rawZkNode); ok {
+		return node
+	}
 	return expandNode(hash[:], n.node)
 }

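For context, the rawZkNode wrapper added above lets zktrie nodes ride through the same cache plumbing as MPT nodes: rlp() serializes them via CanonicalValue(), and stored bytes are re-parsed with zktrie.NewNodeFromBytes. A minimal round-trip sketch using only the API visible in this diff:

```go
package sketch

import (
	zktrie "github.com/scroll-tech/zktrie/trie"
)

// roundTripZkNode shows the encode/decode pair the cache layer now relies on:
// cachedNode.rlp() emits CanonicalValue(), and Database.node() re-parses
// stored bytes with zktrie.NewNodeFromBytes. A parse failure is the signal to
// fall back to the legacy MPT decoder (mustDecodeNode) instead.
func roundTripZkNode(n *zktrie.Node) (*zktrie.Node, error) {
	return zktrie.NewNodeFromBytes(n.CanonicalValue())
}
```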
@@ -208,6 +226,16 @@ func forGatherChildren(n node, onChild func(hash common.Hash)) {
 		}
 	case hashNode:
 		onChild(common.BytesToHash(n))
+	case rawZkNode:
+		switch n.n.Type {
+		case zktrie.NodeTypeBranch_0, zktrie.NodeTypeBranch_1, zktrie.NodeTypeBranch_2, zktrie.NodeTypeBranch_3:
+			if !bytes.Equal(n.n.ChildL[:], common.Hash{}.Bytes()) {
+				onChild(common.BytesToHash(n.n.ChildL.Bytes()))
+			}
+			if !bytes.Equal(n.n.ChildR[:], common.Hash{}.Bytes()) {
+				onChild(common.BytesToHash(n.n.ChildR.Bytes()))
+			}
+		}
 	case valueNode, nil, rawNode:
 	default:
 		panic(fmt.Sprintf("unknown node type: %T", n))
@@ -231,8 +259,7 @@ func simplifyNode(n node) node {
 			}
 		}
 		return node
-
-	case valueNode, hashNode, rawNode:
+	case valueNode, hashNode, rawNode, rawZkNode:
 		return n
 
 	default:
@@ -360,12 +387,18 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
 // node retrieves a cached trie node from memory, or returns nil if none can be
 // found in the memory cache.
 func (db *Database) node(hash common.Hash) node {
+	zkHash := zkt.NewHashFromBytes(hash[:])
+	nodeKey := common.BytesToHash(BitReverse(zkHash[:]))
+
 	// Retrieve the node from the clean cache if available
 	if db.cleans != nil {
-		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
+		if enc := db.cleans.Get(nil, nodeKey[:]); enc != nil {
 			memcacheCleanHitMeter.Mark(1)
 			memcacheCleanReadMeter.Mark(int64(len(enc)))
-			return mustDecodeNode(hash[:], enc)
+
+			if zkNode, err := zktrie.NewNodeFromBytes(enc); err == nil {
+				return rawZkNode{zkNode}
+			}
 		}
 	}
 	// Retrieve the node from the dirty cache if available
@@ -381,15 +414,20 @@ func (db *Database) node(hash common.Hash) node {
 	memcacheDirtyMissMeter.Mark(1)
 
 	// Content unavailable in memory, attempt to retrieve from disk
-	enc, err := db.diskdb.Get(hash[:])
+	enc, err := db.diskdb.Get(nodeKey[:])
 	if err != nil || enc == nil {
 		return nil
 	}
 	if db.cleans != nil {
-		db.cleans.Set(hash[:], enc)
+		db.cleans.Set(nodeKey[:], enc)
 		memcacheCleanMissMeter.Mark(1)
 		memcacheCleanWriteMeter.Mark(int64(len(enc)))
 	}
+
+	if zkNode, err := zktrie.NewNodeFromBytes(enc); err == nil {
+		return rawZkNode{zkNode}
+	}
+
 	return mustDecodeNode(hash[:], enc)
 }

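For context, the zkHash/nodeKey pair computed above (and repeated in Cap and commit below) derives the database key for a zktrie node from a geth hash. BitReverse is called but not defined in this PR; the sketch below assumes a simple byte-order reversal (zktrie hashes are little-endian field elements, geth keys big-endian), which may not match the fork's actual helper.

```go
package sketch

import (
	"github.com/scroll-tech/go-ethereum/common"
	zkt "github.com/scroll-tech/zktrie/types"
)

// bitReverseSketch stands in for the BitReverse helper this diff calls but
// does not define. ASSUMPTION: it flips the byte order of the 32-byte hash;
// the real helper is not part of this PR and may differ.
func bitReverseSketch(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

// zkNodeKey mirrors the derivation repeated in node(), Cap() and commit():
// geth hash -> zktrie hash -> reversed bytes used as the database key.
func zkNodeKey(hash common.Hash) common.Hash {
	zkHash := zkt.NewHashFromBytes(hash[:])
	return common.BytesToHash(bitReverseSketch(zkHash[:]))
}
```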
@@ -585,7 +623,11 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	for size > limit && oldest != (common.Hash{}) {
 		// Fetch the oldest referenced node and push into the batch
 		node := db.dirties[oldest]
-		rawdb.WriteTrieNode(batch, oldest, node.rlp())
+
+		zkHash := zkt.NewHashFromBytes(oldest[:])
+		nodeKey := common.BytesToHash(BitReverse(zkHash[:]))
+
+		rawdb.WriteTrieNode(batch, nodeKey, node.rlp())
 
 		// If we exceeded the ideal batch size, commit and reset
 		if batch.ValueSize() >= ethdb.IdealBatchSize {
@@ -654,18 +696,21 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
 	start := time.Now()
 	batch := db.diskdb.NewBatch()
 
-	db.lock.Lock()
-	for _, v := range db.rawDirties {
-		batch.Put(v.K, v.V)
-	}
-	for k := range db.rawDirties {
-		delete(db.rawDirties, k)
-	}
-	db.lock.Unlock()
-	if err := batch.Write(); err != nil {
-		return err
+	if (db.newest == common.Hash{}) {
+		db.lock.Lock()
+
+		for _, v := range db.rawDirties {
+			batch.Put(v.K, v.V)
+		}
+		for k := range db.rawDirties {
+			delete(db.rawDirties, k)
+		}
+		db.lock.Unlock()
+		if err := batch.Write(); err != nil {
+			return err
+		}
+		batch.Reset()
 	}
-	batch.Reset()
 
 	if (node == common.Hash{}) {
 		return nil
Expand All @@ -675,6 +720,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
 	if db.preimages != nil {
 		db.preimages.commit(true)
 	}
+
 	// Move the trie itself into the batch, flushing if enough data is accumulated
 	nodes, storage := len(db.dirties), db.dirtiesSize

@@ -731,7 +777,10 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
 		return err
 	}
 	// If we've reached an optimal batch size, commit and start over
-	rawdb.WriteTrieNode(batch, hash, node.rlp())
+	zkHash := zkt.NewHashFromBytes(hash[:])
+	nodeKey := common.BytesToHash(BitReverse(zkHash[:]))
+
+	rawdb.WriteTrieNode(batch, nodeKey, node.rlp())
 	if callback != nil {
 		callback(hash)
 	}