core/rawdb: refactor db inspector for extending multiple ancient store #25896

Merged
8 commits merged on Oct 28, 2022
4 changes: 2 additions & 2 deletions core/blockchain.go
@@ -124,7 +124,7 @@ const (
BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// CacheConfig contains the configuration values for the trie database
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
@@ -1409,7 +1409,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
// In theory we should fire a ChainHeadEvent when we inject
// In theory, we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of
// canonical blocks. Avoid firing too many ChainHeadEvents,
// we will fire an accumulated ChainHeadEvent and disable fire
86 changes: 86 additions & 0 deletions core/blockchain_test.go
@@ -4007,3 +4007,89 @@ func TestTxIndexer(t *testing.T) {
os.RemoveAll(frdir)
}
}

func TestCreateThenDeletePreByzantium(t *testing.T) {
// We use the Ropsten chain config instead of the Testchain config; this is
// deliberate: we want to use pre-Byzantium rules, where we have intermediate
// state roots between transactions.
testCreateThenDelete(t, params.RopstenChainConfig)
}
func TestCreateThenDeletePostByzantium(t *testing.T) {
testCreateThenDelete(t, params.TestChainConfig)
}

// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening
// within the same block.
func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
var (
engine = ethash.NewFaker()
// A sender who makes transactions, has some funds
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
destAddress = crypto.CreateAddress(address, 0)
funds = big.NewInt(1000000000000000)
)

// runtime code is 0x60ffff : PUSH1 0xFF SELFDESTRUCT, a.k.a SELFDESTRUCT(0xFF)
code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...)
initCode := []byte{
// SSTORE 1:1
byte(vm.PUSH1), 0x1,
byte(vm.PUSH1), 0x1,
byte(vm.SSTORE),
// Get the runtime-code on the stack
byte(vm.PUSH32)}
initCode = append(initCode, code...)
initCode = append(initCode, []byte{
byte(vm.PUSH1), 0x0, // offset
byte(vm.MSTORE),
byte(vm.PUSH1), 0x3, // size
byte(vm.PUSH1), 0x0, // offset
byte(vm.RETURN), // return the first 3 bytes of memory, i.e. the runtime code
}...)
gspec := &Genesis{
Config: config,
Alloc: GenesisAlloc{
address: {Balance: funds},
},
}
nonce := uint64(0)
signer := types.HomesteadSigner{}
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) {
fee := big.NewInt(1)
if b.header.BaseFee != nil {
fee = b.header.BaseFee
}
b.SetCoinbase(common.Address{1})
tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
Nonce: nonce,
GasPrice: new(big.Int).Set(fee),
Gas: 100000,
Data: initCode,
})
nonce++
b.AddTx(tx)
tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
Nonce: nonce,
GasPrice: new(big.Int).Set(fee),
Gas: 100000,
To: &destAddress,
})
b.AddTx(tx)
nonce++
})
// Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
// Import the blocks
for _, block := range blocks {
if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
}
}
}
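For context on the pre-Byzantium distinction that the two tests above exercise: before Byzantium, every receipt carries the intermediate state root computed after its transaction, while afterwards only a status code is stored. The following standalone sketch (not part of this PR) illustrates that difference with the exported receipt fields; the hash value is an arbitrary placeholder.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Pre-Byzantium: the receipt records the intermediate state root
	// computed after the transaction, so state is committed between txs.
	preByz := &types.Receipt{PostState: common.HexToHash("0x01").Bytes()}

	// Post-Byzantium: only a success/failure status code is recorded.
	postByz := &types.Receipt{Status: types.ReceiptStatusSuccessful}

	fmt.Printf("pre-byz post-state: %x\n", preByz.PostState)
	fmt.Printf("post-byz status:    %d\n", postByz.Status)
}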
33 changes: 0 additions & 33 deletions core/rawdb/ancient_scheme.go
@@ -16,8 +16,6 @@

package rawdb

import "fmt"

// The list of table names of chain freezer.
const (
// chainFreezerHeaderTable indicates the name of the freezer header table.
@@ -53,34 +51,3 @@ var (

// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}

// InspectFreezerTable dumps out the index of a specific freezer table. The passed
// ancient indicates the path of root ancient directory where the chain freezer can
// be opened. Start and end specify the range for dumping out indexes.
// Note this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
var (
path string
tables map[string]bool
)
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
noSnappy, exist := tables[tableName]
if !exist {
var names []string
for name := range tables {
names = append(names, name)
}
return fmt.Errorf("unknown table, supported ones: %v", names)
}
table, err := newFreezerTable(path, tableName, noSnappy, true)
if err != nil {
return err
}
table.dumpIndexStdout(start, end)
return nil
}
134 changes: 134 additions & 0 deletions core/rawdb/ancient_utils.go
@@ -0,0 +1,134 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
"fmt"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)

// freezerInfo contains the basic information of the freezer.
type freezerInfo struct {
name string // The identifier of the freezer
head uint64 // The number of the last stored item in the freezer
tail uint64 // The number of the first stored item in the freezer
sizes map[string]common.StorageSize // The storage size per table
}

// count returns the number of stored items in the freezer.
func (info freezerInfo) count() uint64 {
return info.head - info.tail + 1
}

// totalSize returns the storage size of the entire freezer.
func (info freezerInfo) totalSize() common.StorageSize {
var total common.StorageSize
for _, size := range info.sizes {
total += size
}
return total
}

// summary returns a string-representation of the freezerInfo.
func (info freezerInfo) summary() [][]string {
var ret [][]string
for table, size := range info.sizes {
ret = append(ret, []string{
fmt.Sprintf("Ancient store (%s)", strings.Title(info.name)),
strings.Title(table),
size.String(),
fmt.Sprintf("%d", info.count()),
})
}
return ret
}

// inspectFreezers inspects all freezers registered in the system.
func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
var infos []freezerInfo
for _, freezer := range freezers {
switch freezer {
case chainFreezerName:
// The chain ancient store is a bit special. It's always opened along
// with the key-value store, so inspect the chain store directly.
info := freezerInfo{
name: freezer,
sizes: make(map[string]common.StorageSize),
}
// Retrieve storage size of every contained table.
for table := range chainFreezerNoSnappy {
size, err := db.AncientSize(table)
if err != nil {
return nil, err
}
info.sizes[table] = common.StorageSize(size)
}
// Retrieve the number of the last stored item
ancients, err := db.Ancients()
if err != nil {
return nil, err
}
info.head = ancients - 1

// Retrieve the number of the first stored item
tail, err := db.Tail()
if err != nil {
return nil, err
}
info.tail = tail
infos = append(infos, info)

default:
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
}
return infos, nil
}

// InspectFreezerTable dumps out the index of a specific freezer table. The passed
// ancient indicates the path of the root ancient directory where the chain freezer
// can be opened. Start and end specify the range of indexes to dump.
// Note that this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
var (
path string
tables map[string]bool
)
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
noSnappy, exist := tables[tableName]
if !exist {
var names []string
for name := range tables {
names = append(names, name)
}
return fmt.Errorf("unknown table, supported ones: %v", names)
}
table, err := newFreezerTable(path, tableName, noSnappy, true)
if err != nil {
return err
}
table.dumpIndexStdout(start, end)
return nil
}
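As a quick usage sketch of the exported helper above: the call below dumps the first ten index entries of the chain freezer's header table. The directory path is a placeholder you would point at your node's chaindata ancient directory, and the "chain"/"headers" strings are assumed to match the chainFreezerName and chainFreezerHeaderTable constants referenced in this file.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Dump index entries 0..9 of the chain freezer's header table.
	// The ancient path is illustrative; use <datadir>/geth/chaindata/ancient.
	err := rawdb.InspectFreezerTable("/path/to/geth/chaindata/ancient", "chain", "headers", 0, 10)
	if err != nil {
		log.Fatal(err)
	}
}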
37 changes: 10 additions & 27 deletions core/rawdb/database.go
@@ -379,13 +379,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
beaconHeaders stat
cliqueSnaps stat

// Ancient store statistics
ancientHeadersSize common.StorageSize
ancientBodiesSize common.StorageSize
ancientReceiptsSize common.StorageSize
ancientTdsSize common.StorageSize
ancientHashesSize common.StorageSize

// Les statistic
chtTrieNodes stat
bloomTrieNodes stat
@@ -473,20 +466,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
logged = time.Now()
}
}
// Inspect append-only file store then.
ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil {
*ancientSizes[i] += common.StorageSize(size)
total += common.StorageSize(size)
}
}
// Get number of ancient rows inside the freezer
ancients := counter(0)
if count, err := db.Ancients(); err == nil {
ancients = counter(count)
}
// Display the database statistic.
// Display the database statistics of the key-value store.
stats := [][]string{
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
@@ -504,14 +484,18 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
}
// Then inspect all registered append-only file stores.
ancients, err := inspectFreezers(db)
if err != nil {
return err
}
for _, ancient := range ancients {
stats = append(stats, ancient.summary()...)
Member: Moving parts of the formatting code into the freezerInfo feels wrong. It is very specific to how this table is formatted here. I think it's a very low-level implementation detail leaking into an API method (even if private). Please inline the formatting and use getters on ancient to get the infos needed for the table.

Member Author: fixed

total += ancient.totalSize()
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})
@@ -521,6 +505,5 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
if unaccounted.size > 0 {
log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
}

return nil
}
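To illustrate the reviewer's suggestion above, keeping the table formatting at the call site and exposing only getters on the freezer info, here is a self-contained sketch. The freezerStats type, its getters (Name, Count, Size, Tables), and the numbers are stand-ins for illustration, not the API the PR finally adopted.

package main

import (
	"fmt"
	"strings"
)

// freezerStats is a simplified stand-in for rawdb's freezerInfo; the getter
// names and fields below are illustrative only.
type freezerStats struct {
	name       string
	head, tail uint64
	sizes      map[string]float64 // table name -> size in bytes
}

func (f freezerStats) Name() string              { return f.name }
func (f freezerStats) Count() uint64             { return f.head - f.tail + 1 }
func (f freezerStats) Size(table string) float64 { return f.sizes[table] }
func (f freezerStats) Tables() []string {
	var names []string
	for name := range f.sizes {
		names = append(names, name)
	}
	return names
}

func main() {
	ancient := freezerStats{
		name:  "chain",
		head:  15_000_000,
		tail:  0,
		sizes: map[string]float64{"headers": 6.4e9, "bodies": 180e9},
	}
	// Formatting stays at the call site: the info object only exposes getters,
	// so the table layout is not baked into the rawdb helper itself.
	var rows [][]string
	for _, table := range ancient.Tables() {
		rows = append(rows, []string{
			fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.Name())),
			strings.Title(table),
			fmt.Sprintf("%.2f GB", ancient.Size(table)/1e9),
			fmt.Sprintf("%d", ancient.Count()),
		})
	}
	for _, row := range rows {
		fmt.Println(strings.Join(row, " | "))
	}
}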
13 changes: 7 additions & 6 deletions core/state/snapshot/snapshot.go
@@ -187,8 +187,9 @@ type Tree struct {
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
// - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
// This case happens when the snapshot is 'ahead' of the state trie.
// - if the 'recovery' parameter is true, memory diff-layers and the disk-layer
// will all be kept. This case happens when the snapshot is 'ahead' of the
// state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
@@ -199,16 +200,16 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root
triedb: triedb,
layers: make(map[common.Hash]snapshot),
}
// Create the building waiter iff the background generation is allowed
if !config.NoBuild && !config.AsyncBuild {
defer snap.waitBuild()
}
// Attempt to load a previously persisted snapshot and rebuild one if failed
head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
if disabled {
log.Warn("Snapshot maintenance disabled (syncing)")
return snap, nil
}
// Create the building waiter iff the background generation is allowed
if !config.NoBuild && !config.AsyncBuild {
defer snap.waitBuild()
}
Comment on lines +209 to +212

Contributor: This is an interesting change, and not immediately obvious why it was needed (or correct). It would make a difference in case we're syncing: in the old code we would create a goroutine to waitBuild, and in the new code we would not.

Any comments about this?

Member Author: Previously, we would always create a waiter whenever "sync-style" snapshot construction was requested, although in production sync-style generation is always disabled, which means the waiter was never created.

But while we are syncing, snapshot construction is disabled; if we still wait for the construction via the waiter, it's a deadlock.

This change ensures we only create the waiter when the construction is really initialized.

if err != nil {
log.Warn("Failed to load snapshot", "err", err)
if !config.NoBuild {
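The waitBuild discussion above can be illustrated with a toy sketch (not geth code): registering the deferred wait before discovering that generation is disabled could block on work that never starts, whereas registering it only after the builder is actually launched cannot deadlock. All names here are illustrative.

package main

import (
	"fmt"
	"time"
)

// buildSnapshot mimics the ordering adopted in the PR: check whether
// generation is disabled first, and only then register the deferred waiter.
func buildSnapshot(disabled bool) {
	if disabled {
		fmt.Println("snapshot maintenance disabled (syncing), nothing to wait for")
		return
	}
	done := make(chan struct{})
	// Safe to wait here: the builder goroutine below is guaranteed to start.
	defer func() {
		<-done
		fmt.Println("snapshot generation finished")
	}()
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to build the snapshot
		close(done)
	}()
}

func main() {
	buildSnapshot(true)  // syncing: returns immediately, no waiter registered
	buildSnapshot(false) // normal: waits for the background build to finish
}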