Skip to content

Commit

Permalink
core: mark storage as deleted for destructed contracts
Browse files Browse the repository at this point in the history
  • Loading branch information
rjl493456442 committed Oct 8, 2022
1 parent 5647516 commit 7ddb9f6
Show file tree
Hide file tree
Showing 7 changed files with 207 additions and 69 deletions.
8 changes: 5 additions & 3 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ var (
storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
storageDeleteTimer = metrics.NewRegisteredTimer("chain/storage/deletes", nil)
storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
Expand Down Expand Up @@ -124,7 +125,7 @@ const (
BlockChainVersion uint64 = 8
)

// CacheConfig contains the configuration values for the trie caching/pruning
// CacheConfig contains the configuration values for the trie database
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
Expand Down Expand Up @@ -1409,7 +1410,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
// In theory we should fire a ChainHeadEvent when we inject
// In theory, we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of
// canonical blocks. Avoid firing too many ChainHeadEvents,
// we will fire an accumulated ChainHeadEvent and disable fire
Expand Down Expand Up @@ -1716,11 +1717,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
storageDeleteTimer.Update(statedb.StorageDeletes) // Storage deletes are complete, we can mark them
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates + statedb.StorageDeletes

blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

Expand Down
33 changes: 0 additions & 33 deletions core/rawdb/ancient_scheme.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,6 @@

package rawdb

import "fmt"

// The list of table names of chain freezer.
const (
// chainFreezerHeaderTable indicates the name of the freezer header table.
Expand Down Expand Up @@ -53,34 +51,3 @@ var (

// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}

// InspectFreezerTable dumps out the index of a specific freezer table. The passed
// ancient indicates the path of root ancient directory where the chain freezer can
// be opened. Start and end specify the range for dumping out indexes.
// Note this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
var (
path string
tables map[string]bool
)
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
noSnappy, exist := tables[tableName]
if !exist {
var names []string
for name := range tables {
names = append(names, name)
}
return fmt.Errorf("unknown table, supported ones: %v", names)
}
table, err := newFreezerTable(path, tableName, noSnappy, true)
if err != nil {
return err
}
table.dumpIndexStdout(start, end)
return nil
}
134 changes: 134 additions & 0 deletions core/rawdb/ancient_utils.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"fmt"
	"sort"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// freezerInfo contains the basic information of a freezer: its identifier,
// the inclusive item-number range currently stored, and the on-disk size
// of each contained table.
type freezerInfo struct {
	name  string                        // The identifier of the freezer
	head  uint64                        // The number (item index) of the last stored item in the freezer
	tail  uint64                        // The number (item index) of the first stored item in the freezer
	sizes map[string]common.StorageSize // The storage size per table, keyed by table name
}

// count reports how many items the freezer currently holds, derived from
// the inclusive [tail, head] item range.
func (info freezerInfo) count() uint64 {
	stored := info.head - info.tail
	return stored + 1
}

// totalSize returns the storage size of the entire freezer, summed over
// all of its contained tables.
func (info freezerInfo) totalSize() common.StorageSize {
	total := common.StorageSize(0)
	for _, tableSize := range info.sizes {
		total += tableSize
	}
	return total
}

// summary returns a string-representation of the freezerInfo, one row per
// contained table in the form {database, category, size, items}.
//
// Rows are emitted in sorted table-name order so repeated inspections
// produce deterministic output (map iteration order is randomized in Go).
func (info freezerInfo) summary() [][]string {
	// title upper-cases the first character of the given name. It stands in
	// for the deprecated strings.Title (deprecated since Go 1.18), which is
	// sufficient for the single-word ASCII identifiers used as freezer and
	// table names.
	title := func(s string) string {
		if s == "" {
			return s
		}
		return strings.ToUpper(s[:1]) + s[1:]
	}
	// Sort the table names for stable, reproducible output.
	names := make([]string, 0, len(info.sizes))
	for table := range info.sizes {
		names = append(names, table)
	}
	sort.Strings(names)

	var ret [][]string
	for _, table := range names {
		ret = append(ret, []string{
			fmt.Sprintf("Ancient store (%s)", title(info.name)),
			title(table),
			info.sizes[table].String(),
			fmt.Sprintf("%d", info.count()),
		})
	}
	return ret
}

// inspectFreezers inspects all freezers registered in the system and returns
// their basic statistics: per-table storage sizes and the [tail, head] range
// of stored item numbers. An error is returned if a registered freezer is
// not recognized.
func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
	var infos []freezerInfo
	for _, freezer := range freezers {
		switch freezer {
		case chainFreezerName:
			// Chain ancient store is a bit special. It's always opened along
			// with the key-value store, inspect the chain store directly.
			info := freezerInfo{
				name:  freezer,
				sizes: make(map[string]common.StorageSize),
			}
			// Retrieve storage size of every contained table.
			for table := range chainFreezerNoSnappy {
				size, err := db.AncientSize(table)
				if err != nil {
					return nil, err
				}
				info.sizes[table] = common.StorageSize(size)
			}
			// Retrieve the number of the last stored item. NOTE(review): if
			// the freezer is empty (Ancients() == 0) this underflows to
			// MaxUint64; count() then wraps back to zero, but head itself is
			// not a meaningful item number in that case — TODO confirm
			// callers only consume head via count().
			ancients, err := db.Ancients()
			if err != nil {
				return nil, err
			}
			info.head = ancients - 1

			// Retrieve the number of the first stored item.
			tail, err := db.Tail()
			if err != nil {
				return nil, err
			}
			info.tail = tail
			infos = append(infos, info)

		default:
			return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
		}
	}
	return infos, nil
}

// InspectFreezerTable dumps out the index of a specific freezer table. The
// passed ancient indicates the path of the root ancient directory where the
// chain freezer can be opened. Start and end specify the range for dumping
// out indexes.
// Note this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
	// Only the chain freezer is currently supported; reject anything else.
	if freezerName != chainFreezerName {
		return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
	}
	path, tables := resolveChainFreezerDir(ancient), chainFreezerNoSnappy

	noSnappy, exist := tables[tableName]
	if !exist {
		// Unknown table; report the valid choices to the caller.
		names := make([]string, 0, len(tables))
		for name := range tables {
			names = append(names, name)
		}
		return fmt.Errorf("unknown table, supported ones: %v", names)
	}
	table, err := newFreezerTable(path, tableName, noSnappy, true)
	if err != nil {
		return err
	}
	table.dumpIndexStdout(start, end)
	return nil
}
37 changes: 10 additions & 27 deletions core/rawdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -379,13 +379,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
beaconHeaders stat
cliqueSnaps stat

// Ancient store statistics
ancientHeadersSize common.StorageSize
ancientBodiesSize common.StorageSize
ancientReceiptsSize common.StorageSize
ancientTdsSize common.StorageSize
ancientHashesSize common.StorageSize

// Les statistic
chtTrieNodes stat
bloomTrieNodes stat
Expand Down Expand Up @@ -473,20 +466,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
logged = time.Now()
}
}
// Inspect append-only file store then.
ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil {
*ancientSizes[i] += common.StorageSize(size)
total += common.StorageSize(size)
}
}
// Get number of ancient rows inside the freezer
ancients := counter(0)
if count, err := db.Ancients(); err == nil {
ancients = counter(count)
}
// Display the database statistic.
// Display the database statistic of key-value store.
stats := [][]string{
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
Expand All @@ -504,14 +484,18 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
}
// Inspect all registered append-only file store then.
ancients, err := inspectFreezers(db)
if err != nil {
return err
}
for _, ancient := range ancients {
stats = append(stats, ancient.summary()...)
total += ancient.totalSize()
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})
Expand All @@ -521,6 +505,5 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
if unaccounted.size > 0 {
log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
}

return nil
}
13 changes: 7 additions & 6 deletions core/state/snapshot/snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -187,8 +187,9 @@ type Tree struct {
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
// - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
// This case happens when the snapshot is 'ahead' of the state trie.
// - if the 'recovery' parameter is true, memory diff-layers and the disk-layer
// will all be kept. This case happens when the snapshot is 'ahead' of the
// state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
Expand All @@ -199,16 +200,16 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root
triedb: triedb,
layers: make(map[common.Hash]snapshot),
}
// Create the building waiter iff the background generation is allowed
if !config.NoBuild && !config.AsyncBuild {
defer snap.waitBuild()
}
// Attempt to load a previously persisted snapshot and rebuild one if failed
head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild)
if disabled {
log.Warn("Snapshot maintenance disabled (syncing)")
return snap, nil
}
// Create the building waiter iff the background generation is allowed
if !config.NoBuild && !config.AsyncBuild {
defer snap.waitBuild()
}
if err != nil {
log.Warn("Failed to load snapshot", "err", err)
if !config.NoBuild {
Expand Down
37 changes: 37 additions & 0 deletions core/state/state_object.go
Original file line number Diff line number Diff line change
Expand Up @@ -395,6 +395,43 @@ func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) {
return nodes, err
}

// DeleteTrie deletes the storage trie of the object from the db, collecting
// every trie node (path and blob) so the whole storage can be marked as
// removed. It returns the node set flagged for deletion, or (nil, nil) when
// the accumulated node data exceeds the deletion threshold — in that case
// the dangling nodes are deliberately left behind, since they are
// unreachable anyway.
func (s *stateObject) DeleteTrie(db Database) (*trie.NodeSet, error) {
	// Track the amount of time wasted on iterating and deleting the storage trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.db.StorageDeletes += time.Since(start) }(time.Now())
	}
	stTrie, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
	if err != nil {
		return nil, err
	}
	// It can be an attack vector when iterating a huge contract. Stop collecting
	// in case the accumulated nodes reach the threshold. It's fine to not clean
	// up the dangling trie nodes since they are non-accessible dangling nodes
	// anyway.
	const storageDeleteLimit = 1024 * 1024 * 1024 // 1GiB soft cap on collected node data

	var (
		paths [][]byte
		blobs [][]byte
		size  common.StorageSize
		iter  = stTrie.NodeIterator(nil)
	)
	for iter.Next(true) {
		// Embedded nodes have no standalone database entry; skip them.
		if iter.Hash() == (common.Hash{}) {
			continue
		}
		path, blob := common.CopyBytes(iter.Path()), common.CopyBytes(iter.NodeBlob())
		paths = append(paths, path)
		blobs = append(blobs, blob)

		size += common.StorageSize(len(path) + len(blob))
		if size > storageDeleteLimit {
			// Threshold exceeded, abandon the deletion entirely.
			return nil, nil
		}
	}
	// Surface iteration failures (e.g. a missing trie node) instead of
	// silently committing a partial deletion set.
	if err := iter.Error(); err != nil {
		return nil, err
	}
	return trie.NewNodeSetWithDeletion(s.addrHash, paths, blobs), nil
}

// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
func (s *stateObject) AddBalance(amount *big.Int) {
Expand Down
14 changes: 14 additions & 0 deletions core/state/statedb.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ type StateDB struct {
StorageHashes time.Duration
StorageUpdates time.Duration
StorageCommits time.Duration
StorageDeletes time.Duration
SnapshotAccountReads time.Duration
SnapshotStorageReads time.Duration
SnapshotCommits time.Duration
Expand Down Expand Up @@ -933,6 +934,19 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
storageTrieNodesUpdated += updates
storageTrieNodesDeleted += deleted
}
} else {
// Account is deleted, nuke out the storage data as well.
set, err := obj.DeleteTrie(s.db)
if err != nil {
return common.Hash{}, err
}
if set != nil {
if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
_, deleted := set.Size()
storageTrieNodesDeleted += deleted
}
}
}
if len(s.stateObjectsDirty) > 0 {
Expand Down

0 comments on commit 7ddb9f6

Please sign in to comment.