
Rewind fix #6

Merged · 11 commits · Jul 16, 2024
7 changes: 7 additions & 0 deletions common/format.go
@@ -80,3 +80,10 @@ func (t PrettyAge) String() string {
}
return result
}

type PrettySeconds int

// String formats the integer number of seconds as a string with an "s" suffix
func (t PrettySeconds) String() string {
return fmt.Sprintf("%ds", t)
}
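
For context, a minimal usage sketch of the new helper (illustrative only, not part of the diff; it assumes the type lives in the common package next to PrettyAge):

// PrettySeconds implements fmt.Stringer, so it formats itself when printed or logged.
elapsed := common.PrettySeconds(42)
fmt.Println(elapsed) // prints "42s"
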
34 changes: 22 additions & 12 deletions core/blockchain.go
@@ -137,18 +137,22 @@ type CacheConfig struct {
SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
Preimages bool // Whether to store preimage of trie key to the disk

SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
AllowForceUpdate bool // Enable to force snapshots based on commit counts

Suggested change
AllowForceUpdate bool // Enable to force snapshots based on commit counts
AllowForceUpdate bool // Enable to force root snapshots based on the configured commits threshold

CommitThreshold int // Number of commits to force a root snapshot

Suggested change
CommitThreshold int // Number of commits to force a root snapshot
CommitThreshold int // Threshold of commits to force a root snapshot update

SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}

// defaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
var defaultCacheConfig = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: true,
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: true,
AllowForceUpdate: false,
CommitThreshold: 128,
}
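
As a rough sketch of how a caller might opt into the new forced-update path by overriding these defaults (only the field names and default values come from this diff; the concrete settings below are illustrative):

cacheConfig := &CacheConfig{
	TrieCleanLimit:   256,
	TrieDirtyLimit:   256,
	TrieTimeLimit:    5 * time.Minute,
	SnapshotLimit:    256,
	SnapshotWait:     true,
	AllowForceUpdate: true, // enable forced root snapshots
	CommitThreshold:  64,   // force a root snapshot update every 64 commits
}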

// BlockChain represents the canonical chain given a database with a genesis
@@ -397,10 +401,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
recover = true
}
snapconfig := snapshot.Config{
CacheSize: bc.cacheConfig.SnapshotLimit,
Recovery: recover,
NoBuild: bc.cacheConfig.SnapshotNoBuild,
AsyncBuild: !bc.cacheConfig.SnapshotWait,
CacheSize: bc.cacheConfig.SnapshotLimit,
Recovery: recover,
NoBuild: bc.cacheConfig.SnapshotNoBuild,
AsyncBuild: !bc.cacheConfig.SnapshotWait,
AllowForceUpdate: bc.cacheConfig.AllowForceUpdate,
CommitThreshold: bc.cacheConfig.CommitThreshold,
}
bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
}
@@ -623,7 +629,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// current freezer limit to start nuking id underflown
pivot := rawdb.ReadLastPivotNumber(bc.db)
frozen, _ := bc.db.Ancients()

updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
@@ -1358,6 +1363,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
if err != nil {
return err
}
log.Debug("Committed State", "root", root)
// If we're running an archive node, always flush
if bc.cacheConfig.TrieDirtyDisabled {
return bc.triedb.Commit(root, false)
Expand All @@ -1368,6 +1374,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.

current := block.NumberU64()
// Flush limits are not considered for the first TriesInMemory blocks.
log.Debug("Trie in memory", "current", current, "inMemory", TriesInMemory)

Would this be a large struct to print? If so, maybe change it to Trace level.

if current <= TriesInMemory {
return nil
}
@@ -1376,12 +1383,14 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
nodes, imgs = bc.triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)

if nodes > limit || imgs > 4*1024*1024 {
bc.triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
chosen := current - TriesInMemory
flushInterval := time.Duration(bc.flushInterval.Load())
log.Debug("Flush Interval", "proc", bc.gcproc, "interval", flushInterval, "block", chosen, "flushing", (bc.gcproc > flushInterval))
// If we exceeded time allowance, flush an entire trie to disk
if bc.gcproc > flushInterval {
// If the header is missing (canonical chain behind), we're reorging a low
@@ -1401,6 +1410,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.gcproc = 0
}
}

// Garbage collect anything below our required write retention
for !bc.triegc.Empty() {
root, number := bc.triegc.Pop()
4 changes: 2 additions & 2 deletions core/blockchain_repair_test.go
@@ -1808,7 +1808,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
if tt.commitBlock > 0 {
chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0, false); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
}
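
Note that snaps.Cap now takes a third boolean argument, and every caller touched by this PR passes false to keep the previous behaviour. The argument's name is not visible in this diff; given the new AllowForceUpdate/CommitThreshold settings it presumably toggles a forced disk-root update. A hedged caller sketch under that assumption:

// Flatten the diff layers above root, retaining up to 128 of them, without
// forcing a disk-root update (false preserves the pre-change semantics).
if err := chain.snaps.Cap(block.Root(), 128, false); err != nil {
	log.Warn("Failed to cap snapshot tree", "root", block.Root(), "err", err)
}
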
@@ -1935,7 +1935,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[1:2]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
if err := chain.snaps.Cap(blocks[1].Root(), 0, false); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}

2 changes: 1 addition & 1 deletion core/blockchain_sethead_test.go
@@ -2009,7 +2009,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
if tt.commitBlock > 0 {
chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0, false); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
}
2 changes: 1 addition & 1 deletion core/blockchain_snapshot_test.go
@@ -108,7 +108,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
// Flushing the entire snap tree into the disk, the
// relevant (a) snapshot root and (b) snapshot generator
// will be persisted atomically.
chain.snaps.Cap(blocks[point-1].Root(), 0)
chain.snaps.Cap(blocks[point-1].Root(), 0, false)
diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
2 changes: 1 addition & 1 deletion core/state/pruner/pruner.go
@@ -190,7 +190,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
// Pruning is done, now drop the "useless" layers from the snapshot.
// Firstly, flushing the target layer into the disk. After that all
// diff layers below the target will all be merged into the disk.
if err := snaptree.Cap(root, 0); err != nil {
if err := snaptree.Cap(root, 0, false); err != nil {
return err
}
// Secondly, flushing the snapshot journal into the disk. All diff
3 changes: 3 additions & 0 deletions core/state/snapshot/difflayer.go
@@ -75,6 +75,9 @@ var (
bloomDestructHasherOffset = 0
bloomAccountHasherOffset = 0
bloomStorageHasherOffset = 0

// Count for number of commits before fore disk root update

Suggested change
// Count for number of commits before fore disk root update
// Count for number of commits before forcing disk root update

defaultCommitThreshold = 128
)

func init() {
8 changes: 4 additions & 4 deletions core/state/snapshot/disklayer_test.go
@@ -133,7 +133,7 @@ func TestDiskMerge(t *testing.T) {
}); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err)
}
if err := snaps.Cap(diffRoot, 0); err != nil {
if err := snaps.Cap(diffRoot, 0, false); err != nil {
t.Fatalf("failed to flatten snapshot tree: %v", err)
}
// Retrieve all the data through the disk layer and validate it
@@ -356,7 +356,7 @@ func TestDiskPartialMerge(t *testing.T) {
}); err != nil {
t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
}
if err := snaps.Cap(diffRoot, 0); err != nil {
if err := snaps.Cap(diffRoot, 0, false); err != nil {
t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
}
// Retrieve all the data through the disk layer and validate it
@@ -467,7 +467,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
}, nil); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err)
}
if err := snaps.Cap(diffRoot, 0); err != nil {
if err := snaps.Cap(diffRoot, 0, false); err != nil {
t.Fatalf("failed to flatten snapshot tree: %v", err)
}
blob := rawdb.ReadSnapshotGenerator(db)
@@ -489,7 +489,7 @@
}
diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
diskLayer.genMarker = nil // Construction finished
if err := snaps.Cap(diffTwoRoot, 0); err != nil {
if err := snaps.Cap(diffTwoRoot, 0, false); err != nil {
t.Fatalf("failed to flatten snapshot tree: %v", err)
}
blob = rawdb.ReadSnapshotGenerator(db)
12 changes: 6 additions & 6 deletions core/state/snapshot/iterator_test.go
@@ -248,7 +248,7 @@ func TestAccountIteratorTraversal(t *testing.T) {
aggregatorMemoryLimit = limit
}()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x04"), 2)
snaps.Cap(common.HexToHash("0x04"), 2, false)
verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)

it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@@ -296,7 +296,7 @@ func TestStorageIteratorTraversal(t *testing.T) {
aggregatorMemoryLimit = limit
}()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x04"), 2)
snaps.Cap(common.HexToHash("0x04"), 2, false)
verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)

it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
@@ -384,7 +384,7 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
aggregatorMemoryLimit = limit
}()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x09"), 2)
snaps.Cap(common.HexToHash("0x09"), 2, false)

it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
for it.Next() {
@@ -483,7 +483,7 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
aggregatorMemoryLimit = limit
}()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x09"), 2)
snaps.Cap(common.HexToHash("0x09"), 2, false)

it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
for it.Next() {
@@ -541,7 +541,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
aggregatorMemoryLimit = limit
}()
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
snaps.Cap(common.HexToHash("0x80"), 2)
snaps.Cap(common.HexToHash("0x80"), 2, false)

verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)

@@ -580,7 +580,7 @@ func TestAccountIteratorFlattening(t *testing.T) {
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
defer it.Release()

if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
if err := snaps.Cap(common.HexToHash("0x04"), 1, false); err != nil {
t.Fatalf("failed to flatten snapshot stack: %v", err)
}
//verifyIterator(t, 7, it)