Commit

add to configs
Brindrajsinh-Chauhan committed Apr 16, 2024
1 parent e6470d0 commit 5f07d0f
Showing 6 changed files with 57 additions and 60 deletions.
36 changes: 17 additions & 19 deletions core/blockchain.go
@@ -137,18 +137,22 @@ type CacheConfig struct {
SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
Preimages bool // Whether to store preimage of trie key to the disk

SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
EnableSnapRootInterval bool // Enable forcing a snapshot root update after a time interval
SnapRootThreshold int // Time in seconds after which to force a snapshot root update
SnapshotNoBuild bool // Whether the background generation is allowed
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}

// defaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
var defaultCacheConfig = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: true,
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: true,
EnableSnapRootInterval: false,
SnapRootThreshold: 600, // 10 min
}

// BlockChain represents the canonical chain given a database with a genesis
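For reference, a minimal sketch (not part of this commit) of how a caller could opt into the new interval-based snapshot roots through CacheConfig. Only the two new field names come from this diff; the import path, the helper name and the chosen values are illustrative assumptions.

package snapcfg

import (
	"time"

	// Import path assumed to follow the upstream go-ethereum layout; adjust for this fork.
	"github.com/ethereum/go-ethereum/core"
)

// snapIntervalCacheConfig builds a cache config that enables time-forced
// snapshot root updates on top of the usual defaults (sketch only).
func snapIntervalCacheConfig() *core.CacheConfig {
	return &core.CacheConfig{
		TrieCleanLimit:         256,
		TrieDirtyLimit:         256,
		TrieTimeLimit:          5 * time.Minute,
		SnapshotLimit:          256,
		SnapshotWait:           true,
		EnableSnapRootInterval: true, // opt in to interval-based forcing
		SnapRootThreshold:      900,  // seconds; values below the default are raised in snapshot.New
	}
}

NewBlockChain then forwards the two new fields into the snapshot.Config it builds, as the next hunk shows.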
@@ -401,7 +405,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
Recovery: recover,
NoBuild: bc.cacheConfig.SnapshotNoBuild,
AsyncBuild: !bc.cacheConfig.SnapshotWait,
EnableSnapRootInterval: true,
EnableSnapRootInterval: bc.cacheConfig.EnableSnapRootInterval,
SnapRootThreshold: bc.cacheConfig.SnapRootThreshold,
}
bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
}
@@ -612,7 +617,6 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
log.Debug("Setting Head beyond root", "root", root)
if !bc.chainmu.TryLock() {
return 0, errChainStopped
}
@@ -625,7 +629,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
current freezer limit to start nuking if underflown
pivot := rawdb.ReadLastPivotNumber(bc.db)
frozen, _ := bc.db.Ancients()
log.Debug("Pivot and frozen block", "frozen", frozen)
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
@@ -643,8 +646,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha

for {
// If a root threshold was requested but not yet crossed, check
// when we found the root, beyond root become true
log.Debug("Checking Root conditions", "beyondroot", !beyondRoot, "target", root, "current", newHeadBlock.Root())
if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
}
@@ -1337,7 +1338,6 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the
// database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
log.Debug("Writing block with State")
// Calculate the total difficulty of the block
ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil {
@@ -1374,7 +1374,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.

current := block.NumberU64()
// Flush limits are not considered for the first TriesInMemory blocks.
log.Debug("Trie in memory", "current", current, "inmemory", TriesInMemory)
log.Debug("Trie in memory", "current", current, "inMemory", TriesInMemory)
if current <= TriesInMemory {
return nil
}
@@ -1383,14 +1383,14 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
nodes, imgs = bc.triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
log.Debug("Trie dirty size", "size", nodes, "limit", limit, "imgs", imgs)

if nodes > limit || imgs > 4*1024*1024 {
bc.triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
chosen := current - TriesInMemory
flushInterval := time.Duration(bc.flushInterval.Load())
log.Debug("Flush Interval", "proc", bc.gcproc, "interval", flushInterval)
log.Debug("Flush Interval", "proc", bc.gcproc, "interval", flushInterval, "block", chosen, "flushing", (bc.gcproc > flushInterval))
// If we exceeded time allowance, flush an entire trie to disk
if bc.gcproc > flushInterval {
// If the header is missing (canonical chain behind), we're reorging a low
@@ -1405,13 +1405,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", flushInterval, "optimum", float64(chosen-bc.lastWrite)/TriesInMemory)
}
// Flush an entire trie and restart the counters
log.Info("Flushing trie", "number", chosen, "root", header.Root)
bc.triedb.Commit(header.Root, true)
bc.lastWrite = chosen
bc.gcproc = 0
}
}
log.Debug("Flusing Interval not reached")

// Garbage collect anything below our required write retention
for !bc.triegc.Empty() {
root, number := bc.triegc.Pop()
@@ -1438,7 +1437,6 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
log.Debug("Writing Block and set head")
if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err
}
33 changes: 9 additions & 24 deletions core/state/snapshot/snapshot.go
@@ -155,7 +155,7 @@ type Config struct {
NoBuild bool // Indicator that the snapshots generation is disallowed
AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously
EnableSnapRootInterval bool // Enable forcing snap root generation on an interval
SnapRootInterval int // Time in seconds after which to force a snapshot update
SnapRootThreshold int // Time in seconds after which to force a snapshot update
}

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
@@ -173,7 +173,7 @@ type Tree struct {
triedb *trie.Database // In-memory cache to access the trie through
layers map[common.Hash]snapshot // Collection of all known layers
lock sync.RWMutex
baseTime time.Time // Base time when the tree was started/restarted
baseTime time.Time // Base time to calculate snap root interval
// Test hooks
onFlatten func() // Hook invoked when the bottom most diff layers are flattened
}
@@ -204,10 +204,10 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root
baseTime: time.Now(),
}

// Setting the default interval value if it is enabled
// Important to set it to default value if enabled to avoid update every block
if config.EnableSnapRootInterval && (config.SnapRootInterval == 0) {
snap.config.SnapRootInterval = defaultSnapRootInterval
// Raise the threshold to the default when interval-based snapshots are enabled but the configured value is below it
// Enforcing at least the default avoids updating the snap root too aggressively
if config.EnableSnapRootInterval && (config.SnapRootThreshold < defaultSnapRootInterval) {
snap.config.SnapRootThreshold = defaultSnapRootInterval
}

// Attempt to load a previously persisted snapshot and rebuild one if failed
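The clamp above can be restated in isolation as follows; the actual value of defaultSnapRootInterval is not visible in this diff, so the constant below is an assumption mirroring the 600-second CacheConfig default, and the helper name is illustrative.

package snapcfg

// assumedDefaultSnapRootInterval mirrors the 600s CacheConfig default; the real
// defaultSnapRootInterval constant is defined outside the hunks shown here.
const assumedDefaultSnapRootInterval = 600 // seconds

// effectiveSnapRootThreshold reproduces the normalization done in snapshot.New:
// when interval-based forcing is enabled, a threshold below the default is
// raised to it so the snap root is not rewritten on nearly every Cap call.
func effectiveSnapRootThreshold(enabled bool, configured int) int {
	if enabled && configured < assumedDefaultSnapRootInterval {
		return assumedDefaultSnapRootInterval
	}
	return configured
}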
@@ -386,7 +386,6 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) Cap(root common.Hash, layers int) error {
// Retrieve the head snapshot to cap from
log.Debug("Cap snap tree", "root", root, "layers", layers)
snap := t.Snapshot(root)
if snap == nil {
return fmt.Errorf("snapshot [%#x] missing", root)
@@ -470,7 +469,6 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
// survival is only known *after* capping, we need to omit it from the count if
// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
log.Debug("Tree cap internal layer")
// Dive until we run out of layers or reach the persistent database
for i := 0; i < layers-1; i++ {
// If we still have diff layers below, continue down
@@ -486,11 +484,9 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
// the memory limit is not yet exceeded.
switch parent := diff.parent.(type) {
case *diskLayer:
log.Debug("Tree Parent case", "type", "disklayer", "parent", parent)
return nil

case *diffLayer:
log.Debug("Tree Parent case", "type", "difflayer", "parent", parent)
// Hold the write lock until the flattened parent is linked correctly.
// Otherwise, the stale layer may be accessed by external reads in the
// meantime.
@@ -509,17 +505,16 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
diff.parent = flattened
// Check whether the configured interval has elapsed, if interval-based forcing is enabled
timeFromLastSnap := time.Now().Sub(t.baseTime).Seconds()
forceSnapshot := (t.config.EnableSnapRootInterval && (int(timeFromLastSnap) >= t.config.SnapRootInterval))
forceSnapshot := (t.config.EnableSnapRootInterval && (int(timeFromLastSnap) >= t.config.SnapRootThreshold))

log.Debug("Flattened Memory Limit", "limit", aggregatorMemoryLimit, "currentMemory", flattened.memory, "timeThreshold", t.config.SnapRootInterval, "timePassed", timeFromLastSnap, "forceSnapshot", forceSnapshot)
log.Debug("Validating snapRoot update", "limit", aggregatorMemoryLimit, "currentMemory", flattened.memory, "timeThreshold", common.PrettyDuration(t.config.SnapRootThreshold), "elapsed", common.PrettyDuration(timeFromLastSnap), "forceSnapshot", forceSnapshot)
if (flattened.memory < aggregatorMemoryLimit) && !forceSnapshot {
// Accumulator layer is smaller than the limit, so we can abort, unless
// there's a snapshot being generated currently. In that case, the trie
// will move from underneath the generator so we **must** merge all the
// partial data down into the snapshot and restart the generation.
// The early return is also skipped when the configured time interval has elapsed and a snapshot root update is forced (the forceSnapshot check above)
if flattened.parent.(*diskLayer).genAbort == nil {
log.Debug("Returning from cap")
return nil
}
}
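The new trigger condition, extracted into a standalone helper for clarity; the wrapper and its name are illustrative, while the comparison itself mirrors the code above.

package snapcfg

import "time"

// shouldForceSnapRoot mirrors the check added to Tree.cap: the flattened diff
// layer is persisted either when it outgrows the memory limit or when the
// configured number of seconds has elapsed since baseTime, which is reset
// after every disk write.
func shouldForceSnapRoot(enabled bool, thresholdSecs int, baseTime time.Time) bool {
	elapsed := int(time.Since(baseTime).Seconds())
	return enabled && elapsed >= thresholdSecs
}

With a 600-second threshold and a baseTime set eleven minutes ago, the helper reports true and the accumulated diff layers are flushed even though they are still below aggregatorMemoryLimit.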
@@ -529,7 +524,6 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
// If the bottom-most layer is larger than our memory cap, persist to disk
bottom := diff.parent.(*diffLayer)
bottom.lock.RLock()
log.Debug("Going into diffToDisk")
base := diffToDisk(bottom)
bottom.lock.RUnlock()
// Reset baseTime so the next interval is measured from this disk write
@@ -545,7 +539,6 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
// The disk layer persistence should be operated in an atomic way. All updates should
// be discarded if the whole transition if not finished.
func diffToDisk(bottom *diffLayer) *diskLayer {
log.Debug("Checking Diff to disk")
var (
base = bottom.parent.(*diskLayer)
batch = base.diskdb.NewBatch()
@@ -558,7 +551,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
stats = <-abort
}
// Put the deletion in the batch writer, flush all updates in the final step.
log.Debug("Deleting Snapshot")
rawdb.DeleteSnapshotRoot(batch)

// Mark the original base as stale as we're going to create a new wrapper
@@ -570,7 +562,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
base.lock.Unlock()

// Destroy all the destructed accounts from the database
log.Debug("Destroy account from database")
for hash := range bottom.destructSet {
// Skip any account not covered yet by the snapshot
if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
@@ -591,7 +582,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
// huge). It's ok to flush, the root will go missing in case of a
// crash and we'll detect and regenerate the snapshot.
if batch.ValueSize() > ethdb.IdealBatchSize {
log.Debug("Write batch")
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
@@ -601,7 +591,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
it.Release()
}
// Push all updated accounts into the database
log.Debug("Push Update account from database")
for hash, data := range bottom.accountData {
// Skip any account not covered yet by the snapshot
if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
@@ -619,7 +608,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
// root will go missing in case of a crash and we'll detect and regen
// the snapshot.
if batch.ValueSize() > ethdb.IdealBatchSize {
log.Debug("Write account batch")
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
@@ -652,12 +640,9 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
}
}
log.Debug("At write snapshot point", "bottom", bottom.root)
// Update the snapshot block marker and write any remainder data
rawdb.WriteSnapshotRoot(batch, bottom.root)

log.Debug("Snapshot Write complete")

// Write out the generator progress marker and report
journalProgress(batch, base.genMarker, stats)

@@ -666,7 +651,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
if err := batch.Write(); err != nil {
log.Crit("Failed to write leftover snapshot", "err", err)
}
log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
log.Debug("Snapshot Root generation complete", "newSnapRoot", bottom.root)
res := &diskLayer{
root: bottom.root,
cache: base.cache,
10 changes: 4 additions & 6 deletions core/state/statedb.go
@@ -958,7 +958,6 @@ func (s *StateDB) clearJournalAndRefund() {

// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
log.Debug("Committing StateDB")
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1004,7 +1003,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
// and in path-based-scheme some technical challenges are still unsolved.
// Although it won't affect the correctness but please fix it TODO(rjl493456442).
}
log.Debug("State Object Dirty complete")

if len(s.stateObjectsDirty) > 0 {
s.stateObjectsDirty = make(map[common.Address]struct{})
}
@@ -1019,7 +1018,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
start = time.Now()
}
root, set := s.trie.Commit(true)
log.Debug("Trie commit root", "root", root)
log.Debug("Committed Trie root", "root", root)
// Merge the dirty nodes of account trie into global set
if set != nil {
if err := nodes.Merge(set); err != nil {
@@ -1041,12 +1040,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
s.AccountUpdated, s.AccountDeleted = 0, 0
s.StorageUpdated, s.StorageDeleted = 0, 0
}
log.Debug("Checking Snapshot enabled", "valid", (s.snap != nil))

// If snapshotting is enabled, update the snapshot tree with this new version
if s.snap != nil {
start := time.Now()
log.Debug("Snapshot update status", "parent", s.snap.Root(), "root", root, "diskroot", s.snaps.DiskRoot())
// Only update if there's a state transition (skip empty Clique blocks)
log.Debug("Snapshot update status", "parent", s.snap.Root(), "root", root, "skipping", (s.snap.Root() == root))
if parent := s.snap.Root(); parent != root {
if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.snapAccounts, s.snapStorage); err != nil {
log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
@@ -1058,7 +1057,6 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if err := s.snaps.Cap(root, 128); err != nil {
log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
}
log.Debug("Completed Snap cap")
}
if metrics.EnabledExpensive {
s.SnapshotCommits += time.Since(start)
20 changes: 11 additions & 9 deletions eth/backend.go
@@ -182,15 +182,17 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
EnablePreimageRecording: config.EnablePreimageRecording,
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,
TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal),
TrieCleanRejournal: config.TrieCleanCacheRejournal,
TrieCleanNoPrefetch: config.NoPrefetch,
TrieDirtyLimit: config.TrieDirtyCache,
TrieDirtyDisabled: config.NoPruning,
TrieTimeLimit: config.TrieTimeout,
SnapshotLimit: config.SnapshotCache,
Preimages: config.Preimages,
TrieCleanLimit: config.TrieCleanCache,
TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal),
TrieCleanRejournal: config.TrieCleanCacheRejournal,
TrieCleanNoPrefetch: config.NoPrefetch,
TrieDirtyLimit: config.TrieDirtyCache,
TrieDirtyDisabled: config.NoPruning,
TrieTimeLimit: config.TrieTimeout,
SnapshotLimit: config.SnapshotCache,
Preimages: config.Preimages,
EnableSnapRootInterval: config.EnableSnapRootInterval,
SnapRootThreshold: config.SnapRootThreshold,
}
)
// Override the chain config with provided settings.
2 changes: 2 additions & 0 deletions eth/ethconfig/config.go
@@ -167,6 +167,8 @@ type Config struct {
TrieTimeout time.Duration `toml:",omitempty"` // Cumulative Time interval spent on gc, after which to flush trie cache to disk
SnapshotCache int
Preimages bool
EnableSnapRootInterval bool
SnapRootThreshold int

// This is the number of blocks for which logs will be cached in the filter system.
FilterLogCacheSize int
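When embedding the node programmatically, the two new options might be set as below; starting from ethconfig.Defaults is an assumption about the surrounding code (the variable exists upstream), and only the two field names are taken from this commit.

package snapcfg

import (
	// Import path assumed to follow the upstream layout; adjust for this fork.
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

// snapIntervalEthConfig enables interval-forced snapshot roots at the eth
// config level; eth/backend.go (above) copies both fields into core.CacheConfig.
func snapIntervalEthConfig() ethconfig.Config {
	cfg := ethconfig.Defaults
	cfg.EnableSnapRootInterval = true
	cfg.SnapRootThreshold = 900 // seconds; values below the default floor are raised in snapshot.New
	return cfg
}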