diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index b07f07d021b..866ea000006 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -35,12 +35,10 @@ import (
"github.com/erigontech/erigon-lib/common/datadir"
"github.com/erigontech/erigon-lib/config3"
"github.com/erigontech/erigon-lib/kv/mdbx"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/kv/temporal"
"github.com/erigontech/erigon-lib/log/v3"
libstate "github.com/erigontech/erigon-lib/state"
- "github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/vm"
"github.com/erigontech/erigon/eth/tracers/logger"
@@ -144,8 +142,7 @@ func aggregateResultsFromStateTests(
MustOpen()
defer _db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := libstate.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, _db, cr, log.New())
+ agg, err := libstate.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, _db, log.New())
if err != nil {
return nil, err
}
diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index 7429e06aa3e..adddee467e0 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -1131,14 +1131,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error
var batchSize datasize.ByteSize
must(batchSize.UnmarshalText([]byte(batchSizeStr)))
- s := stage(sync, nil, db, stages.CustomTrace)
-
- logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db)
- if pruneTo > 0 {
- pm.History = prune.Distance(s.BlockNumber - pruneTo)
- }
-
syncCfg := ethconfig.Defaults.Sync
syncCfg.ExecWorkerCount = int(workers)
syncCfg.ReconWorkerCount = int(reconWorkers)
@@ -1146,56 +1139,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error
genesis := core.GenesisBlockByChainName(chain)
br, _ := blocksIO(db, logger)
cfg := stagedsync.StageCustomTraceCfg(db, pm, dirs, br, chainConfig, engine, genesis, &syncCfg)
-
- if unwind > 0 {
- if err := db.View(ctx, func(tx kv.Tx) error {
- blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx)
- if err != nil {
- return err
- }
- if !ok {
- return fmt.Errorf("too deep unwind requested: %d, minimum allowed: %d", s.BlockNumber-unwind, blockNumWithCommitment)
- }
- unwind = s.BlockNumber - blockNumWithCommitment
- return nil
- }); err != nil {
- return err
- }
- }
-
- var tx kv.RwTx //nil - means lower-level code (each stage) will manage transactions
- if noCommit {
- var err error
- tx, err = db.BeginRw(ctx)
- if err != nil {
- return err
- }
- defer tx.Rollback()
- }
- txc := wrap.TxContainer{Tx: tx}
-
- if unwind > 0 {
- u := sync.NewUnwindState(stages.CustomTrace, s.BlockNumber-unwind, s.BlockNumber, true, false)
- err := stagedsync.UnwindCustomTrace(u, s, txc, cfg, ctx, logger)
- if err != nil {
- return err
- }
- return nil
- }
-
- if pruneTo > 0 {
- p, err := sync.PruneStageState(stages.CustomTrace, s.BlockNumber, tx, db, true)
- if err != nil {
- return err
- }
- err = stagedsync.PruneCustomTrace(p, tx, cfg, ctx, logger)
- if err != nil {
- return err
- }
- return nil
- }
-
- err := stagedsync.SpawnCustomTrace(s, txc, cfg, ctx, block, logger)
+ err := stagedsync.SpawnCustomTrace(cfg, ctx, logger)
if err != nil {
return err
}
@@ -1334,8 +1278,8 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
var err error
blockReader := freezeblocks.NewBlockReader(_allSnapshotsSingleton, _allBorSnapshotsSingleton)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, blockReader)))
- _aggSingleton, err = libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, cr, logger)
+ txNums := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, blockReader))
+ _aggSingleton, err = libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, logger)
if err != nil {
panic(err)
}
@@ -1386,7 +1330,7 @@ func allSnapshots(ctx context.Context, db kv.RoDB, logger log.Logger) (*freezebl
ac := _aggSingleton.BeginFilesRo()
defer ac.Close()
ac.LogStats(tx, func(endTxNumMinimax uint64) (uint64, error) {
- _, histBlockNumProgress, err := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, blockReader)).FindBlockNum(tx, endTxNumMinimax)
+ _, histBlockNumProgress, err := txNums.FindBlockNum(tx, endTxNumMinimax)
return histBlockNumProgress, err
})
return nil
diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go
index 6a5dd22576a..5f852130a61 100644
--- a/cmd/rpcdaemon/cli/config.go
+++ b/cmd/rpcdaemon/cli/config.go
@@ -416,8 +416,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger
blockReader = freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
txNumsReader := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, blockReader))
- cr := rawdb.NewCanonicalReader(txNumsReader)
- agg, err := libstate.NewAggregator(ctx, cfg.Dirs, config3.HistoryV3AggregationStep, db, cr, logger)
+ agg, err := libstate.NewAggregator(ctx, cfg.Dirs, config3.HistoryV3AggregationStep, db, logger)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("create aggregator: %w", err)
}
diff --git a/cmd/rpcdaemon/test.http b/cmd/rpcdaemon/test.http
index a9e21fdf90e..929031c4ead 100644
--- a/cmd/rpcdaemon/test.http
+++ b/cmd/rpcdaemon/test.http
@@ -213,6 +213,7 @@ Content-Type: application/json
}
###
+# 6250464, 6250465, 6250470, 7250471
POST 127.0.0.1:8545
Content-Type: application/json
@@ -222,8 +223,8 @@ Content-Type: application/json
"method": "eth_getLogs",
"params": [
{
- "fromBlock": "0x14ADC0",
- "toBlock": "0x14AEC0"
+ "fromBlock": "0x6EA227",
+ "toBlock": "0x6EA227"
}
],
"id": 537758
diff --git a/cmd/state/exec3/historical_trace_worker.go b/cmd/state/exec3/historical_trace_worker.go
index 902d9f085b5..1356fcf08ec 100644
--- a/cmd/state/exec3/historical_trace_worker.go
+++ b/cmd/state/exec3/historical_trace_worker.go
@@ -21,6 +21,7 @@ import (
"fmt"
"sync"
"sync/atomic"
+ "time"
"golang.org/x/sync/errgroup"
@@ -84,6 +85,7 @@ func NewHistoricalTraceWorker(
consumer TraceConsumer,
in *state.QueueWithRetry,
out *state.ResultsQueue,
+ background bool,
ctx context.Context,
execArgs *ExecArgs,
@@ -101,7 +103,7 @@ func NewHistoricalTraceWorker(
evm: vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, execArgs.ChainConfig, vm.Config{}),
vmConfig: &vm.Config{},
ibs: state.New(stateReader),
- background: true,
+ background: background,
ctx: ctx,
logger: logger,
taskGasPool: new(core.GasPool),
@@ -209,9 +211,8 @@ func (rw *HistoricalTraceWorker) RunTxTask(txTask *state.TxTask) {
txTask.UsedGas = applyRes.UsedGas
// Update the state with pending changes
ibs.SoftFinalise()
- txTask.Logs = ibs.GetLogs(txTask.TxIndex, txTask.Tx.Hash(), txTask.BlockNum, txTask.BlockHash)
+ txTask.Logs = ibs.GetRawLogs(txTask.TxIndex)
}
- //txTask.Tracer = tracer
}
}
func (rw *HistoricalTraceWorker) ResetTx(chainTx kv.Tx) {
@@ -239,62 +240,81 @@ type ExecArgs struct {
Workers int
}
-func NewHistoricalTraceWorkers(consumer TraceConsumer, cfg *ExecArgs, ctx context.Context, toTxNum uint64, in *state.QueueWithRetry, workerCount int, outputTxNum *atomic.Uint64, logger log.Logger) (g *errgroup.Group, clearFunc func()) {
+func NewHistoricalTraceWorkers(consumer TraceConsumer, cfg *ExecArgs, ctx context.Context, toTxNum uint64, in *state.QueueWithRetry, workerCount int, outputTxNum *atomic.Uint64, logger log.Logger) (g *errgroup.Group, applyWorker *HistoricalTraceWorker, clearFunc func()) {
workers := make([]*HistoricalTraceWorker, workerCount)
- resultChSize := workerCount * 8
- rws := state.NewResultsQueue(resultChSize, workerCount) // workerCount * 4
- // we all errors in background workers (except ctx.Cancel), because applyLoop will detect this error anyway.
- // and in applyLoop all errors are critical
- ctx, cancel := context.WithCancel(ctx)
- g, ctx = errgroup.WithContext(ctx)
- for i := 0; i < workerCount; i++ {
- workers[i] = NewHistoricalTraceWorker(consumer, in, rws, ctx, cfg, logger)
- }
- for i := 0; i < workerCount; i++ {
- i := i
- g.Go(func() (err error) {
- defer func() {
- if rec := recover(); rec != nil {
- err = fmt.Errorf("%s, %s", rec, dbg.Stack())
- }
- }()
+ // can afford big limits - because historical execution doesn't need conflicts-resolution
+ resultChannelLimit := workerCount * 128
+ heapLimit := workerCount * 128
+ rws := state.NewResultsQueue(resultChannelLimit, heapLimit) // workerCount * 4
- return workers[i].Run()
- })
- }
+ reducerGroup := &errgroup.Group{}
//Reducer
- g.Go(func() (err error) {
+ reducerGroup.Go(func() (err error) {
defer func() {
if rec := recover(); rec != nil {
err = fmt.Errorf("%s, %s", rec, dbg.Stack())
+ log.Warn("[dbg] 'reduce worker' paniced", "err", err)
}
}()
+ logEvery := time.NewTicker(1 * time.Second)
+ defer logEvery.Stop()
+
tx, err := cfg.ChainDB.BeginRo(ctx)
if err != nil {
- return err
+ panic(err)
+ //return err
}
defer tx.Rollback()
+ ttx := tx.(kv.TemporalTx)
- applyWorker := NewHistoricalTraceWorker(consumer, in, rws, ctx, cfg, logger)
- applyWorker.background = false
- applyWorker.ResetTx(tx)
for outputTxNum.Load() <= toTxNum {
- rws.DrainNonBlocking()
+ if err := rws.DrainNonBlocking(ctx); err != nil {
+ return err
+ }
- processedTxNum, _, err := processResultQueueHistorical(consumer, rws, outputTxNum.Load(), applyWorker, true)
+ processedTxNum, _, err := processResultQueueHistorical(consumer, rws, outputTxNum.Load(), ttx, true)
if err != nil {
return fmt.Errorf("processResultQueueHistorical: %w", err)
}
if processedTxNum > 0 {
outputTxNum.Store(processedTxNum)
}
+
+ //select {
+ //case <-logEvery.C:
+ // log.Info("[dbg] rws", "rws_ch_len", rws.ResultChLen(), "rws_q_len", rws.Len())
+ //default:
+ //}
+
}
return nil
})
+ // we all errors in background workers (except ctx.Cancel), because applyLoop will detect this error anyway.
+ // and in applyLoop all errors are critical
+ ctx, cancel := context.WithCancel(ctx)
+ g, ctx = errgroup.WithContext(ctx)
+ for i := 0; i < workerCount; i++ {
+ workers[i] = NewHistoricalTraceWorker(consumer, in, rws, true, ctx, cfg, logger)
+ }
+ applyWorker = NewHistoricalTraceWorker(consumer, in, rws, false, ctx, cfg, logger)
+ for i := 0; i < workerCount; i++ {
+ i := i
+ g.Go(func() (err error) {
+ defer func() {
+ if rec := recover(); rec != nil {
+ err = fmt.Errorf("%s, %s", rec, dbg.Stack())
+ log.Warn("[dbg] 'worker' paniced", "i", i, "err", err)
+ }
+ }()
+
+ return workers[i].Run()
+ })
+ }
+
var clearDone bool
clearFunc = func() {
if clearDone {
@@ -303,51 +323,34 @@ func NewHistoricalTraceWorkers(consumer TraceConsumer, cfg *ExecArgs, ctx contex
clearDone = true
cancel()
g.Wait()
+ rws.Close()
+ reducerGroup.Wait()
for _, w := range workers {
w.ResetTx(nil)
}
}
- return g, clearFunc
+ return g, applyWorker, clearFunc
}
-func processResultQueueHistorical(consumer TraceConsumer, rws *state.ResultsQueue, outputTxNumIn uint64, applyWorker *HistoricalTraceWorker, forceStopAtBlockEnd bool) (outputTxNum uint64, stopedAtBlockEnd bool, err error) {
+func processResultQueueHistorical(consumer TraceConsumer, rws *state.ResultsQueue, outputTxNumIn uint64, tx kv.TemporalTx, forceStopAtBlockEnd bool) (outputTxNum uint64, stopedAtBlockEnd bool, err error) {
rwsIt := rws.Iter()
defer rwsIt.Close()
- var receipts types.Receipts
- var usedGas, blobGasUsed uint64
-
var i int
outputTxNum = outputTxNumIn
for rwsIt.HasNext(outputTxNum) {
txTask := rwsIt.PopNext()
- if txTask.Final {
- txTask.Reset()
- //re-exec right here, because gnosis expecting TxTask.BlockReceipts field - receipts of all
- txTask.BlockReceipts = receipts
- applyWorker.RunTxTask(txTask)
- }
if txTask.Error != nil {
return outputTxNum, false, err
}
- if err := consumer.Reduce(txTask, applyWorker.chainTx); err != nil {
- return outputTxNum, false, err
- }
- if !txTask.Final && txTask.TxIndex >= 0 {
- // if the transaction created a contract, store the creation address in the receipt.
- //if msg.To() == nil {
- // receipt.ContractAddress = crypto.CreateAddress(evm.Origin, tx.GetNonce())
- //}
- // Set the receipt logs and create a bloom for filtering
- //receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
- receipts = append(receipts, txTask.CreateReceipt(usedGas))
+ if txTask.TxIndex >= 0 && !txTask.Final {
+ txTask.CreateReceipt(tx)
}
- usedGas += txTask.UsedGas
- if txTask.Tx != nil {
- blobGasUsed += txTask.Tx.GetBlobGas()
+ if err := consumer.Reduce(txTask, tx); err != nil {
+ return outputTxNum, false, err
}
i++
@@ -361,19 +364,17 @@ func processResultQueueHistorical(consumer TraceConsumer, rws *state.ResultsQueu
}
func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx context.Context, tx kv.TemporalTx, cfg *ExecArgs, logger log.Logger) (err error) {
- log.Info("[CustomTraceMapReduce] start", "fromBlock", fromBlock, "toBlock", toBlock, "workers", cfg.Workers)
+ log.Info("[Receipt] batch start", "fromBlock", fromBlock, "toBlock", toBlock, "workers", cfg.Workers)
br := cfg.BlockReader
chainConfig := cfg.ChainConfig
getHeaderFunc := func(hash common.Hash, number uint64) (h *types.Header) {
- var err error
- if err = cfg.ChainDB.View(ctx, func(tx kv.Tx) error {
- h, err = cfg.BlockReader.Header(ctx, tx, hash, number)
- if err != nil {
- return err
- }
- return nil
- }); err != nil {
- panic(err)
+ if tx != nil {
+ h, _ = cfg.BlockReader.Header(ctx, tx, hash, number)
+ } else {
+ cfg.ChainDB.View(ctx, func(tx kv.Tx) error {
+ h, _ = cfg.BlockReader.Header(ctx, tx, hash, number)
+ return nil
+ })
}
return h
}
@@ -394,15 +395,16 @@ func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx
in := state.NewQueueWithRetry(100_000)
defer in.Close()
- var WorkerCount = estimate.AlmostAllCPUs() * 2
+ var WorkerCount = estimate.AlmostAllCPUs()
if cfg.Workers > 0 {
WorkerCount = cfg.Workers
}
outTxNum := &atomic.Uint64{}
outTxNum.Store(fromTxNum)
- workers, cleanup := NewHistoricalTraceWorkers(consumer, cfg, ctx, toTxNum, in, WorkerCount, outTxNum, logger)
+ workers, applyWorker, cleanup := NewHistoricalTraceWorkers(consumer, cfg, ctx, toTxNum, in, WorkerCount, outTxNum, logger)
defer workers.Wait()
defer cleanup()
+ applyWorker.ResetTx(tx)
workersExited := &atomic.Bool{}
go func() {
@@ -414,9 +416,11 @@ func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx
if err != nil {
return err
}
+ logEvery := time.NewTicker(1 * time.Second)
+ defer logEvery.Stop()
for blockNum := fromBlock; blockNum <= toBlock; blockNum++ {
var b *types.Block
- b, err = blockWithSenders(nil, tx, br, blockNum)
+ b, err = blockWithSenders(ctx, nil, tx, br, blockNum)
if err != nil {
return err
}
@@ -438,6 +442,7 @@ func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx
}
blockContext := core.NewEVMBlockContext(header, getHashFn, cfg.Engine, nil /* author */, chainConfig)
+ blockReceipts := make(types.Receipts, len(txs))
rules := chainConfig.Rules(blockNum, b.Time())
for txIndex := -1; txIndex <= len(txs); txIndex++ {
// Do not oversend, wait for the result heap to go under certain size
@@ -460,6 +465,7 @@ func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx
// use history reader instead of state reader to catch up to the tx where we left off
HistoryExecution: true,
+ BlockReceipts: blockReceipts,
}
if txIndex >= 0 && txIndex < len(txs) {
txTask.Tx = txs[txIndex]
@@ -482,8 +488,25 @@ func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx
if workersExited.Load() {
return workers.Wait()
}
- in.Add(ctx, txTask)
+ if WorkerCount == 1 {
+ applyWorker.RunTxTask(txTask)
+ if txTask.TxIndex >= 0 && !txTask.Final {
+ txTask.CreateReceipt(tx)
+ }
+ if err := consumer.Reduce(txTask, tx); err != nil {
+ return err
+ }
+ } else {
+ in.Add(ctx, txTask)
+ }
inputTxNum++
+
+ //select {
+ //case <-logEvery.C:
+ // log.Info("[dbg] in", "in", in.Len())
+ //default:
+ //}
+
}
}
in.Close() //no more work. no retries in map-reduce. means can close here.
@@ -495,7 +518,12 @@ func CustomTraceMapReduce(fromBlock, toBlock uint64, consumer TraceConsumer, ctx
return nil
}
-func blockWithSenders(db kv.RoDB, tx kv.Tx, blockReader services.BlockReader, blockNum uint64) (b *types.Block, err error) {
+func blockWithSenders(ctx context.Context, db kv.RoDB, tx kv.Tx, blockReader services.BlockReader, blockNum uint64) (b *types.Block, err error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
if tx == nil {
tx, err = db.BeginRo(context.Background())
if err != nil {
diff --git a/cmd/state/exec3/trace_worker.go b/cmd/state/exec3/trace_worker.go
index 3f7f526848b..7c9ceeb8e79 100644
--- a/cmd/state/exec3/trace_worker.go
+++ b/cmd/state/exec3/trace_worker.go
@@ -103,7 +103,7 @@ func (e *TraceWorker) GetLogs(txIndex int, txnHash common.Hash, blockNumber uint
return e.ibs.GetLogs(txIndex, txnHash, blockNumber, blockHash)
}
-func (e *TraceWorker) ExecTxn(txNum uint64, txIndex int, txn types.Transaction) (*evmtypes.ExecutionResult, error) {
+func (e *TraceWorker) ExecTxn(txNum uint64, txIndex int, txn types.Transaction, gasBailout bool) (*evmtypes.ExecutionResult, error) {
e.stateReader.SetTxNum(txNum)
e.ibs.Reset()
e.ibs.SetTxContext(txIndex)
@@ -128,7 +128,7 @@ func (e *TraceWorker) ExecTxn(txNum uint64, txIndex int, txn types.Transaction)
e.evm.ResetBetweenBlocks(*e.blockCtx, txContext, e.ibs, *e.vmConfig, e.rules)
gp := new(core.GasPool).AddGas(txn.GetGas()).AddBlobGas(txn.GetBlobGas())
- res, err := core.ApplyMessage(e.evm, msg, gp, true /* refunds */, false /* gasBailout */)
+ res, err := core.ApplyMessage(e.evm, msg, gp, true /* refunds */, gasBailout /* gasBailout */)
if err != nil {
return nil, fmt.Errorf("%w: blockNum=%d, txNum=%d, %s", err, e.blockNum, txNum, e.ibs.Error())
}
@@ -138,5 +138,6 @@ func (e *TraceWorker) ExecTxn(txNum uint64, txIndex int, txn types.Transaction)
e.tracer.SetTransaction(txn)
}
}
+
return res, nil
}
diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go
index 265313a103f..976ed9895dd 100644
--- a/consensus/aura/aura.go
+++ b/consensus/aura/aura.go
@@ -228,9 +228,9 @@ func epochTransitionFor(chain consensus.ChainHeaderReader, e *NonTransactionalEp
// AuRa
// nolint
type AuRa struct {
- e *NonTransactionalEpochReader
- exitCh chan struct{}
- lock sync.RWMutex // Protects the signer fields
+ e *NonTransactionalEpochReader
+ exitCh chan struct{}
+ signerMutex sync.RWMutex // Protects the signer fields
step PermissionedStep
// History of step hashes recently received from peers.
@@ -858,8 +858,8 @@ func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, s
// Authorize injects a private key into the consensus engine to mint new blocks
// with.
func (c *AuRa) Authorize(signer libcommon.Address, signFn clique.SignerFn) {
- c.lock.Lock()
- defer c.lock.Unlock()
+ c.signerMutex.Lock()
+ defer c.signerMutex.Unlock()
//c.signer = signer
//c.signFn = signFn
diff --git a/core/genesis_write.go b/core/genesis_write.go
index c285c849e92..b28acaace86 100644
--- a/core/genesis_write.go
+++ b/core/genesis_write.go
@@ -509,8 +509,7 @@ func GenesisToBlock(g *types.Genesis, dirs datadir.Dirs, logger log.Logger) (*ty
genesisTmpDB := mdbx.NewMDBX(logger).InMem(dirs.DataDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen()
defer genesisTmpDB.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := state2.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, genesisTmpDB, cr, logger)
+ agg, err := state2.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, genesisTmpDB, logger)
if err != nil {
return err
}
diff --git a/core/rawdb/accessors_canonical_reader.go b/core/rawdb/accessors_canonical_reader.go
deleted file mode 100644
index 0f9d4551688..00000000000
--- a/core/rawdb/accessors_canonical_reader.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see .
-
-package rawdb
-
-import (
- "encoding/binary"
- "fmt"
-
- common2 "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/common/hexutility"
- "github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/kv/order"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
- "github.com/erigontech/erigon-lib/kv/stream"
-)
-
-type CanonicalTxnIds struct {
- canonicalMarkers stream.KV
- tx kv.Tx
-
- // input params
- fromTxNum, toTxNum int
- orderAscend order.By
- limit int
-
- // private fields
- currentTxNum int
- hasNext bool
- endOfCurrentBlock uint64
-
- txNumsReader rawdbv3.TxNumsReader
-}
-type CanonicalReader struct {
- txNumsReader rawdbv3.TxNumsReader
-}
-
-func NewCanonicalReader(txNumsReader rawdbv3.TxNumsReader) *CanonicalReader {
- return &CanonicalReader{txNumsReader: txNumsReader}
-}
-func (c *CanonicalReader) TxnIdsOfCanonicalBlocks(tx kv.Tx, fromTxNum, toTxNum int, asc order.By, limit int) (stream.U64, error) {
- return TxnIdsOfCanonicalBlocks(tx, c.txNumsReader, fromTxNum, toTxNum, asc, limit)
-}
-func (c *CanonicalReader) TxNum2ID(tx kv.Tx, blockNum uint64, blockHash common2.Hash, txNum uint64) (kv.TxnId, error) {
- if blockNum == 0 {
- return kv.TxnId(txNum), nil
- }
- b, err := readBodyForStorage(tx, blockHash, blockNum)
- if err != nil {
- return 0, err
- }
- if b == nil { // freezed and pruned
- _min, err := c.txNumsReader.Min(tx, blockNum)
- if err != nil {
- return 0, err
- }
- _max, err := c.txNumsReader.Max(tx, blockNum)
- if err != nil {
- return 0, err
- }
- if txNum < _min || txNum > _max {
- return 0, fmt.Errorf("TxNum2ID: txNum=%d out of range: %d, %d", txNum, _min, _max)
- }
- return kv.TxnId(txNum), nil
- }
- return kv.TxnId(b.BaseTxnID), nil
-}
-
-func (c *CanonicalReader) BaseTxnID(tx kv.Tx, blockNum uint64, blockHash common2.Hash) (kv.TxnId, error) {
- if blockNum == 0 {
- return kv.TxnId(0), nil
- }
-
- //TODO: what if body is in db and files?
- b, err := readBodyForStorage(tx, blockHash, blockNum)
- if err != nil {
- return 0, err
- }
- if b == nil { // freezed and pruned
- _min, err := c.txNumsReader.Min(tx, blockNum)
- if err != nil {
- return 0, err
- }
- return kv.TxnId(_min), nil
- }
- return kv.TxnId(b.BaseTxnID), nil
-}
-
-func (c *CanonicalReader) LastFrozenTxNum(tx kv.Tx) (kv.TxnId, error) {
- n, ok, err := ReadFirstNonGenesisHeaderNumber(tx)
- if err != nil {
- return 0, err
- }
- if !ok {
- //seq, err := tx.ReadSequence(kv.EthTx)
- //seq-1
- _, _lastTxNumInFiles, err := c.txNumsReader.Last(tx)
- return kv.TxnId(_lastTxNumInFiles), err
-
- }
- _max, err := c.txNumsReader.Max(tx, n)
- if err != nil {
- return 0, err
- }
- return kv.TxnId(_max), nil
-}
-
-// TxnIdsOfCanonicalBlocks - returns non-canonical txnIds of canonical block range
-// [fromTxNum, toTxNum)
-// To get all canonical blocks, use fromTxNum=0, toTxNum=-1
-// For reverse iteration use order.Desc and fromTxNum=-1, toTxNum=-1
-func TxnIdsOfCanonicalBlocks(tx kv.Tx, txNumsReader rawdbv3.TxNumsReader, fromTxNum, toTxNum int, asc order.By, limit int) (stream.U64, error) {
- if asc && fromTxNum > 0 && toTxNum > 0 && fromTxNum >= toTxNum {
- return nil, fmt.Errorf("fromTxNum >= toTxNum: %d, %d", fromTxNum, toTxNum)
- }
- if !asc && fromTxNum > 0 && toTxNum > 0 && fromTxNum <= toTxNum {
- return nil, fmt.Errorf("fromTxNum <= toTxNum: %d, %d", fromTxNum, toTxNum)
- }
-
- it := &CanonicalTxnIds{tx: tx, txNumsReader: txNumsReader, fromTxNum: fromTxNum, toTxNum: toTxNum, orderAscend: asc, limit: limit, currentTxNum: -1}
- if err := it.init(); err != nil {
- it.Close() //it's responsibility of constructor (our) to close resource on error
- return nil, err
- }
- if !it.HasNext() {
- it.Close()
- return stream.EmptyU64, nil
- }
- return it, nil
-}
-
-func (s *CanonicalTxnIds) init() (err error) {
- tx := s.tx
- var from, to []byte
- if s.fromTxNum >= 0 {
- ok, blockFrom, err := s.txNumsReader.FindBlockNum(tx, uint64(s.fromTxNum))
- if err != nil {
- return err
- }
- if ok {
- from = hexutility.EncodeTs(blockFrom)
- }
- }
-
- if s.toTxNum >= 0 {
- ok, blockTo, err := s.txNumsReader.FindBlockNum(tx, uint64(s.toTxNum))
- if err != nil {
- return err
- }
- if ok {
- to = hexutility.EncodeTs(blockTo + 1)
- }
- }
-
- if s.orderAscend {
- s.canonicalMarkers, err = tx.RangeAscend(kv.HeaderCanonical, from, to, -1)
- if err != nil {
- return err
- }
- } else {
- s.canonicalMarkers, err = tx.RangeDescend(kv.HeaderCanonical, from, to, -1)
- if err != nil {
- return err
- }
- }
- if err := s.advance(); err != nil {
- return err
- }
- return nil
-}
-
-func (s *CanonicalTxnIds) advance() (err error) {
- var endOfBlock bool
- if s.currentTxNum < 0 {
- endOfBlock = true
- } else {
- if s.orderAscend {
- s.currentTxNum++
- endOfBlock = s.currentTxNum >= int(s.endOfCurrentBlock)
- } else {
- s.currentTxNum--
- endOfBlock = s.currentTxNum <= int(s.endOfCurrentBlock)
- }
- }
-
- if !endOfBlock || s.currentTxNum == int(s.endOfCurrentBlock) {
- return nil
- }
-
- if !s.canonicalMarkers.HasNext() {
- s.currentTxNum = -1
- return nil
- }
-
- k, v, err := s.canonicalMarkers.Next()
- if err != nil {
- return err
- }
- blockNum := binary.BigEndian.Uint64(k)
- blockHash := common2.BytesToHash(v)
- body, err := readBodyForStorage(s.tx, blockHash, blockNum)
- if err != nil {
- return err
- }
- if body == nil {
- return fmt.Errorf("body not found: %d, %x", blockNum, blockHash)
- }
-
- if s.orderAscend {
- s.currentTxNum = int(body.BaseTxnID)
- s.endOfCurrentBlock = body.BaseTxnID.LastSystemTx(body.TxCount)
- } else {
- s.currentTxNum = int(body.BaseTxnID.LastSystemTx(body.TxCount))
- s.endOfCurrentBlock = body.BaseTxnID.U64()
- }
- return nil
-}
-
-func (s *CanonicalTxnIds) HasNext() bool {
- if s.limit == 0 { // limit reached
- return false
- }
- if s.currentTxNum < 0 { // EndOfTable
- return false
- }
- if s.toTxNum < 0 { //no boundaries
- return true
- }
-
- //Asc: [from, to) AND from < to
- //Desc: [from, to) AND from > to
- return (bool(s.orderAscend) && s.currentTxNum < s.toTxNum) ||
- (!bool(s.orderAscend) && s.currentTxNum > s.toTxNum)
-}
-
-func (s *CanonicalTxnIds) Next() (uint64, error) {
- s.limit--
- v := uint64(s.currentTxNum)
- if err := s.advance(); err != nil {
- return 0, err
- }
- return v, nil
-}
-
-func (s *CanonicalTxnIds) Close() {
- if s == nil {
- return
- }
- if s.canonicalMarkers != nil {
- s.canonicalMarkers.Close()
- s.canonicalMarkers = nil
- }
-}
diff --git a/core/rawdb/accessors_canonical_reader_test.go b/core/rawdb/accessors_canonical_reader_test.go
deleted file mode 100644
index 48dc9f56bbc..00000000000
--- a/core/rawdb/accessors_canonical_reader_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// (original work)
-// Copyright 2024 The Erigon Authors
-// (modifications)
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see .
-
-package rawdb_test
-
-import (
- "bytes"
- "sort"
- "testing"
-
- libcommon "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/common/u256"
- "github.com/erigontech/erigon-lib/kv/order"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
- "github.com/erigontech/erigon-lib/kv/stream"
- "github.com/erigontech/erigon/core/rawdb"
- "github.com/erigontech/erigon/core/types"
- "github.com/erigontech/erigon/turbo/stages/mock"
- "github.com/stretchr/testify/require"
-)
-
-// Tests block header storage and retrieval operations.
-func TestCanonicalIter(t *testing.T) {
- t.Parallel()
- m, require := mock.Mock(t), require.New(t)
-
- txn := &types.DynamicFeeTransaction{Tip: u256.N1, FeeCap: u256.N1, ChainID: u256.N1, CommonTx: types.CommonTx{Value: u256.N1, Gas: 1, Nonce: 1}}
- buf := bytes.NewBuffer(nil)
- err := txn.MarshalBinary(buf)
- require.NoError(err)
- rlpTxn := buf.Bytes()
- b := &types.RawBody{Transactions: [][]byte{rlpTxn, rlpTxn, rlpTxn, rlpTxn}}
-
- tx, err := m.DB.BeginRw(m.Ctx)
- require.NoError(err)
- defer tx.Rollback()
-
- // write 2 forks - 3 blocks in each fork
- _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{10}, 0, b)
- require.NoError(err)
- _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{20}, 0, b)
- require.NoError(err)
-
- _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{11}, 1, b)
- require.NoError(err)
- _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{21}, 1, b)
- require.NoError(err)
-
- _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{12}, 2, b)
- require.NoError(err)
- _, err = rawdb.WriteRawBodyIfNotExists(tx, libcommon.Hash{22}, 2, b)
- require.NoError(err)
-
- it, err := rawdb.TxnIdsOfCanonicalBlocks(tx, rawdbv3.TxNums, 0, -1, order.Asc, -1)
- require.NoError(err)
- require.Equal(true, it.HasNext())
-
- // tx already contains genesis block of 2 transactions
- t.Logf("genesis: %v", stream.ToArrU64Must(it))
-
- //mark 3 blocks as canonical
- require.NoError(rawdb.WriteCanonicalHash(tx, libcommon.Hash{10}, 0))
- require.NoError(rawdb.WriteCanonicalHash(tx, libcommon.Hash{11}, 1))
- require.NoError(rawdb.WriteCanonicalHash(tx, libcommon.Hash{12}, 2))
-
- txNumsOfBlock := func(bn uint64) (res []uint64) {
- txns := uint64(types.TxCountToTxAmount(len(b.Transactions)))
- s := uint64(1) // genesis block ends at
- if bn > 0 {
- s += bn * txns
- }
- s++ // system
- for i := uint64(0); i < txns; i++ {
- res = append(res, s+i)
- }
- return res
- }
-
- it, err = rawdb.TxnIdsOfCanonicalBlocks(tx, rawdbv3.TxNums, 0, 2+len(b.Transactions)+2, order.Asc, -1)
- require.NoError(err)
- require.Equal(true, it.HasNext())
- exp := txNumsOfBlock(0)
- t.Logf("expected full block 0: %v", exp)
- require.Equal(exp, stream.ToArrU64Must(it))
-
- it, err = rawdb.TxnIdsOfCanonicalBlocks(tx, rawdbv3.TxNums, 0, -1, order.Asc, -1)
- require.NoError(err)
- require.Equal(true, it.HasNext())
- exp = append(append(txNumsOfBlock(0), txNumsOfBlock(2)...), txNumsOfBlock(4)...)
- t.Logf("expected %v", exp)
- require.Equal(exp, stream.ToArrU64Must(it))
-
- rit, err := rawdb.TxnIdsOfCanonicalBlocks(tx, rawdbv3.TxNums, -1, -1, order.Desc, -1)
- require.NoError(err)
- require.Equal(true, rit.HasNext())
- sort.Slice(exp, func(i, j int) bool { return exp[i] > exp[j] })
- t.Logf("reverse expected %v", exp)
- require.Equal(exp, stream.ToArrU64Must(rit))
-}
diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go
index 23af54ff15e..42260972f39 100644
--- a/core/rawdb/rawdbreset/reset_stages.go
+++ b/core/rawdb/rawdbreset/reset_stages.go
@@ -187,8 +187,10 @@ func ResetTxLookup(tx kv.RwTx) error {
}
var Tables = map[stages.SyncStage][]string{
- stages.CustomTrace: {},
- stages.Finish: {},
+ stages.CustomTrace: {
+ kv.TblReceiptVals, kv.TblReceiptHistoryKeys, kv.TblReceiptHistoryVals, kv.TblReceiptIdx,
+ },
+ stages.Finish: {},
}
var stateBuckets = []string{
kv.Epoch, kv.PendingEpoch, kv.BorReceipts,
@@ -207,8 +209,9 @@ var stateHistoryV3Buckets = []string{
kv.TblTracesToKeys, kv.TblTracesToIdx,
}
var stateV3Buckets = []string{
- kv.TblAccountVals, kv.TblStorageVals, kv.TblCodeVals, kv.TblCommitmentVals,
+ kv.TblAccountVals, kv.TblStorageVals, kv.TblCodeVals, kv.TblCommitmentVals, kv.TblReceiptVals,
kv.TblCommitmentHistoryKeys, kv.TblCommitmentHistoryVals, kv.TblCommitmentIdx,
+ kv.TblReceiptHistoryKeys, kv.TblReceiptHistoryVals, kv.TblReceiptIdx,
kv.TblPruningProgress,
kv.ChangeSets3,
}
diff --git a/core/rawdb/rawtemporaldb/accessors_receipt.go b/core/rawdb/rawtemporaldb/accessors_receipt.go
new file mode 100644
index 00000000000..611a00f37cd
--- /dev/null
+++ b/core/rawdb/rawtemporaldb/accessors_receipt.go
@@ -0,0 +1,111 @@
+package rawtemporaldb
+
+import (
+ "encoding/binary"
+
+ "github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon/core/types"
+)
+
+var (
+ CumulativeGasUsedInBlockKey = []byte{0x0}
+ CumulativeBlobGasUsedInBlockKey = []byte{0x1}
+ FirstLogIndexKey = []byte{0x2}
+)
+
+// `ReadReceipt` does fill `rawLogs` calulated fields. but we don't need it anymore.
+func ReceiptAsOfWithApply(tx kv.TemporalTx, txNum uint64, rawLogs types.Logs, txnIdx int, blockHash common.Hash, blockNum uint64, txn types.Transaction) (*types.Receipt, error) {
+ cumulativeGasUsedBeforeTxn, cumulativeBlobGasUsed, firstLogIndexWithinBlock, err := ReceiptAsOf(tx, txNum+1)
+ if err != nil {
+ return nil, err
+ }
+ //if txnIdx == 0 {
+ //logIndex always 0
+ //}
+
+ r := &types.Receipt{
+ Logs: rawLogs,
+ CumulativeGasUsed: cumulativeGasUsedBeforeTxn,
+ FirstLogIndexWithinBlock: firstLogIndexWithinBlock,
+ }
+ _ = cumulativeBlobGasUsed
+
+ if err := r.DeriveFieldsV3ForSingleReceipt(txnIdx, blockHash, blockNum, txn, cumulativeGasUsedBeforeTxn); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+func ReceiptAsOf(tx kv.TemporalTx, txNum uint64) (cumGasUsed uint64, cumBlobGasused uint64, firstLogIndexWithinBlock uint32, err error) {
+ var v []byte
+ var ok bool
+
+ v, ok, err = tx.DomainGetAsOf(kv.ReceiptDomain, CumulativeGasUsedInBlockKey, nil, txNum)
+ if err != nil {
+ return
+ }
+ if ok && v != nil {
+ cumGasUsed = uvarint(v)
+ }
+
+ v, ok, err = tx.DomainGetAsOf(kv.ReceiptDomain, CumulativeBlobGasUsedInBlockKey, nil, txNum)
+ if err != nil {
+ return
+ }
+ if ok && v != nil {
+ cumBlobGasused = uvarint(v)
+ }
+
+ //if txnIdx == 0 {
+ //logIndex always 0
+ //}
+
+ v, ok, err = tx.DomainGetAsOf(kv.ReceiptDomain, FirstLogIndexKey, nil, txNum)
+ if err != nil {
+ return
+ }
+ if ok && v != nil {
+ firstLogIndexWithinBlock = uint32(uvarint(v))
+ }
+ return
+}
+
+func AppendReceipt(ttx kv.TemporalPutDel, receipt *types.Receipt, cumBlobGasUsed uint64) error {
+ var cumGasUsedInBlock uint64
+ var firstLogIndexWithinBlock uint32
+ if receipt != nil {
+ cumGasUsedInBlock = receipt.CumulativeGasUsed
+ firstLogIndexWithinBlock = receipt.FirstLogIndexWithinBlock
+ }
+
+ {
+ var buf [binary.MaxVarintLen64]byte
+ i := binary.PutUvarint(buf[:], cumGasUsedInBlock)
+ if err := ttx.DomainPut(kv.ReceiptDomain, CumulativeGasUsedInBlockKey, nil, buf[:i], nil, 0); err != nil {
+ return err
+ }
+ }
+
+ {
+ var buf [binary.MaxVarintLen64]byte
+ i := binary.PutUvarint(buf[:], cumBlobGasUsed)
+ if err := ttx.DomainPut(kv.ReceiptDomain, CumulativeBlobGasUsedInBlockKey, nil, buf[:i], nil, 0); err != nil {
+ return err
+ }
+ }
+
+ {
+ var buf [binary.MaxVarintLen64]byte
+ i := binary.PutUvarint(buf[:], uint64(firstLogIndexWithinBlock))
+ if err := ttx.DomainPut(kv.ReceiptDomain, FirstLogIndexKey, nil, buf[:i], nil, 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func uvarint(in []byte) (res uint64) {
+ res, _ = binary.Uvarint(in)
+ return res
+}
diff --git a/core/rawdb/rawtemporaldb/accessors_receipt_test.go b/core/rawdb/rawtemporaldb/accessors_receipt_test.go
new file mode 100644
index 00000000000..d96d99e257e
--- /dev/null
+++ b/core/rawdb/rawtemporaldb/accessors_receipt_test.go
@@ -0,0 +1,111 @@
+package rawtemporaldb
+
+import (
+ "context"
+ "testing"
+
+ "github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/kv/temporal/temporaltest"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/state"
+ "github.com/erigontech/erigon/core/types"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAppendReceipt(t *testing.T) {
+ dirs, require := datadir.New(t.TempDir()), require.New(t)
+ db, _ := temporaltest.NewTestDB(t, dirs)
+ tx, err := db.BeginRw(context.Background())
+ require.NoError(err)
+ defer tx.Rollback()
+
+ doms, err := state.NewSharedDomains(tx, log.New())
+ require.NoError(err)
+ defer doms.Close()
+ doms.SetTx(tx)
+
+ doms.SetTxNum(0) // block1
+ err = AppendReceipt(doms, &types.Receipt{CumulativeGasUsed: 10, FirstLogIndexWithinBlock: 0}, 0)
+ require.NoError(err)
+
+ doms.SetTxNum(1) // block1
+ err = AppendReceipt(doms, &types.Receipt{CumulativeGasUsed: 11, FirstLogIndexWithinBlock: 1}, 0)
+ require.NoError(err)
+
+ doms.SetTxNum(2) // block1
+
+ doms.SetTxNum(3) // block2
+ err = AppendReceipt(doms, &types.Receipt{CumulativeGasUsed: 12, FirstLogIndexWithinBlock: 0}, 0)
+ require.NoError(err)
+
+ doms.SetTxNum(4) // block2
+ err = AppendReceipt(doms, &types.Receipt{CumulativeGasUsed: 14, FirstLogIndexWithinBlock: 4}, 0)
+ require.NoError(err)
+
+ doms.SetTxNum(5) // block2
+
+ err = doms.Flush(context.Background(), tx)
+ require.NoError(err)
+
+ ttx := tx.(kv.TemporalTx)
+ v, ok, err := ttx.HistorySeek(kv.ReceiptHistory, FirstLogIndexKey, 0)
+ require.NoError(err)
+ require.True(ok)
+ require.Empty(v)
+
+ v, ok, err = ttx.HistorySeek(kv.ReceiptHistory, FirstLogIndexKey, 1)
+ require.NoError(err)
+ require.True(ok)
+ require.Equal(uint64(0), uvarint(v))
+
+ v, ok, err = ttx.HistorySeek(kv.ReceiptHistory, FirstLogIndexKey, 2)
+ require.NoError(err)
+ require.True(ok)
+ require.Equal(uint64(1), uvarint(v))
+
+ v, ok, err = ttx.HistorySeek(kv.ReceiptHistory, FirstLogIndexKey, 3)
+ require.NoError(err)
+ require.True(ok)
+ require.Equal(uint64(1), uvarint(v))
+
+ v, ok, err = ttx.HistorySeek(kv.ReceiptHistory, FirstLogIndexKey, 4)
+ require.NoError(err)
+ require.True(ok)
+ require.Equal(uint64(0), uvarint(v))
+
+ //block1
+ cumGasUsed, _, firstLogIndex, err := ReceiptAsOf(ttx, 0)
+ require.NoError(err)
+ require.Equal(uint32(0), firstLogIndex)
+ require.Equal(uint64(0), cumGasUsed)
+
+ cumGasUsed, _, firstLogIndex, err = ReceiptAsOf(ttx, 1)
+ require.NoError(err)
+ require.Equal(uint32(0), firstLogIndex)
+ require.Equal(uint64(10), cumGasUsed)
+
+ cumGasUsed, _, firstLogIndex, err = ReceiptAsOf(ttx, 2)
+ require.NoError(err)
+ require.Equal(uint32(1), firstLogIndex)
+ require.Equal(uint64(11), cumGasUsed)
+
+ //block2
+ cumGasUsed, _, firstLogIndex, err = ReceiptAsOf(ttx, 3)
+ require.NoError(err)
+ require.Equal(uint32(1), firstLogIndex)
+ require.Equal(uint64(11), cumGasUsed)
+
+ cumGasUsed, _, firstLogIndex, err = ReceiptAsOf(ttx, 4)
+ require.NoError(err)
+ require.Equal(uint32(0), firstLogIndex)
+ require.Equal(uint64(12), cumGasUsed)
+
+ cumGasUsed, _, firstLogIndex, err = ReceiptAsOf(ttx, 5)
+ require.NoError(err)
+ require.Equal(uint32(4), firstLogIndex)
+ require.Equal(uint64(14), cumGasUsed)
+
+ // reader
+
+}
diff --git a/core/state/intra_block_state_test.go b/core/state/intra_block_state_test.go
index f3e8d3cd76d..b2c1c5321e6 100644
--- a/core/state/intra_block_state_test.go
+++ b/core/state/intra_block_state_test.go
@@ -44,7 +44,6 @@ import (
"github.com/erigontech/erigon-lib/log/v3"
stateLib "github.com/erigontech/erigon-lib/state"
- "github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/tracing"
"github.com/erigontech/erigon/core/types"
)
@@ -242,8 +241,7 @@ func (test *snapshotTest) run() bool {
db := memdb.NewStateDB("")
defer db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := stateLib.NewAggregator(context.Background(), datadir.New(""), 16, db, cr, log.New())
+ agg, err := stateLib.NewAggregator(context.Background(), datadir.New(""), 16, db, log.New())
if err != nil {
test.err = err
return false
diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go
index 51499d3b446..3759d288e26 100644
--- a/core/state/rw_v3.go
+++ b/core/state/rw_v3.go
@@ -23,9 +23,8 @@ import (
"sync"
"time"
- "github.com/holiman/uint256"
-
"github.com/erigontech/erigon-lib/log/v3"
+ "github.com/holiman/uint256"
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/dbg"
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 58c10b4fd3b..e01648f74fa 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -39,7 +39,6 @@ import (
"github.com/erigontech/erigon-lib/state"
stateLib "github.com/erigontech/erigon-lib/state"
- "github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/tracing"
"github.com/erigontech/erigon/core/types/accounts"
"github.com/erigontech/erigon/crypto"
@@ -121,8 +120,7 @@ func (s *StateSuite) SetUpTest(c *checker.C) {
db := memdb.NewStateDB("")
defer db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := stateLib.NewAggregator(context.Background(), datadir.New(""), 16, db, cr, log.New())
+ agg, err := stateLib.NewAggregator(context.Background(), datadir.New(""), 16, db, log.New())
if err != nil {
panic(err)
}
@@ -379,8 +377,7 @@ func NewTestTemporalDb(tb testing.TB) (kv.RwDB, kv.RwTx, *state.Aggregator) {
db := memdb.NewStateDB(tb.TempDir())
tb.Cleanup(db.Close)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := state.NewAggregator(context.Background(), datadir.New(tb.TempDir()), 16, db, cr, log.New())
+ agg, err := state.NewAggregator(context.Background(), datadir.New(tb.TempDir()), 16, db, log.New())
if err != nil {
tb.Fatal(err)
}
diff --git a/core/state/txtask.go b/core/state/txtask.go
index f6ccb937d4c..c42cc3f949f 100644
--- a/core/state/txtask.go
+++ b/core/state/txtask.go
@@ -22,6 +22,8 @@ import (
"sync"
"time"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon/core/rawdb/rawtemporaldb"
"github.com/holiman/uint256"
"github.com/erigontech/erigon-lib/chain"
@@ -83,7 +85,35 @@ type TxTask struct {
Config *chain.Config
}
-func (t *TxTask) CreateReceipt(cumulativeGasUsed uint64) *types.Receipt {
+func (t *TxTask) CreateReceipt(tx kv.Tx) {
+ if t.TxIndex < 0 || t.Final {
+ return
+ }
+
+ var cumulativeGasUsed uint64
+ var firstLogIndex uint32
+ if t.TxIndex > 0 {
+ prevR := t.BlockReceipts[t.TxIndex-1]
+ if prevR != nil {
+ cumulativeGasUsed = prevR.CumulativeGasUsed
+ firstLogIndex = prevR.FirstLogIndexWithinBlock + uint32(len(prevR.Logs))
+ } else {
+ var err error
+ cumulativeGasUsed, _, firstLogIndex, err = rawtemporaldb.ReceiptAsOf(tx.(kv.TemporalTx), t.TxNum)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ cumulativeGasUsed += t.UsedGas
+
+ r := t.createReceipt(cumulativeGasUsed)
+ r.FirstLogIndexWithinBlock = firstLogIndex
+ t.BlockReceipts[t.TxIndex] = r
+}
+
+func (t *TxTask) createReceipt(cumulativeGasUsed uint64) *types.Receipt {
receipt := &types.Receipt{
BlockNumber: t.Header.Number,
BlockHash: t.BlockHash,
@@ -297,11 +327,11 @@ type ResultsQueue struct {
results *TxTaskQueue
}
-func NewResultsQueue(newTasksLimit, queueLimit int) *ResultsQueue {
+func NewResultsQueue(resultChannelLimit, heapLimit int) *ResultsQueue {
r := &ResultsQueue{
results: &TxTaskQueue{},
- limit: queueLimit,
- resultCh: make(chan *TxTask, newTasksLimit),
+ limit: heapLimit,
+ resultCh: make(chan *TxTask, resultChannelLimit),
ticker: time.NewTicker(2 * time.Second),
}
heap.Init(r.results)
@@ -318,7 +348,7 @@ func (q *ResultsQueue) Add(ctx context.Context, task *TxTask) error {
}
return nil
}
-func (q *ResultsQueue) drainNoBlock(task *TxTask) {
+func (q *ResultsQueue) drainNoBlock(ctx context.Context, task *TxTask) error {
q.Lock()
defer q.Unlock()
if task != nil {
@@ -327,16 +357,21 @@ func (q *ResultsQueue) drainNoBlock(task *TxTask) {
for {
select {
+ case <-ctx.Done():
+ return ctx.Err()
case txTask, ok := <-q.resultCh:
if !ok {
- return
+ return nil
}
- if txTask != nil {
- heap.Push(q.results, txTask)
- q.results.Len()
+ if txTask == nil {
+ continue
+ }
+ heap.Push(q.results, txTask)
+ if q.results.Len() > q.limit {
+ return nil
}
default: // we are inside mutex section, can't block here
- return
+ return nil
}
}
}
@@ -369,7 +404,9 @@ func (q *ResultsQueue) Drain(ctx context.Context) error {
if !ok {
return nil
}
- q.drainNoBlock(txTask)
+ if err := q.drainNoBlock(ctx, txTask); err != nil {
+ return err
+ }
case <-q.ticker.C:
// Corner case: workers processed all new tasks (no more q.resultCh events) when we are inside Drain() func
// it means - naive-wait for new q.resultCh events will not work here (will cause dead-lock)
@@ -383,14 +420,16 @@ func (q *ResultsQueue) Drain(ctx context.Context) error {
return nil
}
-func (q *ResultsQueue) DrainNonBlocking() { q.drainNoBlock(nil) }
+func (q *ResultsQueue) DrainNonBlocking(ctx context.Context) error { return q.drainNoBlock(ctx, nil) }
-func (q *ResultsQueue) DropResults(f func(t *TxTask)) {
+func (q *ResultsQueue) DropResults(ctx context.Context, f func(t *TxTask)) {
q.Lock()
defer q.Unlock()
Loop:
for {
select {
+ case <-ctx.Done():
+ return
case txTask, ok := <-q.resultCh:
if !ok {
break Loop
diff --git a/core/test/domains_restart_test.go b/core/test/domains_restart_test.go
index 788359103aa..1dae41b8675 100644
--- a/core/test/domains_restart_test.go
+++ b/core/test/domains_restart_test.go
@@ -29,8 +29,6 @@ import (
"testing"
"time"
- "github.com/erigontech/erigon/core/rawdb"
-
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
@@ -71,8 +69,7 @@ func testDbAndAggregatorv3(t *testing.T, fpath string, aggStep uint64) (kv.RwDB,
}).MustOpen()
t.Cleanup(db.Close)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := state.NewAggregator(context.Background(), dirs, aggStep, db, cr, logger)
+ agg, err := state.NewAggregator(context.Background(), dirs, aggStep, db, logger)
require.NoError(t, err)
t.Cleanup(agg.Close)
err = agg.OpenFolder()
diff --git a/core/types/receipt.go b/core/types/receipt.go
index ed88a6df26e..b9190c24928 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -71,7 +71,7 @@ type Receipt struct {
BlockNumber *big.Int `json:"blockNumber,omitempty"`
TransactionIndex uint `json:"transactionIndex"`
- FirstLogIndex uint32 `json:"-"` // field which used to store in db and re-calc
+ FirstLogIndexWithinBlock uint32 `json:"-"` // field which used to store in db and re-calc
}
type receiptMarshaling struct {
@@ -333,7 +333,7 @@ func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
return err
}
r.CumulativeGasUsed = stored.CumulativeGasUsed
- r.FirstLogIndex = stored.FirstLogIndex
+ r.FirstLogIndexWithinBlock = stored.FirstLogIndex
//r.Logs = make([]*Log, len(stored.Logs))
//for i, log := range stored.Logs {
@@ -438,21 +438,8 @@ func (r Receipts) DeriveFields(hash libcommon.Hash, number uint64, txs Transacti
// DeriveFields fills the receipts with their computed fields based on consensus
// data and contextual infos like containing block and transactions.
-func (rl Receipts) DeriveFieldsV3ForSingleReceipt(i int, blockHash libcommon.Hash, blockNum uint64, txn Transaction) (*Receipt, error) {
- r := rl[i]
- var prevReceipt *Receipt
- if i > 0 {
- prevReceipt = rl[i-1]
- }
- err := r.DeriveFieldsV3ForSingleReceipt(i, blockHash, blockNum, txn, prevReceipt)
- if err != nil {
- return nil, err
- }
- return r, nil
-}
-
-func (r *Receipt) DeriveFieldsV3ForSingleReceipt(txnIdx int, blockHash libcommon.Hash, blockNum uint64, txn Transaction, prevReceipt *Receipt) error {
- logIndex := r.FirstLogIndex // logIdx is unique within the block and starts from 0
+func (r *Receipt) DeriveFieldsV3ForSingleReceipt(txnIdx int, blockHash libcommon.Hash, blockNum uint64, txn Transaction, prevCumulativeGasUsed uint64) error {
+ logIndex := r.FirstLogIndexWithinBlock // logIdx is unique within the block and starts from 0
sender, ok := txn.cachedSender()
if !ok {
@@ -480,7 +467,7 @@ func (r *Receipt) DeriveFieldsV3ForSingleReceipt(txnIdx int, blockHash libcommon
if txnIdx == 0 {
r.GasUsed = r.CumulativeGasUsed
} else {
- r.GasUsed = r.CumulativeGasUsed - prevReceipt.CumulativeGasUsed
+ r.GasUsed = r.CumulativeGasUsed - prevCumulativeGasUsed
}
// The derived log fields can simply be set from the block and transaction
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index c9da6d1cb12..026c90c8ee4 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -104,7 +104,7 @@ func TestLegacyReceiptDecoding(t *testing.T) {
if dec.CumulativeGasUsed != receipt.CumulativeGasUsed {
t.Fatalf("Receipt CumulativeGasUsed mismatch, want %v, have %v", receipt.CumulativeGasUsed, dec.CumulativeGasUsed)
}
- assert.Equal(t, uint32(receipt.Logs[0].Index), dec.FirstLogIndex)
+ assert.Equal(t, uint32(receipt.Logs[0].Index), dec.FirstLogIndexWithinBlock)
//if len(dec.Logs) != len(receipt.Logs) {
// t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs))
//}
@@ -178,10 +178,10 @@ func TestDeriveFields(t *testing.T) {
{Address: libcommon.BytesToAddress([]byte{0x11})},
{Address: libcommon.BytesToAddress([]byte{0x01, 0x11})},
},
- TxHash: txs[0].Hash(),
- ContractAddress: libcommon.BytesToAddress([]byte{0x01, 0x11, 0x11}),
- GasUsed: 1,
- FirstLogIndex: 0,
+ TxHash: txs[0].Hash(),
+ ContractAddress: libcommon.BytesToAddress([]byte{0x01, 0x11, 0x11}),
+ GasUsed: 1,
+ FirstLogIndexWithinBlock: 0,
},
&Receipt{
PostState: libcommon.Hash{2}.Bytes(),
@@ -190,10 +190,10 @@ func TestDeriveFields(t *testing.T) {
{Address: libcommon.BytesToAddress([]byte{0x22})},
{Address: libcommon.BytesToAddress([]byte{0x02, 0x22})},
},
- TxHash: txs[1].Hash(),
- ContractAddress: libcommon.BytesToAddress([]byte{0x02, 0x22, 0x22}),
- GasUsed: 2,
- FirstLogIndex: 2,
+ TxHash: txs[1].Hash(),
+ ContractAddress: libcommon.BytesToAddress([]byte{0x02, 0x22, 0x22}),
+ GasUsed: 2,
+ FirstLogIndexWithinBlock: 2,
},
&Receipt{
Type: AccessListTxType,
@@ -203,10 +203,10 @@ func TestDeriveFields(t *testing.T) {
{Address: libcommon.BytesToAddress([]byte{0x33})},
{Address: libcommon.BytesToAddress([]byte{0x03, 0x33})},
},
- TxHash: txs[2].Hash(),
- ContractAddress: libcommon.BytesToAddress([]byte{0x03, 0x33, 0x33}),
- GasUsed: 3,
- FirstLogIndex: 4,
+ TxHash: txs[2].Hash(),
+ ContractAddress: libcommon.BytesToAddress([]byte{0x03, 0x33, 0x33}),
+ GasUsed: 3,
+ FirstLogIndexWithinBlock: 4,
},
}
// Clear all the computed fields and re-derive them
@@ -273,68 +273,68 @@ func TestDeriveFields(t *testing.T) {
}
})
- t.Run("DeriveV3", func(t *testing.T) {
- clearComputedFieldsOnReceipts(t, receipts)
- // Iterate over all the computed fields and check that they're correct
- signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0)
-
- logIndex := uint(0)
- for i := range receipts {
- txs[i].SetSender(libcommon.BytesToAddress([]byte{0x0}))
- r, err := receipts.DeriveFieldsV3ForSingleReceipt(i, hash, number.Uint64(), txs[i])
- if err != nil {
- panic(err)
- }
-
- if r.Type != txs[i].Type() {
- t.Errorf("receipts[%d].Type = %d, want %d", i, r.Type, txs[i].Type())
- }
- if r.TxHash != txs[i].Hash() {
- t.Errorf("receipts[%d].TxHash = %s, want %s", i, r.TxHash.String(), txs[i].Hash().String())
- }
- if r.BlockHash != hash {
- t.Errorf("receipts[%d].BlockHash = %s, want %s", i, r.BlockHash.String(), hash.String())
- }
- if r.BlockNumber.Cmp(number) != 0 {
- t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, r.BlockNumber.String(), number.String())
- }
- if r.TransactionIndex != uint(i) {
- t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, r.TransactionIndex, i)
- }
- if r.GasUsed != txs[i].GetGas() {
- t.Errorf("receipts[%d].GasUsed = %d, want %d", i, r.GasUsed, txs[i].GetGas())
- }
- if txs[i].GetTo() != nil && r.ContractAddress != (libcommon.Address{}) {
- t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), (libcommon.Address{}).String())
- }
- from, _ := txs[i].Sender(*signer)
- contractAddress := crypto.CreateAddress(from, txs[i].GetNonce())
- if txs[i].GetTo() == nil && r.ContractAddress != contractAddress {
- t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), contractAddress.String())
- }
- for j := range r.Logs {
- if r.Logs[j].BlockNumber != number.Uint64() {
- t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, r.Logs[j].BlockNumber, number.Uint64())
- }
- if r.Logs[j].BlockHash != hash {
- t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, r.Logs[j].BlockHash.String(), hash.String())
- }
- if r.Logs[j].TxHash != txs[i].Hash() {
- t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String())
- }
- if r.Logs[j].TxHash != txs[i].Hash() {
- t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String())
- }
- if r.Logs[j].TxIndex != uint(i) {
- t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, r.Logs[j].TxIndex, i)
- }
- if r.Logs[j].Index != logIndex {
- t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, r.Logs[j].Index, logIndex)
- }
- logIndex++
- }
- }
- })
+ //t.Run("DeriveV3", func(t *testing.T) {
+ // clearComputedFieldsOnReceipts(t, receipts)
+ // // Iterate over all the computed fields and check that they're correct
+ // signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0)
+ //
+ // logIndex := uint(0)
+ // for i := range receipts {
+ // txs[i].SetSender(libcommon.BytesToAddress([]byte{0x0}))
+ // r, err := receipts.DeriveFieldsV3ForSingleReceipt(i, hash, number.Uint64(), txs[i])
+ // if err != nil {
+ // panic(err)
+ // }
+ //
+ // if r.Type != txs[i].Type() {
+ // t.Errorf("receipts[%d].Type = %d, want %d", i, r.Type, txs[i].Type())
+ // }
+ // if r.TxHash != txs[i].Hash() {
+ // t.Errorf("receipts[%d].TxHash = %s, want %s", i, r.TxHash.String(), txs[i].Hash().String())
+ // }
+ // if r.BlockHash != hash {
+ // t.Errorf("receipts[%d].BlockHash = %s, want %s", i, r.BlockHash.String(), hash.String())
+ // }
+ // if r.BlockNumber.Cmp(number) != 0 {
+ // t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, r.BlockNumber.String(), number.String())
+ // }
+ // if r.TransactionIndex != uint(i) {
+ // t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, r.TransactionIndex, i)
+ // }
+ // if r.GasUsed != txs[i].GetGas() {
+ // t.Errorf("receipts[%d].GasUsed = %d, want %d", i, r.GasUsed, txs[i].GetGas())
+ // }
+ // if txs[i].GetTo() != nil && r.ContractAddress != (libcommon.Address{}) {
+ // t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), (libcommon.Address{}).String())
+ // }
+ // from, _ := txs[i].Sender(*signer)
+ // contractAddress := crypto.CreateAddress(from, txs[i].GetNonce())
+ // if txs[i].GetTo() == nil && r.ContractAddress != contractAddress {
+ // t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), contractAddress.String())
+ // }
+ // for j := range r.Logs {
+ // if r.Logs[j].BlockNumber != number.Uint64() {
+ // t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, r.Logs[j].BlockNumber, number.Uint64())
+ // }
+ // if r.Logs[j].BlockHash != hash {
+ // t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, r.Logs[j].BlockHash.String(), hash.String())
+ // }
+ // if r.Logs[j].TxHash != txs[i].Hash() {
+ // t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String())
+ // }
+ // if r.Logs[j].TxHash != txs[i].Hash() {
+ // t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String())
+ // }
+ // if r.Logs[j].TxIndex != uint(i) {
+ // t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, r.Logs[j].TxIndex, i)
+ // }
+ // if r.Logs[j].Index != logIndex {
+ // t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, r.Logs[j].Index, logIndex)
+ // }
+ // logIndex++
+ // }
+ // }
+ //})
}
diff --git a/core/vm/analysis_test.go b/core/vm/analysis_test.go
index d6441d6c725..f4b3162ae82 100644
--- a/core/vm/analysis_test.go
+++ b/core/vm/analysis_test.go
@@ -94,7 +94,7 @@ func BenchmarkJumpDest(b *testing.B) {
contractRef := dummyContractRef{}
- c := NewJumpDestCache(false)
+ c := NewJumpDestCache()
b.ResetTimer()
for n := 0; n < b.N; n++ {
contract := NewContract(contractRef, libcommon.Address{}, nil, 0, false /* skipAnalysis */, c)
diff --git a/core/vm/contract.go b/core/vm/contract.go
index 6c7547b15fe..1acbd804ce2 100644
--- a/core/vm/contract.go
+++ b/core/vm/contract.go
@@ -82,12 +82,12 @@ var (
jumpDestCacheTrace = dbg.EnvBool("JD_LRU_TRACE", false)
)
-func NewJumpDestCache(trace bool) *JumpDestCache {
+func NewJumpDestCache() *JumpDestCache {
c, err := simplelru.NewLRU[libcommon.Hash, bitvec](jumpDestCacheLimit, nil)
if err != nil {
panic(err)
}
- return &JumpDestCache{LRU: c, trace: trace || jumpDestCacheTrace}
+ return &JumpDestCache{LRU: c, trace: jumpDestCacheTrace}
}
func (c *JumpDestCache) LogStats() {
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 4190b46613a..33dd84242a4 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -116,7 +116,7 @@ func NewEVM(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, state evmt
config: vmConfig,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Time),
- JumpDestCache: NewJumpDestCache(false),
+ JumpDestCache: NewJumpDestCache(),
}
evm.interpreter = NewEVMInterpreter(evm, vmConfig)
diff --git a/core/vm/evm_test.go b/core/vm/evm_test.go
index b47791d4271..5436eb54eed 100644
--- a/core/vm/evm_test.go
+++ b/core/vm/evm_test.go
@@ -29,7 +29,7 @@ import (
func TestInterpreterReadonly(t *testing.T) {
t.Parallel()
- c := NewJumpDestCache(false)
+ c := NewJumpDestCache()
rapid.Check(t, func(t *rapid.T) {
env := NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{})
@@ -137,7 +137,7 @@ func TestInterpreterReadonly(t *testing.T) {
func TestReadonlyBasicCases(t *testing.T) {
t.Parallel()
- c := NewJumpDestCache(false)
+ c := NewJumpDestCache()
cases := []struct {
testName string
@@ -405,7 +405,7 @@ func newTestSequential(env *EVM, currentIdx *int, readonlies []bool, isEVMCalled
func (st *testSequential) Run(_ *Contract, _ []byte, _ bool) ([]byte, error) {
*st.currentIdx++
- c := NewJumpDestCache(false)
+ c := NewJumpDestCache()
nextContract := NewContract(
&dummyContractRef{},
libcommon.Address{},
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index 04b0c2cbd91..1d74f5945a2 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -34,14 +34,11 @@ import (
"github.com/erigontech/erigon-lib/common/hexutil"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/memdb"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/kv/temporal"
"github.com/erigontech/erigon-lib/kv/temporal/temporaltest"
"github.com/erigontech/erigon-lib/log/v3"
state3 "github.com/erigontech/erigon-lib/state"
"github.com/erigontech/erigon-lib/wrap"
- "github.com/erigontech/erigon/core/rawdb"
-
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/vm/evmtypes"
"github.com/erigontech/erigon/params"
@@ -103,8 +100,7 @@ func testTemporalDB(t *testing.T) *temporal.DB {
t.Cleanup(db.Close)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := state3.NewAggregator(context.Background(), datadir.New(t.TempDir()), 16, db, cr, log.New())
+ agg, err := state3.NewAggregator(context.Background(), datadir.New(t.TempDir()), 16, db, log.New())
require.NoError(t, err)
t.Cleanup(agg.Close)
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 8136b51580b..f374a330070 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -594,7 +594,7 @@ func TestOpTstore(t *testing.T) {
caller = libcommon.Address{}
to = libcommon.Address{1}
contractRef = contractRef{caller}
- contract = NewContract(contractRef, to, u256.Num0, 0, false, NewJumpDestCache(false))
+ contract = NewContract(contractRef, to, u256.Num0, 0, false, NewJumpDestCache())
scopeContext = ScopeContext{mem, stack, contract}
value = libcommon.Hex2Bytes("abcdef00000000000000abba000000000deaf000000c0de00100000000133700")
)
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index e22c4a226f1..db606fd07b7 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -37,10 +37,8 @@ import (
"github.com/erigontech/erigon-lib/config3"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/memdb"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/kv/temporal"
state3 "github.com/erigontech/erigon-lib/state"
- "github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/vm"
"github.com/erigontech/erigon/crypto"
@@ -134,8 +132,7 @@ func Execute(code, input []byte, cfg *Config, tempdir string) ([]byte, *state.In
if !externalState {
db := memdb.NewStateDB(tempdir)
defer db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := state3.NewAggregator(context.Background(), datadir.New(tempdir), config3.HistoryV3AggregationStep, db, cr, log.New())
+ agg, err := state3.NewAggregator(context.Background(), datadir.New(tempdir), config3.HistoryV3AggregationStep, db, log.New())
if err != nil {
return nil, nil, err
}
@@ -197,8 +194,7 @@ func Create(input []byte, cfg *Config, blockNr uint64) ([]byte, libcommon.Addres
db := memdb.NewStateDB(tmp)
defer db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := state3.NewAggregator(context.Background(), datadir.New(tmp), config3.HistoryV3AggregationStep, db, cr, log.New())
+ agg, err := state3.NewAggregator(context.Background(), datadir.New(tmp), config3.HistoryV3AggregationStep, db, log.New())
if err != nil {
return nil, [20]byte{}, 0, err
}
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 9673b8df5c5..874b6cbdf91 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -45,7 +45,6 @@ import (
"github.com/erigontech/erigon/consensus"
"github.com/erigontech/erigon/core"
"github.com/erigontech/erigon/core/asm"
- "github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/core/vm"
@@ -58,8 +57,7 @@ func NewTestTemporalDb(tb testing.TB) (kv.RwDB, kv.RwTx, *stateLib.Aggregator) {
db := memdb.NewStateDB(tb.TempDir())
tb.Cleanup(db.Close)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := stateLib.NewAggregator(context.Background(), datadir.New(tb.TempDir()), 16, db, cr, log.New())
+ agg, err := stateLib.NewAggregator(context.Background(), datadir.New(tb.TempDir()), 16, db, log.New())
if err != nil {
tb.Fatal(err)
}
@@ -180,8 +178,7 @@ func testTemporalDB(t testing.TB) *temporal.DB {
t.Cleanup(db.Close)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg, err := stateLib.NewAggregator(context.Background(), datadir.New(t.TempDir()), 16, db, cr, log.New())
+ agg, err := stateLib.NewAggregator(context.Background(), datadir.New(t.TempDir()), 16, db, log.New())
require.NoError(t, err)
t.Cleanup(agg.Close)
diff --git a/erigon-lib/chain/snapcfg/util_test.go b/erigon-lib/chain/snapcfg/util_test.go
index d169b267683..86b2622c824 100644
--- a/erigon-lib/chain/snapcfg/util_test.go
+++ b/erigon-lib/chain/snapcfg/util_test.go
@@ -1,8 +1,9 @@
package snapcfg
import (
- "github.com/erigontech/erigon-lib/downloader/snaptype"
"testing"
+
+ "github.com/erigontech/erigon-lib/downloader/snaptype"
)
func TestNameToParts(t *testing.T) {
diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go
index 08965fe9e5d..85f7cbd34c7 100644
--- a/erigon-lib/downloader/snaptype/files.go
+++ b/erigon-lib/downloader/snaptype/files.go
@@ -206,7 +206,7 @@ func AllV2Extensions() []string {
}
func SeedableV3Extensions() []string {
- return []string{".kv", ".v", ".ef"}
+ return []string{".kv", ".v", ".ef", ".ap"}
}
func AllV3Extensions() []string {
diff --git a/erigon-lib/etl/etl.go b/erigon-lib/etl/etl.go
index 366d09b88d0..f155e92faa1 100644
--- a/erigon-lib/etl/etl.go
+++ b/erigon-lib/etl/etl.go
@@ -73,7 +73,7 @@ type TransformArgs struct {
ExtractEndKey []byte
BufferType int
BufferSize int
- EmptyVals bool
+ EmptyVals bool // `v=nil` case: `false` means `Del(k)`, `true` means `Put(k, nil)`
}
func Transform(
diff --git a/erigon-lib/kv/kv_interface.go b/erigon-lib/kv/kv_interface.go
index 8065674df2e..3f892dc731e 100644
--- a/erigon-lib/kv/kv_interface.go
+++ b/erigon-lib/kv/kv_interface.go
@@ -560,7 +560,15 @@ type TemporalGetter interface {
type TemporalTx interface {
Tx
TemporalGetter
+
+ // DomainGetAsOf - state as of given `ts`
+ // Example: GetAsOf(Account, key, txNum) - retuns account's value before `txNum` transaction changed it
+ // Means if you want re-execute `txNum` on historical state - do `GetAsOf(key, txNum)` to read state
+ // `ok = false` means: key not found. or "future txNum" passed.
DomainGetAsOf(name Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error)
+
+ // HistorySeek - like `DomainGetAsOf` but without latest state - only for `History`
+ // `ok == true && v != nil && len(v) == 0` means key-creation even
HistorySeek(name History, k []byte, ts uint64) (v []byte, ok bool, err error)
// IndexRange - return iterator over range of inverted index for given key `k`
@@ -576,8 +584,6 @@ type TemporalTx interface {
// HistoryRange - producing "state patch" - sorted list of keys updated at [fromTs,toTs) with their most-recent value.
// no duplicates
HistoryRange(name History, fromTs, toTs int, asc order.By, limit int) (it stream.KV, err error)
-
- AppendableGet(name Appendable, ts TxnId) ([]byte, bool, error)
}
type TxnId uint64 // internal auto-increment ID. can't cast to eth-network canonical blocks txNum
@@ -606,8 +612,6 @@ type TemporalPutDel interface {
// - if `val == nil` it will call DomainDel
DomainDel(domain Domain, k1, k2 []byte, prevVal []byte, prevStep uint64) error
DomainDelPrefix(domain Domain, prefix []byte) error
-
- AppendablePut(name Appendable, ts TxnId, v []byte) error
}
type CanWarmupDB interface {
WarmupDB(force bool) error
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go
index 61f6ba656eb..70d3df91530 100644
--- a/erigon-lib/kv/membatchwithdb/memory_mutation.go
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go
@@ -758,8 +758,3 @@ func (m *MemoryMutation) DomainRange(name kv.Domain, fromKey, toKey []byte, ts u
panic("not supported")
//return m.db.(kv.TemporalTx).DomainRange(name, fromKey, toKey, ts, asc, limit)
}
-
-func (m *MemoryMutation) AppendableGet(name kv.Appendable, ts kv.TxnId) ([]byte, bool, error) {
- panic("not supported")
- //return m.db.(kv.TemporalTx).AppendableGet(name, ts)
-}
diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
index f407775f1d1..463d0f67783 100644
--- a/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
+++ b/erigon-lib/kv/membatchwithdb/memory_mutation_test.go
@@ -212,7 +212,7 @@ func NewTestTemporalDb(tb testing.TB) (kv.RwDB, kv.RwTx, *stateLib.Aggregator) {
db := memdb.NewStateDB(tb.TempDir())
tb.Cleanup(db.Close)
- agg, err := stateLib.NewAggregator(context.Background(), datadir.New(tb.TempDir()), 16, db, nil, log.New())
+ agg, err := stateLib.NewAggregator(context.Background(), datadir.New(tb.TempDir()), 16, db, log.New())
if err != nil {
tb.Fatal(err)
}
diff --git a/erigon-lib/kv/rawdbv3/txnum.go b/erigon-lib/kv/rawdbv3/txnum.go
index d1d6a38746f..b15919d8cef 100644
--- a/erigon-lib/kv/rawdbv3/txnum.go
+++ b/erigon-lib/kv/rawdbv3/txnum.go
@@ -67,7 +67,7 @@ func DefaultReadTxNumFunc(tx kv.Tx, c kv.Cursor, blockNum uint64) (maxTxNum uint
return 0, false, nil
}
if len(v) != 8 {
- return 0, false, fmt.Errorf("seems broken TxNum value: %x", v)
+ return 0, false, fmt.Errorf("DefaultReadTxNumFunc: seems broken TxNum value: %x", v)
}
return binary.BigEndian.Uint64(v), true, nil
}
@@ -201,7 +201,7 @@ func (t TxNumsReader) FindBlockNum(tx kv.Tx, endTxNumMinimax uint64) (ok bool, b
return false, 0, nil
}
if len(lastK) != 8 {
- return false, 0, fmt.Errorf("seems broken TxNum value: %x", lastK)
+ return false, 0, fmt.Errorf("FindBlockNum: seems broken TxNum value: %x", lastK)
}
lastBlockNum := binary.BigEndian.Uint64(lastK)
@@ -215,8 +215,9 @@ func (t TxNumsReader) FindBlockNum(tx kv.Tx, endTxNumMinimax uint64) (ok bool, b
return true
}
if !ok {
+ _fb, _ft, _ := t.First(tx)
_lb, _lt, _ := t.Last(tx)
- err = fmt.Errorf("FindBlockNum(%d): seems broken TxNum value: %x -> (%x, %x); last in db: (%d, %d)", endTxNumMinimax, seek, i, maxTxNum, _lb, _lt)
+ err = fmt.Errorf("FindBlockNum(%d): seems broken TxNum value: %x -> (%d, %d); db has: (%d-%d, %d-%d)", endTxNumMinimax, seek, i, maxTxNum, _fb, _lb, _ft, _lt)
return true
}
return maxTxNum >= endTxNumMinimax
diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go
index ab511ec51bc..a69a246003f 100644
--- a/erigon-lib/kv/remotedb/kv_remote.go
+++ b/erigon-lib/kv/remotedb/kv_remote.go
@@ -677,9 +677,6 @@ func (tx *tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc or
return reply.Timestamps, reply.NextPageToken, nil
}), nil
}
-func (tx *tx) AppendableGet(name kv.Appendable, ts kv.TxnId) ([]byte, bool, error) {
- panic("not implemented yet")
-}
func (tx *tx) Prefix(table string, prefix []byte) (stream.KV, error) {
nextPrefix, ok := kv.NextSubtree(prefix)
diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go
index 7600d39af17..c3065dd8f0e 100644
--- a/erigon-lib/kv/tables.go
+++ b/erigon-lib/kv/tables.go
@@ -372,6 +372,11 @@ const (
TblCommitmentHistoryVals = "CommitmentHistoryVals"
TblCommitmentIdx = "CommitmentIdx"
+ TblReceiptVals = "ReceiptVals"
+ TblReceiptHistoryKeys = "ReceiptHistoryKeys"
+ TblReceiptHistoryVals = "ReceiptHistoryVals"
+ TblReceiptIdx = "ReceiptIdx"
+
TblLogAddressKeys = "LogAddressKeys"
TblLogAddressIdx = "LogAddressIdx"
TblLogTopicsKeys = "LogTopicsKeys"
@@ -565,6 +570,11 @@ var ChaindataTables = []string{
TblCommitmentHistoryVals,
TblCommitmentIdx,
+ TblReceiptVals,
+ TblReceiptHistoryKeys,
+ TblReceiptHistoryVals,
+ TblReceiptIdx,
+
TblLogAddressKeys,
TblLogAddressIdx,
TblLogTopicsKeys,
@@ -723,6 +733,10 @@ var ChaindataTablesCfg = TableCfg{
TblCommitmentHistoryKeys: {Flags: DupSort},
TblCommitmentHistoryVals: {Flags: DupSort},
TblCommitmentIdx: {Flags: DupSort},
+ TblReceiptVals: {Flags: DupSort},
+ TblReceiptHistoryKeys: {Flags: DupSort},
+ TblReceiptHistoryVals: {Flags: DupSort},
+ TblReceiptIdx: {Flags: DupSort},
TblLogAddressKeys: {Flags: DupSort},
TblLogAddressIdx: {Flags: DupSort},
TblLogTopicsKeys: {Flags: DupSort},
@@ -848,7 +862,8 @@ const (
StorageDomain Domain = 1
CodeDomain Domain = 2
CommitmentDomain Domain = 3
- DomainLen Domain = 4
+ ReceiptDomain Domain = 4
+ DomainLen Domain = 5
)
const (
@@ -856,6 +871,7 @@ const (
StorageHistory History = "StorageHistory"
CodeHistory History = "CodeHistory"
CommitmentHistory History = "CommitmentHistory"
+ ReceiptHistory History = "ReceiptHistory"
)
const (
@@ -863,6 +879,7 @@ const (
StorageHistoryIdx InvertedIdx = "StorageHistoryIdx"
CodeHistoryIdx InvertedIdx = "CodeHistoryIdx"
CommitmentHistoryIdx InvertedIdx = "CommitmentHistoryIdx"
+ ReceiptHistoryIdx InvertedIdx = "ReceiptHistoryIdx"
LogTopicIdx InvertedIdx = "LogTopicIdx"
LogAddrIdx InvertedIdx = "LogAddrIdx"
@@ -877,9 +894,8 @@ const (
)
const (
- //ReceiptsAppendable Appendable = 0
- //AppendableLen Appendable = 1
- AppendableLen Appendable = 0
+ ReceiptsAppendable Appendable = 0
+ AppendableLen Appendable = 0
)
func (iip InvertedIdxPos) String() string {
@@ -907,6 +923,8 @@ func (d Domain) String() string {
return "code"
case CommitmentDomain:
return "commitment"
+ case ReceiptDomain:
+ return "receipt"
default:
return "unknown domain"
}
@@ -922,6 +940,8 @@ func String2Domain(in string) (Domain, error) {
return CodeDomain, nil
case "commitment":
return CommitmentDomain, nil
+ case "receipt":
+ return ReceiptDomain, nil
default:
return Domain(MaxUint16), fmt.Errorf("unknown history name: %s", in)
}
@@ -931,9 +951,18 @@ const MaxUint16 uint16 = 1<<16 - 1
func (iip Appendable) String() string {
switch iip {
- //case ReceiptsAppendable:
- // return "receipts"
+ case ReceiptsAppendable:
+ return "receipts"
default:
return "unknown Appendable"
}
}
+
+func String2Appendable(in string) (Appendable, error) {
+ switch in {
+ case "receipts":
+ return ReceiptsAppendable, nil
+ default:
+ return Appendable(MaxUint16), fmt.Errorf("unknown Appendable name: %s", in)
+ }
+}
diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go
index adb5d8464a1..8aecc7565c5 100644
--- a/erigon-lib/kv/temporal/kv_temporal.go
+++ b/erigon-lib/kv/temporal/kv_temporal.go
@@ -242,7 +242,3 @@ func (tx *Tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limi
tx.resourcesToClose = append(tx.resourcesToClose, it)
return it, nil
}
-
-func (tx *Tx) AppendableGet(name kv.Appendable, ts kv.TxnId) ([]byte, bool, error) {
- return tx.filesTx.AppendableGet(name, ts, tx.MdbxTx)
-}
diff --git a/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go b/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go
index 6065614849e..0076d15daac 100644
--- a/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go
+++ b/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go
@@ -42,7 +42,7 @@ func NewTestDB(tb testing.TB, dirs datadir.Dirs) (db kv.RwDB, agg *state.Aggrega
}
var err error
- agg, err = state.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, db, nil, log.New())
+ agg, err = state.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, db, log.New())
if err != nil {
panic(err)
}
diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go
index fac94c3bf4e..4931a5b7f35 100644
--- a/erigon-lib/state/aggregator.go
+++ b/erigon-lib/state/aggregator.go
@@ -56,7 +56,6 @@ type Aggregator struct {
db kv.RoDB
d [kv.DomainLen]*Domain
iis [kv.StandaloneIdxLen]*InvertedIndex
- ap [kv.AppendableLen]*Appendable //nolint
dirs datadir.Dirs
tmpdir string
aggregationStep uint64
@@ -101,7 +100,7 @@ type OnFreezeFunc func(frozenFileNames []string)
const AggregatorSqueezeCommitmentValues = true
const MaxNonFuriousDirtySpacePerTx = 64 * datasize.MB
-func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint64, db kv.RoDB, iters CanonicalsReader, logger log.Logger) (*Aggregator, error) {
+func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint64, db kv.RoDB, logger log.Logger) (*Aggregator, error) {
tmpdir := dirs.Tmp
salt, err := getStateIndicesSalt(dirs.Snap)
if err != nil {
@@ -201,12 +200,17 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6
if a.d[kv.CommitmentDomain], err = NewDomain(cfg, aggregationStep, kv.CommitmentDomain, kv.TblCommitmentVals, kv.TblCommitmentHistoryKeys, kv.TblCommitmentHistoryVals, kv.TblCommitmentIdx, integrityCheck, logger); err != nil {
return nil, err
}
- //aCfg := AppendableCfg{
- // Salt: salt, Dirs: dirs, DB: db, iters: iters,
- //}
- //if a.ap[kv.ReceiptsAppendable], err = NewAppendable(aCfg, aggregationStep, "receipts", kv.Receipts, nil, logger); err != nil {
- // return nil, err
- //}
+ cfg = domainCfg{
+ hist: histCfg{
+ iiCfg: iiCfg{salt: salt, dirs: dirs, db: db},
+ withLocalityIndex: false, withExistenceIndex: false,
+ compression: seg.CompressNone, historyLargeValues: false,
+ },
+ compress: seg.CompressNone, //seg.CompressKeys | seg.CompressVals,
+ }
+ if a.d[kv.ReceiptDomain], err = NewDomain(cfg, aggregationStep, kv.ReceiptDomain, kv.TblReceiptVals, kv.TblReceiptHistoryKeys, kv.TblReceiptHistoryVals, kv.TblReceiptIdx, integrityCheck, logger); err != nil {
+ return nil, err
+ }
if err := a.registerII(kv.LogAddrIdxPos, salt, dirs, db, aggregationStep, kv.FileLogAddressIdx, kv.TblLogAddressKeys, kv.TblLogAddressIdx, logger); err != nil {
return nil, err
}
@@ -352,9 +356,6 @@ func (a *Aggregator) closeDirtyFiles() {
for _, ii := range a.iis {
ii.Close()
}
- for _, ap := range a.ap {
- ap.Close()
- }
}
func (a *Aggregator) SetCollateAndBuildWorkers(i int) { a.collateAndBuildWorkers = i }
@@ -366,9 +367,6 @@ func (a *Aggregator) SetCompressWorkers(i int) {
for _, ii := range a.iis {
ii.compressCfg.Workers = i
}
- for _, ap := range a.ap {
- ap.compressCfg.Workers = i
- }
}
func (a *Aggregator) DiscardHistory(name kv.Domain) *Aggregator {
@@ -395,9 +393,6 @@ func (ac *AggregatorRoTx) Files() []string {
for _, ii := range ac.iis {
res = append(res, ii.Files()...)
}
- for _, ap := range ac.appendable {
- res = append(res, ap.Files()...)
- }
return res
}
func (a *Aggregator) Files() []string {
@@ -428,9 +423,6 @@ func (a *Aggregator) LS() {
for _, d := range a.iis {
doLS(d.dirtyFiles)
}
- for _, d := range a.ap {
- doLS(d.dirtyFiles)
- }
}
func (a *Aggregator) BuildOptionalMissedIndicesInBackground(ctx context.Context, workers int) {
@@ -509,9 +501,6 @@ func (a *Aggregator) BuildMissedIndices(ctx context.Context, workers int) error
for _, ii := range a.iis {
ii.BuildMissedAccessors(ctx, g, ps)
}
- for _, appendable := range a.ap {
- appendable.BuildMissedAccessors(ctx, g, ps)
- }
if err := g.Wait(); err != nil {
return err
@@ -590,9 +579,8 @@ func (c AggV3Collation) Close() {
}
type AggV3StaticFiles struct {
- d [kv.DomainLen]StaticFiles
- ivfs [kv.StandaloneIdxLen]InvertedFiles
- appendable [kv.AppendableLen]AppendableFiles
+ d [kv.DomainLen]StaticFiles
+ ivfs [kv.StandaloneIdxLen]InvertedFiles
}
// CleanupOnError - call it on collation fail. It's closing all files
@@ -603,9 +591,6 @@ func (sf AggV3StaticFiles) CleanupOnError() {
for _, ivf := range sf.ivfs {
ivf.CleanupOnError()
}
- for _, ap := range sf.appendable {
- ap.CleanupOnError()
- }
}
func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
@@ -637,6 +622,12 @@ func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
g.SetLimit(a.collateAndBuildWorkers)
for _, d := range a.d {
d := d
+ dc := d.BeginFilesRo()
+ firstStepNotInFiles := dc.FirstStepNotInFiles()
+ dc.Close()
+ if step < firstStepNotInFiles {
+ continue
+ }
a.wg.Add(1)
g.Go(func() error {
@@ -673,6 +664,13 @@ func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
// indices are built concurrently
for _, ii := range a.iis {
ii := ii
+ dc := ii.BeginFilesRo()
+ firstStepNotInFiles := dc.FirstStepNotInFiles()
+ dc.Close()
+ if step < firstStepNotInFiles {
+ continue
+ }
+
a.wg.Add(1)
g.Go(func() error {
defer a.wg.Done()
@@ -706,32 +704,6 @@ func (a *Aggregator) buildFiles(ctx context.Context, step uint64) error {
return nil
})
}
-
- for name, ap := range a.ap {
- name := name
- ap := ap
- a.wg.Add(1)
- g.Go(func() error {
- defer a.wg.Done()
-
- var collation AppendableCollation
- err := a.db.View(ctx, func(tx kv.Tx) (err error) {
- collation, err = ap.collate(ctx, step, tx)
- return err
- })
- if err != nil {
- return fmt.Errorf("index collation %q has failed: %w", ap.filenameBase, err)
- }
- sf, err := ap.buildFiles(ctx, step, collation, a.ps)
- if err != nil {
- sf.CleanupOnError()
- return err
- }
- static.appendable[name] = sf
- return nil
- })
- }
-
if err := g.Wait(); err != nil {
static.CleanupOnError()
return fmt.Errorf("domain collate-build: %w", err)
@@ -773,7 +745,41 @@ Loop:
return nil
}
-func (a *Aggregator) mergeLoopStep(ctx context.Context) (somethingDone bool, err error) {
+// [from, to)
+func (a *Aggregator) BuildFiles2(ctx context.Context, fromStep, toStep uint64) error {
+ if ok := a.buildingFiles.CompareAndSwap(false, true); !ok {
+ return nil
+ }
+ go func() {
+ defer a.buildingFiles.Store(false)
+ if toStep > fromStep {
+ log.Info("[agg] build", "fromStep", fromStep, "toStep", toStep)
+ }
+ for step := fromStep; step < toStep; step++ { //`step` must be fully-written - means `step+1` records must be visible
+ if err := a.buildFiles(ctx, step); err != nil {
+ if errors.Is(err, context.Canceled) || errors.Is(err, common2.ErrStopped) {
+ panic(err)
+ }
+ a.logger.Warn("[snapshots] buildFilesInBackground", "err", err)
+ panic(err)
+ }
+ }
+
+ if ok := a.mergingFiles.CompareAndSwap(false, true); !ok {
+ return
+ }
+ go func() {
+ defer a.mergingFiles.Store(false)
+ if err := a.MergeLoop(ctx); err != nil {
+ panic(err)
+ }
+ }()
+ }()
+
+ return nil
+}
+
+func (a *Aggregator) mergeLoopStep(ctx context.Context, toTxNum uint64) (somethingDone bool, err error) {
a.logger.Debug("[agg] merge", "collate_workers", a.collateAndBuildWorkers, "merge_workers", a.mergeWorkers, "compress_workers", a.d[kv.AccountsDomain].compressCfg.Workers)
aggTx := a.BeginFilesRo()
@@ -783,7 +789,7 @@ func (a *Aggregator) mergeLoopStep(ctx context.Context) (somethingDone bool, err
closeAll := true
maxSpan := StepsInColdFile * a.StepSize()
- r := aggTx.findMergeRange(a.visibleFilesMinimaxTxNum.Load(), maxSpan)
+ r := aggTx.findMergeRange(toTxNum, maxSpan)
if !r.any() {
return false, nil
}
@@ -818,7 +824,7 @@ func (a *Aggregator) mergeLoopStep(ctx context.Context) (somethingDone bool, err
func (a *Aggregator) MergeLoop(ctx context.Context) error {
for {
- somethingMerged, err := a.mergeLoopStep(ctx)
+ somethingMerged, err := a.mergeLoopStep(ctx, a.visibleFilesMinimaxTxNum.Load())
if err != nil {
return err
}
@@ -1083,9 +1089,8 @@ func (a *Aggregator) StepsRangeInDBAsStr(tx kv.Tx) string {
}
type AggregatorPruneStat struct {
- Domains map[string]*DomainPruneStat
- Indices map[string]*InvertedIndexPruneStat
- Appendable map[string]*AppendablePruneStat
+ Domains map[string]*DomainPruneStat
+ Indices map[string]*InvertedIndexPruneStat
}
func (as *AggregatorPruneStat) PrunedNothing() bool {
@@ -1103,7 +1108,7 @@ func (as *AggregatorPruneStat) PrunedNothing() bool {
}
func newAggregatorPruneStat() *AggregatorPruneStat {
- return &AggregatorPruneStat{Domains: make(map[string]*DomainPruneStat), Indices: make(map[string]*InvertedIndexPruneStat), Appendable: make(map[string]*AppendablePruneStat)}
+ return &AggregatorPruneStat{Domains: make(map[string]*DomainPruneStat), Indices: make(map[string]*InvertedIndexPruneStat)}
}
func (as *AggregatorPruneStat) String() string {
@@ -1235,14 +1240,6 @@ func (ac *AggregatorRoTx) Prune(ctx context.Context, tx kv.RwTx, limit uint64, l
aggStat.Indices[ac.iis[i].ii.filenameBase] = stats[i]
}
- for i := 0; i < int(kv.AppendableLen); i++ {
- var err error
- aggStat.Appendable[ac.appendable[i].ap.filenameBase], err = ac.appendable[i].Prune(ctx, tx, txFrom, txTo, limit, logEvery, false, nil)
- if err != nil {
- return nil, err
- }
- }
-
return aggStat, nil
}
@@ -1370,12 +1367,6 @@ func (a *Aggregator) recalcVisibleFiles(toTxNum uint64) {
}
ii.reCalcVisibleFiles(toTxNum)
}
- for _, ap := range a.ap {
- if ap == nil {
- continue
- }
- ap.reCalcVisibleFiles(toTxNum)
- }
}
func (a *Aggregator) recalcVisibleFilesMinimaxTxNum() {
@@ -1387,7 +1378,6 @@ func (a *Aggregator) recalcVisibleFilesMinimaxTxNum() {
type RangesV3 struct {
domain [kv.DomainLen]DomainRanges
invertedIndex [kv.StandaloneIdxLen]*MergeRange
- appendable [kv.AppendableLen]*MergeRange
}
func (r RangesV3) String() string {
@@ -1404,11 +1394,6 @@ func (r RangesV3) String() string {
ss = append(ss, mr.String(kv.InvertedIdxPos(p).String(), aggStep))
}
}
- for p, mr := range r.appendable {
- if mr != nil && mr.needMerge {
- ss = append(ss, mr.String(kv.Appendable(p).String(), aggStep))
- }
- }
return strings.Join(ss, ", ")
}
@@ -1423,11 +1408,6 @@ func (r RangesV3) any() bool {
return true
}
}
- for _, ap := range r.appendable {
- if ap != nil && ap.needMerge {
- return true
- }
- }
return false
}
@@ -1482,9 +1462,7 @@ func (ac *AggregatorRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) RangesV3 {
for id, ii := range ac.iis {
r.invertedIndex[id] = ii.findMergeRange(maxEndTxNum, maxSpan)
}
- for id, ap := range ac.appendable {
- r.appendable[id] = ap.findMergeRange(maxEndTxNum, maxSpan)
- }
+
//log.Info(fmt.Sprintf("findMergeRange(%d, %d)=%s\n", maxEndTxNum/ac.a.aggregationStep, maxSpan/ac.a.aggregationStep, r))
return r
}
@@ -1561,19 +1539,6 @@ func (ac *AggregatorRoTx) mergeFiles(ctx context.Context, files SelectedStaticFi
})
}
- for id, rng := range r.appendable {
- if !rng.needMerge {
- continue
- }
- id := id
- rng := rng
- g.Go(func() error {
- var err error
- mf.appendable[id], err = ac.appendable[id].mergeFiles(ctx, files.appendable[id], rng.from, rng.to, ac.a.ps)
- return err
- })
- }
-
err := g.Wait()
if err == nil {
closeFiles = false
@@ -1596,9 +1561,6 @@ func (a *Aggregator) integrateMergedDirtyFiles(outs SelectedStaticFilesV3, in Me
ii.integrateMergedDirtyFiles(outs.ii[id], in.iis[id])
}
- for id, ap := range a.ap {
- ap.integrateMergedDirtyFiles(outs.appendable[id], in.appendable[id])
- }
}
func (a *Aggregator) cleanAfterMerge(in MergedFilesV3) {
@@ -1614,9 +1576,6 @@ func (a *Aggregator) cleanAfterMerge(in MergedFilesV3) {
for id, ii := range at.iis {
ii.cleanAfterMerge(in.iis[id])
}
- for id, ap := range at.appendable {
- ap.cleanAfterMerge(in.appendable[id])
- }
}
// KeepRecentTxnsOfHistoriesWithDisabledSnapshots limits amount of recent transactions protected from prune in domains history.
@@ -1738,6 +1697,8 @@ func (ac *AggregatorRoTx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs
return ac.d[kv.CodeDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
case kv.CommitmentHistoryIdx:
return ac.d[kv.StorageDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
+ case kv.ReceiptHistoryIdx:
+ return ac.d[kv.ReceiptDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
//case kv.GasUsedHistoryIdx:
// return ac.d[kv.GasUsedDomain].ht.IdxRange(k, fromTs, toTs, asc, limit, tx)
case kv.LogTopicIdx:
@@ -1772,6 +1733,8 @@ func (ac *AggregatorRoTx) HistorySeek(name kv.History, key []byte, ts uint64, tx
return ac.d[kv.CodeDomain].ht.HistorySeek(key, ts, tx)
case kv.CommitmentHistory:
return ac.d[kv.CommitmentDomain].ht.HistorySeek(key, ts, tx)
+ case kv.ReceiptHistory:
+ return ac.d[kv.ReceiptDomain].ht.HistorySeek(key, ts, tx)
//case kv.GasUsedHistory:
// return ac.d[kv.GasUsedDomain].ht.HistorySeek(key, ts, tx)
default:
@@ -1808,10 +1771,9 @@ func (ac *AggregatorRoTx) HistoryRange(name kv.History, fromTs, toTs int, asc or
// - user will not see "partial writes" or "new files appearance"
// - last reader removing garbage files inside `Close` method
type AggregatorRoTx struct {
- a *Aggregator
- d [kv.DomainLen]*DomainRoTx
- iis [kv.StandaloneIdxLen]*InvertedIndexRoTx
- appendable [kv.AppendableLen]*AppendableRoTx
+ a *Aggregator
+ d [kv.DomainLen]*DomainRoTx
+ iis [kv.StandaloneIdxLen]*InvertedIndexRoTx
id uint64 // auto-increment id of ctx for logs
_leakID uint64 // set only if TRACE_AGG=true
@@ -1831,9 +1793,6 @@ func (a *Aggregator) BeginFilesRo() *AggregatorRoTx {
for id, d := range a.d {
ac.d[id] = d.BeginFilesRo()
}
- for id, ap := range a.ap {
- ac.appendable[id] = ap.BeginFilesRo()
- }
a.visibleFilesLock.RUnlock()
return ac
@@ -1850,8 +1809,7 @@ func (ac *AggregatorRoTx) DomainRangeLatest(tx kv.Tx, domain kv.Domain, from, to
}
func (ac *AggregatorRoTx) DomainGetAsOf(tx kv.Tx, name kv.Domain, key []byte, ts uint64) (v []byte, ok bool, err error) {
- v, err = ac.d[name].GetAsOf(key, ts, tx)
- return v, v != nil, err
+ return ac.d[name].GetAsOf(key, ts, tx)
}
func (ac *AggregatorRoTx) GetLatest(domain kv.Domain, k, k2 []byte, tx kv.Tx) (v []byte, step uint64, ok bool, err error) {
return ac.d[domain].GetLatest(k, k2, tx)
@@ -1894,6 +1852,11 @@ func (ac *AggregatorRoTx) DebugEFAllValuesAreInRange(ctx context.Context, name k
if err != nil {
return err
}
+ case kv.ReceiptHistoryIdx:
+ err := ac.d[kv.ReceiptDomain].ht.iit.DebugEFAllValuesAreInRange(ctx, failFast, fromStep)
+ if err != nil {
+ return err
+ }
//case kv.GasUsedHistoryIdx:
// err := ac.d[kv.GasUsedDomain].ht.iit.DebugEFAllValuesAreInRange(ctx)
// if err != nil {
@@ -1927,14 +1890,6 @@ func (ac *AggregatorRoTx) DebugEFAllValuesAreInRange(ctx context.Context, name k
// --- Domain part END ---
-func (ac *AggregatorRoTx) AppendableGet(name kv.Appendable, ts kv.TxnId, tx kv.Tx) (v []byte, ok bool, err error) {
- return ac.appendable[name].Get(ts, tx)
-}
-
-func (ac *AggregatorRoTx) AppendablePut(name kv.Appendable, txnID kv.TxnId, v []byte, tx kv.RwTx) (err error) {
- return ac.appendable[name].Append(txnID, v, tx)
-}
-
func (ac *AggregatorRoTx) Close() {
if ac == nil || ac.a == nil { // invariant: it's safe to call Close multiple times
return
@@ -1950,9 +1905,6 @@ func (ac *AggregatorRoTx) Close() {
for _, ii := range ac.iis {
ii.Close()
}
- for _, ap := range ac.appendable {
- ap.Close()
- }
}
// Inverted index tables only
diff --git a/erigon-lib/state/aggregator_bench_test.go b/erigon-lib/state/aggregator_bench_test.go
index 7c1be6f6f4b..894563f9eef 100644
--- a/erigon-lib/state/aggregator_bench_test.go
+++ b/erigon-lib/state/aggregator_bench_test.go
@@ -48,7 +48,7 @@ func testDbAndAggregatorBench(b *testing.B, aggStep uint64) (kv.RwDB, *Aggregato
return kv.ChaindataTablesCfg
}).MustOpen()
b.Cleanup(db.Close)
- agg, err := NewAggregator(context.Background(), dirs, aggStep, db, nil, logger)
+ agg, err := NewAggregator(context.Background(), dirs, aggStep, db, logger)
require.NoError(b, err)
b.Cleanup(agg.Close)
return db, agg
diff --git a/erigon-lib/state/aggregator_files.go b/erigon-lib/state/aggregator_files.go
index 9b5ade8b7fb..679b0dae6b6 100644
--- a/erigon-lib/state/aggregator_files.go
+++ b/erigon-lib/state/aggregator_files.go
@@ -27,11 +27,10 @@ import (
)
type SelectedStaticFilesV3 struct {
- d [kv.DomainLen][]*filesItem
- dHist [kv.DomainLen][]*filesItem
- dIdx [kv.DomainLen][]*filesItem
- ii [kv.StandaloneIdxLen][]*filesItem
- appendable [kv.AppendableLen][]*filesItem
+ d [kv.DomainLen][]*filesItem
+ dHist [kv.DomainLen][]*filesItem
+ dIdx [kv.DomainLen][]*filesItem
+ ii [kv.StandaloneIdxLen][]*filesItem
}
func (sf SelectedStaticFilesV3) Close() {
@@ -70,21 +69,14 @@ func (ac *AggregatorRoTx) staticFilesInRange(r RangesV3) (sf SelectedStaticFiles
}
sf.ii[id] = ac.iis[id].staticFilesInRange(rng.from, rng.to)
}
- for id, rng := range r.appendable {
- if rng == nil || !rng.needMerge {
- continue
- }
- sf.appendable[id] = ac.appendable[id].staticFilesInRange(rng.from, rng.to)
- }
return sf, err
}
type MergedFilesV3 struct {
- d [kv.DomainLen]*filesItem
- dHist [kv.DomainLen]*filesItem
- dIdx [kv.DomainLen]*filesItem
- iis [kv.StandaloneIdxLen]*filesItem
- appendable [kv.AppendableLen]*filesItem
+ d [kv.DomainLen]*filesItem
+ dHist [kv.DomainLen]*filesItem
+ dIdx [kv.DomainLen]*filesItem
+ iis [kv.StandaloneIdxLen]*filesItem
}
func (mf MergedFilesV3) FrozenList() (frozen []string) {
diff --git a/erigon-lib/state/aggregator_test.go b/erigon-lib/state/aggregator_test.go
index 54aa5861886..f2e1d3c7a9c 100644
--- a/erigon-lib/state/aggregator_test.go
+++ b/erigon-lib/state/aggregator_test.go
@@ -34,10 +34,6 @@ import (
"github.com/erigontech/erigon-lib/common/background"
"github.com/c2h5oh/datasize"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
- "go.uber.org/mock/gomock"
-
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/datadir"
"github.com/erigontech/erigon-lib/common/length"
@@ -50,6 +46,8 @@ import (
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/seg"
"github.com/erigontech/erigon-lib/types"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
)
func TestAggregatorV3_Merge(t *testing.T) {
@@ -364,15 +362,8 @@ func aggregatorV3_RestartOnDatadir(t *testing.T, rc runCfg) {
agg.Close()
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- canonicalsReader := NewMockCanonicalsReader(ctrl)
- canonicalsReader.EXPECT().TxnIdsOfCanonicalBlocks(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
- Return(stream.EmptyU64, nil).
- AnyTimes()
-
// Start another aggregator on same datadir
- anotherAgg, err := NewAggregator(context.Background(), agg.dirs, aggStep, db, canonicalsReader, logger)
+ anotherAgg, err := NewAggregator(context.Background(), agg.dirs, aggStep, db, logger)
require.NoError(t, err)
defer anotherAgg.Close()
@@ -835,18 +826,7 @@ func TestAggregatorV3_RestartOnFiles(t *testing.T) {
}).MustOpen()
t.Cleanup(newDb.Close)
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- canonicalsReader := NewMockCanonicalsReader(ctrl)
- canonicalsReader.EXPECT().TxnIdsOfCanonicalBlocks(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
- DoAndReturn(func(tx kv.Tx, txFrom, txTo int, by order.By, i3 int) (stream.U64, error) {
- currentStep := uint64(txFrom) / aggStep
- canonicalBlockTxNum := aggStep*currentStep + 1
- it := stream.Array[uint64]([]uint64{canonicalBlockTxNum})
- return it, nil
- }).
- AnyTimes()
- newAgg, err := NewAggregator(context.Background(), agg.dirs, aggStep, newDb, canonicalsReader, logger)
+ newAgg, err := NewAggregator(context.Background(), agg.dirs, aggStep, newDb, logger)
require.NoError(t, err)
require.NoError(t, newAgg.OpenFolder())
@@ -1116,14 +1096,7 @@ func testDbAndAggregatorv3(t *testing.T, aggStep uint64) (kv.RwDB, *Aggregator)
}).MustOpen()
t.Cleanup(db.Close)
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- canonicalsReader := NewMockCanonicalsReader(ctrl)
- canonicalsReader.EXPECT().TxnIdsOfCanonicalBlocks(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
- Return(stream.EmptyU64, nil).
- AnyTimes()
-
- agg, err := NewAggregator(context.Background(), dirs, aggStep, db, canonicalsReader, logger)
+ agg, err := NewAggregator(context.Background(), dirs, aggStep, db, logger)
require.NoError(err)
t.Cleanup(agg.Close)
err = agg.OpenFolder()
diff --git a/erigon-lib/state/appendable.go b/erigon-lib/state/appendable.go
deleted file mode 100644
index 9fa11665421..00000000000
--- a/erigon-lib/state/appendable.go
+++ /dev/null
@@ -1,828 +0,0 @@
-// Copyright 2022 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see .
-
-package state
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "math"
- "path"
- "path/filepath"
- "reflect"
- "regexp"
- "strconv"
- "sync"
- "time"
-
- "github.com/erigontech/erigon-lib/common"
-
- btree2 "github.com/tidwall/btree"
- "golang.org/x/sync/errgroup"
-
- "github.com/erigontech/erigon-lib/common/assert"
- "github.com/erigontech/erigon-lib/common/hexutility"
- "github.com/erigontech/erigon-lib/kv/order"
- "github.com/erigontech/erigon-lib/log/v3"
- "github.com/erigontech/erigon-lib/seg"
-
- "github.com/erigontech/erigon-lib/common/background"
- "github.com/erigontech/erigon-lib/common/datadir"
- "github.com/erigontech/erigon-lib/common/dir"
- "github.com/erigontech/erigon-lib/etl"
- "github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/recsplit"
-)
-
-// Appendable - data type allows store data for different blockchain forks.
-// - It assign new AutoIncrementID to each entity. Example: receipts, logs.
-// - Each record key has `AutoIncrementID` format.
-// - Use external table to refer it.
-// - Only data which belongs to `canonical` block moving from DB to files.
-// - It doesn't need Unwind - because `AutoIncrementID` always-growing
-type Appendable struct {
- cfg AppendableCfg
-
- // dirtyFiles - list of ALL files - including: un-indexed-yet, garbage, merged-into-bigger-one, ...
- // thread-safe, but maybe need 1 RWLock for all trees in Aggregator
- //
- // _visibleFiles derivative from field `file`, but without garbage:
- // - no files with `canDelete=true`
- // - no overlaps
- // - no un-indexed files (`power-off` may happen between .ef and .efi creation)
- //
- // BeginRo() using _visibleFiles in zero-copy way
- dirtyFiles *btree2.BTreeG[*filesItem]
-
- // _visibleFiles - underscore in name means: don't use this field directly, use BeginFilesRo()
- // underlying array is immutable - means it's ready for zero-copy use
- _visibleFiles []visibleFile
-
- table string // txnNum_u64 -> key (k+auto_increment)
- filenameBase string
- aggregationStep uint64
-
- //TODO: re-visit this check - maybe we don't need it. It's abot kill in the middle of merge
- integrityCheck func(fromStep, toStep uint64) bool
-
- // fields for history write
- logger log.Logger
-
- noFsync bool // fsync is enabled by default, but tests can manually disable
-
- compressCfg seg.Cfg
- compression seg.FileCompression
- indexList idxList
-}
-
-type AppendableCfg struct {
- Salt *uint32
- Dirs datadir.Dirs
- DB kv.RoDB // global db pointer. mostly for background warmup.
-
- iters CanonicalsReader
-}
-
-func NewAppendable(cfg AppendableCfg, aggregationStep uint64, filenameBase, table string, integrityCheck func(fromStep uint64, toStep uint64) bool, logger log.Logger) (*Appendable, error) {
- if cfg.Dirs.SnapHistory == "" {
- panic("empty `dirs` varialbe")
- }
- compressCfg := seg.DefaultCfg
- compressCfg.Workers = 1
- ap := Appendable{
- cfg: cfg,
- dirtyFiles: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}),
- aggregationStep: aggregationStep,
- filenameBase: filenameBase,
- table: table,
- compressCfg: compressCfg,
- compression: seg.CompressNone, //CompressKeys | CompressVals,
-
- integrityCheck: integrityCheck,
- logger: logger,
- }
- ap.indexList = withHashMap
- ap._visibleFiles = []visibleFile{}
-
- return &ap, nil
-}
-
-func (ap *Appendable) accessorFilePath(fromStep, toStep uint64) string {
- return filepath.Join(ap.cfg.Dirs.SnapAccessors, fmt.Sprintf("v1-%s.%d-%d.api", ap.filenameBase, fromStep, toStep))
-}
-func (ap *Appendable) apFilePath(fromStep, toStep uint64) string {
- return filepath.Join(ap.cfg.Dirs.SnapHistory, fmt.Sprintf("v1-%s.%d-%d.ap", ap.filenameBase, fromStep, toStep))
-}
-
-func (ap *Appendable) fileNamesOnDisk() ([]string, error) {
- return filesFromDir(ap.cfg.Dirs.SnapHistory)
-}
-
-func (ap *Appendable) openList(fNames []string, readonly bool) error {
- ap.closeWhatNotInList(fNames)
- ap.scanDirtyFiles(fNames)
- if err := ap.openDirtyFiles(); err != nil {
- return fmt.Errorf("NewHistory.openDirtyFiles: %w, %s", err, ap.filenameBase)
- }
- _ = readonly // for future safety features. RPCDaemon must not delte files
- return nil
-}
-
-func (ap *Appendable) openFolder(readonly bool) error {
- files, err := ap.fileNamesOnDisk()
- if err != nil {
- return err
- }
- return ap.openList(files, readonly)
-}
-
-func (ap *Appendable) scanDirtyFiles(fileNames []string) (garbageFiles []*filesItem) {
- re := regexp.MustCompile("^v([0-9]+)-" + ap.filenameBase + ".([0-9]+)-([0-9]+).ap$")
- var err error
- for _, name := range fileNames {
- subs := re.FindStringSubmatch(name)
- if len(subs) != 4 {
- if len(subs) != 0 {
- ap.logger.Warn("File ignored by inverted index scan, more than 3 submatches", "name", name, "submatches", len(subs))
- }
- continue
- }
- var startStep, endStep uint64
- if startStep, err = strconv.ParseUint(subs[2], 10, 64); err != nil {
- ap.logger.Warn("File ignored by inverted index scan, parsing startTxNum", "error", err, "name", name)
- continue
- }
- if endStep, err = strconv.ParseUint(subs[3], 10, 64); err != nil {
- ap.logger.Warn("File ignored by inverted index scan, parsing endTxNum", "error", err, "name", name)
- continue
- }
- if startStep > endStep {
- ap.logger.Warn("File ignored by inverted index scan, startTxNum > endTxNum", "name", name)
- continue
- }
-
- startTxNum, endTxNum := startStep*ap.aggregationStep, endStep*ap.aggregationStep
- var newFile = newFilesItem(startTxNum, endTxNum, ap.aggregationStep)
-
- if ap.integrityCheck != nil && !ap.integrityCheck(startStep, endStep) {
- continue
- }
-
- if _, has := ap.dirtyFiles.Get(newFile); has {
- continue
- }
-
- ap.dirtyFiles.Set(newFile)
- }
- return garbageFiles
-}
-
-func (ap *Appendable) reCalcVisibleFiles(toTxNum uint64) {
- ap._visibleFiles = calcVisibleFiles(ap.dirtyFiles, ap.indexList, false, toTxNum)
-}
-
-func (ap *Appendable) missedAccessors() (l []*filesItem) {
- ap.dirtyFiles.Walk(func(items []*filesItem) bool {
- for _, item := range items {
- fromStep, toStep := item.startTxNum/ap.aggregationStep, item.endTxNum/ap.aggregationStep
- exists, err := dir.FileExist(ap.accessorFilePath(fromStep, toStep))
- if err != nil {
- panic(err)
- }
- if !exists {
- l = append(l, item)
- }
- }
- return true
- })
- return l
-}
-
-func (ap *Appendable) buildAccessor(ctx context.Context, fromStep, toStep uint64, d *seg.Decompressor, ps *background.ProgressSet) error {
- if d == nil {
- return fmt.Errorf("buildAccessor: passed item with nil decompressor %s %d-%d", ap.filenameBase, fromStep, toStep)
- }
- idxPath := ap.accessorFilePath(fromStep, toStep)
- cfg := recsplit.RecSplitArgs{
- Enums: true,
-
- BucketSize: 2000,
- LeafSize: 8,
- TmpDir: ap.cfg.Dirs.Tmp,
- IndexFile: idxPath,
- Salt: ap.cfg.Salt,
- NoFsync: ap.noFsync,
-
- KeyCount: d.Count(),
- }
- _, fileName := filepath.Split(idxPath)
- count := d.Count()
- p := ps.AddNew(fileName, uint64(count))
- defer ps.Delete(p)
-
- num := make([]byte, binary.MaxVarintLen64)
- return buildSimpleMapAccessor(ctx, d, ap.compression, cfg, ap.logger, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error {
- if p != nil {
- p.Processed.Add(1)
- }
- n := binary.PutUvarint(num, i)
- if err := idx.AddKey(num[:n], offset); err != nil {
- return err
- }
- return nil
- })
-}
-
-func (ap *Appendable) BuildMissedAccessors(ctx context.Context, g *errgroup.Group, ps *background.ProgressSet) {
- for _, item := range ap.missedAccessors() {
- item := item
- g.Go(func() error {
- fromStep, toStep := item.startTxNum/ap.aggregationStep, item.endTxNum/ap.aggregationStep
- return ap.buildAccessor(ctx, fromStep, toStep, item.decompressor, ps)
- })
- }
-}
-
-func (ap *Appendable) openDirtyFiles() error {
- var invalidFileItems []*filesItem
- invalidFileItemsLock := sync.Mutex{}
- ap.dirtyFiles.Walk(func(items []*filesItem) bool {
- for _, item := range items {
- item := item
- fromStep, toStep := item.startTxNum/ap.aggregationStep, item.endTxNum/ap.aggregationStep
- if item.decompressor == nil {
- fPath := ap.apFilePath(fromStep, toStep)
- exists, err := dir.FileExist(fPath)
- if err != nil {
- _, fName := filepath.Split(fPath)
- ap.logger.Debug("[agg] Appendable.openDirtyFiles", "err", err, "f", fName)
- invalidFileItemsLock.Lock()
- invalidFileItems = append(invalidFileItems, item)
- invalidFileItemsLock.Unlock()
- continue
- }
- if !exists {
- _, fName := filepath.Split(fPath)
- ap.logger.Debug("[agg] Appendable.openDirtyFiles: file does not exists", "f", fName)
- invalidFileItemsLock.Lock()
- invalidFileItems = append(invalidFileItems, item)
- invalidFileItemsLock.Unlock()
- continue
- }
-
- if item.decompressor, err = seg.NewDecompressor(fPath); err != nil {
- _, fName := filepath.Split(fPath)
- if errors.Is(err, &seg.ErrCompressedFileCorrupted{}) {
- ap.logger.Debug("[agg] Appendable.openDirtyFiles", "err", err, "f", fName)
- } else {
- ap.logger.Warn("[agg] Appendable.openDirtyFiles", "err", err, "f", fName)
- }
- invalidFileItemsLock.Lock()
- invalidFileItems = append(invalidFileItems, item)
- invalidFileItemsLock.Unlock()
- // don't interrupt on error. other files may be good. but skip indices open.
- continue
- }
- }
-
- if item.index == nil {
- fPath := ap.accessorFilePath(fromStep, toStep)
- exists, err := dir.FileExist(fPath)
- if err != nil {
- _, fName := filepath.Split(fPath)
- ap.logger.Warn("[agg] Appendable.openDirtyFiles", "err", err, "f", fName)
- }
- if exists {
- if item.index, err = recsplit.OpenIndex(fPath); err != nil {
- _, fName := filepath.Split(fPath)
- ap.logger.Warn("[agg] Appendable.openDirtyFiles", "err", err, "f", fName)
- // don't interrupt on error. other files may be good
- }
- }
- }
- }
-
- return true
- })
- for _, item := range invalidFileItems {
- item.closeFiles()
- ap.dirtyFiles.Delete(item)
- }
-
- return nil
-}
-
-func (ap *Appendable) closeWhatNotInList(fNames []string) {
- var toClose []*filesItem
- ap.dirtyFiles.Walk(func(items []*filesItem) bool {
- Loop1:
- for _, item := range items {
- for _, protectName := range fNames {
- if item.decompressor != nil && item.decompressor.FileName() == protectName {
- continue Loop1
- }
- }
- toClose = append(toClose, item)
- }
- return true
- })
- for _, item := range toClose {
- item.closeFiles()
- ap.dirtyFiles.Delete(item)
- }
-}
-
-func (ap *Appendable) Close() {
- if ap == nil {
- return
- }
- ap.closeWhatNotInList([]string{})
-}
-
-// DisableFsync - just for tests
-func (ap *Appendable) DisableFsync() { ap.noFsync = true }
-
-func (tx *AppendableRoTx) Files() (res []string) {
- for _, item := range tx.files {
- if item.src.decompressor != nil {
- res = append(res, item.src.decompressor.FileName())
- }
- }
- return res
-}
-
-func (tx *AppendableRoTx) Get(txnID kv.TxnId, dbtx kv.Tx) (v []byte, ok bool, err error) {
- v, ok = tx.getFromFiles(uint64(txnID))
- if ok {
- return v, true, nil
- }
- return tx.ap.getFromDBByTs(uint64(txnID), dbtx)
-}
-func (tx *AppendableRoTx) Append(txnID kv.TxnId, v []byte, dbtx kv.RwTx) error {
- return dbtx.Put(tx.ap.table, hexutility.EncodeTs(uint64(txnID)), v)
-}
-
-func (tx *AppendableRoTx) getFromFiles(ts uint64) (v []byte, ok bool) {
- i, ok := tx.fileByTS(ts)
- if !ok {
- return nil, false
- }
-
- baseTxNum := tx.files[i].startTxNum // we are very lucky: each txNum has 1 appendable
- lookup := ts - baseTxNum
- accessor := tx.files[i].src.index
- if accessor.KeyCount() <= lookup {
- return nil, false
- }
- offset := accessor.OrdinalLookup(lookup)
- g := tx.statelessGetter(i)
- g.Reset(offset)
- k, _ := g.Next(nil)
- return k, true
-}
-
-func (tx *AppendableRoTx) fileByTS(ts uint64) (i int, ok bool) {
- for i = 0; i < len(tx.files); i++ {
- if tx.files[i].hasTS(ts) {
- return i, true
- }
- }
- return 0, false
-}
-
-func (ap *Appendable) getFromDBByTs(ts uint64, dbtx kv.Tx) ([]byte, bool, error) {
- return ap.getFromDB(hexutility.EncodeTs(ts), dbtx)
-}
-func (ap *Appendable) getFromDB(k []byte, dbtx kv.Tx) ([]byte, bool, error) {
- v, err := dbtx.GetOne(ap.table, k)
- if err != nil {
- return nil, false, err
- }
- return v, v != nil, err
-}
-
-func (ap *Appendable) maxTxNumInDB(dbtx kv.Tx) (txNum uint64, err error) { //nolint
- first, err := kv.LastKey(dbtx, ap.table)
- if err != nil {
- return 0, err
- }
- if len(first) == 0 {
- return 0, nil
- }
- return binary.BigEndian.Uint64(first), nil
-}
-
-// Add - !NotThreadSafe. Must use WalRLock/BatchHistoryWriteEnd
-func (w *appendableBufferedWriter) Append(ts kv.TxnId, v []byte) error {
- if w.discard {
- return nil
- }
- if err := w.tableCollector.Collect(hexutility.EncodeTs(uint64(ts)), v); err != nil {
- return err
- }
- return nil
-}
-
-func (tx *AppendableRoTx) NewWriter() *appendableBufferedWriter {
- return tx.newWriter(tx.ap.cfg.Dirs.Tmp, false)
-}
-
-type appendableBufferedWriter struct {
- tableCollector *etl.Collector
- tmpdir string
- discard bool
- filenameBase string
-
- table string
-
- aggregationStep uint64
-}
-
-func (w *appendableBufferedWriter) Flush(ctx context.Context, tx kv.RwTx) error {
- if w.discard {
- return nil
- }
- if err := w.tableCollector.Load(tx, w.table, loadFunc, etl.TransformArgs{Quit: ctx.Done()}); err != nil {
- return err
- }
- w.close()
- return nil
-}
-
-func (w *appendableBufferedWriter) close() {
- if w == nil {
- return
- }
- if w.tableCollector != nil {
- w.tableCollector.Close()
- }
-}
-
-func (tx *AppendableRoTx) newWriter(tmpdir string, discard bool) *appendableBufferedWriter {
- w := &appendableBufferedWriter{
- discard: discard,
- tmpdir: tmpdir,
- filenameBase: tx.ap.filenameBase,
- aggregationStep: tx.ap.aggregationStep,
-
- table: tx.ap.table,
- // etl collector doesn't fsync: means if have enough ram, all files produced by all collectors will be in ram
- tableCollector: etl.NewCollector("flush "+tx.ap.table, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), tx.ap.logger),
- }
- w.tableCollector.LogLvl(log.LvlTrace)
- w.tableCollector.SortAndFlushInBackground(true)
- return w
-}
-
-func (ap *Appendable) BeginFilesRo() *AppendableRoTx {
- files := ap._visibleFiles
- for i := 0; i < len(files); i++ {
- if !files[i].src.frozen {
- files[i].src.refcount.Add(1)
- }
- }
- return &AppendableRoTx{
- ap: ap,
- files: files,
- }
-}
-
-func (tx *AppendableRoTx) Close() {
- if tx.files == nil { // invariant: it's safe to call Close multiple times
- return
- }
- files := tx.files
- tx.files = nil
- for i := range files {
- src := files[i].src
- if src == nil || src.frozen {
- continue
- }
- refCnt := src.refcount.Add(-1)
- //GC: last reader responsible to remove useles files: close it and delete
- if refCnt == 0 && src.canDelete.Load() {
- if traceFileLife != "" && tx.ap.filenameBase == traceFileLife {
- tx.ap.logger.Warn("[agg.dbg] real remove at AppendableRoTx.Close", "file", src.decompressor.FileName())
- }
- src.closeFilesAndRemove()
- }
- }
-
- for _, r := range tx.readers {
- r.Close()
- }
-}
-
-type AppendableRoTx struct {
- ap *Appendable
- files visibleFiles // have no garbage (overlaps, etc...)
- getters []*seg.Reader
- readers []*recsplit.IndexReader
-}
-
-func (tx *AppendableRoTx) statelessGetter(i int) *seg.Reader {
- if tx.getters == nil {
- tx.getters = make([]*seg.Reader, len(tx.files))
- }
- r := tx.getters[i]
- if r == nil {
- g := tx.files[i].src.decompressor.MakeGetter()
- r = seg.NewReader(g, tx.ap.compression)
- tx.getters[i] = r
- }
- return r
-}
-
-func (tx *AppendableRoTx) mainTxNumInDB(dbtx kv.Tx) uint64 {
- fst, _ := kv.FirstKey(dbtx, tx.ap.table)
- if len(fst) > 0 {
- fstInDb := binary.BigEndian.Uint64(fst)
- return min(fstInDb, math.MaxUint64)
- }
- return math.MaxUint64
-}
-
-func (tx *AppendableRoTx) CanPrune(dbtx kv.Tx) bool {
- return tx.mainTxNumInDB(dbtx) < tx.files.EndTxNum()
-}
-func (tx *AppendableRoTx) canBuild(dbtx kv.Tx) (bool, error) { //nolint
- //TODO: support "keep in db" parameter
- //TODO: what if all files are pruned?
- maxTxNumInDB, err := tx.ap.maxTxNumInDB(dbtx)
- if err != nil {
- return false, err
- }
- maxStepInDB := maxTxNumInDB / tx.ap.aggregationStep
- maxStepInFiles := tx.files.EndTxNum() / tx.ap.aggregationStep
- return maxStepInFiles < maxStepInDB, nil
-}
-
-type AppendablePruneStat struct {
- MinTxNum uint64
- MaxTxNum uint64
- PruneCountTx uint64
-}
-
-func (is *AppendablePruneStat) String() string {
- if is.MinTxNum == math.MaxUint64 && is.PruneCountTx == 0 {
- return ""
- }
- return fmt.Sprintf("ap %d txs in %s-%s", is.PruneCountTx, common.PrettyCounter(is.MinTxNum), common.PrettyCounter(is.MaxTxNum))
-}
-
-func (is *AppendablePruneStat) Accumulate(other *AppendablePruneStat) {
- if other == nil {
- return
- }
- is.MinTxNum = min(is.MinTxNum, other.MinTxNum)
- is.MaxTxNum = max(is.MaxTxNum, other.MaxTxNum)
- is.PruneCountTx += other.PruneCountTx
-}
-
-// [txFrom; txTo)
-// forced - prune even if CanPrune returns false, so its true only when we do Unwind.
-func (tx *AppendableRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, txTo, limit uint64, logEvery *time.Ticker, forced bool, fn func(key []byte, txnum []byte) error) (stat *AppendablePruneStat, err error) {
- stat = &AppendablePruneStat{MinTxNum: math.MaxUint64}
- if !forced && !tx.CanPrune(rwTx) {
- return stat, nil
- }
-
- mxPruneInProgress.Inc()
- defer mxPruneInProgress.Dec()
- defer func(t time.Time) { mxPruneTookIndex.ObserveDuration(t) }(time.Now())
-
- if limit == 0 {
- limit = math.MaxUint64
- }
-
- fromID, toID, ok, err := tx.txNum2id(rwTx, txFrom, txTo)
- if err != nil {
- return nil, err
- }
- if !ok {
- panic(ok)
- }
- // [from:to)
- r, err := rwTx.Range(tx.ap.table, hexutility.EncodeTs(fromID), hexutility.EncodeTs(toID))
- if err != nil {
- return nil, err
- }
- defer r.Close()
- for r.HasNext() {
- k, _, err := r.Next()
- if err != nil {
- return nil, err
- }
- limit--
- if limit == 0 {
- break
- }
- if err = rwTx.Delete(tx.ap.table, k); err != nil {
- return nil, err
- }
- }
-
- return stat, err
-}
-func (tx *AppendableRoTx) txNum2id(rwTx kv.RwTx, txFrom, txTo uint64) (fromID, toID uint64, ok bool, err error) {
- var found1, found2 bool
- it, err := tx.ap.cfg.iters.TxnIdsOfCanonicalBlocks(rwTx, int(txFrom), -1, order.Asc, 1)
- if err != nil {
- return fromID, toID, ok, err
- }
- defer it.Close()
- if it.HasNext() {
- fromID, err = it.Next()
- if err != nil {
- return fromID, toID, ok, err
- }
- found1 = true
- }
- it.Close()
-
- it, err = tx.ap.cfg.iters.TxnIdsOfCanonicalBlocks(rwTx, int(txTo), -1, order.Asc, 1)
- if err != nil {
- return fromID, toID, ok, err
- }
- defer it.Close()
- if it.HasNext() {
- toID, err = it.Next()
- if err != nil {
- return fromID, toID, ok, err
- }
- found2 = true
- }
-
- return fromID, toID, found1 && found2, nil
-}
-
-func (ap *Appendable) collate(ctx context.Context, step uint64, roTx kv.Tx) (AppendableCollation, error) {
- stepTo := step + 1
- txFrom, txTo := step*ap.aggregationStep, stepTo*ap.aggregationStep
- start := time.Now()
- defer mxCollateTookIndex.ObserveDuration(start)
-
- var (
- coll = AppendableCollation{
- iiPath: ap.apFilePath(step, stepTo),
- }
- closeComp bool
- )
- defer func() {
- if closeComp {
- coll.Close()
- }
- }()
-
- comp, err := seg.NewCompressor(ctx, "collate "+ap.filenameBase, coll.iiPath, ap.cfg.Dirs.Tmp, ap.compressCfg, log.LvlTrace, ap.logger)
- if err != nil {
- return coll, fmt.Errorf("create %s compressor: %w", ap.filenameBase, err)
- }
- coll.writer = seg.NewWriter(comp, ap.compression)
-
- it, err := ap.cfg.iters.TxnIdsOfCanonicalBlocks(roTx, int(txFrom), int(txTo), order.Asc, -1)
- if err != nil {
- return coll, fmt.Errorf("collate %s: %w", ap.filenameBase, err)
- }
- defer it.Close()
-
- for it.HasNext() {
- k, err := it.Next()
- if err != nil {
- return coll, fmt.Errorf("collate %s: %w", ap.filenameBase, err)
- }
- v, ok, err := ap.getFromDBByTs(k, roTx)
- if err != nil {
- return coll, fmt.Errorf("collate %s: %w", ap.filenameBase, err)
- }
- if !ok {
- continue
- }
- if err = coll.writer.AddWord(v); err != nil {
- return coll, fmt.Errorf("collate %s: %w", ap.filenameBase, err)
- }
- }
-
- closeComp = false
- return coll, nil
-}
-
-func (ap *Appendable) stepsRangeInDB(tx kv.Tx) (from, to float64) {
- fst, _ := kv.FirstKey(tx, ap.table)
- if len(fst) > 0 {
- from = float64(binary.BigEndian.Uint64(fst)) / float64(ap.aggregationStep)
- }
- lst, _ := kv.LastKey(tx, ap.table)
- if len(lst) > 0 {
- to = float64(binary.BigEndian.Uint64(lst)) / float64(ap.aggregationStep)
- }
- if to == 0 {
- to = from
- }
- return from, to
-}
-
-type AppendableFiles struct {
- decomp *seg.Decompressor
- index *recsplit.Index
-}
-
-func (sf AppendableFiles) CleanupOnError() {
- if sf.decomp != nil {
- sf.decomp.Close()
- }
- if sf.index != nil {
- sf.index.Close()
- }
-}
-
-type AppendableCollation struct {
- iiPath string
- writer *seg.Writer
-}
-
-func (collation AppendableCollation) Close() {
- if collation.writer != nil {
- collation.writer.Close()
- collation.writer = nil //nolint
- }
-}
-
-// buildFiles - `step=N` means build file `[N:N+1)` which is equal to [N:N+1)
-func (ap *Appendable) buildFiles(ctx context.Context, step uint64, coll AppendableCollation, ps *background.ProgressSet) (AppendableFiles, error) {
- var (
- decomp *seg.Decompressor
- index *recsplit.Index
- err error
- )
- mxRunningFilesBuilding.Inc()
- defer mxRunningFilesBuilding.Dec()
- closeComp := true
- defer func() {
- if closeComp {
- coll.Close()
- if decomp != nil {
- decomp.Close()
- }
- if index != nil {
- index.Close()
- }
- }
- }()
-
- if assert.Enable {
- if coll.iiPath == "" && reflect.ValueOf(coll.writer).IsNil() {
- panic("assert: collation is not initialized " + ap.filenameBase)
- }
- }
-
- {
- p := ps.AddNew(path.Base(coll.iiPath), 1)
- if err = coll.writer.Compress(); err != nil {
- ps.Delete(p)
- return AppendableFiles{}, fmt.Errorf("compress %s: %w", ap.filenameBase, err)
- }
- coll.Close()
- ps.Delete(p)
- }
-
- if decomp, err = seg.NewDecompressor(coll.iiPath); err != nil {
- return AppendableFiles{}, fmt.Errorf("open %s decompressor: %w", ap.filenameBase, err)
- }
-
- if err := ap.buildAccessor(ctx, step, step+1, decomp, ps); err != nil {
- return AppendableFiles{}, fmt.Errorf("build %s api: %w", ap.filenameBase, err)
- }
- if index, err = recsplit.OpenIndex(ap.accessorFilePath(step, step+1)); err != nil {
- return AppendableFiles{}, err
- }
-
- closeComp = false
- return AppendableFiles{decomp: decomp, index: index}, nil
-}
-
-func (ap *Appendable) integrateDirtyFiles(sf AppendableFiles, txNumFrom, txNumTo uint64) {
- fi := newFilesItem(txNumFrom, txNumTo, ap.aggregationStep)
- fi.decompressor = sf.decomp
- fi.index = sf.index
- ap.dirtyFiles.Set(fi)
-}
-
-func (tx *AppendableRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, txFrom, txTo, limit uint64, logEvery *time.Ticker, forced bool, fn func(key []byte, txnum []byte) error) error {
- return nil //Appendable type is unwind-less. See docs of Appendable type.
-}
diff --git a/erigon-lib/state/appendable_test.go b/erigon-lib/state/appendable_test.go
deleted file mode 100644
index 705ada71126..00000000000
--- a/erigon-lib/state/appendable_test.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2022 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see .
-
-package state
-
-import (
- "context"
- "encoding/binary"
- "math"
- "os"
- "testing"
- "time"
-
- "go.uber.org/mock/gomock"
-
- "github.com/erigontech/erigon-lib/common/hexutility"
- "github.com/erigontech/erigon-lib/kv/order"
- "github.com/erigontech/erigon-lib/kv/stream"
- "github.com/erigontech/erigon-lib/log/v3"
-
- "github.com/stretchr/testify/require"
- btree2 "github.com/tidwall/btree"
-
- "github.com/erigontech/erigon-lib/common/background"
- "github.com/erigontech/erigon-lib/common/datadir"
- "github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/kv/mdbx"
- "github.com/erigontech/erigon-lib/seg"
-)
-
-func testDbAndAppendable(tb testing.TB, aggStep uint64, logger log.Logger) (kv.RwDB, *Appendable) {
- tb.Helper()
- dirs := datadir.New(tb.TempDir())
- table := "Appendable"
- db := mdbx.NewMDBX(logger).InMem(dirs.Chaindata).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
- return kv.TableCfg{
- table: kv.TableCfgItem{Flags: kv.DupSort},
- kv.TblPruningProgress: kv.TableCfgItem{},
- kv.HeaderCanonical: kv.TableCfgItem{},
- }
- }).MustOpen()
- tb.Cleanup(db.Close)
- salt := uint32(1)
- cfg := AppendableCfg{Salt: &salt, Dirs: dirs, DB: db}
- ii, err := NewAppendable(cfg, aggStep, "receipt", table, nil, logger)
- require.NoError(tb, err)
- ii.DisableFsync()
- tb.Cleanup(ii.Close)
- return db, ii
-}
-
-func TestAppendableCollationBuild(t *testing.T) {
- logEvery := time.NewTicker(30 * time.Second)
- defer logEvery.Stop()
- db, ii, txs := filledAppendable(t, log.New())
- ctx := context.Background()
- aggStep := uint64(16)
- steps := txs / aggStep
-
- t.Run("can see own writes", func(t *testing.T) {
- //nonbuf api can see own writes
- require := require.New(t)
-
- tx, err := db.BeginRo(ctx)
- require.NoError(err)
- defer tx.Rollback()
- ic := ii.BeginFilesRo()
- defer ic.Close()
-
- //can see own writes
- v, ok, err := ic.Get(1, tx)
- require.NoError(err)
- require.True(ok)
- require.Equal(1, int(binary.BigEndian.Uint64(v)))
-
- //never existed key
- _, ok, err = ic.Get(kv.TxnId(txs+1), tx)
- require.NoError(err)
- require.False(ok)
-
- //non-canonical key: must exist before collate+prune
- _, ok, err = ic.Get(kv.TxnId(steps+1), tx)
- require.NoError(err)
- require.True(ok)
-
- err = tx.Commit()
- require.NoError(err)
- })
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- //see only canonical records in files
- iters := NewMockCanonicalsReader(ctrl)
- iters.EXPECT().TxnIdsOfCanonicalBlocks(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
- DoAndReturn(func(tx kv.Tx, txFrom, txTo int, by order.By, i3 int) (stream.U64, error) {
- currentStep := uint64(txFrom) / aggStep
- canonicalBlockTxNum := aggStep*currentStep + 1
- it := stream.Array[uint64]([]uint64{canonicalBlockTxNum})
- return it, nil
- }).
- AnyTimes()
- ii.cfg.iters = iters
-
- mergeAppendable(t, db, ii, txs)
-
- t.Run("read after collate and prune", func(t *testing.T) {
- require := require.New(t)
-
- ic := ii.BeginFilesRo()
- defer ic.Close()
-
- tx, err := db.BeginRo(ctx)
- require.NoError(err)
- defer tx.Rollback()
-
- checkAppendableGet(t, tx, ic, txs)
- })
-
- t.Run("scan files", func(t *testing.T) {
- require := require.New(t)
-
- require.Equal(5, ii.dirtyFiles.Len())
- require.Equal(5, len(ii._visibleFiles))
-
- // Recreate to scan the files
- ii, err := NewAppendable(ii.cfg, ii.aggregationStep, ii.filenameBase, ii.table, nil, log.New())
- require.NoError(err)
- defer ii.Close()
- err = ii.openFolder(true)
- require.NoError(err)
- require.Equal(5, ii.dirtyFiles.Len())
- require.Equal(0, len(ii._visibleFiles))
- ii.reCalcVisibleFiles(ii.dirtyFilesEndTxNumMinimax())
- require.Equal(5, len(ii._visibleFiles))
-
- ic := ii.BeginFilesRo()
- defer ic.Close()
-
- require.Equal(5, len(ic.files))
-
- tx, err := db.BeginRo(ctx)
- require.NoError(err)
- defer tx.Rollback()
-
- checkAppendableGet(t, tx, ic, txs)
- })
-
- t.Run("open_folder_can_handle_broken_files", func(t *testing.T) {
- require := require.New(t)
-
- list := ii._visibleFiles
- require.NotEmpty(list)
- ff := list[len(list)-1]
- fn := ff.src.decompressor.FilePath()
- ii.Close()
-
- err := os.Remove(fn)
- require.NoError(err)
- err = os.WriteFile(fn, make([]byte, 33), 0644)
- require.NoError(err)
-
- err = ii.openFolder(true)
- require.NoError(err)
- ii.Close()
- })
-
-}
-
-func filledAppendable(tb testing.TB, logger log.Logger) (kv.RwDB, *Appendable, uint64) {
- tb.Helper()
- return filledAppendableOfSize(tb, uint64(1000), 16, logger)
-}
-
-func filledAppendableOfSize(tb testing.TB, txs, aggStep uint64, logger log.Logger) (kv.RwDB, *Appendable, uint64) {
- tb.Helper()
- db, ii := testDbAndAppendable(tb, aggStep, logger)
- ctx, require := context.Background(), require.New(tb)
- tx, err := db.BeginRw(ctx)
- require.NoError(err)
- defer tx.Rollback()
- ic := ii.BeginFilesRo()
- defer ic.Close()
-
- for i := uint64(0); i < txs; i++ {
- err = ic.Append(kv.TxnId(i), hexutility.EncodeTs(i), tx)
- require.NoError(err)
- }
- err = tx.Commit()
- require.NoError(err)
- return db, ii, txs
-}
-
-func checkAppendableGet(t *testing.T, dbtx kv.Tx, tx *AppendableRoTx, txs uint64) {
- t.Helper()
- aggStep := tx.ap.aggregationStep
- steps := txs / aggStep
-
- require := require.New(t)
- //canonical keys
- w, ok, err := tx.Get(0, dbtx)
- require.NoError(err)
- require.True(ok)
- require.Equal(1, int(binary.BigEndian.Uint64(w)))
-
- w, ok, err = tx.Get(1, dbtx)
- require.NoError(err)
- require.True(ok)
- require.Equal(int(aggStep+1), int(binary.BigEndian.Uint64(w)))
-
- //non-canonical key: must exist before collate+prune
- _, ok = tx.getFromFiles(steps + 1)
- require.False(ok)
-
- from, to := tx.ap.stepsRangeInDB(dbtx)
- require.Equal(float64(0), from)
- require.Equal(62.4375, to)
-
- //non-canonical key: must exist before collate+prune
- _, ok, err = tx.Get(kv.TxnId(steps+1), dbtx)
- require.NoError(err)
- require.False(ok)
-
- //non-canonical keys of last step: must exist after collate+prune
- _, ok, err = tx.Get(kv.TxnId(aggStep*steps+2), dbtx)
- require.NoError(err)
- require.True(ok)
-}
-
-func mergeAppendable(tb testing.TB, db kv.RwDB, ii *Appendable, txs uint64) {
- tb.Helper()
- logEvery := time.NewTicker(30 * time.Second)
- defer logEvery.Stop()
- ctx := context.Background()
- // Leave the last 2 aggregation steps un-collated
- tx, err := db.BeginRw(ctx)
- require.NoError(tb, err)
- defer tx.Rollback()
- //panic("implement me")
-
- // Leave the last 2 aggregation steps un-collated
- for step := uint64(0); step < txs/ii.aggregationStep-1; step++ {
- func() {
- bs, err := ii.collate(ctx, step, tx)
- require.NoError(tb, err)
- sf, err := ii.buildFiles(ctx, step, bs, background.NewProgressSet())
- require.NoError(tb, err)
-
- ii.integrateDirtyFiles(sf, step*ii.aggregationStep, (step+1)*ii.aggregationStep)
- ii.reCalcVisibleFiles(ii.dirtyFilesEndTxNumMinimax())
- ic := ii.BeginFilesRo()
- defer ic.Close()
- _, err = ic.Prune(ctx, tx, step*ii.aggregationStep, (step+1)*ii.aggregationStep, math.MaxUint64, logEvery, false, nil)
- require.NoError(tb, err)
- maxSpan := ii.aggregationStep * StepsInColdFile
-
- for {
- if stop := func() bool {
- ic := ii.BeginFilesRo()
- defer ic.Close()
- r := ic.findMergeRange(ic.files.EndTxNum(), maxSpan)
- if !r.needMerge {
- return true
- }
- outs := ic.staticFilesInRange(r.from, r.to)
- in, err := ic.mergeFiles(ctx, outs, r.from, r.to, background.NewProgressSet())
- require.NoError(tb, err)
- ii.integrateMergedDirtyFiles(outs, in)
- ii.reCalcVisibleFiles(ii.dirtyFilesEndTxNumMinimax())
- return false
- }(); stop {
- break
- }
- }
- }()
- }
- err = tx.Commit()
- require.NoError(tb, err)
-
-}
-
-func emptyTestAppendable(aggStep uint64) *Appendable {
- salt := uint32(1)
- logger := log.New()
- return &Appendable{cfg: AppendableCfg{Salt: &salt, DB: nil},
- logger: logger,
- filenameBase: "test", aggregationStep: aggStep, dirtyFiles: btree2.NewBTreeG[*filesItem](filesItemLess)}
-}
-
-func TestAppendableScanStaticFiles(t *testing.T) {
- ii := emptyTestAppendable(1)
- files := []string{
- "v1-test.0-1.ap",
- "v1-test.1-2.ap",
- "v1-test.0-4.ap",
- "v1-test.2-3.ap",
- "v1-test.3-4.ap",
- "v1-test.4-5.ap",
- }
- ii.scanDirtyFiles(files)
- require.Equal(t, 6, ii.dirtyFiles.Len())
-
- //integrity extension case
- ii.dirtyFiles.Clear()
- ii.integrityCheck = func(fromStep, toStep uint64) bool { return false }
- ii.scanDirtyFiles(files)
- require.Equal(t, 0, ii.dirtyFiles.Len())
-}
-
-func TestAppendableCtxFiles(t *testing.T) {
- ii := emptyTestAppendable(1)
- files := []string{
- "v1-test.0-1.ap", // overlap with same `endTxNum=4`
- "v1-test.1-2.ap",
- "v1-test.0-4.ap",
- "v1-test.2-3.ap",
- "v1-test.3-4.ap",
- "v1-test.4-5.ap", // no overlap
- "v1-test.480-484.ap", // overlap with same `startTxNum=480`
- "v1-test.480-488.ap",
- "v1-test.480-496.ap",
- "v1-test.480-512.ap",
- }
- ii.scanDirtyFiles(files)
- require.Equal(t, 10, ii.dirtyFiles.Len())
- ii.dirtyFiles.Scan(func(item *filesItem) bool {
- fName := ii.apFilePath(item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep)
- item.decompressor = &seg.Decompressor{FileName1: fName}
- return true
- })
-
- visibleFiles := calcVisibleFiles(ii.dirtyFiles, 0, false, ii.dirtyFilesEndTxNumMinimax())
- for i, item := range visibleFiles {
- if item.src.canDelete.Load() {
- require.Failf(t, "deleted file", "%d-%d", item.startTxNum, item.endTxNum)
- }
- if i == 0 {
- continue
- }
- if item.src.isSubsetOf(visibleFiles[i-1].src) || visibleFiles[i-1].src.isSubsetOf(item.src) {
- require.Failf(t, "overlaping files", "%d-%d, %d-%d", item.startTxNum, item.endTxNum, visibleFiles[i-1].startTxNum, visibleFiles[i-1].endTxNum)
- }
- }
- require.Equal(t, 3, len(visibleFiles))
-
- require.Equal(t, 0, int(visibleFiles[0].startTxNum))
- require.Equal(t, 4, int(visibleFiles[0].endTxNum))
-
- require.Equal(t, 4, int(visibleFiles[1].startTxNum))
- require.Equal(t, 5, int(visibleFiles[1].endTxNum))
-
- require.Equal(t, 480, int(visibleFiles[2].startTxNum))
- require.Equal(t, 512, int(visibleFiles[2].endTxNum))
-}
diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go
index 66cb35b983a..7011530f26e 100644
--- a/erigon-lib/state/bps_tree.go
+++ b/erigon-lib/state/bps_tree.go
@@ -25,11 +25,12 @@ import (
"time"
"unsafe"
+ "github.com/erigontech/erigon-lib/common/dbg"
+
"github.com/c2h5oh/datasize"
"github.com/erigontech/erigon-lib/seg"
"github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/common/dbg"
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/recsplit/eliasfano32"
)
diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go
index bfa2af7eb3c..7357753f27f 100644
--- a/erigon-lib/state/domain.go
+++ b/erigon-lib/state/domain.go
@@ -1493,30 +1493,28 @@ func (dt *DomainRoTx) getFromFiles(filekey []byte) (v []byte, found bool, fileSt
// GetAsOf does not always require usage of roTx. If it is possible to determine
// historical value based only on static files, roTx will not be used.
-func (dt *DomainRoTx) GetAsOf(key []byte, txNum uint64, roTx kv.Tx) ([]byte, error) {
+func (dt *DomainRoTx) GetAsOf(key []byte, txNum uint64, roTx kv.Tx) ([]byte, bool, error) {
v, hOk, err := dt.ht.HistorySeek(key, txNum, roTx)
if err != nil {
- return nil, err
+ return nil, false, err
}
if hOk {
- // if history returned marker of key creation
- // domain must return nil
- if len(v) == 0 {
+ if len(v) == 0 { // if history successfuly found marker of key creation
if traceGetAsOf == dt.d.filenameBase {
fmt.Printf("GetAsOf(%s , %x, %d) -> not found in history\n", dt.d.filenameBase, key, txNum)
}
- return nil, nil
+ return nil, false, nil
}
if traceGetAsOf == dt.d.filenameBase {
fmt.Printf("GetAsOf(%s, %x, %d) -> found in history\n", dt.d.filenameBase, key, txNum)
}
- return v, nil
+ return v, v != nil, nil
}
v, _, _, err = dt.GetLatest(key, nil, roTx)
if err != nil {
- return nil, err
+ return nil, false, err
}
- return v, nil
+ return v, v != nil, nil
}
func (dt *DomainRoTx) Close() {
diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go
index 09ab134eb45..1aad6334bd6 100644
--- a/erigon-lib/state/domain_shared.go
+++ b/erigon-lib/state/domain_shared.go
@@ -96,9 +96,8 @@ type SharedDomains struct {
domains [kv.DomainLen]map[string]dataWithPrevStep
storage *btree2.Map[string, dataWithPrevStep]
- domainWriters [kv.DomainLen]*domainBufferedWriter
- iiWriters [kv.StandaloneIdxLen]*invertedIndexBufferedWriter
- appendableWriter [kv.AppendableLen]*appendableBufferedWriter
+ domainWriters [kv.DomainLen]*domainBufferedWriter
+ iiWriters [kv.StandaloneIdxLen]*invertedIndexBufferedWriter
currentChangesAccumulator *StateChangeSet
pastChangesAccumulator map[string]*StateChangeSet
@@ -131,10 +130,6 @@ func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) {
sd.domainWriters[id] = d.NewWriter()
}
- for id, a := range sd.aggTx.appendable {
- sd.appendableWriter[id] = a.NewWriter()
- }
-
sd.SetTxNum(0)
sd.sdCtx = NewSharedDomainsCommitmentContext(sd, commitment.ModeDirect, commitment.VariantHexPatriciaTrie)
@@ -181,10 +176,6 @@ func (sd *SharedDomains) GetDiffset(tx kv.RwTx, blockHash common.Hash, blockNumb
}
func (sd *SharedDomains) AggTx() any { return sd.aggTx }
-func (sd *SharedDomains) CanonicalReader() CanonicalsReader {
- return nil
- //return sd.aggTx.appendable[kv.ReceiptsAppendable].ap.cfg.iters
-}
// aggregator context should call aggTx.Unwind before this one.
func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo, txUnwindTo uint64, changeset *[kv.DomainLen][]DomainEntryDiff) error {
@@ -212,12 +203,6 @@ func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo
}
}
- for _, ap := range sd.aggTx.appendable {
- if err := ap.Unwind(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, nil); err != nil {
- return err
- }
- }
-
sd.ClearRam(true)
sd.SetTxNum(txUnwindTo)
sd.SetBlockNum(blockUnwindTo)
@@ -807,9 +792,6 @@ func (sd *SharedDomains) Close() {
for _, iiWriter := range sd.iiWriters {
iiWriter.close()
}
- for _, a := range sd.appendableWriter {
- a.close()
- }
}
if sd.sdCtx != nil {
@@ -852,14 +834,6 @@ func (sd *SharedDomains) Flush(ctx context.Context, tx kv.RwTx) error {
return err
}
}
- for _, w := range sd.appendableWriter {
- if w == nil {
- continue
- }
- if err := w.Flush(ctx, tx); err != nil {
- return err
- }
- }
if dbg.PruneOnFlushTimeout != 0 {
_, err = sd.aggTx.PruneSmallBatches(ctx, dbg.PruneOnFlushTimeout, tx)
if err != nil {
@@ -879,12 +853,6 @@ func (sd *SharedDomains) Flush(ctx context.Context, tx kv.RwTx) error {
}
w.close()
}
- for _, w := range sd.appendableWriter {
- if w == nil {
- continue
- }
- w.close()
- }
return nil
}
@@ -909,7 +877,7 @@ func (sd *SharedDomains) DomainGet(domain kv.Domain, k, k2 []byte) (v []byte, st
// DomainPut
// Optimizations:
-// - user can prvide `prevVal != nil` - then it will not read prev value from storage
+// - user can provide `prevVal != nil` - then it will not read prev value from storage
// - user can append k2 into k1, then underlying methods will not preform append
// - if `val == nil` it will call DomainDel
func (sd *SharedDomains) DomainPut(domain kv.Domain, k1, k2 []byte, val, prevVal []byte, prevStep uint64) error {
@@ -934,7 +902,13 @@ func (sd *SharedDomains) DomainPut(domain kv.Domain, k1, k2 []byte, val, prevVal
return nil
}
return sd.updateAccountCode(k1, val, prevVal, prevStep)
+ case kv.CommitmentDomain:
+ sd.put(domain, string(append(k1, k2...)), val)
+ return sd.domainWriters[domain].PutWithPrev(k1, k2, val, prevVal, prevStep)
default:
+ if bytes.Equal(prevVal, val) {
+ return nil
+ }
sd.put(domain, string(append(k1, k2...)), val)
return sd.domainWriters[domain].PutWithPrev(k1, k2, val, prevVal, prevStep)
}
@@ -1010,10 +984,6 @@ func (sd *SharedDomains) DomainDelPrefix(domain kv.Domain, prefix []byte) error
}
func (sd *SharedDomains) Tx() kv.Tx { return sd.roTx }
-func (sd *SharedDomains) AppendablePut(name kv.Appendable, ts kv.TxnId, v []byte) error {
- return sd.appendableWriter[name].Append(ts, v)
-}
-
type SharedDomainsCommitmentContext struct {
sharedDomains *SharedDomains
discard bool // could be replaced with using ModeDisabled
diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go
index 1411401d79f..bc8c8956bde 100644
--- a/erigon-lib/state/domain_test.go
+++ b/erigon-lib/state/domain_test.go
@@ -33,22 +33,22 @@ import (
"testing"
"time"
- datadir2 "github.com/erigontech/erigon-lib/common/datadir"
- "github.com/erigontech/erigon-lib/kv/order"
- "github.com/erigontech/erigon-lib/kv/stream"
- "github.com/erigontech/erigon-lib/log/v3"
- "github.com/erigontech/erigon-lib/seg"
- "github.com/erigontech/erigon-lib/types"
-
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
btree2 "github.com/tidwall/btree"
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/background"
+ datadir2 "github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/common/hexutility"
"github.com/erigontech/erigon-lib/common/length"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/mdbx"
+ "github.com/erigontech/erigon-lib/kv/order"
+ "github.com/erigontech/erigon-lib/kv/stream"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/seg"
+ "github.com/erigontech/erigon-lib/types"
)
func testDbAndDomain(t *testing.T, logger log.Logger) (kv.RwDB, *Domain) {
@@ -438,7 +438,7 @@ func checkHistory(t *testing.T, db kv.RwDB, d *Domain, txs uint64) {
label := fmt.Sprintf("key %x txNum=%d, keyNum=%d", k, txNum, keyNum)
- val, err := dc.GetAsOf(k[:], txNum+1, roTx)
+ val, _, err := dc.GetAsOf(k[:], txNum+1, roTx)
require.NoError(err, label)
if txNum >= keyNum {
require.Equal(v[:], val, label)
@@ -644,7 +644,7 @@ func TestDomain_Delete(t *testing.T) {
// require.Nil(val, label)
//}
//if txNum == 976 {
- val, err := dc.GetAsOf([]byte("key2"), txNum+1, tx)
+ val, _, err := dc.GetAsOf([]byte("key2"), txNum+1, tx)
require.NoError(err)
//require.False(ok, label)
require.Nil(val, label)
@@ -699,7 +699,7 @@ func TestDomain_Prune_AfterAllWrites(t *testing.T) {
binary.BigEndian.PutUint64(k[:], keyNum)
binary.BigEndian.PutUint64(v[:], txNum)
- val, err := dc.GetAsOf(k[:], txNum+1, roTx)
+ val, _, err := dc.GetAsOf(k[:], txNum+1, roTx)
// during generation such keys are skipped so value should be nil for this call
require.NoError(t, err, label)
if !data[keyNum][txNum] {
@@ -800,7 +800,7 @@ func TestDomain_PruneOnWrite(t *testing.T) {
binary.BigEndian.PutUint64(k[:], keyNum)
binary.BigEndian.PutUint64(v[:], valNum)
- val, err := dc.GetAsOf(k[:], txNum+1, tx)
+ val, _, err := dc.GetAsOf(k[:], txNum+1, tx)
require.NoError(t, err)
if keyNum == txNum%d.aggregationStep {
if txNum > 1 {
@@ -1142,7 +1142,7 @@ func TestDomainContext_getFromFiles(t *testing.T) {
beforeTx := d.aggregationStep
for i = 0; i < len(bufs); i++ {
ks, _ := hex.DecodeString(key)
- val, err := dc.GetAsOf(ks, beforeTx, tx)
+ val, _, err := dc.GetAsOf(ks, beforeTx, tx)
require.NoError(t, err)
require.EqualValuesf(t, bufs[i], val, "key %s, txn %d", key, beforeTx)
beforeTx += d.aggregationStep
@@ -1354,12 +1354,11 @@ func generateRandomTxNum(r *rand.Rand, maxTxNum uint64, usedTxNums map[uint64]bo
}
func TestDomain_GetAfterAggregation(t *testing.T) {
- t.Parallel()
-
db, d := testDbAndDomainOfStep(t, 25, log.New())
+ require := require.New(t)
tx, err := db.BeginRw(context.Background())
- require.NoError(t, err)
+ require.NoError(err)
defer tx.Rollback()
d.historyLargeValues = false
@@ -1391,14 +1390,14 @@ func TestDomain_GetAfterAggregation(t *testing.T) {
writer.SetTxNum(totalTx)
err = writer.Flush(context.Background(), tx)
- require.NoError(t, err)
+ require.NoError(err)
// aggregate
collateAndMerge(t, db, tx, d, totalTx)
- require.NoError(t, tx.Commit())
+ require.NoError(tx.Commit())
tx, err = db.BeginRw(context.Background())
- require.NoError(t, err)
+ require.NoError(err)
defer tx.Rollback()
dc.Close()
@@ -1409,17 +1408,18 @@ func TestDomain_GetAfterAggregation(t *testing.T) {
for key, updates := range data {
kc++
for i := 1; i < len(updates); i++ {
- v, err := dc.GetAsOf([]byte(key), updates[i].txNum, tx)
- require.NoError(t, err)
- require.EqualValuesf(t, updates[i-1].value, v, "(%d/%d) key %x, txn %d", kc, len(data), []byte(key), updates[i-1].txNum)
+ v, ok, err := dc.GetAsOf([]byte(key), updates[i].txNum, tx)
+ require.NoError(err)
+ require.True(ok)
+ require.EqualValuesf(updates[i-1].value, v, "(%d/%d) key %x, txn %d", kc, len(data), []byte(key), updates[i-1].txNum)
}
if len(updates) == 0 {
continue
}
v, _, ok, err := dc.GetLatest([]byte(key), nil, tx)
- require.NoError(t, err)
- require.EqualValuesf(t, updates[len(updates)-1].value, v, "key %x latest", []byte(key))
- require.True(t, ok)
+ require.NoError(err)
+ require.EqualValuesf(updates[len(updates)-1].value, v, "key %x latest", []byte(key))
+ require.True(ok)
}
}
@@ -1581,7 +1581,7 @@ func TestDomain_PruneAfterAggregation(t *testing.T) {
for key, updates := range data {
kc++
for i := 1; i < len(updates); i++ {
- v, err := dc.GetAsOf([]byte(key), updates[i].txNum, tx)
+ v, _, err := dc.GetAsOf([]byte(key), updates[i].txNum, tx)
require.NoError(t, err)
require.EqualValuesf(t, updates[i-1].value, v, "(%d/%d) key %x, txn %d", kc, len(data), []byte(key), updates[i-1].txNum)
}
@@ -2300,3 +2300,46 @@ func TestDomainContext_findShortenedKey(t *testing.T) {
ki++
}
}
+
+func TestCanBuild(t *testing.T) {
+ db, d := testDbAndDomain(t, log.New())
+ tx, err := db.BeginRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ d.historyLargeValues = true
+ dc := d.BeginFilesRo()
+ defer dc.Close()
+
+ dc.files = append(dc.files, visibleFile{startTxNum: 0, endTxNum: d.aggregationStep})
+
+ writer := dc.NewWriter()
+ defer writer.close()
+
+ k, v := []byte{1}, []byte{1}
+ // db has data which already in files
+ writer.SetTxNum(0)
+ _ = writer.PutWithPrev(k, nil, v, nil, 0)
+ _ = writer.Flush(context.Background(), tx)
+ canBuild := dc.canBuild(tx)
+ require.NoError(t, err)
+ require.False(t, canBuild)
+
+ // db has data which already in files and next step. still not enough - we need full step in db.
+ writer.SetTxNum(d.aggregationStep)
+ _ = writer.PutWithPrev(k, nil, v, nil, 0)
+ _ = writer.Flush(context.Background(), tx)
+ canBuild = dc.canBuild(tx)
+ require.NoError(t, err)
+ require.False(t, canBuild)
+ _ = writer.PutWithPrev(k, nil, v, nil, 0)
+
+ // db has: 1. data which already in files 2. full next step 3. a bit of next-next step. -> can build
+ writer.SetTxNum(d.aggregationStep * 2)
+ _ = writer.PutWithPrev(k, nil, v, nil, 0)
+ _ = writer.Flush(context.Background(), tx)
+ canBuild = dc.canBuild(tx)
+ require.NoError(t, err)
+ require.True(t, canBuild)
+ _ = writer.PutWithPrev(k, nil, hexutility.EncodeTs(d.aggregationStep*2+1), nil, 0)
+}
diff --git a/erigon-lib/state/files_item.go b/erigon-lib/state/files_item.go
index 451c54d49d4..92ad3220890 100644
--- a/erigon-lib/state/files_item.go
+++ b/erigon-lib/state/files_item.go
@@ -197,7 +197,6 @@ type visibleFile struct {
src *filesItem
}
-func (i *visibleFile) hasTS(ts uint64) bool { return i.startTxNum <= ts && i.endTxNum > ts }
func (i *visibleFile) isSubSetOf(j *visibleFile) bool { return i.src.isSubsetOf(j.src) } //nolint
func (i *visibleFile) isSubsetOf(j *visibleFile) bool { return i.src.isSubsetOf(j.src) } //nolint
diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go
index 53465fd5429..2f3e444ba13 100644
--- a/erigon-lib/state/inverted_index.go
+++ b/erigon-lib/state/inverted_index.go
@@ -30,6 +30,7 @@ import (
"reflect"
"regexp"
"strconv"
+ "strings"
"sync"
"time"
@@ -145,6 +146,9 @@ func filesFromDir(dir string) ([]string, error) {
if f.IsDir() || !f.Type().IsRegular() {
continue
}
+ if strings.HasPrefix(f.Name(), ".") { // hidden files
+ continue
+ }
filtered = append(filtered, f.Name())
}
return filtered, nil
diff --git a/erigon-lib/state/iters.go b/erigon-lib/state/iters.go
deleted file mode 100644
index e6b65572144..00000000000
--- a/erigon-lib/state/iters.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see .
-
-package state
-
-import (
- "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/kv/order"
- "github.com/erigontech/erigon-lib/kv/stream"
-)
-
-//go:generate mockgen -typed=true -destination=./iters_mock.go -package=state . CanonicalsReader
-type CanonicalsReader interface {
- // TxnIdsOfCanonicalBlocks - for given canonical blocks range returns non-canonical txnIds (not txNums)
- // [fromTxNum, toTxNum)
- // To get all canonical blocks, use fromTxNum=0, toTxNum=-1
- // For reverse iteration use order.Desc and fromTxNum=-1, toTxNum=-1
- TxnIdsOfCanonicalBlocks(tx kv.Tx, fromTxNum, toTxNum int, asc order.By, limit int) (stream.U64, error)
- BaseTxnID(tx kv.Tx, blockNum uint64, blockHash common.Hash) (kv.TxnId, error)
- TxNum2ID(tx kv.Tx, blockNum uint64, blockHash common.Hash, txNum uint64) (kv.TxnId, error)
- LastFrozenTxNum(tx kv.Tx) (kv.TxnId, error)
-}
diff --git a/erigon-lib/state/iters_mock.go b/erigon-lib/state/iters_mock.go
deleted file mode 100644
index ee9ab6869e1..00000000000
--- a/erigon-lib/state/iters_mock.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/erigontech/erigon-lib/state (interfaces: CanonicalsReader)
-//
-// Generated by this command:
-//
-// mockgen -typed=true -destination=./iters_mock.go -package=state . CanonicalsReader
-//
-
-// Package state is a generated GoMock package.
-package state
-
-import (
- reflect "reflect"
-
- common "github.com/erigontech/erigon-lib/common"
- kv "github.com/erigontech/erigon-lib/kv"
- order "github.com/erigontech/erigon-lib/kv/order"
- stream "github.com/erigontech/erigon-lib/kv/stream"
- gomock "go.uber.org/mock/gomock"
-)
-
-// MockCanonicalsReader is a mock of CanonicalsReader interface.
-type MockCanonicalsReader struct {
- ctrl *gomock.Controller
- recorder *MockCanonicalsReaderMockRecorder
-}
-
-// MockCanonicalsReaderMockRecorder is the mock recorder for MockCanonicalsReader.
-type MockCanonicalsReaderMockRecorder struct {
- mock *MockCanonicalsReader
-}
-
-// NewMockCanonicalsReader creates a new mock instance.
-func NewMockCanonicalsReader(ctrl *gomock.Controller) *MockCanonicalsReader {
- mock := &MockCanonicalsReader{ctrl: ctrl}
- mock.recorder = &MockCanonicalsReaderMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockCanonicalsReader) EXPECT() *MockCanonicalsReaderMockRecorder {
- return m.recorder
-}
-
-// BaseTxnID mocks base method.
-func (m *MockCanonicalsReader) BaseTxnID(arg0 kv.Tx, arg1 uint64, arg2 common.Hash) (kv.TxnId, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "BaseTxnID", arg0, arg1, arg2)
- ret0, _ := ret[0].(kv.TxnId)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// BaseTxnID indicates an expected call of BaseTxnID.
-func (mr *MockCanonicalsReaderMockRecorder) BaseTxnID(arg0, arg1, arg2 any) *MockCanonicalsReaderBaseTxnIDCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BaseTxnID", reflect.TypeOf((*MockCanonicalsReader)(nil).BaseTxnID), arg0, arg1, arg2)
- return &MockCanonicalsReaderBaseTxnIDCall{Call: call}
-}
-
-// MockCanonicalsReaderBaseTxnIDCall wrap *gomock.Call
-type MockCanonicalsReaderBaseTxnIDCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockCanonicalsReaderBaseTxnIDCall) Return(arg0 kv.TxnId, arg1 error) *MockCanonicalsReaderBaseTxnIDCall {
- c.Call = c.Call.Return(arg0, arg1)
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockCanonicalsReaderBaseTxnIDCall) Do(f func(kv.Tx, uint64, common.Hash) (kv.TxnId, error)) *MockCanonicalsReaderBaseTxnIDCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockCanonicalsReaderBaseTxnIDCall) DoAndReturn(f func(kv.Tx, uint64, common.Hash) (kv.TxnId, error)) *MockCanonicalsReaderBaseTxnIDCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
-
-// LastFrozenTxNum mocks base method.
-func (m *MockCanonicalsReader) LastFrozenTxNum(arg0 kv.Tx) (kv.TxnId, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "LastFrozenTxNum", arg0)
- ret0, _ := ret[0].(kv.TxnId)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// LastFrozenTxNum indicates an expected call of LastFrozenTxNum.
-func (mr *MockCanonicalsReaderMockRecorder) LastFrozenTxNum(arg0 any) *MockCanonicalsReaderLastFrozenTxNumCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastFrozenTxNum", reflect.TypeOf((*MockCanonicalsReader)(nil).LastFrozenTxNum), arg0)
- return &MockCanonicalsReaderLastFrozenTxNumCall{Call: call}
-}
-
-// MockCanonicalsReaderLastFrozenTxNumCall wrap *gomock.Call
-type MockCanonicalsReaderLastFrozenTxNumCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockCanonicalsReaderLastFrozenTxNumCall) Return(arg0 kv.TxnId, arg1 error) *MockCanonicalsReaderLastFrozenTxNumCall {
- c.Call = c.Call.Return(arg0, arg1)
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockCanonicalsReaderLastFrozenTxNumCall) Do(f func(kv.Tx) (kv.TxnId, error)) *MockCanonicalsReaderLastFrozenTxNumCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockCanonicalsReaderLastFrozenTxNumCall) DoAndReturn(f func(kv.Tx) (kv.TxnId, error)) *MockCanonicalsReaderLastFrozenTxNumCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
-
-// TxNum2ID mocks base method.
-func (m *MockCanonicalsReader) TxNum2ID(arg0 kv.Tx, arg1 uint64, arg2 common.Hash, arg3 uint64) (kv.TxnId, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "TxNum2ID", arg0, arg1, arg2, arg3)
- ret0, _ := ret[0].(kv.TxnId)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// TxNum2ID indicates an expected call of TxNum2ID.
-func (mr *MockCanonicalsReaderMockRecorder) TxNum2ID(arg0, arg1, arg2, arg3 any) *MockCanonicalsReaderTxNum2IDCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxNum2ID", reflect.TypeOf((*MockCanonicalsReader)(nil).TxNum2ID), arg0, arg1, arg2, arg3)
- return &MockCanonicalsReaderTxNum2IDCall{Call: call}
-}
-
-// MockCanonicalsReaderTxNum2IDCall wrap *gomock.Call
-type MockCanonicalsReaderTxNum2IDCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockCanonicalsReaderTxNum2IDCall) Return(arg0 kv.TxnId, arg1 error) *MockCanonicalsReaderTxNum2IDCall {
- c.Call = c.Call.Return(arg0, arg1)
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockCanonicalsReaderTxNum2IDCall) Do(f func(kv.Tx, uint64, common.Hash, uint64) (kv.TxnId, error)) *MockCanonicalsReaderTxNum2IDCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockCanonicalsReaderTxNum2IDCall) DoAndReturn(f func(kv.Tx, uint64, common.Hash, uint64) (kv.TxnId, error)) *MockCanonicalsReaderTxNum2IDCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
-
-// TxnIdsOfCanonicalBlocks mocks base method.
-func (m *MockCanonicalsReader) TxnIdsOfCanonicalBlocks(arg0 kv.Tx, arg1, arg2 int, arg3 order.By, arg4 int) (stream.U64, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "TxnIdsOfCanonicalBlocks", arg0, arg1, arg2, arg3, arg4)
- ret0, _ := ret[0].(stream.U64)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// TxnIdsOfCanonicalBlocks indicates an expected call of TxnIdsOfCanonicalBlocks.
-func (mr *MockCanonicalsReaderMockRecorder) TxnIdsOfCanonicalBlocks(arg0, arg1, arg2, arg3, arg4 any) *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxnIdsOfCanonicalBlocks", reflect.TypeOf((*MockCanonicalsReader)(nil).TxnIdsOfCanonicalBlocks), arg0, arg1, arg2, arg3, arg4)
- return &MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall{Call: call}
-}
-
-// MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall wrap *gomock.Call
-type MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall) Return(arg0 stream.U64, arg1 error) *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall {
- c.Call = c.Call.Return(arg0, arg1)
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall) Do(f func(kv.Tx, int, int, order.By, int) (stream.U64, error)) *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall) DoAndReturn(f func(kv.Tx, int, int, order.By, int) (stream.U64, error)) *MockCanonicalsReaderTxnIdsOfCanonicalBlocksCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go
index b4fa41e298d..4ade27d1407 100644
--- a/erigon-lib/state/merge.go
+++ b/erigon-lib/state/merge.go
@@ -73,16 +73,6 @@ func (h *History) dirtyFilesEndTxNumMinimax() uint64 {
}
return minimax
}
-func (a *Appendable) dirtyFilesEndTxNumMinimax() uint64 {
- var minimax uint64
- if _max, ok := a.dirtyFiles.Max(); ok {
- endTxNum := _max.endTxNum
- if minimax == 0 || endTxNum < minimax {
- minimax = endTxNum
- }
- }
- return minimax
-}
type DomainRanges struct {
name kv.Domain
@@ -107,6 +97,14 @@ func (r DomainRanges) String() string {
func (r DomainRanges) any() bool { return r.values.needMerge || r.history.any() }
+func (dt *DomainRoTx) FirstStepNotInFiles() uint64 { return dt.files.EndTxNum() / dt.d.aggregationStep }
+func (ht *HistoryRoTx) FirstStepNotInFiles() uint64 {
+ return ht.files.EndTxNum() / ht.h.aggregationStep
+}
+func (iit *InvertedIndexRoTx) FirstStepNotInFiles() uint64 {
+ return iit.files.EndTxNum() / iit.ii.aggregationStep
+}
+
// findMergeRange
// assumes that all fTypes in d.files have items at least as far as maxEndTxNum
// That is why only Values type is inspected
@@ -216,33 +214,6 @@ func (iit *InvertedIndexRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) *Merge
return &MergeRange{minFound, startTxNum, endTxNum}
}
-func (tx *AppendableRoTx) findMergeRange(maxEndTxNum, maxSpan uint64) *MergeRange {
- var minFound bool
- var startTxNum, endTxNum uint64
- for _, item := range tx.files {
- if item.endTxNum > maxEndTxNum {
- continue
- }
- endStep := item.endTxNum / tx.ap.aggregationStep
- spanStep := endStep & -endStep // Extract rightmost bit in the binary representation of endStep, this corresponds to size of maximally possible merge ending at endStep
- span := min(spanStep*tx.ap.aggregationStep, maxSpan)
- start := item.endTxNum - span
- foundSuperSet := startTxNum == item.startTxNum && item.endTxNum >= endTxNum
- if foundSuperSet {
- minFound = false
- startTxNum = start
- endTxNum = item.endTxNum
- } else if start < item.startTxNum {
- if !minFound || start < startTxNum {
- minFound = true
- startTxNum = start
- endTxNum = item.endTxNum
- }
- }
- }
- return &MergeRange{minFound, startTxNum, endTxNum}
-}
-
type HistoryRanges struct {
history MergeRange
index MergeRange
@@ -326,27 +297,6 @@ func (iit *InvertedIndexRoTx) staticFilesInRange(startTxNum, endTxNum uint64) []
return files
}
-func (tx *AppendableRoTx) staticFilesInRange(startTxNum, endTxNum uint64) []*filesItem {
- files := make([]*filesItem, 0, len(tx.files))
-
- for _, item := range tx.files {
- if item.startTxNum < startTxNum {
- continue
- }
- if item.endTxNum > endTxNum {
- break
- }
- files = append(files, item.src)
- }
- for _, f := range files {
- if f == nil {
- panic("must not happen")
- }
- }
-
- return files
-}
-
func (ht *HistoryRoTx) staticFilesInRange(r HistoryRanges) (indexFiles, historyFiles []*filesItem, err error) {
if !r.history.needMerge && r.index.needMerge {
indexFiles = ht.iit.staticFilesInRange(r.index.from, r.index.to)
@@ -988,75 +938,6 @@ func (d *Domain) integrateMergedDirtyFiles(valuesOuts, indexOuts, historyOuts []
}
}
-func (tx *AppendableRoTx) mergeFiles(ctx context.Context, files []*filesItem, startTxNum, endTxNum uint64, ps *background.ProgressSet) (*filesItem, error) {
- for _, h := range files {
- defer h.decompressor.EnableReadAhead().DisableReadAhead()
- }
-
- var outItem *filesItem
- var comp *seg.Compressor
- var err error
- var closeItem = true
- defer func() {
- if closeItem {
- if comp != nil {
- comp.Close()
- }
- if outItem != nil {
- outItem.closeFilesAndRemove()
- }
- }
- }()
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
- fromStep, toStep := startTxNum/tx.ap.aggregationStep, endTxNum/tx.ap.aggregationStep
-
- datPath := tx.ap.apFilePath(fromStep, toStep)
- if comp, err = seg.NewCompressor(ctx, "merge fk "+tx.ap.filenameBase, datPath, tx.ap.cfg.Dirs.Tmp, tx.ap.compressCfg, log.LvlTrace, tx.ap.logger); err != nil {
- return nil, fmt.Errorf("merge %s inverted index compressor: %w", tx.ap.filenameBase, err)
- }
- defer comp.Close()
- if tx.ap.noFsync {
- comp.DisableFsync()
- }
- write := seg.NewWriter(comp, tx.ap.compression)
- defer write.Close()
- p := ps.AddNew(path.Base(datPath), 1)
- defer ps.Delete(p)
-
- var word = make([]byte, 0, 4096)
-
- for _, item := range files {
- g := seg.NewReader(item.decompressor.MakeGetter(), tx.ap.compression)
- g.Reset(0)
- for g.HasNext() {
- word, _ = g.Next(word[:0])
- if err := write.AddWord(word); err != nil {
- return nil, err
- }
- }
- }
- if err = write.Compress(); err != nil {
- return nil, err
- }
-
- outItem = newFilesItem(startTxNum, endTxNum, tx.ap.aggregationStep)
- if outItem.decompressor, err = seg.NewDecompressor(datPath); err != nil {
- return nil, fmt.Errorf("merge %s decompressor [%d-%d]: %w", tx.ap.filenameBase, startTxNum, endTxNum, err)
- }
- ps.Delete(p)
-
- if err := tx.ap.buildAccessor(ctx, fromStep, toStep, outItem.decompressor, ps); err != nil {
- return nil, fmt.Errorf("merge %s buildIndex [%d-%d]: %w", tx.ap.filenameBase, startTxNum, endTxNum, err)
- }
- if outItem.index, err = recsplit.OpenIndex(tx.ap.accessorFilePath(fromStep, toStep)); err != nil {
- return nil, err
- }
-
- closeItem = false
- return outItem, nil
-}
func (ii *InvertedIndex) integrateMergedDirtyFiles(outs []*filesItem, in *filesItem) {
if in != nil {
ii.dirtyFiles.Set(in)
@@ -1077,26 +958,6 @@ func (ii *InvertedIndex) integrateMergedDirtyFiles(outs []*filesItem, in *filesI
}
deleteMergeFile(ii.dirtyFiles, outs, ii.filenameBase, ii.logger)
}
-func (ap *Appendable) integrateMergedDirtyFiles(outs []*filesItem, in *filesItem) {
- if in != nil {
- ap.dirtyFiles.Set(in)
-
- // `kill -9` may leave some garbage
- // but it still may be useful for merges, until we finish merge frozen file
- if in.frozen {
- ap.dirtyFiles.Walk(func(items []*filesItem) bool {
- for _, item := range items {
- if item.frozen || item.endTxNum > in.endTxNum {
- continue
- }
- outs = append(outs, item)
- }
- return true
- })
- }
- }
- deleteMergeFile(ap.dirtyFiles, outs, ap.filenameBase, ap.logger)
-}
func (h *History) integrateMergedDirtyFiles(indexOuts, historyOuts []*filesItem, indexIn, historyIn *filesItem) {
h.InvertedIndex.integrateMergedDirtyFiles(indexOuts, indexIn)
@@ -1157,17 +1018,6 @@ func (iit *InvertedIndexRoTx) cleanAfterMerge(merged *filesItem) {
deleteMergeFile(iit.ii.dirtyFiles, outs, iit.ii.filenameBase, iit.ii.logger)
}
-func (tx *AppendableRoTx) cleanAfterMerge(merged *filesItem) {
- if merged == nil {
- return
- }
- if merged.endTxNum == 0 {
- return
- }
- outs := garbage(tx.ap.dirtyFiles, tx.files, merged)
- deleteMergeFile(tx.ap.dirtyFiles, outs, tx.ap.filenameBase, tx.ap.logger)
-}
-
// garbage - returns list of garbage files after merge step is done. at startup pass here last frozen file
func (dt *DomainRoTx) garbage(merged *filesItem) (outs []*filesItem) {
if merged == nil {
@@ -1237,3 +1087,6 @@ func hasCoverVisibleFile(visibleFiles []visibleFile, item *filesItem) bool {
}
return false
}
+
+func (ac *AggregatorRoTx) DbgDomain(idx kv.Domain) *DomainRoTx { return ac.d[idx] }
+func (ac *AggregatorRoTx) DbgII(idx kv.InvertedIdxPos) *InvertedIndexRoTx { return ac.iis[idx] }
diff --git a/erigon-lib/state/metrics.go b/erigon-lib/state/metrics.go
index 91e1869a106..918ae91f630 100644
--- a/erigon-lib/state/metrics.go
+++ b/erigon-lib/state/metrics.go
@@ -95,5 +95,13 @@ var (
metrics.GetOrCreateSummary(`kv_get{level="L4",domain="commitment"}`),
metrics.GetOrCreateSummary(`kv_get{level="recent",domain="commitment"}`),
},
+ kv.ReceiptDomain: {
+ metrics.GetOrCreateSummary(`kv_get{level="L0",domain="receipt"}`),
+ metrics.GetOrCreateSummary(`kv_get{level="L1",domain="receipt"}`),
+ metrics.GetOrCreateSummary(`kv_get{level="L2",domain="receipt"}`),
+ metrics.GetOrCreateSummary(`kv_get{level="L3",domain="receipt"}`),
+ metrics.GetOrCreateSummary(`kv_get{level="L4",domain="receipt"}`),
+ metrics.GetOrCreateSummary(`kv_get{level="recent",domain="receipt"}`),
+ },
}
)
diff --git a/erigon-lib/state/state_util.go b/erigon-lib/state/state_util.go
index 475c4b17d57..67a6266bbdc 100644
--- a/erigon-lib/state/state_util.go
+++ b/erigon-lib/state/state_util.go
@@ -17,62 +17,11 @@
package state
import (
- "context"
"encoding/binary"
- "fmt"
"github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/log/v3"
- "github.com/erigontech/erigon-lib/recsplit"
- "github.com/erigontech/erigon-lib/seg"
)
-func buildSimpleMapAccessor(ctx context.Context, d *seg.Decompressor, compression seg.FileCompression, cfg recsplit.RecSplitArgs, logger log.Logger, walker func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error) error {
- count := d.Count()
-
- defer d.EnableReadAhead().DisableReadAhead()
-
- var rs *recsplit.RecSplit
- var err error
- cfg.KeyCount = count
- if rs, err = recsplit.NewRecSplit(cfg, logger); err != nil {
- return fmt.Errorf("create recsplit: %w", err)
- }
- defer rs.Close()
- rs.LogLvl(log.LvlTrace)
-
- for {
- g := seg.NewReader(d.MakeGetter(), compression)
- var i, offset, nextPos uint64
- word := make([]byte, 0, 256)
- for g.HasNext() {
- word, nextPos = g.Next(word[:0])
- if err := walker(rs, i, offset, word); err != nil {
- return err
- }
- i++
- offset = nextPos
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- }
- if err = rs.Build(ctx); err != nil {
- if rs.Collision() {
- logger.Info("Building recsplit. Collision happened. It's ok. Restarting...")
- rs.ResetNextSalt()
- } else {
- return fmt.Errorf("build idx: %w", err)
- }
- } else {
- break
- }
- }
- return nil
-}
-
// SaveExecV3PruneProgress saves latest pruned key in given table to the database.
// nil key also allowed and means that latest pruning run has been finished.
func SaveExecV3PruneProgress(db kv.Putter, prunedTblName string, prunedKey []byte) error {
diff --git a/eth/backend.go b/eth/backend.go
index b9280e2476c..bffc485f093 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -71,7 +71,6 @@ import (
prototypes "github.com/erigontech/erigon-lib/gointerfaces/typesproto"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/kvcache"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/kv/remotedbserver"
"github.com/erigontech/erigon-lib/kv/temporal"
"github.com/erigontech/erigon-lib/log/v3"
@@ -1478,8 +1477,7 @@ func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConf
})
blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, blockReader)))
- agg, err := libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, cr, logger)
+ agg, err := libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, logger)
if err != nil {
return nil, nil, nil, nil, nil, err
}
diff --git a/eth/integrity/e3_ef_files.go b/eth/integrity/e3_ef_files.go
index 07f8f068b1b..929413725c4 100644
--- a/eth/integrity/e3_ef_files.go
+++ b/eth/integrity/e3_ef_files.go
@@ -34,7 +34,7 @@ func E3EfFiles(ctx context.Context, chainDB kv.RwDB, agg *state.Aggregator, fail
return err
}
g := &errgroup.Group{}
- for _, idx := range []kv.InvertedIdx{kv.AccountsHistoryIdx, kv.StorageHistoryIdx, kv.CodeHistoryIdx, kv.CommitmentHistoryIdx, kv.LogTopicIdx, kv.LogAddrIdx, kv.TracesFromIdx, kv.TracesToIdx} {
+ for _, idx := range []kv.InvertedIdx{kv.AccountsHistoryIdx, kv.StorageHistoryIdx, kv.CodeHistoryIdx, kv.CommitmentHistoryIdx, kv.ReceiptHistoryIdx, kv.LogTopicIdx, kv.LogAddrIdx, kv.TracesFromIdx, kv.TracesToIdx} {
idx := idx
g.Go(func() error {
tx, err := db.BeginTemporalRo(ctx)
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index a84add90dc8..611f5468ed4 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -23,9 +23,6 @@ import (
"math/big"
"testing"
- "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client"
- "github.com/erigontech/erigon/turbo/jsonrpc/receipts"
-
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
@@ -39,8 +36,10 @@ import (
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/crypto"
"github.com/erigontech/erigon/eth/protocols/eth"
+ "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client"
"github.com/erigontech/erigon/params"
"github.com/erigontech/erigon/rlp"
+ "github.com/erigontech/erigon/turbo/jsonrpc/receipts"
"github.com/erigontech/erigon/turbo/stages/mock"
)
diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go
index 110a42727a1..f569d016a02 100644
--- a/eth/stagedsync/exec3.go
+++ b/eth/stagedsync/exec3.go
@@ -29,14 +29,11 @@ import (
"sync/atomic"
"time"
- "github.com/erigontech/erigon/eth/ethconfig/estimate"
-
"github.com/c2h5oh/datasize"
+ "github.com/erigontech/erigon/core/rawdb/rawtemporaldb"
"github.com/erigontech/mdbx-go/mdbx"
"golang.org/x/sync/errgroup"
- "github.com/erigontech/erigon-lib/log/v3"
-
"github.com/erigontech/erigon-lib/chain"
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/cmp"
@@ -49,6 +46,7 @@ import (
"github.com/erigontech/erigon-lib/kv"
kv2 "github.com/erigontech/erigon-lib/kv/mdbx"
"github.com/erigontech/erigon-lib/kv/rawdbv3"
+ "github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/metrics"
state2 "github.com/erigontech/erigon-lib/state"
"github.com/erigontech/erigon-lib/wrap"
@@ -61,6 +59,7 @@ import (
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/core/types/accounts"
+ "github.com/erigontech/erigon/eth/ethconfig/estimate"
"github.com/erigontech/erigon/eth/stagedsync/stages"
"github.com/erigontech/erigon/turbo/services"
"github.com/erigontech/erigon/turbo/shards"
@@ -522,7 +521,7 @@ func ExecV3(ctx context.Context,
if err := func() error {
//Drain results (and process) channel because read sets do not carry over
for !blockComplete.Load() {
- rws.DrainNonBlocking()
+ rws.DrainNonBlocking(ctx)
applyWorker.ResetTx(tx)
processedTxNum, conflicts, triggers, processedBlockNum, stoppedAtBlockEnd, err := processResultQueue(ctx, in, rws, outputTxNum.Load(), rs, agg, tx, nil, applyWorker, false, true, isMining)
@@ -550,7 +549,7 @@ func ExecV3(ctx context.Context,
}
// Drain results channel because read sets do not carry over
- rws.DropResults(func(txTask *state.TxTask) {
+ rws.DropResults(ctx, func(txTask *state.TxTask) {
rs.ReTry(txTask, in)
})
@@ -674,8 +673,6 @@ func ExecV3(ctx context.Context,
}
}
- //fmt.Printf("exec blocks: %d -> %d\n", blockNum, maxBlockNum)
-
var b *types.Block
// Only needed by bor chains
@@ -772,7 +769,7 @@ Loop:
}
rules := chainConfig.Rules(blockNum, b.Time())
- var receipts types.Receipts
+ blockReceipts := make(types.Receipts, len(txs))
// During the first block execution, we may have half-block data in the snapshots.
// Thus, we need to skip the first txs in the block, however, this causes the GasUsed to be incorrect.
// So we skip that check for the first block, if we find half-executed data.
@@ -802,10 +799,17 @@ Loop:
// use history reader instead of state reader to catch up to the tx where we left off
HistoryExecution: offsetFromBlockBeginning > 0 && txIndex < int(offsetFromBlockBeginning),
- BlockReceipts: receipts,
+ BlockReceipts: blockReceipts,
Config: chainConfig,
}
+ if txTask.HistoryExecution && usedGas == 0 {
+ usedGas, blobGasUsed, _, err = rawtemporaldb.ReceiptAsOf(applyTx.(kv.TemporalTx), txTask.TxNum)
+ if err != nil {
+ return err
+ }
+ }
+
if cfg.genesis != nil {
txTask.Config = cfg.genesis.Config
}
@@ -818,9 +822,6 @@ Loop:
doms.SetTxNum(txTask.TxNum)
doms.SetBlockNum(txTask.BlockNum)
- //if txTask.HistoryExecution { // nolint
- // fmt.Printf("[dbg] txNum: %d, hist=%t\n", txTask.TxNum, txTask.HistoryExecution)
- //}
if txIndex >= 0 && txIndex < len(txs) {
txTask.Tx = txs[txIndex]
txTask.TxAsMessage, err = txTask.Tx.AsMessage(signer, header.BaseFee, txTask.Rules)
@@ -875,20 +876,20 @@ Loop:
if txTask.Tx != nil {
blobGasUsed += txTask.Tx.GetBlobGas()
}
+
+ txTask.CreateReceipt(applyTx)
+
if txTask.Final {
if !isMining && !inMemExec && !execStage.CurrentSyncCycle.IsInitialCycle {
- cfg.notifications.RecentLogs.Add(receipts)
+ cfg.notifications.RecentLogs.Add(blockReceipts)
}
checkReceipts := !cfg.vmConfig.StatelessExec && chainConfig.IsByzantium(txTask.BlockNum) && !cfg.vmConfig.NoReceipts && !isMining
if txTask.BlockNum > 0 && !skipPostEvaluation { //Disable check for genesis. Maybe need somehow improve it in future - to satisfy TestExecutionSpec
- if err := core.BlockPostValidation(usedGas, blobGasUsed, checkReceipts, receipts, txTask.Header, isMining); err != nil {
+ if err := core.BlockPostValidation(usedGas, blobGasUsed, checkReceipts, txTask.BlockReceipts, txTask.Header, isMining); err != nil {
return fmt.Errorf("%w, txnIdx=%d, %v", consensus.ErrInvalidBlock, txTask.TxIndex, err) //same as in stage_exec.go
}
}
usedGas, blobGasUsed = 0, 0
- receipts = receipts[:0]
- } else if txTask.TxIndex >= 0 {
- receipts = append(receipts, txTask.CreateReceipt(usedGas))
}
return nil
}(); err != nil {
@@ -918,7 +919,18 @@ Loop:
break Loop
}
- if err = rs.ApplyState4(ctx, txTask); err != nil {
+ if !txTask.Final {
+ var receipt *types.Receipt
+ if txTask.TxIndex >= 0 && !txTask.Final {
+ receipt = txTask.BlockReceipts[txTask.TxIndex]
+ }
+ if err := rawtemporaldb.AppendReceipt(doms, receipt, blobGasUsed); err != nil {
+ return err
+ }
+ }
+
+ // MA applystate
+ if err := rs.ApplyState4(ctx, txTask); err != nil {
return err
}
diff --git a/eth/stagedsync/stage_custom_trace.go b/eth/stagedsync/stage_custom_trace.go
index d7be07cff56..dfd2b80a075 100644
--- a/eth/stagedsync/stage_custom_trace.go
+++ b/eth/stagedsync/stage_custom_trace.go
@@ -19,18 +19,26 @@ package stagedsync
import (
"context"
"fmt"
+ "runtime"
+ "time"
"github.com/erigontech/erigon-lib/chain"
+ libcommon "github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/common/dbg"
"github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/log/v3"
- "github.com/erigontech/erigon-lib/wrap"
+ state2 "github.com/erigontech/erigon-lib/state"
"github.com/erigontech/erigon/cmd/state/exec3"
"github.com/erigontech/erigon/consensus"
+ "github.com/erigontech/erigon/core/rawdb/rawtemporaldb"
+ "github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/eth/ethconfig"
"github.com/erigontech/erigon/ethdb/prune"
"github.com/erigontech/erigon/turbo/services"
+ "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks"
)
type CustomTraceCfg struct {
@@ -59,131 +67,38 @@ func StageCustomTraceCfg(db kv.RwDB, prune prune.Mode, dirs datadir.Dirs, br ser
}
}
-func SpawnCustomTrace(s *StageState, txc wrap.TxContainer, cfg CustomTraceCfg, ctx context.Context, prematureEndBlock uint64, logger log.Logger) error {
- useExternalTx := txc.Ttx != nil
- var tx kv.TemporalRwTx
- if !useExternalTx {
- _tx, err := cfg.db.BeginRw(ctx)
+func SpawnCustomTrace(cfg CustomTraceCfg, ctx context.Context, logger log.Logger) error {
+ var startBlock, endBlock uint64
+ if err := cfg.db.View(ctx, func(tx kv.Tx) (err error) {
+ txNumsReader := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, cfg.execArgs.BlockReader))
+
+ ac := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx)
+ txNum := ac.DbgDomain(kv.AccountsDomain).FirstStepNotInFiles() * cfg.db.(state2.HasAgg).Agg().(*state2.Aggregator).StepSize()
+ var ok bool
+ ok, endBlock, err = txNumsReader.FindBlockNum(tx, txNum)
if err != nil {
- return err
+ return fmt.Errorf("getting last executed block: %w", err)
+ }
+ if !ok {
+ panic(ok)
+ }
+
+ txNum = ac.DbgDomain(kv.ReceiptDomain).FirstStepNotInFiles() * cfg.db.(state2.HasAgg).Agg().(*state2.Aggregator).StepSize()
+ ok, startBlock, err = txNumsReader.FindBlockNum(tx, txNum)
+ if err != nil {
+ return fmt.Errorf("getting last executed block: %w", err)
}
- defer _tx.Rollback()
- tx = _tx.(kv.TemporalRwTx)
- } else {
- tx = txc.Ttx.(kv.TemporalRwTx)
+ if !ok {
+ panic(ok)
+ }
+ return nil
+ }); err != nil {
+ return err
}
+ defer cfg.execArgs.BlockReader.Snapshots().(*freezeblocks.RoSnapshots).EnableReadAhead().DisableReadAhead()
- //endBlock, err := s.ExecutionAt(tx)
- //if err != nil {
- // return fmt.Errorf("getting last executed block: %w", err)
- //}
- //if s.BlockNumber > endBlock { // Erigon will self-heal (download missed blocks) eventually
- // return nil
- //}
- //// if prematureEndBlock is nonzero and less than the latest executed block,
- //// then we only run the log index stage until prematureEndBlock
- //if prematureEndBlock != 0 && prematureEndBlock < endBlock {
- // endBlock = prematureEndBlock
- //}
- //// It is possible that prematureEndBlock < s.BlockNumber,
- //// in which case it is important that we skip this stage,
- //// or else we could overwrite stage_at with prematureEndBlock
- //if endBlock <= s.BlockNumber {
- // return nil
- //}
- //
- //startBlock := s.BlockNumber
- //if startBlock > 0 {
- // startBlock++
- //}
- //
- //logEvery := time.NewTicker(10 * time.Second)
- //defer logEvery.Stop()
- //var m runtime.MemStats
- //var prevBlockNumLog uint64 = startBlock
- //
- //doms, err := state2.NewSharedDomains(tx, logger)
- //if err != nil {
- // return err
- //}
- //defer doms.Close()
- //
- //cumulative := uint256.NewInt(0)
- //var lastBlockNum uint64
- //
- //canonicalReader := doms.CanonicalReader()
- //lastFrozenID, err := canonicalReader.LastFrozenTxNum(tx)
- //if err != nil {
- // return err
- //}
- //
- //var baseBlockTxnID, txnID kv.TxnId
- //fmt.Printf("dbg1: %s\n", tx.ViewID())
- ////TODO: new tracer may get tracer from pool, maybe add it to TxTask field
- ///// maybe need startTxNum/endTxNum
- //if err = exec3.CustomTraceMapReduce(startBlock, endBlock, exec3.TraceConsumer{
- // NewTracer: func() exec3.GenericTracer { return nil },
- // Reduce: func(txTask *state.TxTask, tx kv.Tx) error {
- // if txTask.Error != nil {
- // return err
- // }
- //
- // if lastBlockNum != txTask.BlockNum {
- // cumulative.Set(u256.N0)
- // lastBlockNum = txTask.BlockNum
- //
- // if txTask.TxNum < uint64(lastFrozenID) {
- // txnID = kv.TxnId(txTask.TxNum)
- // } else {
- // h, err := rawdb.ReadCanonicalHash(tx, txTask.BlockNum)
- // baseBlockTxnID, err = canonicalReader.BaseTxnID(tx, txTask.BlockNum, h)
- // if err != nil {
- // return err
- // }
- // txnID = baseBlockTxnID
- // }
- // } else {
- // txnID++
- // }
- // cumulative.AddUint64(cumulative, txTask.UsedGas)
- //
- // if txTask.Final || txTask.TxIndex < 0 {
- // return nil
- // }
- // r := txTask.CreateReceipt(cumulative.Uint64())
- // v, err := rlp.EncodeToBytes(r)
- // if err != nil {
- // return err
- // }
- // doms.SetTx(tx)
- // err = doms.AppendablePut(kv.ReceiptsAppendable, txnID, v)
- // if err != nil {
- // return err
- // }
- //
- // select {
- // case <-logEvery.C:
- // dbg.ReadMemStats(&m)
- // log.Info("Scanned", "block", txTask.BlockNum, "blk/sec", float64(txTask.BlockNum-prevBlockNumLog)/10, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys))
- // prevBlockNumLog = txTask.BlockNum
- // default:
- // }
- //
- // return nil
- // },
- //}, ctx, tx, cfg.execArgs, logger); err != nil {
- // return err
- //}
- //if err := doms.Flush(ctx, tx); err != nil {
- // return err
- //}
- //
- //if err = s.Update(tx.(kv.RwTx), endBlock); err != nil {
- // return err
- //}
-
- if !useExternalTx {
- if err := tx.Commit(); err != nil {
+ for ; startBlock < endBlock; startBlock += 1_000_000 {
+ if err := customTraceBatchProduce(ctx, cfg.execArgs, cfg.db, startBlock, startBlock+1_000_000, "custom_trace", logger); err != nil {
return err
}
}
@@ -191,31 +106,122 @@ func SpawnCustomTrace(s *StageState, txc wrap.TxContainer, cfg CustomTraceCfg, c
return nil
}
-func UnwindCustomTrace(u *UnwindState, s *StageState, txc wrap.TxContainer, cfg CustomTraceCfg, ctx context.Context, logger log.Logger) (err error) {
- useExternalTx := txc.Ttx != nil
- var tx kv.TemporalTx
- if !useExternalTx {
- _tx, err := cfg.db.BeginRw(ctx)
+func customTraceBatchProduce(ctx context.Context, cfg *exec3.ExecArgs, db kv.RwDB, fromBlock, toBlock uint64, logPrefix string, logger log.Logger) error {
+ var lastTxNum uint64
+ if err := db.Update(ctx, func(tx kv.RwTx) error {
+ ttx := tx.(kv.TemporalRwTx)
+ doms, err := state2.NewSharedDomains(tx, logger)
if err != nil {
return err
}
- defer _tx.Rollback()
- tx = _tx.(kv.TemporalTx)
- } else {
- tx = txc.Ttx
- }
+ defer doms.Close()
- if err := u.Done(tx.(kv.RwTx)); err != nil {
- return fmt.Errorf("%w", err)
- }
- if !useExternalTx {
+ if err := customTraceBatch(ctx, cfg, ttx, doms, fromBlock, toBlock, logPrefix, logger); err != nil {
+ return err
+ }
+ doms.SetTx(tx)
+ if err := doms.Flush(ctx, tx); err != nil {
+ return err
+ }
+ lastTxNum = doms.TxNum()
if err := tx.Commit(); err != nil {
return err
}
+ return nil
+ }); err != nil {
+ return err
+ }
+ agg := db.(state2.HasAgg).Agg().(*state2.Aggregator)
+ var fromStep, toStep uint64
+ if lastTxNum/agg.StepSize() > 0 {
+ toStep = lastTxNum / agg.StepSize()
+ }
+ if err := db.View(ctx, func(tx kv.Tx) error {
+ ac := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx)
+ fromStep = ac.DbgDomain(kv.ReceiptDomain).FirstStepNotInFiles()
+ return nil
+ }); err != nil {
+ return err
+ }
+ if err := agg.BuildFiles2(ctx, fromStep, toStep); err != nil {
+ return err
+ }
+
+ if err := db.Update(ctx, func(tx kv.RwTx) error {
+ ac := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx)
+ if _, err := ac.PruneSmallBatches(ctx, 10*time.Hour, tx); err != nil { // prune part of retired data, before commit
+ return err
+ }
+ return nil
+ }); err != nil {
+ return err
}
return nil
}
-func PruneCustomTrace(s *PruneState, tx kv.RwTx, cfg CustomTraceCfg, ctx context.Context, logger log.Logger) (err error) {
+func customTraceBatch(ctx context.Context, cfg *exec3.ExecArgs, tx kv.TemporalRwTx, doms *state2.SharedDomains, fromBlock, toBlock uint64, logPrefix string, logger log.Logger) error {
+ const logPeriod = 5 * time.Second
+ logEvery := time.NewTicker(logPeriod)
+ defer logEvery.Stop()
+
+ var cumulativeBlobGasUsedInBlock uint64
+ //var cumulativeGasUsedTotal = uint256.NewInt(0)
+
+ //TODO: new tracer may get tracer from pool, maybe add it to TxTask field
+ /// maybe need startTxNum/endTxNum
+ var prevTxNumLog = fromBlock
+ var m runtime.MemStats
+ if err := exec3.CustomTraceMapReduce(fromBlock, toBlock, exec3.TraceConsumer{
+ NewTracer: func() exec3.GenericTracer { return nil },
+ Reduce: func(txTask *state.TxTask, tx kv.Tx) (err error) {
+ if txTask.Error != nil {
+ return err
+ }
+
+ if txTask.Tx != nil {
+ cumulativeBlobGasUsedInBlock += txTask.Tx.GetBlobGas()
+ }
+ //if txTask.Final {
+ // cumulativeGasUsedTotal.AddUint64(cumulativeGasUsedTotal, cumulativeGasUsedInBlock)
+ //}
+
+ if txTask.Final { // TODO: move asserts to 1 level higher
+ if txTask.Header.BlobGasUsed != nil && *txTask.Header.BlobGasUsed != cumulativeBlobGasUsedInBlock {
+ err := fmt.Errorf("assert: %d != %d", *txTask.Header.BlobGasUsed, cumulativeBlobGasUsedInBlock)
+ panic(err)
+ }
+ }
+
+ doms.SetTx(tx)
+ doms.SetTxNum(txTask.TxNum)
+ if !txTask.Final {
+ var receipt *types.Receipt
+ if txTask.TxIndex >= 0 && !txTask.Final {
+ receipt = txTask.BlockReceipts[txTask.TxIndex]
+ }
+ if err := rawtemporaldb.AppendReceipt(doms, receipt, cumulativeBlobGasUsedInBlock); err != nil {
+ return err
+ }
+ }
+
+ if txTask.Final { // block changed
+ cumulativeBlobGasUsedInBlock = 0
+ }
+
+ select {
+ case <-logEvery.C:
+ if prevTxNumLog > 0 {
+ dbg.ReadMemStats(&m)
+ log.Info(fmt.Sprintf("[%s] Scanned", logPrefix), "block", txTask.BlockNum, "txs/sec", (txTask.TxNum-prevTxNumLog)/uint64(logPeriod.Seconds()), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys))
+ }
+ prevTxNumLog = txTask.TxNum
+ default:
+ }
+ return nil
+ },
+ }, ctx, tx, cfg, logger); err != nil {
+ return err
+ }
+
return nil
}
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index 88a2a5d6cbd..b83e034a580 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -69,7 +69,7 @@ func testCtx() *vmContext {
}
func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *chain.Config, contractCode []byte) (json.RawMessage, error) {
- c := vm.NewJumpDestCache(false)
+ c := vm.NewJumpDestCache()
var (
env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer})
gasLimit uint64 = 31000
@@ -185,7 +185,7 @@ func TestHalt(t *testing.T) {
}
func TestHaltBetweenSteps(t *testing.T) {
- c := vm.NewJumpDestCache(false)
+ c := vm.NewJumpDestCache()
tracer, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }}", nil, nil)
if err != nil {
t.Fatal(err)
@@ -270,7 +270,7 @@ func TestIsPrecompile(t *testing.T) {
}
func TestEnterExit(t *testing.T) {
- c := vm.NewJumpDestCache(false)
+ c := vm.NewJumpDestCache()
// test that either both or none of enter() and exit() are defined
if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil); err == nil {
t.Fatal("tracer creation should've failed without exit() definition")
diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go
index c991b17c67d..903323e6c6a 100644
--- a/eth/tracers/logger/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -58,7 +58,7 @@ type dummyStatedb struct {
func (*dummyStatedb) GetRefund() uint64 { return 1337 }
func TestStoreCapture(t *testing.T) {
- c := vm.NewJumpDestCache(false)
+ c := vm.NewJumpDestCache()
var (
env = vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{})
logger = NewStructLogger(nil)
diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go
index 6bc5945e8f6..2ef80d2eda1 100644
--- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go
+++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go
@@ -28,6 +28,7 @@ import (
"github.com/c2h5oh/datasize"
"golang.org/x/sync/semaphore"
+
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials/insecure"
diff --git a/spectest/suite.go b/spectest/suite.go
index 0922e35dc1d..a234a94bcd0 100644
--- a/spectest/suite.go
+++ b/spectest/suite.go
@@ -1,11 +1,12 @@
package spectest
import (
- "github.com/erigontech/erigon/cl/transition/machine"
"io/fs"
"path/filepath"
"testing"
+ "github.com/erigontech/erigon/cl/transition/machine"
+
"gfx.cafe/util/go/generic"
"github.com/stretchr/testify/require"
)
diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go
index 52086f86410..3c46de4bdf4 100644
--- a/turbo/app/snapshots_cmd.go
+++ b/turbo/app/snapshots_cmd.go
@@ -61,7 +61,6 @@ import (
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cmd/hack/tool/fromdb"
"github.com/erigontech/erigon/cmd/utils"
- "github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/rawdb/blockio"
coresnaptype "github.com/erigontech/erigon/core/snaptype"
"github.com/erigontech/erigon/diagnostics"
@@ -481,6 +480,8 @@ func doDebugKey(cliCtx *cli.Context) error {
domain, idx = kv.CodeDomain, kv.CodeHistoryIdx
case "commitment":
domain, idx = kv.CommitmentDomain, kv.CommitmentHistoryIdx
+ case "receipt":
+ domain, idx = kv.ReceiptDomain, kv.ReceiptHistoryIdx
default:
panic(ds)
}
@@ -491,8 +492,11 @@ func doDebugKey(cliCtx *cli.Context) error {
chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
defer chainDB.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg := openAgg(ctx, dirs, chainDB, cr, logger)
+ _, _, _, _, agg, clean, err := openSnaps(ctx, dirs, chainDB, logger)
+ if err != nil {
+ return err
+ }
+ defer clean()
view := agg.BeginFilesRo()
defer view.Close()
@@ -1080,8 +1084,7 @@ func openSnaps(ctx context.Context, dirs datadir.Dirs, chainDB kv.RwDB, logger l
blockSnapBuildSema := semaphore.NewWeighted(int64(dbg.BuildSnapshotAllowance))
br = freezeblocks.NewBlockRetire(estimate.CompressSnapshot.Workers(), dirs, blockReader, blockWriter, chainDB, chainConfig, nil, blockSnapBuildSema, logger)
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, blockReader)))
- agg = openAgg(ctx, dirs, chainDB, cr, logger)
+ agg = openAgg(ctx, dirs, chainDB, logger)
agg.SetSnapshotBuildSema(blockSnapBuildSema)
clean = func() {
defer blockSnaps.Close()
@@ -1438,8 +1441,8 @@ func dbCfg(label kv.Label, path string) mdbx.MdbxOpts {
opts = opts.Accede()
return opts
}
-func openAgg(ctx context.Context, dirs datadir.Dirs, chainDB kv.RwDB, cr *rawdb.CanonicalReader, logger log.Logger) *libstate.Aggregator {
- agg, err := libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, chainDB, cr, logger)
+func openAgg(ctx context.Context, dirs datadir.Dirs, chainDB kv.RwDB, logger log.Logger) *libstate.Aggregator {
+ agg, err := libstate.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, chainDB, logger)
if err != nil {
panic(err)
}
diff --git a/turbo/app/sqeeze_cmd.go b/turbo/app/sqeeze_cmd.go
index 8600f175ca7..e42e3c59a22 100644
--- a/turbo/app/sqeeze_cmd.go
+++ b/turbo/app/sqeeze_cmd.go
@@ -24,23 +24,21 @@ import (
"strings"
"time"
+ "github.com/urfave/cli/v2"
+
+ "github.com/erigontech/erigon-lib/common/datadir"
"github.com/erigontech/erigon-lib/common/dir"
"github.com/erigontech/erigon-lib/config3"
"github.com/erigontech/erigon-lib/downloader/snaptype"
"github.com/erigontech/erigon-lib/kv"
- "github.com/erigontech/erigon-lib/kv/rawdbv3"
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/state"
"github.com/erigontech/erigon/cmd/hack/tool/fromdb"
- "github.com/erigontech/erigon/core/rawdb"
+ "github.com/erigontech/erigon/cmd/utils"
snaptype2 "github.com/erigontech/erigon/core/snaptype"
"github.com/erigontech/erigon/eth/ethconfig/estimate"
- "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks"
- "github.com/urfave/cli/v2"
-
- "github.com/erigontech/erigon-lib/common/datadir"
- "github.com/erigontech/erigon/cmd/utils"
"github.com/erigontech/erigon/turbo/debug"
+ "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks"
)
type Sqeeze string
@@ -86,8 +84,11 @@ func doSqueeze(cliCtx *cli.Context) error {
func squeezeCommitment(ctx context.Context, dirs datadir.Dirs, logger log.Logger) error {
db := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
defer db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg := openAgg(ctx, dirs, db, cr, logger)
+ _, _, _, _, agg, clean, err := openSnaps(ctx, dirs, db, logger)
+ if err != nil {
+ return err
+ }
+ defer clean()
agg.SetCompressWorkers(estimate.CompressSnapshot.Workers())
if err := agg.OpenFolder(); err != nil {
return err
@@ -110,8 +111,11 @@ func squeezeCommitment(ctx context.Context, dirs datadir.Dirs, logger log.Logger
func squeezeStorage(ctx context.Context, dirs datadir.Dirs, logger log.Logger) error {
db := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
defer db.Close()
- cr := rawdb.NewCanonicalReader(rawdbv3.TxNums)
- agg := openAgg(ctx, dirs, db, cr, logger)
+ _, _, _, _, agg, clean, err := openSnaps(ctx, dirs, db, logger)
+ if err != nil {
+ return err
+ }
+ defer clean()
agg.SetCompressWorkers(estimate.CompressSnapshot.Workers())
dirsOld := dirs
dirsOld.SnapDomain += "_old"
@@ -129,7 +133,7 @@ func squeezeStorage(ctx context.Context, dirs datadir.Dirs, logger log.Logger) e
ac := agg.BeginFilesRo()
defer ac.Close()
- aggOld, err := state.NewAggregator(ctx, dirsOld, config3.HistoryV3AggregationStep, db, nil, logger)
+ aggOld, err := state.NewAggregator(ctx, dirsOld, config3.HistoryV3AggregationStep, db, logger)
if err != nil {
panic(err)
}
@@ -170,7 +174,7 @@ func squeezeStorage(ctx context.Context, dirs datadir.Dirs, logger log.Logger) e
func squeezeCode(ctx context.Context, dirs datadir.Dirs, logger log.Logger) error {
db := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
defer db.Close()
- agg, err := state.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, nil, logger)
+ agg, err := state.NewAggregator(ctx, dirs, config3.HistoryV3AggregationStep, db, logger)
if err != nil {
return err
}
diff --git a/turbo/jsonrpc/erigon_receipts.go b/turbo/jsonrpc/erigon_receipts.go
index ce05bd7ddfc..c83abef1007 100644
--- a/turbo/jsonrpc/erigon_receipts.go
+++ b/turbo/jsonrpc/erigon_receipts.go
@@ -257,7 +257,7 @@ func (api *ErigonImpl) GetLatestLogs(ctx context.Context, crit filters.FilterCri
continue
}
- _, err = exec.ExecTxn(txNum, txIndex, txn)
+ _, err = exec.ExecTxn(txNum, txIndex, txn, true)
if err != nil {
return nil, err
}
diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go
index bc951c16024..656e7dd262d 100644
--- a/turbo/jsonrpc/eth_api.go
+++ b/turbo/jsonrpc/eth_api.go
@@ -25,24 +25,19 @@ import (
"sync/atomic"
"time"
- "github.com/erigontech/erigon-lib/common/datadir"
- "github.com/erigontech/erigon/turbo/jsonrpc/receipts"
-
- "github.com/erigontech/erigon-lib/common/hexutil"
-
lru "github.com/hashicorp/golang-lru/v2"
"github.com/holiman/uint256"
- "github.com/erigontech/erigon-lib/log/v3"
-
"github.com/erigontech/erigon-lib/chain"
"github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/common/hexutil"
"github.com/erigontech/erigon-lib/common/hexutility"
txpool "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/kvcache"
+ "github.com/erigontech/erigon-lib/log/v3"
types2 "github.com/erigontech/erigon-lib/types"
-
"github.com/erigontech/erigon/common/math"
"github.com/erigontech/erigon/consensus"
"github.com/erigontech/erigon/consensus/misc"
@@ -53,6 +48,7 @@ import (
"github.com/erigontech/erigon/ethdb/prune"
"github.com/erigontech/erigon/rpc"
ethapi2 "github.com/erigontech/erigon/turbo/adapter/ethapi"
+ "github.com/erigontech/erigon/turbo/jsonrpc/receipts"
"github.com/erigontech/erigon/turbo/rpchelper"
"github.com/erigontech/erigon/turbo/services"
)
diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go
index 4ae3f15ec14..f4e499a4514 100644
--- a/turbo/jsonrpc/eth_receipts.go
+++ b/turbo/jsonrpc/eth_receipts.go
@@ -21,8 +21,8 @@ import (
"fmt"
"github.com/RoaringBitmap/roaring"
-
"github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon/core/rawdb/rawtemporaldb"
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/kv"
@@ -205,25 +205,6 @@ func getAddrsBitmap(tx kv.Tx, addrs []common.Address, from, to uint64) (*roaring
return roaring.FastOr(rx...), nil
}
-func applyFilters(out *roaring.Bitmap, tx kv.Tx, begin, end uint64, crit filters.FilterCriteria) error {
- out.AddRange(begin, end+1) // [from,to)
- topicsBitmap, err := getTopicsBitmap(tx, crit.Topics, begin, end)
- if err != nil {
- return err
- }
- if topicsBitmap != nil {
- out.And(topicsBitmap)
- }
- addrBitmap, err := getAddrsBitmap(tx, crit.Addresses, begin, end)
- if err != nil {
- return err
- }
- if addrBitmap != nil {
- out.And(addrBitmap)
- }
- return nil
-}
-
func applyFiltersV3(txNumsReader rawdbv3.TxNumsReader, tx kv.TemporalTx, begin, end uint64, crit filters.FilterCriteria) (out stream.U64, err error) {
//[from,to)
var fromTxNum, toTxNum uint64
@@ -264,7 +245,7 @@ func applyFiltersV3(txNumsReader rawdbv3.TxNumsReader, tx kv.TemporalTx, begin,
}
func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end uint64, crit filters.FilterCriteria) ([]*types.ErigonLog, error) {
- logs := []*types.ErigonLog{}
+ logs := []*types.ErigonLog{} //nolint
addrMap := make(map[common.Address]struct{}, len(crit.Addresses))
for _, v := range crit.Addresses {
@@ -286,11 +267,11 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
if err != nil {
return logs, err
}
+
it := rawdbv3.TxNums2BlockNums(tx,
txNumsReader,
txNumbers, order.Asc)
defer it.Close()
- var timestamp uint64
for it.HasNext() {
if err = ctx.Err(); err != nil {
return nil, err
@@ -314,8 +295,11 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
continue
}
blockHash = header.Hash()
+
+ if err != nil {
+ return nil, err
+ }
exec.ChangeBlock(header)
- timestamp = header.Time
}
//fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, maxTxNumInBlock=%d,mixTxNumInBlock=%d\n", txNum, blockNum, txIndex, maxTxNumInBlock, minTxNumInBlock)
@@ -327,24 +311,29 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
continue
}
- _, err = exec.ExecTxn(txNum, txIndex, txn)
+ _, err = exec.ExecTxn(txNum, txIndex, txn, false)
if err != nil {
return nil, err
}
rawLogs := exec.GetRawLogs(txIndex)
- //TODO: logIndex within the block! no way to calc it now
- //logIndex := uint(0)
- //for _, log := range rawLogs {
- // log.Index = logIndex
- // logIndex++
- //}
- filtered := rawLogs.Filter(addrMap, crit.Topics, 0)
- for _, log := range filtered {
- log.BlockNumber = blockNum
- log.BlockHash = blockHash
- log.TxHash = txn.Hash()
- }
- //TODO: maybe Logs by default and enreach them with
+
+ // `ReadReceipt` does fill `rawLogs` calulated fields. but we don't need it anymore.
+ r, err := rawtemporaldb.ReceiptAsOfWithApply(tx, txNum, rawLogs, txIndex, blockHash, blockNum, txn)
+ if err != nil {
+ return nil, err
+ }
+ var filtered types.Logs
+ if r == nil { // if receipt data is not released yet. fallback to manual field filling. can remove in future.
+ filtered = rawLogs.Filter(addrMap, crit.Topics, 0)
+ for _, log := range filtered {
+ log.BlockNumber = blockNum
+ log.BlockHash = blockHash
+ log.TxHash = txn.Hash()
+ }
+ } else {
+ filtered = r.Logs
+ }
+
for _, filteredLog := range filtered {
logs = append(logs, &types.ErigonLog{
Address: filteredLog.Address,
@@ -356,13 +345,11 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
BlockHash: filteredLog.BlockHash,
Index: filteredLog.Index,
Removed: filteredLog.Removed,
- Timestamp: timestamp,
+ Timestamp: header.Time,
})
}
}
- //stats := api._agg.GetAndResetStats()
- //log.Info("Finished", "duration", time.Since(start), "history queries", stats.FilesQueries, "ef search duration", stats.EfSearchTime)
return logs, nil
}
@@ -587,7 +574,7 @@ func (i *MapTxNum2BlockNumIter) Next() (txNum, blockNum uint64, txIndex int, isF
if !ok {
_lb, _lt, _ := i.txNumsReader.Last(i.tx)
_fb, _ft, _ := i.txNumsReader.First(i.tx)
- return txNum, i.blockNum, txIndex, isFinalTxn, blockNumChanged, fmt.Errorf("can't find blockNumber by txnID=%d; last in db: (%d-%d, %d-%d)", txNum, _fb, _lb, _ft, _lt)
+ return txNum, i.blockNum, txIndex, isFinalTxn, blockNumChanged, fmt.Errorf("can't find blockNumber by txNum=%d; last in db: (%d-%d, %d-%d)", txNum, _fb, _lb, _ft, _lt)
}
}
blockNum = i.blockNum
diff --git a/turbo/jsonrpc/otterscan_generic_tracer.go b/turbo/jsonrpc/otterscan_generic_tracer.go
index 49041e5599a..485da9bd28b 100644
--- a/turbo/jsonrpc/otterscan_generic_tracer.go
+++ b/turbo/jsonrpc/otterscan_generic_tracer.go
@@ -58,7 +58,7 @@ func (api *OtterscanAPIImpl) genericTracer(dbtx kv.Tx, ctx context.Context, bloc
log.Warn("[rpc genericTracer] txn is nil", "blockNum", blockNum, "txIndex", txIndex)
return nil
}
- _, err = executor.ExecTxn(txnID, txIndex, txn)
+ _, err = executor.ExecTxn(txnID, txIndex, txn, false)
if err != nil {
return err
}
diff --git a/turbo/jsonrpc/otterscan_search_v3.go b/turbo/jsonrpc/otterscan_search_v3.go
index aa3ebb3d527..72a39a388fb 100644
--- a/turbo/jsonrpc/otterscan_search_v3.go
+++ b/turbo/jsonrpc/otterscan_search_v3.go
@@ -100,7 +100,7 @@ func (api *OtterscanAPIImpl) buildSearchResults(ctx context.Context, tx kv.Tempo
log.Warn("[rpc] txn not found", "blockNum", blockNum, "txIndex", txIndex)
continue
}
- res, err := exec.ExecTxn(txNum, txIndex, txn)
+ res, err := exec.ExecTxn(txNum, txIndex, txn, true)
if err != nil {
return nil, nil, false, err
}
diff --git a/turbo/jsonrpc/receipts/receipts_generator.go b/turbo/jsonrpc/receipts/receipts_generator.go
index 6cafd842cdc..305b3126d12 100644
--- a/turbo/jsonrpc/receipts/receipts_generator.go
+++ b/turbo/jsonrpc/receipts/receipts_generator.go
@@ -45,10 +45,8 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Tx
return receipts, nil
}
- engine := g.engine
-
txNumsReader := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, g.blockReader))
- _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, cfg, g.blockReader, txNumsReader, tx, 0)
+ _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, g.engine, block, cfg, g.blockReader, txNumsReader, tx, 0)
if err != nil {
return nil, err
}
@@ -71,7 +69,7 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Tx
header := block.HeaderNoCopy()
for i, txn := range block.Transactions() {
ibs.SetTxContext(i)
- receipt, _, err := core.ApplyTransaction(cfg, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, noopWriter, header, txn, usedGas, usedBlobGas, vm.Config{})
+ receipt, _, err := core.ApplyTransaction(cfg, core.GetHashFn(header, getHeader), g.engine, nil, gp, ibs, noopWriter, header, txn, usedGas, usedBlobGas, vm.Config{})
if err != nil {
return nil, err
}
diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go
index cc16df78e7e..65224f87829 100644
--- a/turbo/jsonrpc/trace_adhoc.go
+++ b/turbo/jsonrpc/trace_adhoc.go
@@ -1306,12 +1306,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
header.Time,
)
} else {
- if args.txHash != nil {
- ibs.SetTxContext(txIndex)
- } else {
- ibs.SetTxContext(txIndex)
- }
-
+ ibs.SetTxContext(txIndex)
txCtx := core.NewEVMTxContext(msg)
evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig)
gp := new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas())