From cc06bcebf3ea8fa46ce463e8c832ecc6a4ecb4b4 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 6 Dec 2022 11:27:22 +0000 Subject: [PATCH] fix: state listener observe writes at wrong time (backport #13516) (#14138) * fix: state listener observe writes at wrong time (#13516) * fix: state listener observe writes at wrong time Closes: #13457 Currently state listener is notified when the cache store write, which happens in commit event only, which breaks the current design. The solution (as discussed in the issue) is to listen state writes on rootmulti store only. It also changes the file streamer to output single data file for the writes in the whole block, since we can't distinguish writes from different stage of abci events. It adds new config items for file streamer: - streamers.file.output-metadata - streamers.file.stop-node-on-error - streamers.file.fsync * synchronous abci call, and format doc * fix comment * update file streamer readme and fix typos * typo * fix: state listener observe writes at wrong time Closes: #13457 Currently state listener is notified when the cache store write, which happens in commit event only, which breaks the current design. The solution (as discussed in the issue) is to listen state writes on rootmulti store only. It also changes the file streamer to output single data file for the writes in the whole block, since we can't distinguish writes from different stage of abci events. It adds new config items for file streamer: - streamers.file.output-metadata - streamers.file.stop-node-on-error - streamers.file.fsync synchronous abci call, and format doc fix comment update file streamer readme and fix typos typo * improve UX of file streamer, make it immediately usable after enabled - set default value to write_dir. - make write_dir based on home directory by default. - auto-create the directory if not exists. 
* get homePath from opts Co-authored-by: Marko (cherry picked from commit 1f91ee2ee941fd9a1dd4bc3acecd753e3cb7e237) # Conflicts: # CHANGELOG.md # api/cosmos/base/store/v1beta1/listening.pulsar.go # baseapp/streaming.go # docs/architecture/adr-038-state-listening.md # server/config/toml.go # simapp/app_v2.go # store/cachemulti/store.go # store/iavl/store.go # store/mem/store.go # store/streaming/constructor.go # store/streaming/file/service.go # store/streaming/file/service_test.go # store/types/listening.pb.go # store/types/store.go * `make proto-gen`, update changelog, delete unnecessary files * fix conflicts * fix conflicts * revert api breaking change * fix build * fix unit test Co-authored-by: yihuang Co-authored-by: Julien Robert Co-authored-by: Marko --- CHANGELOG.md | 8 + baseapp/abci.go | 26 +- baseapp/streaming.go | 14 +- docs/architecture/adr-038-state-listening.md | 438 +++++----- .../cosmos/base/store/v1beta1/listening.proto | 18 + server/config/config.go | 17 +- server/config/toml.go | 7 + store/cachekv/store.go | 6 - store/cachemulti/store.go | 40 +- store/dbadapter/store.go | 6 - store/dbadapter/store_test.go | 3 - store/gaskv/store.go | 5 - store/gaskv/store_test.go | 1 - store/iavl/store.go | 6 - store/iavl/store_test.go | 3 - store/listenkv/store.go | 6 - store/listenkv/store_test.go | 5 - store/mem/mem_test.go | 3 - store/mem/store.go | 6 - store/prefix/store.go | 6 - store/prefix/store_test.go | 3 - store/rootmulti/store.go | 40 +- store/rootmulti/store_test.go | 3 - store/streaming/constructor.go | 68 +- store/streaming/constructor_test.go | 10 +- store/streaming/file/README.md | 85 +- store/streaming/file/service.go | 313 +++---- store/streaming/file/service_test.go | 200 ++--- store/tracekv/store.go | 5 - store/tracekv/store_test.go | 5 - store/types/listening.go | 34 + store/types/listening.pb.go | 770 +++++++++++++++++- store/types/store.go | 20 +- 33 files changed, 1446 insertions(+), 734 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 678d84e3236c..7424698b161c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,12 +48,20 @@ Ref: https://keepachangelog.com/en/1.0.0/ ### API Breaking Changes +* (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Update State Streaming APIs: + * Add method `ListenCommit` to `ABCIListener` + * Move `ListeningEnabled` and `AddListener` methods to `CommitMultiStore` + * Remove `CacheWrapWithListeners` from `CacheWrap` and `CacheWrapper` interfaces + * Remove listening APIs from the caching layer (it should only listen to the `rootmulti.Store`) + * Add three new options to the file streaming service constructor. + * Modify `ABCIListener` such that any error from any method will always halt the app via `panic` * (store) [#13529](https://github.com/cosmos/cosmos-sdk/pull/13529) Add method `LatestVersion` to `MultiStore` interface, add method `SetQueryMultiStore` to baseapp to support alternative `MultiStore` implementation for query service. ### Bug Fixes * (baseapp) [#13983](https://github.com/cosmos/cosmos-sdk/pull/13983) Don't emit duplicate ante-handler events when a post-handler is defined. * (baseapp) [#14049](https://github.com/cosmos/cosmos-sdk/pull/14049) Fix state sync when interval is zero. +* (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Fix state listener that was observing writes at the wrong time.
## [v0.46.6](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.46.6) - 2022-11-18 diff --git a/baseapp/abci.go b/baseapp/abci.go index ec5a2d8812fa..d93779d02943 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -189,7 +189,7 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg // call the hooks with the BeginBlock messages for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + panic(fmt.Errorf("BeginBlock listening hook failed, height: %d, err: %w", req.Header.Height, err)) } } @@ -214,7 +214,7 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc // call the streaming service hooks with the EndBlock messages for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + panic(fmt.Errorf("EndBlock listening hook failed, height: %d, err: %w", req.Height, err)) } } @@ -268,7 +268,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliv defer func() { for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("DeliverTx listening hook failed", "err", err) + panic(fmt.Errorf("DeliverTx listening hook failed: %w", err)) } } }() @@ -302,7 +302,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliv // defined in config, Commit will execute a deferred function call to check // against that height and gracefully halt if it matches the latest committed // height. -func (app *BaseApp) Commit() (res abci.ResponseCommit) { +func (app *BaseApp) Commit() abci.ResponseCommit { header := app.deliverState.ctx.BlockHeader() retainHeight := app.GetBlockRetentionHeight(header.Height) @@ -311,6 +311,19 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { // MultiStore (app.cms) so when Commit() is called is persists those values. app.deliverState.ms.Write() commitID := app.cms.Commit() + + res := abci.ResponseCommit{ + Data: commitID.Hash, + RetainHeight: retainHeight, + } + + // call the hooks with the Commit message + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenCommit(app.deliverState.ctx, res); err != nil { + panic(fmt.Errorf("Commit listening hook failed, height: %d, err: %w", header.Height, err)) + } + } + app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID)) // Reset the Check state to the latest committed. 
@@ -342,10 +355,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { go app.snapshotManager.SnapshotIfApplicable(header.Height) - return abci.ResponseCommit{ - Data: commitID.Hash, - RetainHeight: retainHeight, - } + return res } // halt attempts to gracefully shutdown the node via SIGINT and SIGTERM falling diff --git a/baseapp/streaming.go b/baseapp/streaming.go index 39e0f1ca6e9b..b8b382ae05b3 100644 --- a/baseapp/streaming.go +++ b/baseapp/streaming.go @@ -1,23 +1,27 @@ package baseapp import ( + "context" "io" "sync" abci "github.com/tendermint/tendermint/abci/types" store "github.com/cosmos/cosmos-sdk/store/types" - "github.com/cosmos/cosmos-sdk/types" ) -// ABCIListener interface used to hook into the ABCI message processing of the BaseApp +// ABCIListener interface used to hook into the ABCI message processing of the BaseApp. +// The error results are propagated to the consensus state machine; +// if you don't want to affect consensus, handle the errors internally and always return `nil` from these APIs. type ABCIListener interface { // ListenBeginBlock updates the streaming service with the latest BeginBlock messages - ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error + ListenBeginBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error // ListenEndBlock updates the streaming service with the latest EndBlock messages - ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error + ListenEndBlock(ctx context.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error // ListenDeliverTx updates the streaming service with the latest DeliverTx messages - ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + // ListenCommit updates the streaming service with the latest Commit event + ListenCommit(ctx context.Context, res abci.ResponseCommit) error } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md index b9c209b4aa32..74f92d2f67b5 100644 --- a/docs/architecture/adr-038-state-listening.md +++ b/docs/architecture/adr-038-state-listening.md @@ -3,6 +3,11 @@ ## Changelog * 11/23/2020: Initial draft +* 10/14/2022: + * Add `ListenCommit`, flatten the state writes in a block to a single batch. + * Remove listeners from cache stores; we should only listen to `rootmulti.Store`. + * Remove `HaltAppOnDeliveryError()`; the errors are propagated by default, and implementations should return nil if they don't want to propagate errors. + ## Status @@ -20,7 +25,7 @@ In addition to these request/response queries, it would be beneficial to have a ## Decision -We will modify the `MultiStore` interface and its concrete (`rootmulti` and `cachemulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores. +We will modify the `CommitMultiStore` interface and its concrete (`rootmulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores.
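To preview the resulting API (both `AddListeners` and the `MemoryListener` used here are introduced below; the wiring itself is an illustrative assumption, not part of the patch):

```go
// Illustrative wiring, assuming a mounted CommitMultiStore `cms` and a
// KVStoreKey `bankKey`: attach a MemoryListener so that every write reaching
// rootmulti.Store for the bank store is recorded exactly once.
listener := types.NewMemoryListener(bankKey)
cms.AddListeners(bankKey, []types.WriteListener{listener})

// Later, e.g. when ListenCommit fires, drain the writes observed for the block.
pairs := listener.PopStateCache()
_ = pairs // hand the batch to a streaming service
```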
We don't need to listen to cache stores, because we can't be sure that their writes will eventually be committed, and the committed writes are duplicated in `rootmulti.Store` anyway, so we should only listen to `rootmulti.Store`. We will introduce a plugin system for configuring and running streaming services that write these state changes and their surrounding ABCI message context to different destinations. ### Listening interface @@ -39,8 +44,8 @@ type WriteListener interface { ### Listener type -We will create a concrete implementation of the `WriteListener` interface in `store/types/listening.go`, that writes out protobuf -encoded KV pairs to an underlying `io.Writer`. +We will create two concrete implementations of the `WriteListener` interface in `store/types/listening.go`: one that writes out protobuf +encoded KV pairs to an underlying `io.Writer`, and one that simply accumulates them in memory. This will include defining a simple protobuf type for the KV pairs. In addition to the key and value fields this message will include the StoreKey for the originating KVStore so that we can write out from separate KVStores to the same stream/file @@ -73,19 +78,55 @@ func NewStoreKVPairWriteListener(w io.Writer, m codec.BinaryCodec) *StoreKVPairW // OnWrite satisfies the WriteListener interface by writing length-prefixed protobuf encoded StoreKVPairs func (wl *StoreKVPairWriteListener) OnWrite(storeKey types.StoreKey, key []byte, value []byte, delete bool) error { - kvPair := new(types.StoreKVPair) - kvPair.StoreKey = storeKey.Name() - kvPair.Delete = Delete - kvPair.Key = key - kvPair.Value = value - by, err := wl.marshaller.MarshalBinaryLengthPrefixed(kvPair) - if err != nil { - return err - } - if _, err := wl.writer.Write(by); err != nil { - return err - } - return nil + kvPair := new(types.StoreKVPair) + kvPair.StoreKey = storeKey.Name() + kvPair.Delete = delete + kvPair.Key = key + kvPair.Value = value + by, err := wl.marshaller.MarshalBinaryLengthPrefixed(kvPair) + if err != nil { + return err + } + if _, err := wl.writer.Write(by); err != nil { + return err + } + return nil +} +``` + +```golang +// MemoryListener listens to the state writes and accumulates the records in memory. +type MemoryListener struct { + key StoreKey + stateCache []StoreKVPair +} + +// NewMemoryListener creates a listener that accumulates the state writes in memory.
+func NewMemoryListener(key StoreKey) *MemoryListener { + return &MemoryListener{key: key} +} + +// OnWrite implements WriteListener interface +func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error { + fl.stateCache = append(fl.stateCache, StoreKVPair{ + StoreKey: storeKey.Name(), + Delete: delete, + Key: key, + Value: value, + }) + return nil +} + +// PopStateCache returns the current state caches and set to nil +func (fl *MemoryListener) PopStateCache() []StoreKVPair { + res := fl.stateCache + fl.stateCache = nil + return res +} + +// StoreKey returns the storeKey it listens to +func (fl *MemoryListener) StoreKey() StoreKey { + return fl.key } ``` @@ -99,106 +140,94 @@ We can configure the `Store` with a set of `WriteListener`s which stream the out // Operations are traced on each core KVStore call and written to any of the // underlying listeners with the proper key and operation permissions type Store struct { - parent types.KVStore - listeners []types.WriteListener - parentStoreKey types.StoreKey + parent types.KVStore + listeners []types.WriteListener + parentStoreKey types.StoreKey } // NewStore returns a reference to a new traceKVStore given a parent // KVStore implementation and a buffered writer. func NewStore(parent types.KVStore, psk types.StoreKey, listeners []types.WriteListener) *Store { - return &Store{parent: parent, listeners: listeners, parentStoreKey: psk} + return &Store{parent: parent, listeners: listeners, parentStoreKey: psk} } // Set implements the KVStore interface. It traces a write operation and // delegates the Set call to the parent KVStore. func (s *Store) Set(key []byte, value []byte) { - types.AssertValidKey(key) - s.parent.Set(key, value) - s.onWrite(false, key, value) + types.AssertValidKey(key) + s.parent.Set(key, value) + s.onWrite(false, key, value) } // Delete implements the KVStore interface. It traces a write operation and // delegates the Delete call to the parent KVStore. func (s *Store) Delete(key []byte) { - s.parent.Delete(key) - s.onWrite(true, key, nil) + s.parent.Delete(key) + s.onWrite(true, key, nil) } // onWrite writes a KVStore operation to all the WriteListeners func (s *Store) onWrite(delete bool, key, value []byte) { - for _, l := range s.listeners { - if err := l.OnWrite(s.parentStoreKey, key, value, delete); err != nil { - // log error - } - } + for _, l := range s.listeners { + if err := l.OnWrite(s.parentStoreKey, key, value, delete); err != nil { + // log error + } + } } ``` ### MultiStore interface updates -We will update the `MultiStore` interface to allow us to wrap a set of listeners around a specific `KVStore`. -Additionally, we will update the `CacheWrap` and `CacheWrapper` interfaces to enable listening in the caching layer. +We will update the `CommitMultiStore` interface to allow us to wrap a set of listeners around a specific `KVStore`. ```go -type MultiStore interface { - ... +type CommitMultiStore interface { + ... - // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey - ListeningEnabled(key StoreKey) bool + // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey + ListeningEnabled(key StoreKey) bool - // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey - // It appends the listeners to a current set, if one already exists - AddListeners(key StoreKey, listeners []WriteListener) -} -``` - -```go -type CacheWrap interface { - ... 
- - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap -} - -type CacheWrapper interface { - ... - - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap + // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey + // It appends the listeners to the current set, if one already exists + AddListeners(key StoreKey, listeners []WriteListener) } ``` ### MultiStore implementation updates -We will modify all of the `Store` and `MultiStore` implementations to satisfy these new interfaces, and adjust the `rootmulti` `GetKVStore` method +We will modify all of the `CommitMultiStore` implementations to satisfy these new interfaces, and adjust the `rootmulti` `GetKVStore` method to wrap the returned `KVStore` with a `listenkv.Store` if listening is turned on for that `Store`. ```go func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { - store := rs.stores[key].(types.KVStore) + store := rs.stores[key].(types.KVStore) - if rs.TracingEnabled() { - store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext) - } - if rs.ListeningEnabled(key) { - store = listenkv.NewStore(key, store, rs.listeners[key]) - } + if rs.TracingEnabled() { + store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext) + } + if rs.ListeningEnabled(key) { + store = listenkv.NewStore(key, store, rs.listeners[key]) + } - return store + return store } ``` -We will also adjust the `cachemulti` constructor methods and the `rootmulti` `CacheMultiStore` method to forward the listeners -to and enable listening in the cache layer. +We will also adjust the `rootmulti` `CacheMultiStore` method to wrap the stores with `listenkv.Store` to enable listening when the cache layer writes. ```go func (rs *Store) CacheMultiStore() types.CacheMultiStore { stores := make(map[types.StoreKey]types.CacheWrapper) for k, v := range rs.stores { - stores[k] = v + store := v.(types.KVStore) + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store; + // setting the same listeners on the cache store would cause duplicated writes to be observed. + if rs.ListeningEnabled(k) { + store = listenkv.NewStore(store, k, rs.listeners[k]) + } + stores[k] = store } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners) + return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) } ``` @@ -208,35 +237,34 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore { We will introduce a new `StreamingService` interface for exposing `WriteListener` data streams to external consumers. In addition to streaming state changes as `StoreKVPair`s, the interface satisfies an `ABCIListener` interface that plugs -into the BaseApp and relays ABCI requests and responses so that the service can group the state changes with the ABCI -requests that affected them and the ABCI responses they affected. The `ABCIListener` interface also exposes a -`ListenSuccess` method which is (optionally) used by the `BaseApp` to await positive acknowledgement of message -receipt from the `StreamingService`. +into the BaseApp and relays ABCI requests and responses so that the service can observe the block metadata as well.
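For example, a service backed by the `MemoryListener` described above can drain each listener when `ListenCommit` fires and treat the result as the complete write set of the block. The helper below is a sketch under that assumption; the name `collectBlockWrites` is hypothetical.

```go
// collectBlockWrites drains every MemoryListener at Commit time, grouping the
// block's state changes by store name. PopStateCache resets each listener, so
// the next block starts from an empty batch.
func collectBlockWrites(listeners []*types.MemoryListener) map[string][]types.StoreKVPair {
	batch := make(map[string][]types.StoreKVPair)
	for _, l := range listeners {
		name := l.StoreKey().Name()
		batch[name] = append(batch[name], l.PopStateCache()...)
	}
	return batch
}
```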
+ +The `WriteListener`s of a `StreamingService` listen to the `rootmulti.Store`, which is only written to at commit time by the cache store of `deliverState`. ```go // ABCIListener interface used to hook into the ABCI message processing of the BaseApp type ABCIListener interface { - // ListenBeginBlock updates the streaming service with the latest BeginBlock messages - ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error - // ListenEndBlock updates the steaming service with the latest EndBlock messages - ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error - // ListenDeliverTx updates the steaming service with the latest DeliverTx messages - ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error - // ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service - // after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt - ListenSuccess() <-chan bool + // ListenBeginBlock updates the streaming service with the latest BeginBlock messages + ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error + // ListenEndBlock updates the streaming service with the latest EndBlock messages + ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error + // ListenDeliverTx updates the streaming service with the latest DeliverTx messages + ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + // ListenCommit updates the streaming service with the latest Commit message; + // all the state writes of the current block will have been notified before this message. + ListenCommit(ctx types.Context, res abci.ResponseCommit) error } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks type StreamingService interface { - // Stream is the streaming service loop, awaits kv pairs and writes them to a destination stream or file - Stream(wg *sync.WaitGroup) error - // Listeners returns the streaming service's listeners for the BaseApp to register - Listeners() map[types.StoreKey][]store.WriteListener - // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp - ABCIListener - // Closer interface - io.Closer + // Stream is the streaming service loop, awaits kv pairs and writes them to a destination stream or file + Stream(wg *sync.WaitGroup) error + // Listeners returns the streaming service's listeners for the BaseApp to register + Listeners() map[types.StoreKey][]store.WriteListener + // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp + ABCIListener + // Closer interface + io.Closer } ``` @@ -257,15 +285,6 @@ func (app *BaseApp) SetStreamingService(s StreamingService) { } ``` -We will add a new method to the `BaseApp` that is used to configure a global wait limit for receiving positive acknowledgement -of message receipt from the integrated `StreamingService`s. - -```go -func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) { - app.globalWaitLimit = t -} -``` - We will also modify the `BeginBlock`, `EndBlock`, and `DeliverTx` methods to pass ABCI requests and responses to any streaming service hooks registered with the `BaseApp`.
@@ -274,10 +293,14 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg ... - // Call the streaming service hooks with the BeginBlock messages - for _, listener := range app.abciListeners { - listener.ListenBeginBlock(app.deliverState.ctx, req, res) - } + defer func() { + // call the hooks with the BeginBlock messages + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + panic(sdkerrors.Wrapf(err, "BeginBlock listening hook failed, height: %d", req.Header.Height)) + } + } + }() return res } @@ -288,96 +311,82 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc ... - // Call the streaming service hooks with the EndBlock messages - for _, listener := range app.abciListeners { - listener.ListenEndBlock(app.deliverState.ctx, req, res) - } + defer func() { + // Call the streaming service hooks with the EndBlock messages + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + panic(sdkerrors.Wrapf(err, "EndBlock listening hook failed, height: %d", req.Height)) + } + } + }() return res } ``` ```go -func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { +func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliverTx) { - ... - - gInfo, result, err := app.runTx(runTxModeDeliver, req.Tx) - if err != nil { - resultStr = "failed" - res := sdkerrors.ResponseDeliverTx(err, gInfo.GasWanted, gInfo.GasUsed, app.trace) - // If we throw an error, be sure to still call the streaming service's hook - for _, listener := range app.abciListeners { - listener.ListenDeliverTx(app.deliverState.ctx, req, res) + defer func() { + // call the hooks with the DeliverTx messages + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil { + panic(sdkerrors.Wrap(err, "DeliverTx listening hook failed")) + } } - return res - } - - res := abci.ResponseDeliverTx{ - GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? - GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints? - Log: result.Log, - Data: result.Data, - Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents), - } + }() - // Call the streaming service hooks with the DeliverTx messages - for _, listener := range app.abciListeners { - listener.ListenDeliverTx(app.deliverState.ctx, req, res) - } + ... return res } ``` -We will also modify the `Commit` method to process `success/failure` signals from the integrated `StreamingService`s using -the `ABCIListener.ListenSuccess()` method. Each `StreamingService` has an internal wait threshold after which it sends -`false` to the `ListenSuccess()` channel, and the BaseApp also imposes a configurable global wait limit. -If the `StreamingService` is operating in a "fire-and-forget" mode, `ListenSuccess()` should immediately return `true` -off the channel despite the success status of the service. - -```go -func (app *BaseApp) Commit() (res abci.ResponseCommit) { - - ... +```golang +func (app *BaseApp) Commit() abci.ResponseCommit { + header := app.deliverState.ctx.BlockHeader() + retainHeight := app.GetBlockRetentionHeight(header.Height) - var halt bool + // Write the DeliverTx state into branched storage and commit the MultiStore. 
+ // The write to the DeliverTx state writes all state transitions to the root + // MultiStore (app.cms) so when Commit() is called it persists those values. + app.deliverState.ms.Write() + commitID := app.cms.Commit() - switch { - case app.haltHeight > 0 && uint64(header.Height) >= app.haltHeight: - halt = true - - case app.haltTime > 0 && header.Time.Unix() >= int64(app.haltTime): - halt = true + res := abci.ResponseCommit{ + Data: commitID.Hash, + RetainHeight: retainHeight, } - // each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel - // but the BaseApp also imposes a global wait limit - maxWait := time.NewTicker(app.globalWaitLimit) - for _, lis := range app.abciListeners { - select { - case success := <- lis.ListenSuccess(): - if success == false { - halt = true - break - } - case <- maxWait.C: - halt = true - break + // call the hooks with the Commit message + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenCommit(app.deliverState.ctx, res); err != nil { + panic(sdkerrors.Wrapf(err, "Commit listening hook failed, height: %d", header.Height)) } } - if halt { - // Halt the binary and allow Tendermint to receive the ResponseCommit - // response with the commit ID hash. This will allow the node to successfully - // restart and process blocks assuming the halt configuration has been - // reset or moved to a more distant value. - app.halt() - } + app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID)) + ... +} ``` #### Error Handling And Async Consumers `ABCIListener`s are called synchronously inside the consensus state machine; a returned error causes a panic, which in turn halts the consensus state machine. The implementer should be careful not to break consensus unexpectedly or slow it down too much. For async use cases, one can spawn a goroutine internally to avoid slowing down the consensus state machine, handle the errors internally, and always return `nil` to avoid halting the consensus state machine on error. Furthermore, in most cases only the built-in file streamer needs to listen to state changes directly inside the cosmos-sdk; other consumers should subscribe to the file streamer's output externally. #### File Streamer We provide a minimal filesystem-based implementation inside the cosmos-sdk, with options to write the output files reliably. The output files can be further consumed by external consumers, so most state listeners don't actually need to live inside the sdk and node, which improves node robustness and simplifies sdk internals. The file streamer can be wired into an app like this: ```golang exposeStoreKeys := ... // decide the list of store keys to listen to service, err := file.NewStreamingService(streamingDir, "", exposeStoreKeys, appCodec, logger) bApp.SetStreamingService(service) ``` #### Plugin system @@ -421,7 +430,7 @@ type StateStreamingPlugin interface { Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error // Start starts the background streaming process of the plugin streaming service - Start(wg *sync.WaitGroup) + Start(wg *sync.WaitGroup) error // Plugin is the base Plugin interface Plugin @@ -435,41 +444,47 @@ e.g.
in `NewSimApp`: ```go func NewSimApp( - logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool, - homePath string, invCheckPeriod uint, encodingConfig simappparams.EncodingConfig, - appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp), + logger log.Logger, + db dbm.DB, + traceStore io.Writer, + loadLatest bool, + appOpts servertypes.AppOptions, + baseAppOptions ...func(*baseapp.BaseApp), ) *SimApp { ... - // this loads the preloaded and any plugins found in `plugins.dir` - pluginLoader, err := loader.NewPluginLoader(appOpts, logger) - if err != nil { - // handle error - } - - // initialize the loaded plugins - if err := pluginLoader.Initialize(); err != nil { - // hanlde error - } - keys := sdk.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, - minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, - evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey, + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, + minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, + govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey, ) - // register the plugin(s) with the BaseApp - if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { - // handle error - } + pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) + if cast.ToBool(appOpts.Get(pluginsOnKey)) { + // this loads the preloaded and any plugins found in `plugins.dir` + pluginLoader, err := loader.NewPluginLoader(appOpts, logger) + if err != nil { + // handle error + } - // start the plugin services, optionally use wg to synchronize shutdown using io.Closer - wg := new(sync.WaitGroup) - if err := pluginLoader.Start(wg); err != nil { - // handler error - } + // initialize the loaded plugins + if err := pluginLoader.Initialize(); err != nil { + // handle error + } + + // register the plugin(s) with the BaseApp + if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { + // handle error + } + + // start the plugin services, optionally use wg to synchronize shutdown using io.Closer + wg := new(sync.WaitGroup) + if err := pluginLoader.Start(wg); err != nil { + // handler error + } + } ... @@ -485,39 +500,42 @@ The plugin system will be configured within an app's app.toml file. ```toml [plugins] on = false # turn the plugin system, as a whole, on or off - disabled = ["list", "of", "plugin", "names", "to", "disable"] + enabled = ["list", "of", "plugin", "names", "to", "enable"] dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" ``` -There will be three parameters for configuring the plugin system: `plugins.on`, `plugins.disabled` and `plugins.dir`. +There will be three parameters for configuring the plugin system: `plugins.on`, `plugins.enabled` and `plugins.dir`. `plugins.on` is a bool that turns on or off the plugin system at large, `plugins.dir` directs the system to a directory -to load plugins from, and `plugins.disabled` is a list of names for the plugins we want to disable (useful for disabling preloaded plugins). +to load plugins from, and `plugins.enabled` provides `opt-in` semantics to plugin names to enable (including preloaded plugins). 
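Under these opt-in semantics, a loader would consult both keys before starting a plugin. A minimal sketch of that check (the key strings follow the `[plugins]` table above; the helper itself is hypothetical):

```go
// pluginEnabled reports whether the plugin system is on and the named plugin
// was explicitly opted in via plugins.enabled.
func pluginEnabled(appOpts servertypes.AppOptions, name string) bool {
	if !cast.ToBool(appOpts.Get("plugins.on")) {
		return false
	}
	for _, p := range cast.ToStringSlice(appOpts.Get("plugins.enabled")) {
		if p == name {
			return true
		}
	}
	return false
}
```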
Configuration of a given plugin is ultimately specific to the plugin, but we will introduce some standards here: Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`). + Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`). It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys -(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a mode (e.g. `plugins.streaming.file.mode`) -that signifies whether the service operates in a fire-and-forget capacity (`faf`) or the BaseApp should require positive -acknowledgement of message receipt by the service (`ack`). +(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.halt_app_on_delivery_error`) +that signifies whether the service operates in a fire-and-forget capacity, or stops the BaseApp when an error occurs in +any of `ListenBeginBlock`, `ListenEndBlock`, and `ListenDeliverTx`. e.g. ```toml [plugins] on = false # turn the plugin system, as a whole, on or off - disabled = ["list", "of", "plugin", "names", "to", "disable"] + enabled = ["list", "of", "plugin", "names", "to", "enable"] dir = "the directory to load non-preloaded plugins from; defaults to " [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their plugin name [plugins.streaming.file] # the specific parameters for the file streaming service plugin keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - writeDir = "path to the write directory" + write_dir = "path to the write directory" prefix = "optional prefix to prepend to the generated file names" - mode = "faf" # faf == fire-and-forget; ack == require positive acknowledge of receipt + halt_app_on_delivery_error = "false" # false == fire-and-forget; true == stop the application [plugins.streaming.kafka] - ... - [plugins.modules] + keys = [] + topic_prefix = "block" # Optional prefix for topic names where data will be stored. + flush_timeout_ms = 5000 # Flush and wait for outstanding messages and requests to complete delivery when calling `StreamingService.Close()`. (milliseconds) + halt_app_on_delivery_error = true # Whether or not to halt the application when the plugin fails to deliver message(s). ...
``` @@ -535,7 +553,7 @@ These changes will provide a means of subscribing to KVStore state changes in re ### Backwards Compatibility -* This ADR changes the `MultiStore`, `CacheWrap`, and `CacheWrapper` interfaces, implementations supporting the previous version of these interfaces will not support the new ones +* This ADR changes the `CommitMultiStore` interface; implementations supporting the previous version of this interface will not support the new one ### Positive @@ -543,7 +561,7 @@ These changes will provide a means of subscribing to KVStore state changes in re ### Negative -* Changes `MultiStore`, `CacheWrap`, and `CacheWrapper` interfaces +* Changes the `CommitMultiStore` interface ### Neutral diff --git a/proto/cosmos/base/store/v1beta1/listening.proto b/proto/cosmos/base/store/v1beta1/listening.proto index 359997109c10..753f7c165512 100644 --- a/proto/cosmos/base/store/v1beta1/listening.proto +++ b/proto/cosmos/base/store/v1beta1/listening.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package cosmos.base.store.v1beta1; +import "tendermint/abci/types.proto"; + option go_package = "github.com/cosmos/cosmos-sdk/store/types"; // StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes) @@ -14,3 +16,19 @@ message StoreKVPair { bytes key = 3; bytes value = 4; } + +// BlockMetadata contains all the abci event data of a block; +// the file streamer dumps it into files together with the state changes. +message BlockMetadata { + // DeliverTx encapsulates the deliver tx request and response. + message DeliverTx { + tendermint.abci.RequestDeliverTx request = 1; + tendermint.abci.ResponseDeliverTx response = 2; + } + tendermint.abci.RequestBeginBlock request_begin_block = 1; + tendermint.abci.ResponseBeginBlock response_begin_block = 2; + repeated DeliverTx deliver_txs = 3; + tendermint.abci.RequestEndBlock request_end_block = 4; + tendermint.abci.ResponseEndBlock response_end_block = 5; + tendermint.abci.ResponseCommit response_commit = 6; +} diff --git a/server/config/config.go b/server/config/config.go index e34de76614d3..75b25df63ed5 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -218,6 +218,15 @@ type ( Keys []string `mapstructure:"keys"` WriteDir string `mapstructure:"write_dir"` Prefix string `mapstructure:"prefix"` + // OutputMetadata specifies whether to output the block metadata file, which includes + // the abci requests/responses; otherwise only the data file is output. + OutputMetadata bool `mapstructure:"output-metadata"` + // StopNodeOnError specifies whether to propagate the streamer errors to the consensus + // state machine; it's necessary for the data integrity of the output. + StopNodeOnError bool `mapstructure:"stop-node-on-error"` + // Fsync specifies whether to call fsync after writing the files; it slows down + // the commit, but prevents data loss in the face of a system crash. + Fsync bool `mapstructure:"fsync"` } ) @@ -320,7 +329,13 @@ func DefaultConfig() *Config { }, Streamers: StreamersConfig{ File: FileStreamerConfig{ - Keys: []string{"*"}, + Keys: []string{"*"}, + WriteDir: "data/file_streamer", + OutputMetadata: true, + StopNodeOnError: true, + // NOTICE: the default config doesn't protect the streamer data integrity + // in the face of a system crash. + Fsync: false, }, }, } diff --git a/server/config/toml.go b/server/config/toml.go index e69f3845222b..e729371271bd 100644 --- a/server/config/toml.go +++ b/server/config/toml.go @@ -248,6 +248,13 @@ streamers = [{{ range .Store.Streamers }}{{ printf "%q, " .
}}{{end}}] keys = [{{ range .Streamers.File.Keys }}{{ printf "%q, " . }}{{end}}] write_dir = "{{ .Streamers.File.WriteDir }}" prefix = "{{ .Streamers.File.Prefix }}" +# output-metadata specifies whether to output the metadata file, which includes the abci requests/responses +# processed during the block. +output-metadata = "{{ .Streamers.File.OutputMetadata }}" +# stop-node-on-error specifies whether to propagate the file streamer errors to the consensus state machine. +stop-node-on-error = "{{ .Streamers.File.StopNodeOnError }}" +# fsync specifies whether to call fsync after writing the files. +fsync = "{{ .Streamers.File.Fsync }}" ` var configTemplate *template.Template diff --git a/store/cachekv/store.go b/store/cachekv/store.go index a70becf13587..0ebc52268548 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -9,7 +9,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/internal/conv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" "github.com/cosmos/cosmos-sdk/types/kv" @@ -158,11 +157,6 @@ func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types return NewStore(tracekv.NewStore(store, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (store *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return NewStore(listenkv.NewStore(store, storeKey, listeners)) -} - //---------------------------------------- // Iteration diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go index 42409ed160f8..a082588db268 100644 --- a/store/cachemulti/store.go +++ b/store/cachemulti/store.go @@ -8,7 +8,6 @@ import ( "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -31,8 +30,6 @@ type Store struct { traceWriter io.Writer traceContext types.TraceContext - - listeners map[types.StoreKey][]types.WriteListener } var _ types.CacheMultiStore = Store{} @@ -43,18 +40,13 @@ var _ types.CacheMultiStore = Store{} func NewFromKVStore( store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, - listeners map[types.StoreKey][]types.WriteListener, ) Store { - if listeners == nil { - listeners = make(map[types.StoreKey][]types.WriteListener) - } cms := Store{ db: cachekv.NewStore(store), stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), keys: keys, traceWriter: traceWriter, traceContext: traceContext, - listeners: listeners, } for key, store := range stores { @@ -65,9 +57,6 @@ func NewFromKVStore( store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) } - if cms.ListeningEnabled(key) { - store = listenkv.NewStore(store.(types.KVStore), key, listeners[key]) - } cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) } @@ -78,9 +67,9 @@ func NewFromKVStore( // CacheWrapper objects. Each CacheWrapper store is a branched store.
func NewStore( db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, - traceWriter io.Writer, traceContext types.TraceContext, listeners map[types.StoreKey][]types.WriteListener, + traceWriter io.Writer, traceContext types.TraceContext, ) Store { - return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext, listeners) + return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) } func newCacheMultiStoreFromCMS(cms Store) Store { @@ -89,8 +78,7 @@ func newCacheMultiStoreFromCMS(cms Store) Store { stores[k] = v } - // don't pass listeners to nested cache store. - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext, nil) + return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) } // SetTracer sets the tracer for the MultiStore that the underlying @@ -121,23 +109,6 @@ func (cms Store) TracingEnabled() bool { return cms.traceWriter != nil } -// AddListeners adds listeners for a specific KVStore -func (cms Store) AddListeners(key types.StoreKey, listeners []types.WriteListener) { - if ls, ok := cms.listeners[key]; ok { - cms.listeners[key] = append(ls, listeners...) - } else { - cms.listeners[key] = listeners - } -} - -// ListeningEnabled returns if listening is enabled for a specific KVStore -func (cms Store) ListeningEnabled(key types.StoreKey) bool { - if ls, ok := cms.listeners[key]; ok { - return len(ls) != 0 - } - return false -} - // LatestVersion returns the branch version of the store func (cms Store) LatestVersion() int64 { panic("cannot get latest version from branch cached multi-store") @@ -166,11 +137,6 @@ func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac return cms.CacheWrap() } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (cms Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - return cms.CacheWrap() -} - // Implements MultiStore. func (cms Store) CacheMultiStore() types.CacheMultiStore { return newCacheMultiStoreFromCMS(cms) diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go index 2f0ceb5df54a..e9ea4f847d14 100644 --- a/store/dbadapter/store.go +++ b/store/dbadapter/store.go @@ -6,7 +6,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/store/cachekv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -86,10 +85,5 @@ func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Ca return cachekv.NewStore(tracekv.NewStore(dsa, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (dsa Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(dsa, storeKey, listeners)) -} - // dbm.DB implements KVStore so we can CacheKVStore it. 
var _ types.KVStore = Store{} diff --git a/store/dbadapter/store_test.go b/store/dbadapter/store_test.go index 9f8ac71b25cf..658b9d1b999e 100644 --- a/store/dbadapter/store_test.go +++ b/store/dbadapter/store_test.go @@ -84,7 +84,4 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } diff --git a/store/gaskv/store.go b/store/gaskv/store.go index 845e59cf9363..85bf598d0626 100644 --- a/store/gaskv/store.go +++ b/store/gaskv/store.go @@ -92,11 +92,6 @@ func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac panic("cannot CacheWrapWithTrace a GasKVStore") } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (gs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - panic("cannot CacheWrapWithListeners a GasKVStore") -} - func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator { var parent types.Iterator if ascending { diff --git a/store/gaskv/store_test.go b/store/gaskv/store_test.go index 2401a9805d94..701f44217921 100644 --- a/store/gaskv/store_test.go +++ b/store/gaskv/store_test.go @@ -25,7 +25,6 @@ func TestGasKVStoreBasic(t *testing.T) { require.Equal(t, types.StoreTypeDB, st.GetStoreType()) require.Panics(t, func() { st.CacheWrap() }) require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) }) - require.Panics(t, func() { st.CacheWrapWithListeners(nil, nil) }) require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") diff --git a/store/iavl/store.go b/store/iavl/store.go index 21b1e70069df..a22d8935c202 100644 --- a/store/iavl/store.go +++ b/store/iavl/store.go @@ -15,7 +15,6 @@ import ( pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachekv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" "github.com/cosmos/cosmos-sdk/telemetry" @@ -191,11 +190,6 @@ func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Ca return cachekv.NewStore(tracekv.NewStore(st, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (st *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(st, storeKey, listeners)) -} - // Implements types.KVStore. 
func (st *Store) Set(key, value []byte) { types.AssertValidKey(key) diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index f1cd586e36a0..dabe1e37f630 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -655,7 +655,4 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } diff --git a/store/listenkv/store.go b/store/listenkv/store.go index dfb6dea46c2f..4595d0fe56d1 100644 --- a/store/listenkv/store.go +++ b/store/listenkv/store.go @@ -141,12 +141,6 @@ func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cach panic("cannot CacheWrapWithTrace a ListenKVStore") } -// CacheWrapWithListeners implements the KVStore interface. It panics as a -// Store cannot be cache wrapped. -func (s *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - panic("cannot CacheWrapWithListeners a ListenKVStore") -} - // onWrite writes a KVStore operation to all of the WriteListeners func (s *Store) onWrite(delete bool, key, value []byte) { for _, l := range s.listeners { diff --git a/store/listenkv/store_test.go b/store/listenkv/store_test.go index 8d0510ba49ce..44be4120427b 100644 --- a/store/listenkv/store_test.go +++ b/store/listenkv/store_test.go @@ -292,8 +292,3 @@ func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { store := newEmptyListenKVStore(nil) require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) } - -func TestListenKVStoreCacheWrapWithListeners(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithListeners(nil, nil) }) -} diff --git a/store/mem/mem_test.go b/store/mem/mem_test.go index a2fc6add8a3e..893a4d286fbd 100644 --- a/store/mem/mem_test.go +++ b/store/mem/mem_test.go @@ -33,9 +33,6 @@ func TestStore(t *testing.T) { cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := db.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } func TestCommit(t *testing.T) { diff --git a/store/mem/store.go b/store/mem/store.go index 06d7b63f5506..9e83422360d6 100644 --- a/store/mem/store.go +++ b/store/mem/store.go @@ -8,7 +8,6 @@ import ( pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -47,11 +46,6 @@ func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Cach return cachekv.NewStore(tracekv.NewStore(s, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners)) -} - // Commit performs a no-op as entries are persistent between commitments. 
func (s *Store) Commit() (id types.CommitID) { return } diff --git a/store/prefix/store.go b/store/prefix/store.go index 295278a0a853..4f9d5a75e087 100644 --- a/store/prefix/store.go +++ b/store/prefix/store.go @@ -6,7 +6,6 @@ import ( "io" "github.com/cosmos/cosmos-sdk/store/cachekv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -58,11 +57,6 @@ func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Cach return cachekv.NewStore(tracekv.NewStore(s, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners)) -} - // Implements KVStore func (s Store) Get(key []byte) []byte { res := s.parent.Get(s.key(key)) diff --git a/store/prefix/store_test.go b/store/prefix/store_test.go index 25e07cbb1ae3..8da075dc9031 100644 --- a/store/prefix/store_test.go +++ b/store/prefix/store_test.go @@ -438,7 +438,4 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index 8785ef7d6ac1..d02944d95800 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -244,7 +244,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { // If it was deleted, remove all data if upgrades.IsDeleted(key.Name()) { - if err := deleteKVStore(store.(types.KVStore)); err != nil { + if err := deleteKVStore(types.KVStore(store)); err != nil { return errors.Wrapf(err, "failed to delete store %s", key.Name()) } rs.removalMap[key] = true @@ -262,7 +262,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { } // move all data - if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil { + if err := moveKVStoreData(types.KVStore(oldStore), types.KVStore(store)); err != nil { return errors.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) } @@ -462,19 +462,20 @@ func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac return rs.CacheWrap() } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (rs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - return rs.CacheWrap() -} - // CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore. // It implements the MultiStore interface. func (rs *Store) CacheMultiStore() types.CacheMultiStore { stores := make(map[types.StoreKey]types.CacheWrapper) for k, v := range rs.stores { - stores[k] = v + store := types.KVStore(v) + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. 
+ if rs.ListeningEnabled(k) { + store = listenkv.NewStore(store, k, rs.listeners[k]) + } + stores[k] = store } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext(), rs.listeners) + return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) } // CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it @@ -484,6 +485,7 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore { func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) { cachedStores := make(map[types.StoreKey]types.CacheWrapper) for key, store := range rs.stores { + var cacheStore types.KVStore switch store.GetStoreType() { case types.StoreTypeIAVL: // If the store is wrapped with an inter-block cache, we must first unwrap @@ -492,19 +494,25 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor // Attempt to lazy-load an already saved IAVL store version. If the // version does not exist or is pruned, an error should be returned. - iavlStore, err := store.(*iavl.Store).GetImmutable(version) + var err error + cacheStore, err = store.(*iavl.Store).GetImmutable(version) if err != nil { return nil, err } - - cachedStores[key] = iavlStore - default: - cachedStores[key] = store + cacheStore = store + } + + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. + if rs.ListeningEnabled(key) { + cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key]) } + + cachedStores[key] = cacheStore } - return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext(), rs.listeners), nil + return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil } // GetStore returns a mounted Store for a given StoreKey. 
If the StoreKey does @@ -533,7 +541,7 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { if s == nil { panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) } - store := s.(types.KVStore) + store := types.KVStore(s) if rs.TracingEnabled() { store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 237a3f24d353..26204bceb21b 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -741,9 +741,6 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil) require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := multi.CacheWrapWithListeners(nil, nil) - require.IsType(t, cachemulti.Store{}, cacheWrappedWithListeners) } func TestTraceConcurrency(t *testing.T) { diff --git a/store/streaming/constructor.go b/store/streaming/constructor.go index e576f84b83d1..20cd9fe1ff8b 100644 --- a/store/streaming/constructor.go +++ b/store/streaming/constructor.go @@ -2,10 +2,13 @@ package streaming import ( "fmt" + "os" + "path" "strings" "sync" "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/codec" serverTypes "github.com/cosmos/cosmos-sdk/server/types" "github.com/cosmos/cosmos-sdk/store/streaming/file" @@ -15,7 +18,7 @@ import ( ) // ServiceConstructor is used to construct a streaming service -type ServiceConstructor func(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) +type ServiceConstructor func(serverTypes.AppOptions, []types.StoreKey, codec.BinaryCodec) (baseapp.StreamingService, error) // ServiceType enum for specifying the type of StreamingService type ServiceType int @@ -26,7 +29,19 @@ const ( // add more in the future ) -// ServiceTypeFromString returns the streaming.ServiceType corresponding to the provided name +// Streaming option keys +const ( + OptStreamersFilePrefix = "streamers.file.prefix" + OptStreamersFileWriteDir = "streamers.file.write_dir" + OptStreamersFileOutputMetadata = "streamers.file.output-metadata" + OptStreamersFileStopNodeOnError = "streamers.file.stop-node-on-error" + OptStreamersFileFsync = "streamers.file.fsync" + + OptStoreStreamers = "store.streamers" +) + +// ServiceTypeFromString returns the streaming.ServiceType corresponding to the +// provided name. func ServiceTypeFromString(name string) ServiceType { switch strings.ToLower(name) { case "file", "f": @@ -63,16 +78,45 @@ func NewServiceConstructor(name string) (ServiceConstructor, error) { return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String()) } -// NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService -func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) { - filePrefix := cast.ToString(opts.Get("streamers.file.prefix")) - fileDir := cast.ToString(opts.Get("streamers.file.write_dir")) - return file.NewStreamingService(fileDir, filePrefix, keys, marshaller) +// NewFileStreamingService is the streaming.ServiceConstructor function for +// creating a FileStreamingService. 
+func NewFileStreamingService( + opts serverTypes.AppOptions, + keys []types.StoreKey, + marshaller codec.BinaryCodec, +) (baseapp.StreamingService, error) { + homePath := cast.ToString(opts.Get(flags.FlagHome)) + filePrefix := cast.ToString(opts.Get(OptStreamersFilePrefix)) + fileDir := cast.ToString(opts.Get(OptStreamersFileWriteDir)) + outputMetadata := cast.ToBool(opts.Get(OptStreamersFileOutputMetadata)) + stopNodeOnErr := cast.ToBool(opts.Get(OptStreamersFileStopNodeOnError)) + fsync := cast.ToBool(opts.Get(OptStreamersFileFsync)) + + // relative path is based on node home directory. + if !path.IsAbs(fileDir) { + fileDir = path.Join(homePath, fileDir) + } + + // try to create output directory if not exists. + if _, err := os.Stat(fileDir); os.IsNotExist(err) { + if err = os.MkdirAll(fileDir, os.ModePerm); err != nil { + return nil, err + } + } + + return file.NewStreamingService(fileDir, filePrefix, keys, marshaller, outputMetadata, stopNodeOnErr, fsync) } -// LoadStreamingServices is a function for loading StreamingServices onto the BaseApp using the provided AppOptions, codec, and keys -// It returns the WaitGroup and quit channel used to synchronize with the streaming services and any error that occurs during the setup -func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions, appCodec codec.BinaryCodec, keys map[string]*types.KVStoreKey) ([]baseapp.StreamingService, *sync.WaitGroup, error) { +// LoadStreamingServices is a function for loading StreamingServices onto the +// BaseApp using the provided AppOptions, codec, and keys. It returns the +// WaitGroup and quit channel used to synchronize with the streaming services +// and any error that occurs during the setup. +func LoadStreamingServices( + bApp *baseapp.BaseApp, + appOpts serverTypes.AppOptions, + appCodec codec.BinaryCodec, + keys map[string]*types.KVStoreKey, +) ([]baseapp.StreamingService, *sync.WaitGroup, error) { // waitgroup and quit channel for optional shutdown coordination of the streaming service(s) wg := new(sync.WaitGroup) // configure state listening capabilities using AppOptions @@ -107,7 +151,9 @@ func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions } return nil, nil, err } - // generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose + + // Generate the streaming service using the constructor, appOptions, and the + // StoreKeys we want to expose. 
streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) if err != nil { // close any services we may have already spun up before hitting the error on this one diff --git a/store/streaming/constructor_test.go b/store/streaming/constructor_test.go index 1b0479cc7d51..d85de2569ccd 100644 --- a/store/streaming/constructor_test.go +++ b/store/streaming/constructor_test.go @@ -20,7 +20,13 @@ import ( type fakeOptions struct{} -func (f *fakeOptions) Get(string) interface{} { return nil } +func (f *fakeOptions) Get(key string) interface{} { + if key == "streamers.file.write_dir" { + return "data/file_streamer" + + } + return nil +} var ( mockOptions = new(fakeOptions) @@ -93,6 +99,8 @@ func (ao streamingAppOptions) Get(o string) interface{} { return []string{"file"} case "streamers.file.keys": return ao.keys + case "streamers.file.write_dir": + return "data/file_streamer" default: return nil } diff --git a/store/streaming/file/README.md b/store/streaming/file/README.md index f5e0dab86a3f..0c34de7f3bea 100644 --- a/store/streaming/file/README.md +++ b/store/streaming/file/README.md @@ -25,42 +25,67 @@ We turn the service on by adding its name, "file", to `store.streamers`- the lis In `streamers.file` we include three configuration parameters for the file streaming service: -1. `streamers.x.keys` contains the list of `StoreKey` names for the KVStores to expose using this service. -In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off. +1. `streamers.file.keys` contains the list of `StoreKey` names for the KVStores to expose using this service. + In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off. 2. `streamers.file.write_dir` contains the path to the directory to write the files to. 3. `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions -with other App `StreamingService` output files. + with other App `StreamingService` output files. +4. `streamers.file.output-metadata` specifies whether to output the metadata file; otherwise only the data file is written. +5. `streamers.file.stop-node-on-error` specifies whether to propagate the error to the consensus state machine; this is necessary for data integrity when the node restarts. +6. `streamers.file.fsync` specifies whether to call fsync after writing the files; this is necessary for data integrity in case of a system crash, but slows down the commit time. ### Encoding -For each pair of `BeginBlock` requests and responses, a file is created and named `block-{N}-begin`, where N is the block number. -At the head of this file the length-prefixed protobuf encoded `BeginBlock` request is written. -At the tail of this file the length-prefixed protobuf encoded `BeginBlock` response is written. -In between these two encoded messages, the state changes that occurred due to the `BeginBlock` request are written chronologically as -a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service -is configured to listen to. - -For each pair of `DeliverTx` requests and responses, a file is created and named `block-{N}-tx-{M}` where N is the block number and M -is the tx number in the block (i.e. 0, 1, 2...). -At the head of this file the length-prefixed protobuf encoded `DeliverTx` request is written. -At the tail of this file the length-prefixed protobuf encoded `DeliverTx` response is written.
-In between these two encoded messages, the state changes that occurred due to the `DeliverTx` request are written chronologically as -a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service -is configured to listen to. - -For each pair of `EndBlock` requests and responses, a file is created and named `block-{N}-end`, where N is the block number. -At the head of this file the length-prefixed protobuf encoded `EndBlock` request is written. -At the tail of this file the length-prefixed protobuf encoded `EndBlock` response is written. -In between these two encoded messages, the state changes that occurred due to the `EndBlock` request are written chronologically as -a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service -is configured to listen to. +For each block, two files are created and named `block-{N}-meta` and `block-{N}-data`, where `N` is the block number. + +The meta file contains the protobuf encoded message `BlockMetadata`, which contains the ABCI requests and responses of the block: + +```protobuf +message BlockMetadata { + message DeliverTx { + tendermint.abci.RequestDeliverTx request = 1; + tendermint.abci.ResponseDeliverTx response = 2; + } + tendermint.abci.RequestBeginBlock request_begin_block = 1; + tendermint.abci.ResponseBeginBlock response_begin_block = 2; + repeated DeliverTx deliver_txs = 3; + tendermint.abci.RequestEndBlock request_end_block = 4; + tendermint.abci.ResponseEndBlock response_end_block = 5; + tendermint.abci.ResponseCommit response_commit = 6; +} +``` + +The data file contains a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores during the execution of the block. + +Both the meta and data files are prefixed with the length of the data content so that consumers can detect incomplete files; the length is encoded as 8 bytes in big-endian order. + +The files are written during the ABCI commit event. By default, any error is propagated and interrupts the consensus state machine, but fsync is not called; this gives good performance while accepting the risk of losing data in the rare event of a system crash. ### Decoding -To decode the files written in the above format we read all the bytes from a given file into memory and segment them into proto -messages based on the length-prefixing of each message. Once segmented, it is known that the first message is the ABCI request, -the last message is the ABCI response, and that every message in between is a `StoreKVPair`. This enables us to decode each segment into -the appropriate message type. +The pseudo-code for decoding is as follows: -The type of ABCI req/res, the block height, and the transaction index (where relevant) is known -from the file name, and the KVStore each `StoreKVPair` originates from is known since the `StoreKey` is included as a field in the proto message.
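In addition to the pseudo-code that follows, here is a minimal Go sketch of the same data-file decoding (illustrative only, not part of this patch; `decodeDataFile` is a hypothetical helper, and it assumes the `StoreKVPair` type from `store/types` plus the uvarint framing produced by the codec's `MarshalLengthPrefixed`):

```go
package consumer

import (
	"encoding/binary"
	"fmt"
	"os"

	storetypes "github.com/cosmos/cosmos-sdk/store/types"
)

// decodeDataFile reads a `block-{N}-data` file: an 8-byte big-endian content
// length, followed by a series of uvarint length-prefixed StoreKVPair messages.
func decodeDataFile(path string) ([]storetypes.StoreKVPair, error) {
	bz, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	if len(bz) < 8 || binary.BigEndian.Uint64(bz[:8]) != uint64(len(bz)-8) {
		return nil, fmt.Errorf("incomplete file: %s", path)
	}
	bz = bz[8:]

	var pairs []storetypes.StoreKVPair
	for len(bz) > 0 {
		size, n := binary.Uvarint(bz) // per-message length prefix
		if n <= 0 || size > uint64(len(bz)-n) {
			return nil, fmt.Errorf("corrupt length prefix: %s", path)
		}
		end := n + int(size)

		var pair storetypes.StoreKVPair
		if err := pair.Unmarshal(bz[n:end]); err != nil {
			return nil, err
		}
		pairs = append(pairs, pair)
		bz = bz[end:]
	}
	return pairs, nil
}
```

The `block-{N}-meta` file can be handled the same way, except that its payload (after the 8-byte prefix) is a single `BlockMetadata` message rather than a sequence of length-prefixed pairs.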
+```python +def decode_meta_file(file): + bz = file.read(8) + if len(bz) < 8: + raise "incomplete file exception" + size = int.from_bytes(bz, 'big') + + if file.size != size + 8: + raise "incomplete file exception" + + return decode_protobuf_message(BlockMetadata, file) + +def decode_data_file(file): + bz = file.read(8) + if len(bz) < 8: + raise "incomplete file exception" + size = int.from_bytes(bz, 'big') + + if file.size != size + 8: + raise "incomplete file exception" + + while not file.eof(): + yield decode_length_prefixed_protobuf_message(StoreKVStore, file) +``` diff --git a/store/streaming/file/service.go b/store/streaming/file/service.go index b826c7734ae7..c2dafe3d06e1 100644 --- a/store/streaming/file/service.go +++ b/store/streaming/file/service.go @@ -1,11 +1,13 @@ package file import ( - "errors" + "bytes" + "context" "fmt" + "io" "os" "path" - "path/filepath" + "sort" "sync" abci "github.com/tendermint/tendermint/abci/types" @@ -14,52 +16,40 @@ import ( "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) var _ baseapp.StreamingService = &StreamingService{} // StreamingService is a concrete implementation of StreamingService that writes state changes out to files type StreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all the WriteListeners write their data out to - filePrefix string // optional prefix for each of the generated files - writeDir string // directory to write files into - codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel to synchronize closure -} - -// IntermediateWriter is used so that we do not need to update the underlying io.Writer -// inside the StoreKVPairWriteListener everytime we begin writing to a new file -type IntermediateWriter struct { - outChan chan<- []byte -} - -// NewIntermediateWriter create an instance of an IntermediateWriter that sends to the provided channel -func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { - return &IntermediateWriter{ - outChan: outChan, - } -} - -// Write satisfies io.Writer -func (iw *IntermediateWriter) Write(b []byte) (int, error) { - iw.outChan <- b - return len(b), nil + storeListeners []*types.MemoryListener // a series of KVStore listeners for each KVStore + filePrefix string // optional prefix for each of the generated files + writeDir string // directory to write files into + codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files + + currentBlockNumber int64 + blockMetadata types.BlockMetadata + // if write the metadata file, otherwise only data file is outputted. + outputMetadata bool + // if true, when commit failed it will panic and stop the consensus state machine to ensure the + // eventual consistency of the output, otherwise the error is ignored and have the risk of lossing data. 
+ stopNodeOnErr bool + // if true, the file.Sync() is called to make sure the data is persisted onto disk, otherwise it risks lossing data when system crash. + fsync bool } // NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys -func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec) (*StreamingService, error) { - listenChan := make(chan []byte) - iw := NewIntermediateWriter(listenChan) - listener := types.NewStoreKVPairWriteListener(iw, c) - listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) +func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec, outputMetadata bool, stopNodeOnErr bool, fsync bool) (*StreamingService, error) { + // sort storeKeys for deterministic output + sort.SliceStable(storeKeys, func(i, j int) bool { + return storeKeys[i].Name() < storeKeys[j].Name() + }) + + listeners := make([]*types.MemoryListener, len(storeKeys)) // in this case, we are using the same listener for each Store - for _, key := range storeKeys { - listeners[key] = append(listeners[key], listener) + for i, key := range storeKeys { + listeners[i] = types.NewMemoryListener(key) } // check that the writeDir exists and is writable so that we can catch the error here at initialization if it is not // we don't open a dstFile until we receive our first ABCI message @@ -67,13 +57,13 @@ func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey return nil, err } return &StreamingService{ - listeners: listeners, - srcChan: listenChan, + storeListeners: listeners, filePrefix: filePrefix, writeDir: writeDir, codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), + outputMetadata: outputMetadata, + stopNodeOnErr: stopNodeOnErr, + fsync: fsync, }, nil } @@ -81,189 +71,105 @@ func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey // It returns the StreamingService's underlying WriteListeners // Use for registering the underlying WriteListeners with the BaseApp func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener { - return fss.listeners + listeners := make(map[types.StoreKey][]types.WriteListener, len(fss.storeListeners)) + for _, listener := range fss.storeListeners { + listeners[listener.StoreKey()] = []types.WriteListener{listener} + } + return listeners } // ListenBeginBlock satisfies the baseapp.ABCIListener interface // It writes the received BeginBlock request and response and the resulting state changes // out to a file as described in the above the naming schema -func (fss *StreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { - // generate the new file - dstFile, err := fss.openBeginBlockFile(req) - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err 
!= nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss *StreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) { - fss.currentBlockNumber = req.GetHeader().Height - fss.currentTxIndex = 0 - fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600) +func (fss *StreamingService) ListenBeginBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) (rerr error) { + fss.blockMetadata.RequestBeginBlock = &req + fss.blockMetadata.ResponseBeginBlock = &res + fss.currentBlockNumber = req.Header.Height + return nil } // ListenDeliverTx satisfies the baseapp.ABCIListener interface // It writes the received DeliverTx request and response and the resulting state changes // out to a file as described in the above the naming schema -func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error { - // generate the new file - dstFile, err := fss.openDeliverTxFile() - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) +func (fss *StreamingService) ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) (rerr error) { + fss.blockMetadata.DeliverTxs = append(fss.blockMetadata.DeliverTxs, &types.BlockMetadata_DeliverTx{ + Request: &req, + Response: &res, + }) + return nil +} + +// ListenEndBlock satisfies the baseapp.ABCIListener interface +// It writes the received EndBlock request and response and the resulting state changes +// out to a file as described in the above the naming schema +func (fss *StreamingService) ListenEndBlock(ctx context.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) (rerr error) { + fss.blockMetadata.RequestEndBlock = &req + fss.blockMetadata.ResponseEndBlock = &res + return nil +} + +// ListenEndBlock satisfies the baseapp.ABCIListener interface +func (fss *StreamingService) ListenCommit(ctx context.Context, res abci.ResponseCommit) error { + err := fss.doListenCommit(ctx, res) if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() + if fss.stopNodeOnErr { return err } } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() + return nil } -func (fss *StreamingService) openDeliverTxFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex) +func (fss *StreamingService) doListenCommit(ctx context.Context, res abci.ResponseCommit) (err error) { + fss.blockMetadata.ResponseCommit = &res + + // write to target files, the file size is written at the beginning, which can be used to detect completeness. 
+ metaFileName := fmt.Sprintf("block-%d-meta", fss.currentBlockNumber) + dataFileName := fmt.Sprintf("block-%d-data", fss.currentBlockNumber) if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) + metaFileName = fmt.Sprintf("%s-%s", fss.filePrefix, metaFileName) + dataFileName = fmt.Sprintf("%s-%s", fss.filePrefix, dataFileName) } - fss.currentTxIndex++ - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600) -} -// ListenEndBlock satisfies the baseapp.ABCIListener interface -// It writes the received EndBlock request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error { - // generate the new file - dstFile, err := fss.openEndBlockFile() - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() + if fss.outputMetadata { + bz, err := fss.codec.Marshal(&fss.blockMetadata) + if err != nil { + return err + } + if err := writeLengthPrefixedFile(path.Join(fss.writeDir, metaFileName), bz, fss.fsync); err != nil { return err } } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { + + var buf bytes.Buffer + if err := fss.writeBlockData(&buf); err != nil { return err } - // close file - return dstFile.Close() + return writeLengthPrefixedFile(path.Join(fss.writeDir, dataFileName), buf.Bytes(), fss.fsync) } -func (fss *StreamingService) openEndBlockFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) +func (fss *StreamingService) writeBlockData(writer io.Writer) error { + for _, listener := range fss.storeListeners { + cache := listener.PopStateCache() + for i := range cache { + bz, err := fss.codec.MarshalLengthPrefixed(&cache[i]) + if err != nil { + return err + } + if _, err = writer.Write(bz); err != nil { + return err + } + } } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600) + return nil } // Stream satisfies the baseapp.StreamingService interface -// It spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs -// and caches them in the order they were received -// returns an error if it is called twice func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { - if fss.quitChan != nil { - return errors.New("`Stream` has already been called. 
The stream needs to be closed before it can be started again") - } - fss.quitChan = make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-fss.quitChan: - fss.quitChan = nil - return - case by := <-fss.srcChan: - fss.stateCacheLock.Lock() - fss.stateCache = append(fss.stateCache, by) - fss.stateCacheLock.Unlock() - } - } - }() return nil } // Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface func (fss *StreamingService) Close() error { - close(fss.quitChan) return nil } @@ -276,3 +182,32 @@ func isDirWriteable(dir string) error { } return os.Remove(f) } + +func writeLengthPrefixedFile(path string, data []byte, fsync bool) (err error) { + var f *os.File + f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) + if err != nil { + return sdkerrors.Wrapf(err, "open file failed: %s", path) + } + defer func() { + // avoid overriding the real error with file close error + if err1 := f.Close(); err1 != nil && err == nil { + err = sdkerrors.Wrapf(err, "close file failed: %s", path) + } + }() + _, err = f.Write(sdk.Uint64ToBigEndian(uint64(len(data)))) + if err != nil { + return sdkerrors.Wrapf(err, "write length prefix failed: %s", path) + } + _, err = f.Write(data) + if err != nil { + return sdkerrors.Wrapf(err, "write block data failed: %s", path) + } + if fsync { + err = f.Sync() + if err != nil { + return sdkerrors.Wrapf(err, "fsync failed: %s", path) + } + } + return +} diff --git a/store/streaming/file/service_test.go b/store/streaming/file/service_test.go index 52f477ed6a00..d23f38398279 100644 --- a/store/streaming/file/service_test.go +++ b/store/streaming/file/service_test.go @@ -2,20 +2,21 @@ package file import ( "encoding/binary" + "errors" "fmt" "os" "path/filepath" "sync" "testing" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + types1 "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/cosmos/cosmos-sdk/codec" codecTypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - types1 "github.com/tendermint/tendermint/proto/tendermint/types" ) var ( @@ -56,6 +57,10 @@ var ( ConsensusParamUpdates: &abci.ConsensusParams{}, ValidatorUpdates: []abci.ValidatorUpdate{}, } + testCommitRes = abci.ResponseCommit{ + Data: []byte{1}, + RetainHeight: 0, + } mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} testDeliverTxReq1 = abci.RequestDeliverTx{ Tx: mockTxBytes1, @@ -104,25 +109,6 @@ var ( mockValue3 = []byte{5, 4, 3} ) -func TestIntermediateWriter(t *testing.T) { - outChan := make(chan []byte, 0) - iw := NewIntermediateWriter(outChan) - require.IsType(t, &IntermediateWriter{}, iw) - testBytes := []byte{1, 2, 3, 4, 5} - var length int - var err error - waitChan := make(chan struct{}, 0) - go func() { - length, err = iw.Write(testBytes) - waitChan <- struct{}{} - }() - receivedBytes := <-outChan - <-waitChan - require.Equal(t, len(testBytes), length) - require.Equal(t, testBytes, receivedBytes) - require.Nil(t, err) -} - func TestFileStreamingService(t *testing.T) { if os.Getenv("CI") != "" { t.Skip("Skipping TestFileStreamingService in CI environment") @@ -132,29 +118,23 @@ func TestFileStreamingService(t *testing.T) { defer os.RemoveAll(testDir) testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} - testStreamingService, 
err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller) + testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller, true, false, false) require.Nil(t, err) require.IsType(t, &StreamingService{}, testStreamingService) require.Equal(t, testPrefix, testStreamingService.filePrefix) require.Equal(t, testDir, testStreamingService.writeDir) require.Equal(t, testMarshaller, testStreamingService.codec) - testListener1 = testStreamingService.listeners[mockStoreKey1][0] - testListener2 = testStreamingService.listeners[mockStoreKey2][0] + testListener1 = testStreamingService.storeListeners[0] + testListener2 = testStreamingService.storeListeners[1] wg := new(sync.WaitGroup) testStreamingService.Stream(wg) - testListenBeginBlock(t) - testListenDeliverTx1(t) - testListenDeliverTx2(t) - testListenEndBlock(t) + testListenBlock(t) testStreamingService.Close() wg.Wait() } -func testListenBeginBlock(t *testing.T) { - expectedBeginBlockReqBytes, err := testMarshaller.Marshal(&testBeginBlockReq) - require.Nil(t, err) - expectedBeginBlockResBytes, err := testMarshaller.Marshal(&testBeginBlockRes) - require.Nil(t, err) +func testListenBlock(t *testing.T) { + var expectKVPairsStore1, expectKVPairsStore2 [][]byte // write state changes testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) @@ -183,162 +163,102 @@ func testListenBeginBlock(t *testing.T) { Delete: false, }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1, expectedKVPair3) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair2) // send the ABCI messages err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes) require.Nil(t, err) - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-begin", testPrefix, testBeginBlockReq.GetHeader().Height) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedBeginBlockReqBytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedBeginBlockResBytes, segments[4]) -} - -func testListenDeliverTx1(t *testing.T) { - expectedDeliverTxReq1Bytes, err := testMarshaller.Marshal(&testDeliverTxReq1) - require.Nil(t, err) - expectedDeliverTxRes1Bytes, err := testMarshaller.Marshal(&testDeliverTxRes1) - require.Nil(t, err) - // write state changes testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey1, Value: mockValue1, Delete: false, }) require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey2, Value: mockValue2, Delete: false, }) require.Nil(t, err) - expectedKVPair3, err := 
testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey3, Value: mockValue3, Delete: false, }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair2, expectedKVPair3) // send the ABCI messages err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1) require.Nil(t, err) - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 0) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedDeliverTxReq1Bytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedDeliverTxRes1Bytes, segments[4]) -} - -func testListenDeliverTx2(t *testing.T) { - expectedDeliverTxReq2Bytes, err := testMarshaller.Marshal(&testDeliverTxReq2) - require.Nil(t, err) - expectedDeliverTxRes2Bytes, err := testMarshaller.Marshal(&testDeliverTxRes2) - require.Nil(t, err) - // write state changes - testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + testListener2.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener1.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey1, Value: mockValue1, Delete: false, }) require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey2, Value: mockValue2, Delete: false, }) require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey3, Value: mockValue3, Delete: false, }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair2) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair1, expectedKVPair3) // send the ABCI messages err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2) require.Nil(t, err) - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 1) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedDeliverTxReq2Bytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, 
segments[3]) - require.Equal(t, expectedDeliverTxRes2Bytes, segments[4]) -} - -func testListenEndBlock(t *testing.T) { - expectedEndBlockReqBytes, err := testMarshaller.Marshal(&testEndBlockReq) - require.Nil(t, err) - expectedEndBlockResBytes, err := testMarshaller.Marshal(&testEndBlockRes) - require.Nil(t, err) - // write state changes testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + testListener1.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey1, Value: mockValue1, Delete: false, }) require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey2, Value: mockValue2, Delete: false, }) require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey3, Value: mockValue3, @@ -346,29 +266,57 @@ func testListenEndBlock(t *testing.T) { }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1, expectedKVPair2) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair3) + // send the ABCI messages err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes) require.Nil(t, err) + err = testStreamingService.ListenCommit(emptyContext, testCommitRes) + require.Nil(t, err) + // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-end", testPrefix, testEndBlockReq.Height) - fileBytes, err := readInFile(fileName) + metaFileName := fmt.Sprintf("%s-block-%d-meta", testPrefix, testBeginBlockReq.GetHeader().Height) + dataFileName := fmt.Sprintf("%s-block-%d-data", testPrefix, testBeginBlockReq.GetHeader().Height) + metaFileBytes, err := readInFile(metaFileName) + dataFileBytes, err := readInFile(dataFileName) + require.Nil(t, err) + + metadata := types.BlockMetadata{ + RequestBeginBlock: &testBeginBlockReq, + ResponseBeginBlock: &testBeginBlockRes, + RequestEndBlock: &testEndBlockReq, + ResponseEndBlock: &testEndBlockRes, + ResponseCommit: &testCommitRes, + DeliverTxs: []*types.BlockMetadata_DeliverTx{ + {Request: &testDeliverTxReq1, Response: &testDeliverTxRes1}, + {Request: &testDeliverTxReq2, Response: &testDeliverTxRes2}, + }, + } + expectedMetadataBytes, err := testMarshaller.Marshal(&metadata) require.Nil(t, err) + require.Equal(t, expectedMetadataBytes, metaFileBytes) // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) + segments, err := segmentBytes(dataFileBytes) require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedEndBlockReqBytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedEndBlockResBytes, segments[4]) + require.Equal(t, len(expectKVPairsStore1)+len(expectKVPairsStore2), len(segments)) + require.Equal(t, expectKVPairsStore1, 
segments[:len(expectKVPairsStore1)]) + require.Equal(t, expectKVPairsStore2, segments[len(expectKVPairsStore1):]) } func readInFile(name string) ([]byte, error) { path := filepath.Join(testDir, name) - return os.ReadFile(path) + bz, err := os.ReadFile(path) + if err != nil { + return nil, err + } + size := sdk.BigEndianToUint64(bz[:8]) + if len(bz) != int(size)+8 { + return nil, errors.New("incomplete file ") + } + return bz[8:], nil } // segmentBytes returns all of the protobuf messages contained in the byte array as an array of byte arrays diff --git a/store/tracekv/store.go b/store/tracekv/store.go index 91f3c657682c..caf871552f56 100644 --- a/store/tracekv/store.go +++ b/store/tracekv/store.go @@ -173,11 +173,6 @@ func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Ca panic("cannot CacheWrapWithTrace a TraceKVStore") } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (tkv *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - panic("cannot CacheWrapWithListeners a TraceKVStore") -} - // writeOperation writes a KVStore operation to the underlying io.Writer as // JSON-encoded data where the key/value pair is base64 encoded. func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) { diff --git a/store/tracekv/store_test.go b/store/tracekv/store_test.go index 1b81e89bafd2..b7a3b13ea964 100644 --- a/store/tracekv/store_test.go +++ b/store/tracekv/store_test.go @@ -290,8 +290,3 @@ func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { store := newEmptyTraceKVStore(nil) require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) } - -func TestTraceKVStoreCacheWrapWithListeners(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithListeners(nil, nil) }) -} diff --git a/store/types/listening.go b/store/types/listening.go index 02cde4c715c7..5f21689449fd 100644 --- a/store/types/listening.go +++ b/store/types/listening.go @@ -45,3 +45,37 @@ func (wl *StoreKVPairWriteListener) OnWrite(storeKey StoreKey, key []byte, value } return nil } + +// MemoryListener listens to the state writes and accumulate the records in memory. +type MemoryListener struct { + key StoreKey + stateCache []StoreKVPair +} + +// NewMemoryListener creates a listener that accumulate the state writes in memory. 
+func NewMemoryListener(key StoreKey) *MemoryListener { + return &MemoryListener{key: key} +} + +// OnWrite implements WriteListener interface +func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error { + fl.stateCache = append(fl.stateCache, StoreKVPair{ + StoreKey: storeKey.Name(), + Delete: delete, + Key: key, + Value: value, + }) + return nil +} + +// PopStateCache returns the current state caches and set to nil +func (fl *MemoryListener) PopStateCache() []StoreKVPair { + res := fl.stateCache + fl.stateCache = nil + return res +} + +// StoreKey returns the storeKey it listens to +func (fl *MemoryListener) StoreKey() StoreKey { + return fl.key +} diff --git a/store/types/listening.pb.go b/store/types/listening.pb.go index 47d5a23a8367..4505b11f1b69 100644 --- a/store/types/listening.pb.go +++ b/store/types/listening.pb.go @@ -6,6 +6,7 @@ package types import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" + types "github.com/tendermint/tendermint/abci/types" io "io" math "math" math_bits "math/bits" @@ -95,8 +96,149 @@ func (m *StoreKVPair) GetValue() []byte { return nil } +// BlockMetadata contains all the abci event data of a block +// the file streamer dump them into files together with the state changes. +type BlockMetadata struct { + RequestBeginBlock *types.RequestBeginBlock `protobuf:"bytes,1,opt,name=request_begin_block,json=requestBeginBlock,proto3" json:"request_begin_block,omitempty"` + ResponseBeginBlock *types.ResponseBeginBlock `protobuf:"bytes,2,opt,name=response_begin_block,json=responseBeginBlock,proto3" json:"response_begin_block,omitempty"` + DeliverTxs []*BlockMetadata_DeliverTx `protobuf:"bytes,3,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + RequestEndBlock *types.RequestEndBlock `protobuf:"bytes,4,opt,name=request_end_block,json=requestEndBlock,proto3" json:"request_end_block,omitempty"` + ResponseEndBlock *types.ResponseEndBlock `protobuf:"bytes,5,opt,name=response_end_block,json=responseEndBlock,proto3" json:"response_end_block,omitempty"` + ResponseCommit *types.ResponseCommit `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"` +} + +func (m *BlockMetadata) Reset() { *m = BlockMetadata{} } +func (m *BlockMetadata) String() string { return proto.CompactTextString(m) } +func (*BlockMetadata) ProtoMessage() {} +func (*BlockMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_a5d350879fe4fecd, []int{1} +} +func (m *BlockMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadata.Merge(m, src) +} +func (m *BlockMetadata) XXX_Size() int { + return m.Size() +} +func (m *BlockMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo + +func (m *BlockMetadata) GetRequestBeginBlock() *types.RequestBeginBlock { + if m != nil { + return m.RequestBeginBlock + } + return nil +} + +func (m *BlockMetadata) GetResponseBeginBlock() *types.ResponseBeginBlock { + if m != nil { + return m.ResponseBeginBlock + } + return nil +} + +func (m 
*BlockMetadata) GetDeliverTxs() []*BlockMetadata_DeliverTx { + if m != nil { + return m.DeliverTxs + } + return nil +} + +func (m *BlockMetadata) GetRequestEndBlock() *types.RequestEndBlock { + if m != nil { + return m.RequestEndBlock + } + return nil +} + +func (m *BlockMetadata) GetResponseEndBlock() *types.ResponseEndBlock { + if m != nil { + return m.ResponseEndBlock + } + return nil +} + +func (m *BlockMetadata) GetResponseCommit() *types.ResponseCommit { + if m != nil { + return m.ResponseCommit + } + return nil +} + +// DeliverTx encapulate deliver tx request and response. +type BlockMetadata_DeliverTx struct { + Request *types.RequestDeliverTx `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` +} + +func (m *BlockMetadata_DeliverTx) Reset() { *m = BlockMetadata_DeliverTx{} } +func (m *BlockMetadata_DeliverTx) String() string { return proto.CompactTextString(m) } +func (*BlockMetadata_DeliverTx) ProtoMessage() {} +func (*BlockMetadata_DeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_a5d350879fe4fecd, []int{1, 0} +} +func (m *BlockMetadata_DeliverTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMetadata_DeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMetadata_DeliverTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMetadata_DeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadata_DeliverTx.Merge(m, src) +} +func (m *BlockMetadata_DeliverTx) XXX_Size() int { + return m.Size() +} +func (m *BlockMetadata_DeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMetadata_DeliverTx.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMetadata_DeliverTx proto.InternalMessageInfo + +func (m *BlockMetadata_DeliverTx) GetRequest() *types.RequestDeliverTx { + if m != nil { + return m.Request + } + return nil +} + +func (m *BlockMetadata_DeliverTx) GetResponse() *types.ResponseDeliverTx { + if m != nil { + return m.Response + } + return nil +} + func init() { proto.RegisterType((*StoreKVPair)(nil), "cosmos.base.store.v1beta1.StoreKVPair") + proto.RegisterType((*BlockMetadata)(nil), "cosmos.base.store.v1beta1.BlockMetadata") + proto.RegisterType((*BlockMetadata_DeliverTx)(nil), "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx") } func init() { @@ -104,21 +246,37 @@ func init() { } var fileDescriptor_a5d350879fe4fecd = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce, - 0xcd, 0x2f, 0xd6, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0x33, - 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0xc9, 0x2c, 0x2e, 0x49, 0xcd, 0xcb, 0xcc, 0x4b, 0xd7, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x84, 0x28, 0xd5, 0x03, 0x29, 0xd5, 0x03, 0x2b, 0xd5, - 0x83, 0x2a, 0x55, 0xca, 0xe2, 0xe2, 0x0e, 0x06, 0x09, 0x78, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, - 0x49, 0x73, 0x71, 0x82, 0xe5, 0xe3, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, - 0x38, 0xc0, 0x02, 0xde, 0xa9, 0x95, 0x42, 0x62, 0x5c, 0x6c, 0x29, 0xa9, 0x39, 0xa9, 0x25, 0xa9, - 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x1c, 0x41, 0x50, 0x9e, 0x90, 0x00, 0x17, 0x33, 0x48, 0x39, 0xb3, - 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 
0x96, 0x98, 0x53, 0x9a, 0x2a, - 0xc1, 0x02, 0x16, 0x83, 0x70, 0x9c, 0x9c, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, - 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, - 0x21, 0x4a, 0x23, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0xea, 0x2d, - 0x08, 0xa5, 0x5b, 0x9c, 0x92, 0x0d, 0xf5, 0x5c, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, - 0x47, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0xe0, 0xb3, 0x51, 0xfe, 0x00, 0x00, 0x00, + // 473 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x4f, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0xe3, 0xa6, 0x09, 0xc9, 0x04, 0x68, 0x59, 0x2a, 0x64, 0x5a, 0xc9, 0xb8, 0xe1, 0x62, + 0x0e, 0xac, 0xd5, 0x70, 0x44, 0xe2, 0x10, 0x40, 0x42, 0x2a, 0x08, 0xe4, 0x02, 0x07, 0x2e, 0x96, + 0xff, 0x8c, 0xc2, 0x12, 0xdb, 0x1b, 0x76, 0x37, 0x51, 0x73, 0xe6, 0xc2, 0x91, 0x8f, 0xc5, 0xb1, + 0x47, 0x8e, 0x28, 0xf9, 0x22, 0xc8, 0x6b, 0xc7, 0xa9, 0x43, 0x7d, 0xca, 0xee, 0xe4, 0xbd, 0x9f, + 0xdf, 0xcc, 0x6a, 0xe0, 0x49, 0xc4, 0x65, 0xca, 0xa5, 0x1b, 0x06, 0x12, 0x5d, 0xa9, 0xb8, 0x40, + 0x77, 0x71, 0x16, 0xa2, 0x0a, 0xce, 0xdc, 0x84, 0x49, 0x85, 0x19, 0xcb, 0x26, 0x74, 0x26, 0xb8, + 0xe2, 0xe4, 0x61, 0x21, 0xa5, 0xb9, 0x94, 0x6a, 0x29, 0x2d, 0xa5, 0xc7, 0x27, 0x0a, 0xb3, 0x18, + 0x45, 0xca, 0x32, 0xe5, 0x06, 0x61, 0xc4, 0x5c, 0xb5, 0x9c, 0xa1, 0x2c, 0x7c, 0xc3, 0x6f, 0x30, + 0xb8, 0xc8, 0xd5, 0xe7, 0x9f, 0x3f, 0x04, 0x4c, 0x90, 0x13, 0xe8, 0x6b, 0xb3, 0x3f, 0xc5, 0xa5, + 0x69, 0xd8, 0x86, 0xd3, 0xf7, 0x7a, 0xba, 0x70, 0x8e, 0x4b, 0xf2, 0x00, 0xba, 0x31, 0x26, 0xa8, + 0xd0, 0xdc, 0xb3, 0x0d, 0xa7, 0xe7, 0x95, 0x37, 0x72, 0x08, 0xed, 0x5c, 0xde, 0xb6, 0x0d, 0xe7, + 0xb6, 0x97, 0x1f, 0xc9, 0x11, 0x74, 0x16, 0x41, 0x32, 0x47, 0x73, 0x5f, 0xd7, 0x8a, 0xcb, 0xf0, + 0x47, 0x07, 0xee, 0x8c, 0x13, 0x1e, 0x4d, 0xdf, 0xa1, 0x0a, 0xe2, 0x40, 0x05, 0xc4, 0x83, 0xfb, + 0x02, 0xbf, 0xcf, 0x51, 0x2a, 0x3f, 0xc4, 0x09, 0xcb, 0xfc, 0x30, 0xff, 0x5b, 0x7f, 0x78, 0x30, + 0x1a, 0xd2, 0x6d, 0x70, 0x9a, 0x07, 0xa7, 0x5e, 0xa1, 0x1d, 0xe7, 0x52, 0x0d, 0xf2, 0xee, 0x89, + 0xdd, 0x12, 0xf9, 0x04, 0x47, 0x02, 0xe5, 0x8c, 0x67, 0x12, 0x6b, 0xd0, 0x3d, 0x0d, 0x7d, 0x7c, + 0x03, 0xb4, 0x10, 0x5f, 0xa3, 0x12, 0xf1, 0x5f, 0x8d, 0x5c, 0xc0, 0x20, 0xc6, 0x84, 0x2d, 0x50, + 0xf8, 0xea, 0x52, 0x9a, 0x6d, 0xbb, 0xed, 0x0c, 0x46, 0x23, 0xda, 0x38, 0x76, 0x5a, 0xeb, 0x94, + 0xbe, 0x2a, 0xbc, 0x1f, 0x2f, 0x3d, 0x88, 0x37, 0x47, 0x49, 0xde, 0xc2, 0xa6, 0x01, 0x1f, 0xb3, + 0xb8, 0x0c, 0xba, 0xaf, 0x83, 0xda, 0x4d, 0xdd, 0xbf, 0xce, 0xe2, 0x22, 0xe5, 0x81, 0xa8, 0x17, + 0xc8, 0x7b, 0xa8, 0x82, 0x5f, 0xc3, 0x75, 0x34, 0xee, 0xb4, 0xb1, 0xef, 0x8a, 0x77, 0x28, 0x76, + 0x2a, 0xe4, 0x0d, 0x1c, 0x54, 0xc0, 0x88, 0xa7, 0x29, 0x53, 0x66, 0x57, 0xd3, 0x1e, 0x35, 0xd2, + 0x5e, 0x6a, 0x99, 0x77, 0x57, 0xd4, 0xee, 0xc7, 0x3f, 0x0d, 0xe8, 0x57, 0x23, 0x20, 0xcf, 0xe1, + 0x56, 0x99, 0xbd, 0x7c, 0xea, 0xd3, 0xa6, 0x66, 0xb7, 0x63, 0xdb, 0x38, 0xc8, 0x0b, 0xe8, 0x6d, + 0xe0, 0xe5, 0x9b, 0x0e, 0x1b, 0xd3, 0x6c, 0xed, 0x95, 0x67, 0x3c, 0xfe, 0xbd, 0xb2, 0x8c, 0xab, + 0x95, 0x65, 0xfc, 0x5d, 0x59, 0xc6, 0xaf, 0xb5, 0xd5, 0xba, 0x5a, 0x5b, 0xad, 0x3f, 0x6b, 0xab, + 0xf5, 0xc5, 0x99, 0x30, 0xf5, 0x75, 0x1e, 0xd2, 0x88, 0xa7, 0x6e, 0xb9, 0x79, 0xc5, 0xcf, 0x53, + 0x19, 0x4f, 0xcb, 0xfd, 0xd3, 0xbb, 0x13, 0x76, 0xf5, 0xf2, 0x3c, 0xfb, 0x17, 0x00, 0x00, 0xff, + 0xff, 0x69, 0x5c, 0x8f, 0x23, 0xa1, 0x03, 0x00, 0x00, } func (m *StoreKVPair) Marshal() (dAtA []byte, err error) { @@ -175,6 +333,150 @@ func 
(m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *BlockMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResponseCommit != nil { + { + size, err := m.ResponseCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ResponseEndBlock != nil { + { + size, err := m.ResponseEndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.RequestEndBlock != nil { + { + size, err := m.RequestEndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.DeliverTxs) > 0 { + for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ResponseBeginBlock != nil { + { + size, err := m.ResponseBeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.RequestBeginBlock != nil { + { + size, err := m.RequestBeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockMetadata_DeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMetadata_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMetadata_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintListening(dAtA []byte, offset int, v uint64) int { offset -= sovListening(v) base := offset @@ -210,6 +512,58 @@ func (m *StoreKVPair) Size() (n int) { return n } +func (m *BlockMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestBeginBlock != nil { + l = m.RequestBeginBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.ResponseBeginBlock != nil { + l = m.ResponseBeginBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if 
len(m.DeliverTxs) > 0 { + for _, e := range m.DeliverTxs { + l = e.Size() + n += 1 + l + sovListening(uint64(l)) + } + } + if m.RequestEndBlock != nil { + l = m.RequestEndBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.ResponseEndBlock != nil { + l = m.ResponseEndBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.ResponseCommit != nil { + l = m.ResponseCommit.Size() + n += 1 + l + sovListening(uint64(l)) + } + return n +} + +func (m *BlockMetadata_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovListening(uint64(l)) + } + return n +} + func sovListening(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -386,6 +740,392 @@ func (m *StoreKVPair) Unmarshal(dAtA []byte) error { } return nil } +func (m *BlockMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestBeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestBeginBlock == nil { + m.RequestBeginBlock = &types.RequestBeginBlock{} + } + if err := m.RequestBeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseBeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseBeginBlock == nil { + m.ResponseBeginBlock = &types.ResponseBeginBlock{} + } + if err := m.ResponseBeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeliverTxs = append(m.DeliverTxs, &BlockMetadata_DeliverTx{}) + if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestEndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestEndBlock == nil { + m.RequestEndBlock = &types.RequestEndBlock{} + } + if err := m.RequestEndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseEndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseEndBlock == nil { + m.ResponseEndBlock = &types.ResponseEndBlock{} + } + if err := m.ResponseEndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseCommit == nil { + m.ResponseCommit = &types.ResponseCommit{} + } + if err := m.ResponseCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipListening(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthListening + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMetadata_DeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: DeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &types.RequestDeliverTx{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &types.ResponseDeliverTx{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipListening(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthListening + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipListening(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/store/types/store.go b/store/types/store.go index 403ede5f706f..ef5ff36c8c42 100644 --- a/store/types/store.go +++ b/store/types/store.go @@ -128,13 +128,6 @@ type MultiStore interface { // tracing operations. The modified MultiStore is returned. SetTracingContext(TraceContext) MultiStore - // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey - ListeningEnabled(key StoreKey) bool - - // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey - // It appends the listeners to a current set, if one already exists - AddListeners(key StoreKey, listeners []WriteListener) - // LatestVersion returns the latest version in the store LatestVersion() int64 } @@ -197,6 +190,13 @@ type CommitMultiStore interface { // RollbackToVersion rollback the db to specific version(height). RollbackToVersion(version int64) error + + // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey + ListeningEnabled(key StoreKey) bool + + // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey + // It appends the listeners to a current set, if one already exists + AddListeners(key StoreKey, listeners []WriteListener) } //---------subsp------------------------------- @@ -273,9 +273,6 @@ type CacheWrap interface { // CacheWrapWithTrace recursively wraps again with tracing enabled. 
CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap - - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey StoreKey, listeners []WriteListener) CacheWrap } type CacheWrapper interface { @@ -284,9 +281,6 @@ type CacheWrapper interface { // CacheWrapWithTrace branches a store with tracing enabled. CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap - - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey StoreKey, listeners []WriteListener) CacheWrap } func (cid CommitID) IsZero() bool {
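
Editorial note on the `store/types/store.go` hunks above: after this patch, write listeners are registered directly on the `CommitMultiStore` (i.e. the `rootmulti.Store`) instead of being propagated through cache wraps via `CacheWrapWithListeners`. The following is a minimal sketch of wiring a listener under the moved API; the import paths and the `NewStoreKVPairWriteListener` constructor are assumed from the v0.46-era SDK and should be verified against the actual release, only `AddListeners(key StoreKey, listeners []WriteListener)` is taken verbatim from this diff.

```go
package example

import (
	"io"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	storetypes "github.com/cosmos/cosmos-sdk/store/types"
)

// attachListener registers a StoreKVPairWriteListener for a single store key.
// With this patch, ListeningEnabled/AddListeners live on CommitMultiStore, so the
// caller passes the app's rootmulti store; cache-wrapped stores no longer listen.
func attachListener(cms storetypes.CommitMultiStore, key storetypes.StoreKey, w io.Writer) {
	// ProtoCodec satisfies the BinaryCodec the listener uses to encode StoreKVPair entries.
	marshaller := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
	listener := storetypes.NewStoreKVPairWriteListener(w, marshaller)
	// AddListeners appends to any listeners already registered for this key.
	cms.AddListeners(key, []storetypes.WriteListener{listener})
}
```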
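The generated `BlockMetadata` code earlier in this patch backs the file streamer's new one-file-per-block output: the streamer collects the BeginBlock, DeliverTx, EndBlock, and Commit request/response pairs for a block and emits them as a single protobuf message. Below is a hedged round-trip sketch; the field layout and `Unmarshal` come from the generated code in this patch, while the `abci/types` import path and the generated `Marshal` method are assumptions based on the SDK's gogoproto tooling of that era.

```go
package example

import (
	"fmt"

	storetypes "github.com/cosmos/cosmos-sdk/store/types"
	abci "github.com/tendermint/tendermint/abci/types"
)

// roundTrip builds a BlockMetadata for one (empty) block, marshals it, and decodes
// it again, mirroring what the file streamer writes once per block after this patch.
func roundTrip() error {
	meta := &storetypes.BlockMetadata{
		RequestBeginBlock:  &abci.RequestBeginBlock{},
		ResponseBeginBlock: &abci.ResponseBeginBlock{},
		DeliverTxs: []*storetypes.BlockMetadata_DeliverTx{
			{Request: &abci.RequestDeliverTx{}, Response: &abci.ResponseDeliverTx{}},
		},
		RequestEndBlock:  &abci.RequestEndBlock{},
		ResponseEndBlock: &abci.ResponseEndBlock{},
		ResponseCommit:   &abci.ResponseCommit{},
	}

	// Marshal/Size are generated alongside the Unmarshal shown in this patch.
	bz, err := meta.Marshal()
	if err != nil {
		return err
	}

	var decoded storetypes.BlockMetadata
	if err := decoded.Unmarshal(bz); err != nil {
		return err
	}
	fmt.Println("deliver txs:", len(decoded.DeliverTxs))
	return nil
}
```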