From 0bc3d7e4750db8f98c5dd66f3377147532021c62 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:55:42 -0500 Subject: [PATCH 01/15] CI: update reviewdog warning linter job (#6175) --- .github/workflows/reviewdog.yml | 10 ++++++---- .golangci-warnings.yml | 4 ++-- data/transactions/logic/assembler.go | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index 4a398e7101..f967bbd916 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -1,4 +1,6 @@ name: "ReviewDog workflow" +env: + GOLANGCI_LINT_VERSION: "v1.62.0" on: push: branches: @@ -20,7 +22,7 @@ jobs: uses: reviewdog/action-golangci-lint@v2.6.2 with: go_version_file: go.mod - golangci_lint_version: "v1.62.0" + golangci_lint_version: ${{ env.GOLANGCI_LINT_VERSION }} golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners" reporter: "github-pr-check" tool_name: "Lint Errors" @@ -56,14 +58,14 @@ jobs: uses: actions/cache@v3.3.1 with: path: cicdtmp/golangci-lint/golangci-lint-cgo - key: cicd-golangci-lint-cgo-v0.0.2-${{ env.GO_VERSION }} + key: cicd-golangci-lint-cgo-v0.0.3-${{ env.GO_VERSION }}-${{ env.GOLANGCI_LINT_VERSION }} - name: Build custom golangci-lint with CGO_ENABLED if: steps.cache-golangci-lint.outputs.cache-hit != 'true' run: | cd cicdtmp/golangci-lint git clone https://github.com/golangci/golangci-lint.git . 
- git checkout tags/v1.62.0 + git checkout tags/${GOLANGCI_LINT_VERSION} CGO_ENABLED=true go build -trimpath -o golangci-lint-cgo ./cmd/golangci-lint ./golangci-lint-cgo --version cd ../../ @@ -92,7 +94,7 @@ jobs: cat temp_golangci-lint-cgo.txt | reviewdog \ -f=golangci-lint \ -name="Lint Warnings" \ - -reporter=github-check \ + -reporter=github-pr-check \ -filter-mode=added \ -fail-on-error=true \ -level=warning diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml index d813e97e59..8ab68f9faf 100644 --- a/.golangci-warnings.yml +++ b/.golangci-warnings.yml @@ -9,8 +9,8 @@ linters: - partitiontest linters-settings: - gosec: # we are mostly only interested in G601 - excludes: [G101, G103, G104, G107, G202, G301, G302, G303, G304, G306, G307, G404] + gosec: # Go 1.22 makes G601 irrelevant + excludes: [G101, G103, G104, G107, G115, G202, G301, G302, G303, G304, G306, G307, G404, G601] custom: partitiontest: path: cmd/partitiontest_linter/plugin.so diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 06fd1f3953..9ba52138ec 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -2269,7 +2269,7 @@ func define(ops *OpStream, tokens []token) *sourceError { } else { delete(ops.macros, name) // remove new macro that caused cycle } - return tokens[1].errorf("macro expansion cycle discovered: %s", strings.Join(found, " -> ")) + return tokens[1].errorf("macro expansion cycle discovered: %s", strings.Join(found, " -> ")) //nolint:gosec // false positive, len(tokens) >= 3 } return nil } From 92e0e009da2af6828aecede9d9547fa3e7724aa7 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Tue, 26 Nov 2024 03:26:43 +0800 Subject: [PATCH 02/15] refactor: replace experimental `maps` and `slices` with stdlib (#6179) Signed-off-by: Eng Zer Jun --- agreement/agreementtest/simulate_test.go | 2 +- agreement/autopsy.go | 2 +- agreement/common_test.go | 2 +- cmd/algokey/keyreg.go | 5 +++-- cmd/goal/account.go 
| 2 +- cmd/tealdbg/local.go | 2 +- crypto/merklearray/merkle.go | 2 +- crypto/merkletrie/cache.go | 8 +++---- crypto/merkletrie/committer.go | 2 +- crypto/merkletrie/committer_test.go | 2 +- crypto/merkletrie/node.go | 2 +- daemon/algod/api/server/v2/account.go | 2 +- .../algod/api/server/v2/test/handlers_test.go | 2 +- daemon/algod/api/server/v2/utils.go | 16 +++++++------- data/basics/teal.go | 2 +- data/basics/userBalance.go | 2 +- data/transactions/application.go | 2 +- data/transactions/logic/crypto_test.go | 2 +- data/transactions/logic/eval.go | 3 +-- data/transactions/logic/opcodes.go | 9 ++++---- data/transactions/logic/opcodes_test.go | 2 +- data/transactions/teal.go | 4 ++-- data/transactions/transaction.go | 2 +- ledger/apply/application_test.go | 2 +- ledger/apply/mockBalances_test.go | 3 ++- ledger/eval/cow.go | 5 ++--- ledger/eval/txntracer.go | 4 ++-- ledger/ledger_test.go | 2 +- ledger/ledgercore/statedelta.go | 22 +++++++++---------- ledger/simulation/simulation_eval_test.go | 3 +-- netdeploy/network.go | 5 +++-- network/p2p/peerstore/peerstore.go | 2 +- network/phonebook/phonebook.go | 2 +- stateproof/builder.go | 7 +++--- test/reflectionhelpers/helpers.go | 3 +-- util/metrics/opencensus.go | 2 +- util/metrics/tagcounter.go | 2 +- 37 files changed, 69 insertions(+), 74 deletions(-) diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go index 6c071ed8a8..02ba048cb7 100644 --- a/agreement/agreementtest/simulate_test.go +++ b/agreement/agreementtest/simulate_test.go @@ -19,6 +19,7 @@ package agreementtest import ( "context" "fmt" + "maps" "math/rand" "os" "strconv" @@ -27,7 +28,6 @@ import ( "github.com/algorand/go-deadlock" "github.com/stretchr/testify/require" - "golang.org/x/exp/maps" "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" diff --git a/agreement/autopsy.go b/agreement/autopsy.go index 82df84473a..dd9fe81420 100644 --- a/agreement/autopsy.go +++ 
b/agreement/autopsy.go @@ -20,11 +20,11 @@ import ( "fmt" "io" "os" + "slices" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/slices" ) // An Autopsy is a trace of the ordered input events and output diff --git a/agreement/common_test.go b/agreement/common_test.go index ca8983705e..f468bae4a1 100644 --- a/agreement/common_test.go +++ b/agreement/common_test.go @@ -19,12 +19,12 @@ package agreement import ( "context" "fmt" + "maps" "math/rand" "testing" "github.com/algorand/go-deadlock" "github.com/stretchr/testify/require" - "golang.org/x/exp/maps" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" diff --git a/cmd/algokey/keyreg.go b/cmd/algokey/keyreg.go index 43b072f4d0..4b034cef50 100644 --- a/cmd/algokey/keyreg.go +++ b/cmd/algokey/keyreg.go @@ -20,11 +20,12 @@ import ( "encoding/base64" "errors" "fmt" + "maps" "os" + "slices" "strings" "github.com/spf13/cobra" - "golang.org/x/exp/maps" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/account" @@ -95,7 +96,7 @@ func init() { "betanet": mustConvertB64ToDigest("mFgazF+2uRS1tMiL9dsj01hJGySEmPN28B/TjjvpVW0="), "devnet": mustConvertB64ToDigest("sC3P7e2SdbqKJK0tbiCdK9tdSpbe6XeCGKdoNzmlj0E="), } - validNetworkList = maps.Keys(validNetworks) + validNetworkList = slices.Collect(maps.Keys(validNetworks)) } func mustConvertB64ToDigest(b64 string) (digest crypto.Digest) { diff --git a/cmd/goal/account.go b/cmd/goal/account.go index 67d382bf45..b92d58f962 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -24,12 +24,12 @@ import ( "net/http" "os" "path/filepath" + "slices" "sort" "strings" "time" "github.com/spf13/cobra" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/cmd/util/datadir" "github.com/algorand/go-algorand/config" diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go 
index 6f61fb2da3..332b581cfb 100644 --- a/cmd/tealdbg/local.go +++ b/cmd/tealdbg/local.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "log" + "slices" "time" "github.com/algorand/go-algorand/config" @@ -28,7 +29,6 @@ import ( "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/apply" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/slices" ) func protoFromString(protoString string) (name string, proto config.ConsensusParams, err error) { diff --git a/crypto/merklearray/merkle.go b/crypto/merklearray/merkle.go index e540586b53..af74ad416f 100644 --- a/crypto/merklearray/merkle.go +++ b/crypto/merklearray/merkle.go @@ -21,10 +21,10 @@ import ( "errors" "fmt" "hash" + "slices" "sort" "github.com/algorand/go-algorand/crypto" - "golang.org/x/exp/slices" ) const ( diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go index d4d60b2c39..1491517e07 100644 --- a/crypto/merkletrie/cache.go +++ b/crypto/merkletrie/cache.go @@ -21,9 +21,8 @@ import ( "encoding/binary" "errors" "fmt" - - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" + "maps" + "slices" ) // storedNodeIdentifier is the "equivalent" of a node-ptr, but oriented around persisting the @@ -448,8 +447,7 @@ func (mtc *merkleTrieCache) reallocatePendingPages(stats *CommitStats) (pagesToC } // create a sorted list of created pages - sortedCreatedPages := maps.Keys(createdPages) - slices.Sort(sortedCreatedPages) + sortedCreatedPages := slices.Sorted(maps.Keys(createdPages)) mtc.reallocatedPages = make(map[uint64]map[storedNodeIdentifier]*node) diff --git a/crypto/merkletrie/committer.go b/crypto/merkletrie/committer.go index 66dd2c65c8..bbfb54966e 100644 --- a/crypto/merkletrie/committer.go +++ b/crypto/merkletrie/committer.go @@ -16,7 +16,7 @@ package merkletrie -import "golang.org/x/exp/slices" +import "slices" // Committer is the interface supporting serializing tries into persistent storage. 
type Committer interface { diff --git a/crypto/merkletrie/committer_test.go b/crypto/merkletrie/committer_test.go index 3dc6c39d2a..6260379aad 100644 --- a/crypto/merkletrie/committer_test.go +++ b/crypto/merkletrie/committer_test.go @@ -18,10 +18,10 @@ package merkletrie import ( "encoding/binary" + "slices" "testing" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/test/partitiontest" diff --git a/crypto/merkletrie/node.go b/crypto/merkletrie/node.go index f63d2b12ab..327a6eba65 100644 --- a/crypto/merkletrie/node.go +++ b/crypto/merkletrie/node.go @@ -19,11 +19,11 @@ package merkletrie import ( "bytes" "encoding/binary" + "slices" "sort" "unsafe" "github.com/algorand/go-algorand/crypto" - "golang.org/x/exp/slices" ) type childEntry struct { diff --git a/daemon/algod/api/server/v2/account.go b/daemon/algod/api/server/v2/account.go index 10ec183919..dc6e3cd14d 100644 --- a/daemon/algod/api/server/v2/account.go +++ b/daemon/algod/api/server/v2/account.go @@ -20,6 +20,7 @@ import ( "encoding/base64" "errors" "math" + "slices" "sort" "github.com/algorand/go-algorand/config" @@ -27,7 +28,6 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/data/basics" - "golang.org/x/exp/slices" ) // AssetHolding converts between basics.AssetHolding and model.AssetHolding diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index bc4799dc13..4b97f8f3a4 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -28,6 +28,7 @@ import ( "net/http" "net/http/httptest" "reflect" + "slices" "strings" "testing" "time" @@ -37,7 +38,6 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server" 
"github.com/algorand/go-algorand/ledger/eval" "github.com/algorand/go-algorand/ledger/ledgercore" - "golang.org/x/exp/slices" "github.com/labstack/echo/v4" "github.com/stretchr/testify/assert" diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go index 6f36784ee4..58120f46d8 100644 --- a/daemon/algod/api/server/v2/utils.go +++ b/daemon/algod/api/server/v2/utils.go @@ -20,15 +20,15 @@ import ( "encoding/base64" "errors" "fmt" + "maps" "net/http" + "slices" "strings" "unicode" "unicode/utf8" "github.com/algorand/go-codec/codec" "github.com/labstack/echo/v4" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" @@ -494,23 +494,23 @@ func convertUnnamedResourcesAccessed(resources *simulation.ResourceTracker) *mod return nil } return &model.SimulateUnnamedResourcesAccessed{ - Accounts: sliceOrNil(stringSlice(maps.Keys(resources.Accounts))), - Assets: sliceOrNil(uint64Slice(maps.Keys(resources.Assets))), - Apps: sliceOrNil(uint64Slice(maps.Keys(resources.Apps))), - Boxes: sliceOrNil(convertSlice(maps.Keys(resources.Boxes), func(box logic.BoxRef) model.BoxReference { + Accounts: sliceOrNil(stringSlice(slices.Collect(maps.Keys(resources.Accounts)))), + Assets: sliceOrNil(uint64Slice(slices.Collect(maps.Keys(resources.Assets)))), + Apps: sliceOrNil(uint64Slice(slices.Collect(maps.Keys(resources.Apps)))), + Boxes: sliceOrNil(convertSlice(slices.Collect(maps.Keys(resources.Boxes)), func(box logic.BoxRef) model.BoxReference { return model.BoxReference{ App: uint64(box.App), Name: []byte(box.Name), } })), ExtraBoxRefs: omitEmpty(uint64(resources.NumEmptyBoxRefs)), - AssetHoldings: sliceOrNil(convertSlice(maps.Keys(resources.AssetHoldings), func(holding ledgercore.AccountAsset) model.AssetHoldingReference { + AssetHoldings: sliceOrNil(convertSlice(slices.Collect(maps.Keys(resources.AssetHoldings)), 
func(holding ledgercore.AccountAsset) model.AssetHoldingReference { return model.AssetHoldingReference{ Account: holding.Address.String(), Asset: uint64(holding.Asset), } })), - AppLocals: sliceOrNil(convertSlice(maps.Keys(resources.AppLocals), func(local ledgercore.AccountApp) model.ApplicationLocalReference { + AppLocals: sliceOrNil(convertSlice(slices.Collect(maps.Keys(resources.AppLocals)), func(local ledgercore.AccountApp) model.ApplicationLocalReference { return model.ApplicationLocalReference{ Account: local.Address.String(), App: uint64(local.App), diff --git a/data/basics/teal.go b/data/basics/teal.go index f3156ae4b8..7268c8aadf 100644 --- a/data/basics/teal.go +++ b/data/basics/teal.go @@ -19,9 +19,9 @@ package basics import ( "encoding/hex" "fmt" + "maps" "github.com/algorand/go-algorand/config" - "golang.org/x/exp/maps" ) // DeltaAction is an enum of actions that may be performed when applying a diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go index d8f86aea54..3bccd3f4ed 100644 --- a/data/basics/userBalance.go +++ b/data/basics/userBalance.go @@ -20,13 +20,13 @@ import ( "encoding/binary" "fmt" "reflect" + "slices" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/slices" ) // Status is the delegation status of an account's MicroAlgos diff --git a/data/transactions/application.go b/data/transactions/application.go index 48a5788c04..1cff14760d 100644 --- a/data/transactions/application.go +++ b/data/transactions/application.go @@ -18,9 +18,9 @@ package transactions import ( "fmt" + "slices" "github.com/algorand/go-algorand/data/basics" - "golang.org/x/exp/slices" ) const ( diff --git a/data/transactions/logic/crypto_test.go b/data/transactions/logic/crypto_test.go index c528424ef6..70283be7e3 100644 --- 
a/data/transactions/logic/crypto_test.go +++ b/data/transactions/logic/crypto_test.go @@ -25,11 +25,11 @@ import ( "encoding/hex" "fmt" "math/big" + "slices" "strconv" "testing" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/secp256k1" diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index d599d56119..ae8614e824 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -28,11 +28,10 @@ import ( "math/big" "math/bits" "runtime" + "slices" "strconv" "strings" - "golang.org/x/exp/slices" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index f3f8bfe37d..419e937874 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -19,12 +19,12 @@ package logic import ( "cmp" "fmt" + "maps" + "slices" "strconv" "strings" "github.com/algorand/go-algorand/data/basics" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // LogicVersion defines default assembler and max eval versions @@ -838,11 +838,10 @@ func OpcodesByVersion(version uint64) []OpSpec { } } } - result := maps.Values(subv) - slices.SortFunc(result, func(a, b OpSpec) int { + values := maps.Values(subv) + return slices.SortedFunc(values, func(a, b OpSpec) int { return cmp.Compare(a.Opcode, b.Opcode) }) - return result } // direct opcode bytes diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go index 57a2d5eb76..a39be85106 100644 --- a/data/transactions/logic/opcodes_test.go +++ b/data/transactions/logic/opcodes_test.go @@ -19,11 +19,11 @@ package logic import ( "fmt" "reflect" + "slices" "testing" "github.com/algorand/go-algorand/test/partitiontest" 
"github.com/stretchr/testify/require" - "golang.org/x/exp/slices" ) func TestOpSpecs(t *testing.T) { diff --git a/data/transactions/teal.go b/data/transactions/teal.go index bdb68b525d..fa25a71520 100644 --- a/data/transactions/teal.go +++ b/data/transactions/teal.go @@ -18,11 +18,11 @@ package transactions import ( "bytes" + "maps" + "slices" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // EvalDelta stores StateDeltas for an application's global key/value store, as diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go index 4a6d5b6603..a8226654b5 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -21,13 +21,13 @@ import ( "encoding/binary" "errors" "fmt" + "slices" "sync" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/slices" ) // Txid is a hash used to uniquely identify individual transactions diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go index dfc298bf84..77efd5b075 100644 --- a/ledger/apply/application_test.go +++ b/ledger/apply/application_test.go @@ -18,11 +18,11 @@ package apply import ( "fmt" + "maps" "math/rand" "testing" "github.com/stretchr/testify/require" - "golang.org/x/exp/maps" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" diff --git a/ledger/apply/mockBalances_test.go b/ledger/apply/mockBalances_test.go index 43af5fa11d..a02a2108fd 100644 --- a/ledger/apply/mockBalances_test.go +++ b/ledger/apply/mockBalances_test.go @@ -17,13 +17,14 @@ package apply import ( + "maps" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" 
"github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/maps" ) type mockBalances struct { diff --git a/ledger/eval/cow.go b/ledger/eval/cow.go index 9511af7ce7..046fb46598 100644 --- a/ledger/eval/cow.go +++ b/ledger/eval/cow.go @@ -28,7 +28,6 @@ import ( "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" - "golang.org/x/exp/maps" ) // ___________________ @@ -352,9 +351,9 @@ func (cb *roundCowState) reset() { cb.proto = config.ConsensusParams{} cb.mods.Reset() cb.txnCount = 0 - maps.Clear(cb.sdeltas) + clear(cb.sdeltas) cb.compatibilityMode = false - maps.Clear(cb.compatibilityGetKeyCache) + clear(cb.compatibilityGetKeyCache) cb.prevTotals = ledgercore.AccountTotals{} cb.feesCollected = basics.MicroAlgos{} } diff --git a/ledger/eval/txntracer.go b/ledger/eval/txntracer.go index 96d307390a..9d19d42e69 100644 --- a/ledger/eval/txntracer.go +++ b/ledger/eval/txntracer.go @@ -18,10 +18,10 @@ package eval import ( "fmt" + "maps" + "slices" "github.com/algorand/go-deadlock" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index e73d648b4d..9018d5d73b 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -25,11 +25,11 @@ import ( "os" "path/filepath" "runtime" + "slices" "sort" "testing" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 1d2562ca4f..90bf7afeaf 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -18,11 +18,11 @@ package 
ledgercore import ( "fmt" + "maps" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" - "golang.org/x/exp/maps" ) const ( @@ -294,24 +294,24 @@ func (ad *AccountDeltas) Dehydrate() { if ad.acctsCache == nil { ad.acctsCache = make(map[basics.Address]int) } - maps.Clear(ad.acctsCache) + clear(ad.acctsCache) if ad.appResourcesCache == nil { ad.appResourcesCache = make(map[AccountApp]int) } - maps.Clear(ad.appResourcesCache) + clear(ad.appResourcesCache) if ad.assetResourcesCache == nil { ad.assetResourcesCache = make(map[AccountAsset]int) } - maps.Clear(ad.assetResourcesCache) + clear(ad.assetResourcesCache) } // Reset resets the StateDelta for re-use with sync.Pool func (sd *StateDelta) Reset() { sd.Accts.reset() - maps.Clear(sd.Txids) - maps.Clear(sd.Txleases) - maps.Clear(sd.Creatables) - maps.Clear(sd.KvMods) + clear(sd.Txids) + clear(sd.Txleases) + clear(sd.Creatables) + clear(sd.KvMods) sd.Totals = AccountTotals{} // these fields are going to be populated on next use but resetting them anyway for safety. 
@@ -329,9 +329,9 @@ func (ad *AccountDeltas) reset() { ad.AssetResources = ad.AssetResources[:0] // reset the maps - maps.Clear(ad.acctsCache) - maps.Clear(ad.appResourcesCache) - maps.Clear(ad.assetResourcesCache) + clear(ad.acctsCache) + clear(ad.appResourcesCache) + clear(ad.assetResourcesCache) } // notAllocated returns true if any of the fields allocated by MakeAccountDeltas is nil diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go index 98ce3bd695..ef81a8a10e 100644 --- a/ledger/simulation/simulation_eval_test.go +++ b/ledger/simulation/simulation_eval_test.go @@ -21,11 +21,10 @@ import ( "encoding/hex" "fmt" "math" + "slices" "strings" "testing" - "golang.org/x/exp/slices" - "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" diff --git a/netdeploy/network.go b/netdeploy/network.go index 6f31673a54..1ca99239f4 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -20,8 +20,10 @@ import ( "encoding/json" "fmt" "io" + "maps" "os" "path/filepath" + "slices" "sort" "strings" "time" @@ -34,7 +36,6 @@ import ( "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util" - "golang.org/x/exp/maps" ) const configFileName = "network.json" @@ -365,7 +366,7 @@ func (n Network) GetPeerAddresses(binDir string) []string { } func (n Network) startNodes(binDir string, relayNameToAddress map[string]string, redirectOutput bool) error { - allRelaysAddresses := strings.Join(maps.Values(relayNameToAddress), ";") + allRelaysAddresses := strings.Join(slices.Collect(maps.Values(relayNameToAddress)), ";") nodeConfigToEntry := make(map[string]remote.NodeConfigGoal, len(n.cfg.Template.Nodes)) for _, n := range n.cfg.Template.Nodes { diff --git a/network/p2p/peerstore/peerstore.go b/network/p2p/peerstore/peerstore.go index 5ae9c6aa04..4a4e7e6ddf 100644 
--- a/network/p2p/peerstore/peerstore.go +++ b/network/p2p/peerstore/peerstore.go @@ -20,12 +20,12 @@ import ( "fmt" "math" "math/rand" + "slices" "time" "github.com/libp2p/go-libp2p/core/peer" libp2p "github.com/libp2p/go-libp2p/core/peerstore" mempstore "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" - "golang.org/x/exp/slices" "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-deadlock" diff --git a/network/phonebook/phonebook.go b/network/phonebook/phonebook.go index b3aeafb0fa..100d624c04 100644 --- a/network/phonebook/phonebook.go +++ b/network/phonebook/phonebook.go @@ -19,10 +19,10 @@ package phonebook import ( "math" "math/rand" + "slices" "time" "github.com/algorand/go-deadlock" - "golang.org/x/exp/slices" ) // getAllAddresses when using GetAddresses with getAllAddresses, all the addresses will be retrieved, regardless diff --git a/stateproof/builder.go b/stateproof/builder.go index 317e813602..96ca279a4b 100644 --- a/stateproof/builder.go +++ b/stateproof/builder.go @@ -22,6 +22,8 @@ import ( "encoding/binary" "errors" "fmt" + "maps" + "slices" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto/stateproof" @@ -34,8 +36,6 @@ import ( "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/stateproof/verify" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) var errVotersNotTracked = errors.New("voters not tracked for the given lookback round") @@ -642,8 +642,7 @@ func (spw *Worker) tryBroadcast() { spw.mu.Lock() defer spw.mu.Unlock() - sortedRounds := maps.Keys(spw.provers) - slices.Sort(sortedRounds) + sortedRounds := slices.Sorted(maps.Keys(spw.provers)) for _, rnd := range sortedRounds { // Iterate over the provers in a sequential manner. 
If the earlist state proof is not ready/rejected diff --git a/test/reflectionhelpers/helpers.go b/test/reflectionhelpers/helpers.go index de11d3c9e9..d3a7122042 100644 --- a/test/reflectionhelpers/helpers.go +++ b/test/reflectionhelpers/helpers.go @@ -19,9 +19,8 @@ package reflectionhelpers import ( "fmt" "reflect" + "slices" "strings" - - "golang.org/x/exp/slices" ) // TypeSegmentKind is a enum for the types of TypeSegment diff --git a/util/metrics/opencensus.go b/util/metrics/opencensus.go index fefb1d054b..d61c6206cd 100644 --- a/util/metrics/opencensus.go +++ b/util/metrics/opencensus.go @@ -21,11 +21,11 @@ package metrics import ( "context" + "slices" "strings" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" - "golang.org/x/exp/slices" ) type defaultOpencensusGatherer struct { diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go index c2b5fcb9bb..d12f10ead5 100644 --- a/util/metrics/tagcounter.go +++ b/util/metrics/tagcounter.go @@ -17,12 +17,12 @@ package metrics import ( + "maps" "strconv" "strings" "sync/atomic" "github.com/algorand/go-deadlock" - "golang.org/x/exp/maps" ) // NewTagCounterFiltered makes a set of metrics under rootName for tagged counting. From 0a135015e17113fb7e17c0a2c56e8d18b5ccfb5d Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Tue, 26 Nov 2024 16:06:51 +0000 Subject: [PATCH 03/15] Bump Version, Remove buildnumber.dat and genesistimestamp.dat files. 
--- buildnumber.dat | 1 - config/version.go | 2 +- genesistimestamp.dat | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 buildnumber.dat delete mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat deleted file mode 100644 index 573541ac97..0000000000 --- a/buildnumber.dat +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/config/version.go b/config/version.go index 6dc23ee5b1..94a82a6ca8 100644 --- a/config/version.go +++ b/config/version.go @@ -33,7 +33,7 @@ const VersionMajor = 3 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 27 +const VersionMinor = 28 // Version is the type holding our full version information. type Version struct { diff --git a/genesistimestamp.dat b/genesistimestamp.dat deleted file mode 100644 index c72c6a7795..0000000000 --- a/genesistimestamp.dat +++ /dev/null @@ -1 +0,0 @@ -1558657885 From b7b3e5e3c9a83cbd6bd038f4f1856039d941b958 Mon Sep 17 00:00:00 2001 From: John <153272819+hishope@users.noreply.github.com> Date: Tue, 3 Dec 2024 09:23:07 +0800 Subject: [PATCH 04/15] chore: fix some problematic function names (#6184) Signed-off-by: hishope --- rpcs/ledgerService.go | 2 +- util/execpool/backlog.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go index d76273de62..d8f45b3a05 100644 --- a/rpcs/ledgerService.go +++ b/rpcs/ledgerService.go @@ -108,7 +108,7 @@ func (ls *LedgerService) Stop() { } } -// ServerHTTP returns ledgers for a particular round +// ServeHTTP returns ledgers for a particular round // Either /v{version}/{genesisID}/ledger/{round} or ?r={round}&v={version} // Uses gorilla/mux for path argument parsing. 
func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.Request) { diff --git a/util/execpool/backlog.go b/util/execpool/backlog.go index c98a2fd427..969587de68 100644 --- a/util/execpool/backlog.go +++ b/util/execpool/backlog.go @@ -102,7 +102,7 @@ func (b *backlog) BufferSize() (length, capacity int) { return len(b.buffer), cap(b.buffer) } -// Enqueue enqueues a single task into the backlog +// EnqueueBacklog enqueues a single task into the backlog func (b *backlog) EnqueueBacklog(enqueueCtx context.Context, t ExecFunc, arg interface{}, out chan interface{}) error { select { case b.buffer <- backlogItemTask{ From b0f13969131a1bef4670deae01f683bad7fa0fff Mon Sep 17 00:00:00 2001 From: needsure <166317845+needsure@users.noreply.github.com> Date: Tue, 10 Dec 2024 22:28:32 +0800 Subject: [PATCH 05/15] chore: fix some function name in comment (#6192) Signed-off-by: needsure --- netdeploy/remote/nodecfg/nodeConfigurator.go | 2 +- network/p2p/testing/httpNode.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go index 8e6bea9718..1de017f10d 100644 --- a/netdeploy/remote/nodecfg/nodeConfigurator.go +++ b/netdeploy/remote/nodecfg/nodeConfigurator.go @@ -202,7 +202,7 @@ func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootCon return } -// getHostName creates a DNS name for a host +// getNetworkHostName creates a DNS name for a host func (nc *nodeConfigurator) getNetworkHostName() string { return nc.config.Name + "." 
+ string(nc.genesisData.Network) + ".algodev.network" } diff --git a/network/p2p/testing/httpNode.go b/network/p2p/testing/httpNode.go index f73b26999f..ea84144180 100644 --- a/network/p2p/testing/httpNode.go +++ b/network/p2p/testing/httpNode.go @@ -102,7 +102,7 @@ func (p httpPeer) GetAddress() string { return mas[0].String() } -// GetAddress implements HTTPPeer interface and returns the http client for a peer +// GetHTTPClient implements HTTPPeer interface and returns the http client for a peer func (p httpPeer) GetHTTPClient() *http.Client { c, err := p2p.MakeTestHTTPClient(&p.addrInfo) require.NoError(p.tb, err) From f87ae8a83df015946c5d9f84b5bcb699b5f7377d Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Tue, 10 Dec 2024 14:19:03 -0500 Subject: [PATCH 06/15] ledger: add callback to clear state between commitRound retries (#6190) --- ledger/acctonline.go | 10 --- ledger/acctonline_test.go | 4 +- ledger/acctupdates.go | 10 --- ledger/acctupdates_test.go | 2 - ledger/bulletin.go | 10 --- ledger/catchpointtracker.go | 8 +++ ledger/catchpointtracker_test.go | 63 +++++++++++++++++++ ledger/metrics.go | 10 --- ledger/notifier.go | 10 --- ledger/simple_test.go | 11 +++- ledger/spverificationtracker.go | 10 --- ledger/spverificationtracker_test.go | 1 - .../store/trackerdb/dualdriver/dualdriver.go | 21 +++++++ .../trackerdb/pebbledbdriver/pebbledriver.go | 12 ++++ .../trackerdb/sqlitedriver/sqlitedriver.go | 10 +++ ledger/store/trackerdb/store.go | 5 ++ .../store/trackerdb/testsuite/utils_test.go | 10 +++ ledger/tracker.go | 47 +++++++++++--- ledger/tracker_test.go | 3 + ledger/txtail.go | 10 --- util/db/dbutil.go | 24 +++++-- 21 files changed, 202 insertions(+), 89 deletions(-) diff --git a/ledger/acctonline.go b/ledger/acctonline.go index 0db04e92ad..380ff45852 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -355,13 +355,6 @@ func (ao *onlineAccounts) consecutiveVersion(offset uint64) uint64 { return offset } -func (ao 
*onlineAccounts) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (ao *onlineAccounts) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (ao *onlineAccounts) handleCommitError(dcc *deferredCommitContext) { -} - func (ao *onlineAccounts) maxBalLookback() uint64 { lastProtoVersion := ao.onlineRoundParamsData[len(ao.onlineRoundParamsData)-1].CurrentProtocol return config.Consensus[lastProtoVersion].MaxBalLookback @@ -535,9 +528,6 @@ func (ao *onlineAccounts) postCommit(ctx context.Context, dcc *deferredCommitCon ao.voters.postCommit(dcc) } -func (ao *onlineAccounts) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { -} - // onlineCirculation return the total online balance for the given round, for use by agreement. func (ao *onlineAccounts) onlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) { // Get cached total stake for rnd diff --git a/ledger/acctonline_test.go b/ledger/acctonline_test.go index afc7244082..296a5a2481 100644 --- a/ledger/acctonline_test.go +++ b/ledger/acctonline_test.go @@ -110,7 +110,9 @@ func commitSyncPartialComplete(t *testing.T, oa *onlineAccounts, ml *mockLedgerF ml.trackers.lastFlushTime = dcc.flushTime for _, lt := range ml.trackers.trackers { - lt.postCommitUnlocked(ml.trackers.ctx, dcc) + if lt, ok := lt.(trackerCommitLifetimeHandlers); ok { + lt.postCommitUnlocked(ml.trackers.ctx, dcc) + } } } diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 3f12955666..6acbb12ae5 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1483,13 +1483,6 @@ func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err erro return off, nil } -func (au *accountUpdates) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (au *accountUpdates) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (au *accountUpdates) handleCommitError(dcc *deferredCommitContext) { -} - // prepareCommit prepares data to write to the database a 
"chunk" of rounds, and update the cached dbRound accordingly. func (au *accountUpdates) prepareCommit(dcc *deferredCommitContext) error { if au.logAccountUpdatesMetrics { @@ -1745,9 +1738,6 @@ func (au *accountUpdates) postCommit(ctx context.Context, dcc *deferredCommitCon } } -func (au *accountUpdates) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { -} - // compactKvDeltas takes an array of StateDeltas containing kv deltas (one array entry per round), and // compacts the array into a single map that contains all the // changes. Intermediate changes are eliminated. It counts the number of diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index a27a2be795..aa43b07b90 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -2044,7 +2044,6 @@ func TestAcctUpdatesResources(t *testing.T) { require.NoError(t, err) ml.trackers.dbRound = newBase au.postCommit(ml.trackers.ctx, dcc) - au.postCommitUnlocked(ml.trackers.ctx, dcc) }() } @@ -2330,7 +2329,6 @@ func auCommitSync(t *testing.T, rnd basics.Round, au *accountUpdates, ml *mockLe require.NoError(t, err) ml.trackers.dbRound = newBase au.postCommit(ml.trackers.ctx, dcc) - au.postCommitUnlocked(ml.trackers.ctx, dcc) }() } } diff --git a/ledger/bulletin.go b/ledger/bulletin.go index 0b3f08a6b4..8af69f472d 100644 --- a/ledger/bulletin.go +++ b/ledger/bulletin.go @@ -142,16 +142,6 @@ func (b *bulletin) commitRound(context.Context, trackerdb.TransactionScope, *def func (b *bulletin) postCommit(ctx context.Context, dcc *deferredCommitContext) { } -func (b *bulletin) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { -} - -func (b *bulletin) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (b *bulletin) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (b *bulletin) handleCommitError(dcc *deferredCommitContext) { -} - func (b *bulletin) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr 
*deferredCommitRange) *deferredCommitRange { return dcr } diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 036f5490b3..df7772de53 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -982,6 +982,14 @@ func (ct *catchpointTracker) handlePrepareCommitError(dcc *deferredCommitContext ct.cancelWrite(dcc) } +// if an error is encountered between retries, clear the balancesTrie to clear in-memory changes made in commitRound(). +func (ct *catchpointTracker) clearCommitRoundRetry(ctx context.Context, dcc *deferredCommitContext) { + ct.log.Infof("rolling back failed commitRound for oldBase %d offset %d, clearing balancesTrie", dcc.oldBase, dcc.offset) + ct.catchpointsMu.Lock() + ct.balancesTrie = nil // balancesTrie will be re-created in the next call to commitRound + ct.catchpointsMu.Unlock() +} + // if an error is encountered during commit, cancel writing and clear the balances trie func (ct *catchpointTracker) handleCommitError(dcc *deferredCommitContext) { // in cases where the commitRound fails, it is not certain that the merkle trie is in a clean state, and should be cleared. 
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index 6790344889..da2408946b 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -17,6 +17,7 @@ package ledger import ( + "bytes" "context" "encoding/hex" "errors" @@ -39,6 +40,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/txntest" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/ledger/store/trackerdb" ledgertesting "github.com/algorand/go-algorand/ledger/testing" @@ -48,6 +50,9 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) +// assert catchpointTracker implements the trackerCommitLifetimeHandlers interface +var _ trackerCommitLifetimeHandlers = &catchpointTracker{} + func TestCatchpointIsWritingCatchpointFile(t *testing.T) { partitiontest.PartitionTest(t) @@ -2094,3 +2099,61 @@ func TestMakeCatchpointFilePath(t *testing.T) { } } + +// Test a case where in-memory SQLite, combined with fast locking (improved performance, or no +// deadlock detection) and concurrent reads (from transaction evaluation, stake lookups, etc) can +// cause the SQLite implementation in util/db/dbutil.go to retry the function looping over all +// tracker commitRound implementations. Since catchpointtracker' commitRound updates a merkle trie's +// DB storage and its in-memory cache, the retry can cause the the balancesTrie's cache to become +// corrupted and out of sync with the DB (which uses transaction rollback between retries). The +// merkle trie corruption manifests as error log messages like: +// - "attempted to add duplicate hash 'X' to merkle trie for account Y" +// - "failed to delete hash 'X' from merkle trie for account Y" +// +// So we assert that those errors do not occur after the fix in #6190. 
+// +//nolint:paralleltest // deadlock detection is globally disabled, so this test is not parallel-safe +func TestCatchpointTrackerFastRoundsDBRetry(t *testing.T) { + partitiontest.PartitionTest(t) + + var bufNewLogger bytes.Buffer + log := logging.NewLogger() + log.SetOutput(&bufNewLogger) + + // disabling deadlock detection globally causes the race detector to go off, but this + // bug can still happen even when deadlock detection is not disabled + //deadlock.Opts.Disable = true // disable deadlock detection during this test + //defer func() { deadlock.Opts.Disable = false }() + + genBalances, addrs, _ := ledgertesting.NewTestGenesis(func(cfg *ledgertesting.GenesisCfg) { + cfg.OnlineCount = 1 + ledgertesting.TurnOffRewards(cfg) + }) + cfg := config.GetDefaultLocal() + dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg, simpleLedgerLogger(log)) // in-memory SQLite + defer dl.Close() + + appSrc := main(`int 1; int 1; ==; assert`) + app := dl.fundedApp(addrs[1], 1_000_000, appSrc) + + makeTxn := func() *txntest.Txn { + return &txntest.Txn{ + Type: "appl", + Sender: addrs[2], + ApplicationID: app, + Note: ledgertesting.RandomNote(), + } + } + + for vb := dl.fullBlock(makeTxn()); vb.Block().Round() <= 1500; vb = dl.fullBlock(makeTxn()) { + nextRnd := vb.Block().Round() + 1 + _, err := dl.generator.OnlineCirculation(nextRnd.SubSaturate(320), nextRnd) + require.NoError(t, err) + require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + } + + // assert that no corruption of merkle trie happened due to DB retries leaving + // incorrect state in the merkle trie cache. 
+ require.NotContains(t, bufNewLogger.String(), "to merkle trie for account", "Merkle trie was corrupted!") +} diff --git a/ledger/metrics.go b/ledger/metrics.go index 7a56d58d56..9f8a0ace0d 100644 --- a/ledger/metrics.go +++ b/ledger/metrics.go @@ -84,16 +84,6 @@ func (mt *metricsTracker) postCommit(ctx context.Context, dcc *deferredCommitCon mt.ledgerDBRound.Set(uint64(dcc.newBase())) } -func (mt *metricsTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { -} - -func (mt *metricsTracker) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (mt *metricsTracker) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (mt *metricsTracker) handleCommitError(dcc *deferredCommitContext) { -} - func (mt *metricsTracker) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange { return dcr } diff --git a/ledger/notifier.go b/ledger/notifier.go index f97e1c77e6..c7a8996551 100644 --- a/ledger/notifier.go +++ b/ledger/notifier.go @@ -122,16 +122,6 @@ func (bn *blockNotifier) commitRound(context.Context, trackerdb.TransactionScope func (bn *blockNotifier) postCommit(ctx context.Context, dcc *deferredCommitContext) { } -func (bn *blockNotifier) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { -} - -func (bn *blockNotifier) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (bn *blockNotifier) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (bn *blockNotifier) handleCommitError(dcc *deferredCommitContext) { -} - func (bn *blockNotifier) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange { return dcr } diff --git a/ledger/simple_test.go b/ledger/simple_test.go index 9d4a480b49..8b4632d1de 100644 --- a/ledger/simple_test.go +++ b/ledger/simple_test.go @@ -42,6 +42,7 @@ import ( type simpleLedgerCfg struct { onDisk bool // default is in-memory notArchival bool 
// default is archival + logger logging.Logger } type simpleLedgerOption func(*simpleLedgerCfg) @@ -54,6 +55,10 @@ func simpleLedgerNotArchival() simpleLedgerOption { return func(cfg *simpleLedgerCfg) { cfg.notArchival = true } } +func simpleLedgerLogger(l logging.Logger) simpleLedgerOption { + return func(cfg *simpleLedgerCfg) { cfg.logger = l } +} + func newSimpleLedgerWithConsensusVersion(t testing.TB, balances bookkeeping.GenesisBalances, cv protocol.ConsensusVersion, cfg config.Local, opts ...simpleLedgerOption) *Ledger { var genHash crypto.Digest crypto.RandBytes(genHash[:]) @@ -72,7 +77,11 @@ func newSimpleLedgerFull(t testing.TB, balances bookkeeping.GenesisBalances, cv dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) dbName = strings.Replace(dbName, "/", "_", -1) cfg.Archival = !slCfg.notArchival - l, err := OpenLedger(logging.Base(), dbName, !slCfg.onDisk, ledgercore.InitState{ + log := slCfg.logger + if log == nil { + log = logging.Base() + } + l, err := OpenLedger(log, dbName, !slCfg.onDisk, ledgercore.InitState{ Block: genBlock, Accounts: balances.Balances, GenesisHash: genHash, diff --git a/ledger/spverificationtracker.go b/ledger/spverificationtracker.go index b430368981..ba8b537d16 100644 --- a/ledger/spverificationtracker.go +++ b/ledger/spverificationtracker.go @@ -157,16 +157,6 @@ func (spt *spVerificationTracker) postCommit(_ context.Context, dcc *deferredCom spt.pendingDeleteContexts = spt.pendingDeleteContexts[dcc.spVerification.lastDeleteIndex+1:] } -func (spt *spVerificationTracker) postCommitUnlocked(context.Context, *deferredCommitContext) { -} - -func (spt *spVerificationTracker) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (spt *spVerificationTracker) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (spt *spVerificationTracker) handleCommitError(dcc *deferredCommitContext) { -} - func (spt *spVerificationTracker) close() { } diff --git a/ledger/spverificationtracker_test.go 
b/ledger/spverificationtracker_test.go index 7bba5eac87..d306d9b60f 100644 --- a/ledger/spverificationtracker_test.go +++ b/ledger/spverificationtracker_test.go @@ -88,7 +88,6 @@ func mockCommit(t *testing.T, spt *spVerificationTracker, ml *mockLedgerForTrack postCommitCtx, cancel := context.WithCancel(context.Background()) defer cancel() spt.postCommit(postCommitCtx, &dcc) - spt.postCommitUnlocked(postCommitCtx, &dcc) } func genesisBlock() *blockEntry { diff --git a/ledger/store/trackerdb/dualdriver/dualdriver.go b/ledger/store/trackerdb/dualdriver/dualdriver.go index cbcba9c480..e51b05929f 100644 --- a/ledger/store/trackerdb/dualdriver/dualdriver.go +++ b/ledger/store/trackerdb/dualdriver/dualdriver.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "reflect" + "sync" "time" "github.com/algorand/go-algorand/ledger/store/trackerdb" @@ -123,6 +124,10 @@ func (s *trackerStore) Transaction(fn trackerdb.TransactionFn) (err error) { return s.TransactionContext(context.Background(), fn) } +func (s *trackerStore) TransactionWithRetryClearFn(fn trackerdb.TransactionFn, rollbackFn trackerdb.RetryClearFn) error { + return s.TransactionContextWithRetryClearFn(context.Background(), fn, rollbackFn) +} + func (s *trackerStore) TransactionContext(ctx context.Context, fn trackerdb.TransactionFn) error { handle, err := s.BeginTransaction(ctx) if err != nil { @@ -138,6 +143,22 @@ func (s *trackerStore) TransactionContext(ctx context.Context, fn trackerdb.Tran return handle.Commit() } +func (s *trackerStore) TransactionContextWithRetryClearFn(ctx context.Context, fn trackerdb.TransactionFn, rollbackFn trackerdb.RetryClearFn) error { + var wg sync.WaitGroup + wg.Add(2) + var pErr, sErr error + go func() { + pErr = s.primary.TransactionContextWithRetryClearFn(ctx, fn, rollbackFn) + wg.Done() + }() + go func() { + sErr = s.secondary.TransactionContextWithRetryClearFn(ctx, fn, rollbackFn) + wg.Done() + }() + wg.Wait() + return coalesceErrors(pErr, sErr) +} + func (s *trackerStore) 
BeginTransaction(ctx context.Context) (trackerdb.Transaction, error) { primary, err := s.primary.BeginTransaction(ctx) if err != nil { diff --git a/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go b/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go index 7d1b394015..2bb64456b5 100644 --- a/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go +++ b/ledger/store/trackerdb/pebbledbdriver/pebbledriver.go @@ -322,6 +322,11 @@ func (s *trackerStore) Transaction(fn trackerdb.TransactionFn) (err error) { return s.TransactionContext(context.Background(), fn) } +// TransactionWithRetryClearFn implements trackerdb.Store +func (s *trackerStore) TransactionWithRetryClearFn(fn trackerdb.TransactionFn, rollbackFn trackerdb.RetryClearFn) (err error) { + return s.TransactionContextWithRetryClearFn(context.Background(), fn, rollbackFn) +} + // TransactionContext implements trackerdb.Store func (s *trackerStore) TransactionContext(ctx context.Context, fn trackerdb.TransactionFn) (err error) { handle, err := s.BeginTransaction(ctx) @@ -345,6 +350,13 @@ func (s *trackerStore) TransactionContext(ctx context.Context, fn trackerdb.Tran return err } +// TransactionContextWithRetryClearFn implements trackerdb.Store. +// It ignores the RetryClearFn, since it does not need to retry +// transactions to work around SQLite issues like the sqlitedriver. 
+func (s *trackerStore) TransactionContextWithRetryClearFn(ctx context.Context, fn trackerdb.TransactionFn, _ trackerdb.RetryClearFn) error { + return s.TransactionContext(ctx, fn) +} + // BeginTransaction implements trackerdb.Store func (s *trackerStore) BeginTransaction(ctx context.Context) (trackerdb.Transaction, error) { scope := transactionScope{ diff --git a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go index 07459fa0c0..54a080fe94 100644 --- a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go +++ b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go @@ -103,12 +103,22 @@ func (s *trackerSQLStore) Transaction(fn trackerdb.TransactionFn) (err error) { return wrapIOError(s.TransactionContext(context.Background(), fn)) } +func (s *trackerSQLStore) TransactionWithRetryClearFn(fn trackerdb.TransactionFn, rollbackFn trackerdb.RetryClearFn) (err error) { + return wrapIOError(s.TransactionContextWithRetryClearFn(context.Background(), fn, rollbackFn)) +} + func (s *trackerSQLStore) TransactionContext(ctx context.Context, fn trackerdb.TransactionFn) (err error) { return wrapIOError(s.pair.Wdb.AtomicContext(ctx, func(ctx context.Context, tx *sql.Tx) error { return fn(ctx, &sqlTransactionScope{tx, false, &sqlReader{tx}, &sqlWriter{tx}, &sqlCatchpoint{tx}}) })) } +func (s *trackerSQLStore) TransactionContextWithRetryClearFn(ctx context.Context, fn trackerdb.TransactionFn, rollbackFn trackerdb.RetryClearFn) (err error) { + return wrapIOError(s.pair.Wdb.AtomicContextWithRetryClearFn(ctx, func(ctx context.Context, tx *sql.Tx) error { + return fn(ctx, &sqlTransactionScope{tx, false, &sqlReader{tx}, &sqlWriter{tx}, &sqlCatchpoint{tx}}) + }, rollbackFn)) +} + func (s *trackerSQLStore) BeginTransaction(ctx context.Context) (trackerdb.Transaction, error) { handle, err := s.pair.Wdb.Handle.BeginTx(ctx, nil) if err != nil { diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go index 
735550ed96..257ff63842 100644 --- a/ledger/store/trackerdb/store.go +++ b/ledger/store/trackerdb/store.go @@ -40,7 +40,9 @@ type Store interface { BeginSnapshot(ctx context.Context) (Snapshot, error) // transaction support Transaction(fn TransactionFn) (err error) + TransactionWithRetryClearFn(TransactionFn, RetryClearFn) (err error) TransactionContext(ctx context.Context, fn TransactionFn) (err error) + TransactionContextWithRetryClearFn(context.Context, TransactionFn, RetryClearFn) (err error) BeginTransaction(ctx context.Context) (Transaction, error) // maintenance Vacuum(ctx context.Context) (stats db.VacuumStats, err error) @@ -153,3 +155,6 @@ type SnapshotFn func(ctx context.Context, tx SnapshotScope) error // TransactionFn is the callback lambda used in `Transaction`. type TransactionFn func(ctx context.Context, tx TransactionScope) error + +// RetryClearFn is the rollback callback lambda used in `TransactionWithRetryClearFn`. +type RetryClearFn func(ctx context.Context) diff --git a/ledger/store/trackerdb/testsuite/utils_test.go b/ledger/store/trackerdb/testsuite/utils_test.go index 826402a357..be11fd9ac8 100644 --- a/ledger/store/trackerdb/testsuite/utils_test.go +++ b/ledger/store/trackerdb/testsuite/utils_test.go @@ -228,6 +228,16 @@ func (db *mockDB) TransactionContext(ctx context.Context, fn trackerdb.Transacti return err } +// TransactionWithRetryClearFn implements trackerdb.Store but ignores the RetryClearFn +func (db *mockDB) TransactionWithRetryClearFn(fn trackerdb.TransactionFn, _ trackerdb.RetryClearFn) (err error) { + return db.TransactionContext(context.Background(), fn) +} + +// TransactionContextWithRetryClearFn implements trackerdb.Store but ignores the RetryClearFn +func (db *mockDB) TransactionContextWithRetryClearFn(ctx context.Context, fn trackerdb.TransactionFn, _ trackerdb.RetryClearFn) (err error) { + return db.TransactionContext(ctx, fn) +} + // BeginTransaction implements trackerdb.Store func (db *mockDB) BeginTransaction(ctx 
context.Context) (trackerdb.Transaction, error) { scope := mockTransaction{db, db.proto} diff --git a/ledger/tracker.go b/ledger/tracker.go index 716b8f8cfb..1f7950a1c2 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -110,11 +110,23 @@ type ledgerTracker interface { // by all the prepareCommit calls. The commitRound is being executed within a single transactional // context, and so, if any of the tracker's commitRound calls fails, the transaction is rolled back. commitRound(context.Context, trackerdb.TransactionScope, *deferredCommitContext) error + // postCommit is called only on a successful commitRound. In that case, each of the trackers have // the chance to update it's internal data structures, knowing that the given deferredCommitContext // has completed. An optional context is provided for long-running operations. postCommit(context.Context, *deferredCommitContext) + // close terminates the tracker, reclaiming any resources + // like open database connections or goroutines. close may + // be called even if loadFromDisk() is not called or does + // not succeed. + close() +} + +// trackerCommitLifetimeHandlers defines additional methods some ledgerTrackers +// might implement to manage and clear state on error or success. In practice, +// it is only used by the catchpointtracker. +type trackerCommitLifetimeHandlers interface { // postCommitUnlocked is called only on a successful commitRound. In that case, each of the trackers have // the chance to make changes that aren't state-dependent. // An optional context is provided for long-running operations. @@ -131,11 +143,12 @@ type ledgerTracker interface { // error during the commit phase of commitRound handleCommitError(*deferredCommitContext) - // close terminates the tracker, reclaiming any resources - // like open database connections or goroutines. close may - // be called even if loadFromDisk() is not called or does - // not succeed. 
- close() + // clearCommitRoundRetry is called after a failure is encountered in the transaction that commitRound + // uses. It allows trackers to clear any in-memory state associated with the commitRound work they + // did, since even if the tracker returns no error in commitRound, another tracker might be responsible + // for the rollback. The call to commitRound for the same round range may be retried after + // clearCommitRoundRetry is called. + clearCommitRoundRetry(context.Context, *deferredCommitContext) } // ledgerForTracker defines the part of the ledger that a tracker can @@ -561,7 +574,9 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { if tr.dbRound < dbRound || offset < uint64(tr.dbRound-dbRound) { tr.log.Warnf("out of order deferred commit: offset %d, dbRound %d but current tracker DB round is %d", offset, dbRound, tr.dbRound) for _, lt := range tr.trackers { - lt.handleUnorderedCommit(dcc) + if lt, ok := lt.(trackerCommitLifetimeHandlers); ok { + lt.handleUnorderedCommit(dcc) + } } tr.mu.RUnlock() return nil @@ -596,7 +611,9 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { } if err != nil { for _, lt := range tr.trackers { - lt.handlePrepareCommitError(dcc) + if lt, ok := lt.(trackerCommitLifetimeHandlers); ok { + lt.handlePrepareCommitError(dcc) + } } tr.mu.RUnlock() return err @@ -606,7 +623,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { start := time.Now() ledgerCommitroundCount.Inc(nil) - err = tr.dbs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + err = tr.dbs.TransactionWithRetryClearFn(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { // TransactionFn tr.accountsCommitting.Store(true) defer func() { tr.accountsCommitting.Store(false) @@ -625,13 +642,21 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { } return aw.UpdateAccountsRound(dbRound + basics.Round(offset)) + }, 
func(ctx context.Context) { // RetryClearFn + for _, lt := range tr.trackers { + if lt, ok := lt.(trackerCommitLifetimeHandlers); ok { + lt.clearCommitRoundRetry(ctx, dcc) + } + } }) ledgerCommitroundMicros.AddMicrosecondsSince(start, nil) if err != nil { for _, lt := range tr.trackers { - lt.handleCommitError(dcc) + if lt, ok := lt.(trackerCommitLifetimeHandlers); ok { + lt.handleCommitError(dcc) + } } tr.log.Warnf("unable to advance tracker db snapshot (%d-%d): %v", dbRound, dbRound+basics.Round(offset), err) @@ -653,7 +678,9 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { tr.mu.Unlock() for _, lt := range tr.trackers { - lt.postCommitUnlocked(tr.ctx, dcc) + if lt, ok := lt.(trackerCommitLifetimeHandlers); ok { + lt.postCommitUnlocked(tr.ctx, dcc) + } } tr.log.Debugf("commitRound completed for (%d-%d)", dbRound, dbRound+basics.Round(offset)) diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 9c26223c39..3c7bf51faa 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -188,6 +188,9 @@ func (t *emptyTracker) postCommit(ctx context.Context, dcc *deferredCommitContex func (t *emptyTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { } +func (t *emptyTracker) clearCommitRoundRetry(ctx context.Context, dcc *deferredCommitContext) { +} + // control functions are not used by the emptyTracker func (t *emptyTracker) handleUnorderedCommit(dcc *deferredCommitContext) { } diff --git a/ledger/txtail.go b/ledger/txtail.go index 129fbb3985..92ce068ef3 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -333,16 +333,6 @@ func (t *txTail) postCommit(ctx context.Context, dcc *deferredCommitContext) { } } -func (t *txTail) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { -} - -func (t *txTail) handleUnorderedCommit(dcc *deferredCommitContext) { -} -func (t *txTail) handlePrepareCommitError(dcc *deferredCommitContext) { -} -func (t *txTail) handleCommitError(dcc 
*deferredCommitContext) { -} - func (t *txTail) produceCommittingTask(committedRound basics.Round, dbRound basics.Round, dcr *deferredCommitRange) *deferredCommitRange { return dcr } diff --git a/util/db/dbutil.go b/util/db/dbutil.go index a6e524464d..8b045ad70c 100644 --- a/util/db/dbutil.go +++ b/util/db/dbutil.go @@ -219,6 +219,14 @@ func (db *Accessor) Atomic(fn idemFn, extras ...interface{}) (err error) { // For transactions where readOnly is false, sync determines whether or not to wait for the result. // Like for Atomic, the return error of fn should be a native sqlite3.Error type or an error wrapping it. func (db *Accessor) AtomicContext(ctx context.Context, fn idemFn, extras ...interface{}) (err error) { + + return db.AtomicContextWithRetryClearFn(ctx, fn, nil, extras...) +} + +// AtomicContextWithRetryClearFn is like AtomicContext, but calls retryClearFn if the database +// txn was rolled back, due to error or in between retries. This helps a caller that +// might change in-memory state inside fn. +func (db *Accessor) AtomicContextWithRetryClearFn(ctx context.Context, fn idemFn, retryClearFn func(context.Context), extras ...interface{}) (err error) { atomicDeadline := time.Now().Add(time.Second) // note that the sql library will drop panics inside an active transaction @@ -294,9 +302,12 @@ func (db *Accessor) AtomicContext(ctx context.Context, fn idemFn, extras ...inte if err != nil { tx.Rollback() if dbretry(err) { - continue + if retryClearFn != nil { + retryClearFn(ctx) + } + continue // retry } else { - break + break // exit, returns error } } @@ -305,8 +316,13 @@ func (db *Accessor) AtomicContext(ctx context.Context, fn idemFn, extras ...inte // update the deadline, as it might have been updated. 
atomicDeadline = txContextData.deadline break - } else if !dbretry(err) { - break + } else if dbretry(err) { + if retryClearFn != nil { + retryClearFn(ctx) + } + continue // retry + } else { + break // exit, returns error } } From 548e3f6f7c6421a108b40d0a9df80686ffec5ade Mon Sep 17 00:00:00 2001 From: Giulio Date: Mon, 16 Dec 2024 20:00:05 +0100 Subject: [PATCH 07/15] AVM: new teal opcodes for the MiMC hash function to support Zero Knowledge Proofs (#5978) Co-authored-by: John Jannotti --- cmd/opdoc/opdoc.go | 2 +- data/transactions/logic/README.md | 13 + data/transactions/logic/TEAL_opcodes_v11.md | 1830 +++++++ data/transactions/logic/assembler_test.go | 38 +- data/transactions/logic/crypto.go | 45 + data/transactions/logic/crypto_test.go | 83 +- data/transactions/logic/doc.go | 9 +- data/transactions/logic/evalStateful_test.go | 3 + data/transactions/logic/fields.go | 71 +- data/transactions/logic/fields_string.go | 21 +- data/transactions/logic/langspec_v11.json | 4949 ++++++++++++++++++ data/transactions/logic/opcodes.go | 12 + data/transactions/logic/pairing.go | 28 +- data/transactions/logic/teal.tmLanguage.json | 4 +- 14 files changed, 7069 insertions(+), 39 deletions(-) create mode 100644 data/transactions/logic/TEAL_opcodes_v11.md create mode 100644 data/transactions/logic/langspec_v11.json diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go index a6e77504b5..2090462261 100644 --- a/cmd/opdoc/opdoc.go +++ b/cmd/opdoc/opdoc.go @@ -443,7 +443,7 @@ func create(file string) *os.File { } func main() { - const docVersion = uint64(10) + const docVersion = uint64(11) opGroups := make(map[string][]string, len(logic.OpSpecs)) for grp, names := range logic.OpGroups { diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md index e1c5cfe218..ca5e04bef4 100644 --- a/data/transactions/logic/README.md +++ b/data/transactions/logic/README.md @@ -463,6 +463,8 @@ these results may contain leading zero bytes. 
| `keccak256` | Keccak256 hash of value A, yields [32]byte | | `sha512_256` | SHA512_256 hash of value A, yields [32]byte | | `sha3_256` | SHA3_256 hash of value A, yields [32]byte | +| `sumhash512` | sumhash512 of value A, yields [64]byte | +| `falcon_verify` | for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey | | `ed25519verify` | for (data A, signature B, pubkey C) verify the signature of ("ProgData" \|\| program_hash \|\| data) against the pubkey => {0 or 1} | | `ed25519verify_bare` | for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1} | | `ecdsa_verify v` | for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} | @@ -475,6 +477,7 @@ these results may contain leading zero bytes. | `ec_multi_scalar_mul g` | for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn | | `ec_subgroup_check g` | 1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all. | | `ec_map_to g` | maps field element A to group G | +| `mimc c` | MiMC hash of scalars A, using curve and parameters specified by configuration C | ### Loading Values @@ -631,6 +634,11 @@ Global fields are fields that are common to all the transactions in the group. I | 15 | AssetCreateMinBalance | uint64 | v10 | The additional minimum balance required to create (and opt-in to) an asset. | | 16 | AssetOptInMinBalance | uint64 | v10 | The additional minimum balance required to opt-in to an asset. | | 17 | GenesisHash | [32]byte | v10 | The Genesis Hash for the network. | +| 18 | PayoutsEnabled | bool | v11 | Whether block proposal payouts are enabled. | +| 19 | PayoutsGoOnlineFee | uint64 | v11 | The fee required in a keyreg transaction to make an account incentive eligible. 
| +| 20 | PayoutsPercent | uint64 | v11 | The percentage of transaction fees in a block that can be paid to the block proposer. | +| 21 | PayoutsMinBalance | uint64 | v11 | The minimum algo balance an account must have in the agreement round to receive block payouts in the proposal round. | +| 22 | PayoutsMaxBalance | uint64 | v11 | The maximum algo balance an account can have in the agreement round to receive block payouts in the proposal round. | **Asset Fields** @@ -694,6 +702,9 @@ Account fields used in the `acct_params_get` opcode. | 9 | AcctTotalAssets | uint64 | v8 | The numbers of ASAs held by this account (including ASAs this account created). | | 10 | AcctTotalBoxes | uint64 | v8 | The number of existing boxes created by this account's app. | | 11 | AcctTotalBoxBytes | uint64 | v8 | The total number of bytes used by this account's app's box keys and values. | +| 12 | AcctIncentiveEligible | bool | v11 | Has this account opted into block payouts | +| 13 | AcctLastProposed | uint64 | v11 | The round number of the last block this account proposed. | +| 14 | AcctLastHeartbeat | uint64 | v11 | The round number of the last block this account sent a heartbeat. | ### Flow Control @@ -744,6 +755,8 @@ Account fields used in the `acct_params_get` opcode. | `asset_params_get f` | X is field F from asset A. Y is 1 if A exists, else 0 | | `app_params_get f` | X is field F from app A. Y is 1 if A exists, else 0 | | `acct_params_get f` | X is field F from account A. Y is 1 if A owns positive algos, else 0 | +| `voter_params_get f` | X is field F from online account A as of the balance round: 320 rounds before the current round. Y is 1 if A had positive algos online in the agreement round, else Y is 0 and X is a type specific zero-value | +| `online_stake` | the total online stake in the agreement round | | `log` | write A to log state of the current application | | `block f` | field F of block A. 
Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive) | diff --git a/data/transactions/logic/TEAL_opcodes_v11.md b/data/transactions/logic/TEAL_opcodes_v11.md new file mode 100644 index 0000000000..345ebca43e --- /dev/null +++ b/data/transactions/logic/TEAL_opcodes_v11.md @@ -0,0 +1,1830 @@ +# v11 Opcodes + +Ops have a 'cost' of 1 unless otherwise specified. + + +## err + +- Bytecode: 0x00 +- Stack: ... → _exits_ +- Fail immediately. + +## sha256 + +- Bytecode: 0x01 +- Stack: ..., A: []byte → ..., [32]byte +- SHA256 hash of value A, yields [32]byte +- **Cost**: 35 + +## keccak256 + +- Bytecode: 0x02 +- Stack: ..., A: []byte → ..., [32]byte +- Keccak256 hash of value A, yields [32]byte +- **Cost**: 130 + +## sha512_256 + +- Bytecode: 0x03 +- Stack: ..., A: []byte → ..., [32]byte +- SHA512_256 hash of value A, yields [32]byte +- **Cost**: 45 + +## ed25519verify + +- Bytecode: 0x04 +- Stack: ..., A: []byte, B: [64]byte, C: [32]byte → ..., bool +- for (data A, signature B, pubkey C) verify the signature of ("ProgData" || program_hash || data) against the pubkey => {0 or 1} +- **Cost**: 1900 + +The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack. 
+ +## ecdsa_verify + +- Syntax: `ecdsa_verify V` where V: [ECDSA](#field-group-ecdsa) +- Bytecode: 0x05 {uint8} +- Stack: ..., A: [32]byte, B: [32]byte, C: [32]byte, D: [32]byte, E: [32]byte → ..., bool +- for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} +- **Cost**: Secp256k1=1700; Secp256r1=2500 +- Availability: v5 + +### ECDSA + +Curves + +| Index | Name | In | Notes | +| - | ------ | - | --------- | +| 0 | Secp256k1 | | secp256k1 curve, used in Bitcoin | +| 1 | Secp256r1 | v7 | secp256r1 curve, NIST standard | + + +The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted. + +## ecdsa_pk_decompress + +- Syntax: `ecdsa_pk_decompress V` where V: [ECDSA](#field-group-ecdsa) +- Bytecode: 0x06 {uint8} +- Stack: ..., A: [33]byte → ..., X: [32]byte, Y: [32]byte +- decompress pubkey A into components X, Y +- **Cost**: Secp256k1=650; Secp256r1=2400 +- Availability: v5 + +The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded. + +## ecdsa_pk_recover + +- Syntax: `ecdsa_pk_recover V` where V: [ECDSA](#field-group-ecdsa) +- Bytecode: 0x07 {uint8} +- Stack: ..., A: [32]byte, B: uint64, C: [32]byte, D: [32]byte → ..., X: [32]byte, Y: [32]byte +- for (data A, recovery id B, signature C, D) recover a public key +- **Cost**: 2000 +- Availability: v5 + +S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long. + +## + + +- Bytecode: 0x08 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A plus B. Fail on overflow. 
+ +Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`. + +## - + +- Bytecode: 0x09 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A minus B. Fail if B > A. + +## / + +- Bytecode: 0x0a +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A divided by B (truncated division). Fail if B == 0. + +`divmodw` is available to divide the two-element values produced by `mulw` and `addw`. + +## * + +- Bytecode: 0x0b +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A times B. Fail on overflow. + +Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`. + +## < + +- Bytecode: 0x0c +- Stack: ..., A: uint64, B: uint64 → ..., bool +- A less than B => {0 or 1} + +## > + +- Bytecode: 0x0d +- Stack: ..., A: uint64, B: uint64 → ..., bool +- A greater than B => {0 or 1} + +## <= + +- Bytecode: 0x0e +- Stack: ..., A: uint64, B: uint64 → ..., bool +- A less than or equal to B => {0 or 1} + +## >= + +- Bytecode: 0x0f +- Stack: ..., A: uint64, B: uint64 → ..., bool +- A greater than or equal to B => {0 or 1} + +## && + +- Bytecode: 0x10 +- Stack: ..., A: uint64, B: uint64 → ..., bool +- A is not zero and B is not zero => {0 or 1} + +## || + +- Bytecode: 0x11 +- Stack: ..., A: uint64, B: uint64 → ..., bool +- A is not zero or B is not zero => {0 or 1} + +## == + +- Bytecode: 0x12 +- Stack: ..., A, B → ..., bool +- A is equal to B => {0 or 1} + +## != + +- Bytecode: 0x13 +- Stack: ..., A, B → ..., bool +- A is not equal to B => {0 or 1} + +## ! 
+ +- Bytecode: 0x14 +- Stack: ..., A: uint64 → ..., uint64 +- A == 0 yields 1; else 0 + +## len + +- Bytecode: 0x15 +- Stack: ..., A: []byte → ..., uint64 +- yields length of byte value A + +## itob + +- Bytecode: 0x16 +- Stack: ..., A: uint64 → ..., [8]byte +- converts uint64 A to big-endian byte array, always of length 8 + +## btoi + +- Bytecode: 0x17 +- Stack: ..., A: []byte → ..., uint64 +- converts big-endian byte array A to uint64. Fails if len(A) > 8. Padded by leading 0s if len(A) < 8. + +`btoi` fails if the input is longer than 8 bytes. + +## % + +- Bytecode: 0x18 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A modulo B. Fail if B == 0. + +## | + +- Bytecode: 0x19 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A bitwise-or B + +## & + +- Bytecode: 0x1a +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A bitwise-and B + +## ^ + +- Bytecode: 0x1b +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A bitwise-xor B + +## ~ + +- Bytecode: 0x1c +- Stack: ..., A: uint64 → ..., uint64 +- bitwise invert value A + +## mulw + +- Bytecode: 0x1d +- Stack: ..., A: uint64, B: uint64 → ..., X: uint64, Y: uint64 +- A times B as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low + +## addw + +- Bytecode: 0x1e +- Stack: ..., A: uint64, B: uint64 → ..., X: uint64, Y: uint64 +- A plus B as a 128-bit result. X is the carry-bit, Y is the low-order 64 bits. +- Availability: v2 + +## divmodw + +- Bytecode: 0x1f +- Stack: ..., A: uint64, B: uint64, C: uint64, D: uint64 → ..., W: uint64, X: uint64, Y: uint64, Z: uint64 +- W,X = (A,B / C,D); Y,Z = (A,B modulo C,D) +- **Cost**: 20 +- Availability: v4 + +The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low. + +## intcblock + +- Syntax: `intcblock UINT ...` where UINT ...: a block of int constant values +- Bytecode: 0x20 {varuint count, [varuint ...]} +- Stack: ... → ... 
+- prepare block of uint64 constants for use by intc + +`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script. + +## intc + +- Syntax: `intc I` where I: an index in the intcblock +- Bytecode: 0x21 {uint8} +- Stack: ... → ..., uint64 +- Ith constant from intcblock + +## intc_0 + +- Bytecode: 0x22 +- Stack: ... → ..., uint64 +- constant 0 from intcblock + +## intc_1 + +- Bytecode: 0x23 +- Stack: ... → ..., uint64 +- constant 1 from intcblock + +## intc_2 + +- Bytecode: 0x24 +- Stack: ... → ..., uint64 +- constant 2 from intcblock + +## intc_3 + +- Bytecode: 0x25 +- Stack: ... → ..., uint64 +- constant 3 from intcblock + +## bytecblock + +- Syntax: `bytecblock BYTES ...` where BYTES ...: a block of byte constant values +- Bytecode: 0x26 {varuint count, [varuint length, bytes ...]} +- Stack: ... → ... +- prepare block of byte-array constants for use by bytec + +`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script. + +## bytec + +- Syntax: `bytec I` where I: an index in the bytecblock +- Bytecode: 0x27 {uint8} +- Stack: ... → ..., []byte +- Ith constant from bytecblock + +## bytec_0 + +- Bytecode: 0x28 +- Stack: ... → ..., []byte +- constant 0 from bytecblock + +## bytec_1 + +- Bytecode: 0x29 +- Stack: ... → ..., []byte +- constant 1 from bytecblock + +## bytec_2 + +- Bytecode: 0x2a +- Stack: ... → ..., []byte +- constant 2 from bytecblock + +## bytec_3 + +- Bytecode: 0x2b +- Stack: ... 
→ ..., []byte +- constant 3 from bytecblock + +## arg + +- Syntax: `arg N` where N: an arg index +- Bytecode: 0x2c {uint8} +- Stack: ... → ..., []byte +- Nth LogicSig argument +- Mode: Signature + +## arg_0 + +- Bytecode: 0x2d +- Stack: ... → ..., []byte +- LogicSig argument 0 +- Mode: Signature + +## arg_1 + +- Bytecode: 0x2e +- Stack: ... → ..., []byte +- LogicSig argument 1 +- Mode: Signature + +## arg_2 + +- Bytecode: 0x2f +- Stack: ... → ..., []byte +- LogicSig argument 2 +- Mode: Signature + +## arg_3 + +- Bytecode: 0x30 +- Stack: ... → ..., []byte +- LogicSig argument 3 +- Mode: Signature + +## txn + +- Syntax: `txn F` where F: [txn](#field-group-txn) +- Bytecode: 0x31 {uint8} +- Stack: ... → ..., any +- field F of current transaction + +### txn + +Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)) + +| Index | Name | Type | In | Notes | +| - | ------ | -- | - | --------- | +| 0 | Sender | address | | 32 byte address | +| 1 | Fee | uint64 | | microalgos | +| 2 | FirstValid | uint64 | | round number | +| 3 | FirstValidTime | uint64 | v7 | UNIX timestamp of block before txn.FirstValid. Fails if negative | +| 4 | LastValid | uint64 | | round number | +| 5 | Note | []byte | | Any data up to 1024 bytes | +| 6 | Lease | [32]byte | | 32 byte lease value | +| 7 | Receiver | address | | 32 byte address | +| 8 | Amount | uint64 | | microalgos | +| 9 | CloseRemainderTo | address | | 32 byte address | +| 10 | VotePK | [32]byte | | 32 byte address | +| 11 | SelectionPK | [32]byte | | 32 byte address | +| 12 | VoteFirst | uint64 | | The first round that the participation key is valid. | +| 13 | VoteLast | uint64 | | The last round that the participation key is valid. 
| +| 14 | VoteKeyDilution | uint64 | | Dilution for the 2-level participation key | +| 15 | Type | []byte | | Transaction type as bytes | +| 16 | TypeEnum | uint64 | | Transaction type as integer | +| 17 | XferAsset | uint64 | | Asset ID | +| 18 | AssetAmount | uint64 | | value in Asset's units | +| 19 | AssetSender | address | | 32 byte address. Source of assets if Sender is the Asset's Clawback address. | +| 20 | AssetReceiver | address | | 32 byte address | +| 21 | AssetCloseTo | address | | 32 byte address | +| 22 | GroupIndex | uint64 | | Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1 | +| 23 | TxID | [32]byte | | The computed ID for this transaction. 32 bytes. | +| 24 | ApplicationID | uint64 | v2 | ApplicationID from ApplicationCall transaction | +| 25 | OnCompletion | uint64 | v2 | ApplicationCall transaction on completion action | +| 27 | NumAppArgs | uint64 | v2 | Number of ApplicationArgs | +| 29 | NumAccounts | uint64 | v2 | Number of Accounts | +| 30 | ApprovalProgram | []byte | v2 | Approval program | +| 31 | ClearStateProgram | []byte | v2 | Clear state program | +| 32 | RekeyTo | address | v2 | 32 byte Sender's new AuthAddr | +| 33 | ConfigAsset | uint64 | v2 | Asset ID in asset config transaction | +| 34 | ConfigAssetTotal | uint64 | v2 | Total number of units of this asset created | +| 35 | ConfigAssetDecimals | uint64 | v2 | Number of digits to display after the decimal place when displaying the asset | +| 36 | ConfigAssetDefaultFrozen | bool | v2 | Whether the asset's slots are frozen by default or not, 0 or 1 | +| 37 | ConfigAssetUnitName | []byte | v2 | Unit name of the asset | +| 38 | ConfigAssetName | []byte | v2 | The asset name | +| 39 | ConfigAssetURL | []byte | v2 | URL | +| 40 | ConfigAssetMetadataHash | [32]byte | v2 | 32 byte commitment to unspecified asset metadata | +| 41 | ConfigAssetManager | address | v2 | 32 byte address | +| 42 | 
ConfigAssetReserve | address | v2 | 32 byte address | +| 43 | ConfigAssetFreeze | address | v2 | 32 byte address | +| 44 | ConfigAssetClawback | address | v2 | 32 byte address | +| 45 | FreezeAsset | uint64 | v2 | Asset ID being frozen or un-frozen | +| 46 | FreezeAssetAccount | address | v2 | 32 byte address of the account whose asset slot is being frozen or un-frozen | +| 47 | FreezeAssetFrozen | bool | v2 | The new frozen value, 0 or 1 | +| 49 | NumAssets | uint64 | v3 | Number of Assets | +| 51 | NumApplications | uint64 | v3 | Number of Applications | +| 52 | GlobalNumUint | uint64 | v3 | Number of global state integers in ApplicationCall | +| 53 | GlobalNumByteSlice | uint64 | v3 | Number of global state byteslices in ApplicationCall | +| 54 | LocalNumUint | uint64 | v3 | Number of local state integers in ApplicationCall | +| 55 | LocalNumByteSlice | uint64 | v3 | Number of local state byteslices in ApplicationCall | +| 56 | ExtraProgramPages | uint64 | v4 | Number of additional pages for each of the application's approval and clear state programs. An ExtraProgramPages of 1 means 2048 more total bytes, or 1024 for each program. | +| 57 | Nonparticipation | bool | v5 | Marks an account nonparticipating for rewards | +| 59 | NumLogs | uint64 | v5 | Number of Logs (only with `itxn` in v5). Application mode only | +| 60 | CreatedAssetID | uint64 | v5 | Asset ID allocated by the creation of an ASA (only with `itxn` in v5). Application mode only | +| 61 | CreatedApplicationID | uint64 | v5 | ApplicationID allocated by the creation of an application (only with `itxn` in v5). Application mode only | +| 62 | LastLog | []byte | v6 | The last message emitted. Empty bytes if none were emitted. 
Application mode only | +| 63 | StateProofPK | []byte | v6 | 64 byte state proof public key | +| 65 | NumApprovalProgramPages | uint64 | v7 | Number of Approval Program pages | +| 67 | NumClearStateProgramPages | uint64 | v7 | Number of ClearState Program pages | + + +## global + +- Syntax: `global F` where F: [global](#field-group-global) +- Bytecode: 0x32 {uint8} +- Stack: ... → ..., any +- global field F + +### global + +Fields + +| Index | Name | Type | In | Notes | +| - | ------ | -- | - | --------- | +| 0 | MinTxnFee | uint64 | | microalgos | +| 1 | MinBalance | uint64 | | microalgos | +| 2 | MaxTxnLife | uint64 | | rounds | +| 3 | ZeroAddress | address | | 32 byte address of all zero bytes | +| 4 | GroupSize | uint64 | | Number of transactions in this atomic transaction group. At least 1 | +| 5 | LogicSigVersion | uint64 | v2 | Maximum supported version | +| 6 | Round | uint64 | v2 | Current round number. Application mode only. | +| 7 | LatestTimestamp | uint64 | v2 | Last confirmed block UNIX timestamp. Fails if negative. Application mode only. | +| 8 | CurrentApplicationID | uint64 | v2 | ID of current application executing. Application mode only. | +| 9 | CreatorAddress | address | v3 | Address of the creator of the current application. Application mode only. | +| 10 | CurrentApplicationAddress | address | v5 | Address that the current application controls. Application mode only. | +| 11 | GroupID | [32]byte | v5 | ID of the transaction group. 32 zero bytes if the transaction is not part of a group. | +| 12 | OpcodeBudget | uint64 | v6 | The remaining cost that can be spent by opcodes in this program. | +| 13 | CallerApplicationID | uint64 | v6 | The application ID of the application that called this application. 0 if this application is at the top-level. Application mode only. | +| 14 | CallerApplicationAddress | address | v6 | The application address of the application that called this application. ZeroAddress if this application is at the top-level. 
Application mode only. | +| 15 | AssetCreateMinBalance | uint64 | v10 | The additional minimum balance required to create (and opt-in to) an asset. | +| 16 | AssetOptInMinBalance | uint64 | v10 | The additional minimum balance required to opt-in to an asset. | +| 17 | GenesisHash | [32]byte | v10 | The Genesis Hash for the network. | +| 18 | PayoutsEnabled | bool | v11 | Whether block proposal payouts are enabled. | +| 19 | PayoutsGoOnlineFee | uint64 | v11 | The fee required in a keyreg transaction to make an account incentive eligible. | +| 20 | PayoutsPercent | uint64 | v11 | The percentage of transaction fees in a block that can be paid to the block proposer. | +| 21 | PayoutsMinBalance | uint64 | v11 | The minimum algo balance an account must have in the agreement round to receive block payouts in the proposal round. | +| 22 | PayoutsMaxBalance | uint64 | v11 | The maximum algo balance an account can have in the agreement round to receive block payouts in the proposal round. | + + +## gtxn + +- Syntax: `gtxn T F` where T: transaction group index, F: [txn](#field-group-txn) +- Bytecode: 0x33 {uint8}, {uint8} +- Stack: ... → ..., any +- field F of the Tth transaction in the current group + +for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`. + +## load + +- Syntax: `load I` where I: position in scratch space to load from +- Bytecode: 0x34 {uint8} +- Stack: ... → ..., any +- Ith scratch space value. All scratch spaces are 0 at program start. + +## store + +- Syntax: `store I` where I: position in scratch space to store to +- Bytecode: 0x35 {uint8} +- Stack: ..., A → ... +- store A to the Ith scratch space + +## txna + +- Syntax: `txna F I` where F: [txna](#field-group-txna), I: transaction field array index +- Bytecode: 0x36 {uint8}, {uint8} +- Stack: ... → ..., any +- Ith value of the array field F of the current transaction
`txna` can be called using `txn` with 2 immediates. +- Availability: v2 + +### txna + +Fields (see [transaction reference](https://developer.algorand.org/docs/reference/transactions/)) + +| Index | Name | Type | In | Notes | +| - | ------ | -- | - | --------- | +| 26 | ApplicationArgs | []byte | v2 | Arguments passed to the application in the ApplicationCall transaction | +| 28 | Accounts | address | v2 | Accounts listed in the ApplicationCall transaction | +| 48 | Assets | uint64 | v3 | Foreign Assets listed in the ApplicationCall transaction | +| 50 | Applications | uint64 | v3 | Foreign Apps listed in the ApplicationCall transaction | +| 58 | Logs | []byte | v5 | Log messages emitted by an application call (only with `itxn` in v5). Application mode only | +| 64 | ApprovalProgramPages | []byte | v7 | Approval Program as an array of pages | +| 66 | ClearStateProgramPages | []byte | v7 | ClearState Program as an array of pages | + + +## gtxna + +- Syntax: `gtxna T F I` where T: transaction group index, F: [txna](#field-group-txna), I: transaction field array index +- Bytecode: 0x37 {uint8}, {uint8}, {uint8} +- Stack: ... → ..., any +- Ith value of the array field F from the Tth transaction in the current group
`gtxna` can be called using `gtxn` with 3 immediates. +- Availability: v2 + +## gtxns + +- Syntax: `gtxns F` where F: [txn](#field-group-txn) +- Bytecode: 0x38 {uint8} +- Stack: ..., A: uint64 → ..., any +- field F of the Ath transaction in the current group +- Availability: v3 + +for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction. + +## gtxnsa + +- Syntax: `gtxnsa F I` where F: [txna](#field-group-txna), I: transaction field array index +- Bytecode: 0x39 {uint8}, {uint8} +- Stack: ..., A: uint64 → ..., any +- Ith value of the array field F from the Ath transaction in the current group
`gtxnsa` can be called using `gtxns` with 2 immediates. +- Availability: v3 + +## gload + +- Syntax: `gload T I` where T: transaction group index, I: position in scratch space to load from +- Bytecode: 0x3a {uint8}, {uint8} +- Stack: ... → ..., any +- Ith scratch space value of the Tth transaction in the current group +- Availability: v4 +- Mode: Application + +`gload` fails unless the requested transaction is an ApplicationCall and T < GroupIndex. + +## gloads + +- Syntax: `gloads I` where I: position in scratch space to load from +- Bytecode: 0x3b {uint8} +- Stack: ..., A: uint64 → ..., any +- Ith scratch space value of the Ath transaction in the current group +- Availability: v4 +- Mode: Application + +`gloads` fails unless the requested transaction is an ApplicationCall and A < GroupIndex. + +## gaid + +- Syntax: `gaid T` where T: transaction group index +- Bytecode: 0x3c {uint8} +- Stack: ... → ..., uint64 +- ID of the asset or application created in the Tth transaction of the current group +- Availability: v4 +- Mode: Application + +`gaid` fails unless the requested transaction created an asset or application and T < GroupIndex. + +## gaids + +- Bytecode: 0x3d +- Stack: ..., A: uint64 → ..., uint64 +- ID of the asset or application created in the Ath transaction of the current group +- Availability: v4 +- Mode: Application + +`gaids` fails unless the requested transaction created an asset or application and A < GroupIndex. + +## loads + +- Bytecode: 0x3e +- Stack: ..., A: uint64 → ..., any +- Ath scratch space value. All scratch spaces are 0 at program start. +- Availability: v5 + +## stores + +- Bytecode: 0x3f +- Stack: ..., A: uint64, B → ... +- store B to the Ath scratch space +- Availability: v5 + +## bnz + +- Syntax: `bnz TARGET` where TARGET: branch offset +- Bytecode: 0x40 {int16 (big-endian)} +- Stack: ..., A: uint64 → ... 
+- branch to TARGET if value A is not zero + +The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff. + +At v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.) + +## bz + +- Syntax: `bz TARGET` where TARGET: branch offset +- Bytecode: 0x41 {int16 (big-endian)} +- Stack: ..., A: uint64 → ... +- branch to TARGET if value A is zero +- Availability: v2 + +See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`. + +## b + +- Syntax: `b TARGET` where TARGET: branch offset +- Bytecode: 0x42 {int16 (big-endian)} +- Stack: ... → ... +- branch unconditionally to TARGET +- Availability: v2 + +See `bnz` for details on how branches work. `b` always jumps to the offset. + +## return + +- Bytecode: 0x43 +- Stack: ..., A: uint64 → _exits_ +- use A as success value; end +- Availability: v2 + +## assert + +- Bytecode: 0x44 +- Stack: ..., A: uint64 → ... 
+- immediately fail unless A is a non-zero number +- Availability: v3 + +## bury + +- Syntax: `bury N` where N: depth +- Bytecode: 0x45 {uint8} +- Stack: ..., A → ... +- replace the Nth value from the top of the stack with A. bury 0 fails. +- Availability: v8 + +## popn + +- Syntax: `popn N` where N: stack depth +- Bytecode: 0x46 {uint8} +- Stack: ..., [N items] → ... +- remove N values from the top of the stack +- Availability: v8 + +## dupn + +- Syntax: `dupn N` where N: copy count +- Bytecode: 0x47 {uint8} +- Stack: ..., A → ..., A, [N copies of A] +- duplicate A, N times +- Availability: v8 + +## pop + +- Bytecode: 0x48 +- Stack: ..., A → ... +- discard A + +## dup + +- Bytecode: 0x49 +- Stack: ..., A → ..., A, A +- duplicate A + +## dup2 + +- Bytecode: 0x4a +- Stack: ..., A, B → ..., A, B, A, B +- duplicate A and B +- Availability: v2 + +## dig + +- Syntax: `dig N` where N: depth +- Bytecode: 0x4b {uint8} +- Stack: ..., A, [N items] → ..., A, [N items], A +- Nth value from the top of the stack. dig 0 is equivalent to dup +- Availability: v3 + +## swap + +- Bytecode: 0x4c +- Stack: ..., A, B → ..., B, A +- swaps A and B on stack +- Availability: v3 + +## select + +- Bytecode: 0x4d +- Stack: ..., A, B, C: uint64 → ..., A or B +- selects one of two values based on top-of-stack: B if C != 0, else A +- Availability: v3 + +## cover + +- Syntax: `cover N` where N: depth +- Bytecode: 0x4e {uint8} +- Stack: ..., [N items], A → ..., A, [N items] +- remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth <= N. +- Availability: v5 + +## uncover + +- Syntax: `uncover N` where N: depth +- Bytecode: 0x4f {uint8} +- Stack: ..., A, [N items] → ..., [N items], A +- remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. Fails if stack depth <= N. 
+- Availability: v5 + +## concat + +- Bytecode: 0x50 +- Stack: ..., A: []byte, B: []byte → ..., []byte +- join A and B +- Availability: v2 + +`concat` fails if the result would be greater than 4096 bytes. + +## substring + +- Syntax: `substring S E` where S: start position, E: end position +- Bytecode: 0x51 {uint8}, {uint8} +- Stack: ..., A: []byte → ..., []byte +- A range of bytes from A starting at S up to but not including E. If E < S, or either is larger than the array length, the program fails +- Availability: v2 + +## substring3 + +- Bytecode: 0x52 +- Stack: ..., A: []byte, B: uint64, C: uint64 → ..., []byte +- A range of bytes from A starting at B up to but not including C. If C < B, or either is larger than the array length, the program fails +- Availability: v2 + +## getbit + +- Bytecode: 0x53 +- Stack: ..., A, B: uint64 → ..., uint64 +- Bth bit of (byte-array or integer) A. If B is greater than or equal to the bit length of the value (8*byte length), the program fails +- Availability: v3 + +see explanation of bit ordering in setbit + +## setbit + +- Bytecode: 0x54 +- Stack: ..., A, B: uint64, C: uint64 → ..., any +- Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails +- Availability: v3 + +When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10. + +## getbyte + +- Bytecode: 0x55 +- Stack: ..., A: []byte, B: uint64 → ..., uint64 +- Bth byte of A, as an integer. 
If B is greater than or equal to the array length, the program fails +- Availability: v3 + +## setbyte + +- Bytecode: 0x56 +- Stack: ..., A: []byte, B: uint64, C: uint64 → ..., []byte +- Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails +- Availability: v3 + +## extract + +- Syntax: `extract S L` where S: start position, L: length +- Bytecode: 0x57 {uint8}, {uint8} +- Stack: ..., A: []byte → ..., []byte +- A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. If S or S+L is larger than the array length, the program fails +- Availability: v5 + +## extract3 + +- Bytecode: 0x58 +- Stack: ..., A: []byte, B: uint64, C: uint64 → ..., []byte +- A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails
`extract3` can be called using `extract` with no immediates. +- Availability: v5 + +## extract_uint16 + +- Bytecode: 0x59 +- Stack: ..., A: []byte, B: uint64 → ..., uint64 +- A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails +- Availability: v5 + +## extract_uint32 + +- Bytecode: 0x5a +- Stack: ..., A: []byte, B: uint64 → ..., uint64 +- A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails +- Availability: v5 + +## extract_uint64 + +- Bytecode: 0x5b +- Stack: ..., A: []byte, B: uint64 → ..., uint64 +- A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. If B+8 is larger than the array length, the program fails +- Availability: v5 + +## replace2 + +- Syntax: `replace2 S` where S: start position +- Bytecode: 0x5c {uint8} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)
`replace2` can be called using `replace` with 1 immediate. +- Availability: v7 + +## replace3 + +- Bytecode: 0x5d +- Stack: ..., A: []byte, B: uint64, C: []byte → ..., []byte +- Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)
`replace3` can be called using `replace` with no immediates. +- Availability: v7 + +## base64_decode + +- Syntax: `base64_decode E` where E: [base64](#field-group-base64) +- Bytecode: 0x5e {uint8} +- Stack: ..., A: []byte → ..., []byte +- decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E +- **Cost**: 1 + 1 per 16 bytes of A +- Availability: v7 + +### base64 + +Encodings + +| Index | Name | Notes | +| - | ------ | --------- | +| 0 | URLEncoding | | +| 1 | StdEncoding | | + + +*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings. This opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings. + + Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\n` and `\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\r`, or `\n`. 
Almost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004)). This opcode should only be used in cases where JSON is the only available option, e.g. when a third-party only signs JSON.
+ +## app_local_get + +- Bytecode: 0x62 +- Stack: ..., A, B: stateKey → ..., any +- local state of the key B in the current application in account A +- Availability: v2 +- Mode: Application + +params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist. + +## app_local_get_ex + +- Bytecode: 0x63 +- Stack: ..., A, B: uint64, C: stateKey → ..., X: any, Y: bool +- X is the local state of application B, key C in account A. Y is 1 if key existed, else 0 +- Availability: v2 +- Mode: Application + +params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist. + +## app_global_get + +- Bytecode: 0x64 +- Stack: ..., A: stateKey → ..., any +- global state of the key A in the current application +- Availability: v2 +- Mode: Application + +params: state key. Return: value. The value is zero (of type uint64) if the key does not exist. + +## app_global_get_ex + +- Bytecode: 0x65 +- Stack: ..., A: uint64, B: stateKey → ..., X: any, Y: bool +- X is the global state of application A, key B. Y is 1 if key existed, else 0 +- Availability: v2 +- Mode: Application + +params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist. + +## app_local_put + +- Bytecode: 0x66 +- Stack: ..., A, B: stateKey, C → ... +- write C to key B in account A's local state of the current application +- Availability: v2 +- Mode: Application + +params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value. 
+ +## app_global_put + +- Bytecode: 0x67 +- Stack: ..., A: stateKey, B → ... +- write B to key A in the global state of the current application +- Availability: v2 +- Mode: Application + +## app_local_del + +- Bytecode: 0x68 +- Stack: ..., A, B: stateKey → ... +- delete key B from account A's local state of the current application +- Availability: v2 +- Mode: Application + +params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. + +Deleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.) + +## app_global_del + +- Bytecode: 0x69 +- Stack: ..., A: stateKey → ... +- delete key A from the global state of the current application +- Availability: v2 +- Mode: Application + +params: state key. + +Deleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.) + +## asset_holding_get + +- Syntax: `asset_holding_get F` where F: [asset_holding](#field-group-asset_holding) +- Bytecode: 0x70 {uint8} +- Stack: ..., A, B: uint64 → ..., X: any, Y: bool +- X is field F from account A's holding of asset B. Y is 1 if A is opted into B, else 0 +- Availability: v2 +- Mode: Application + +### asset_holding + +Fields + +| Index | Name | Type | Notes | +| - | ------ | -- | --------- | +| 0 | AssetBalance | uint64 | Amount of the asset unit held by this account | +| 1 | AssetFrozen | bool | Is the asset frozen or not | + + +params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). Return: did_exist flag (1 if the asset existed and 0 otherwise), value. + +## asset_params_get + +- Syntax: `asset_params_get F` where F: [asset_params](#field-group-asset_params) +- Bytecode: 0x71 {uint8} +- Stack: ..., A: uint64 → ..., X: any, Y: bool +- X is field F from asset A. 
params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.
Y is 1 if A exists, else 0 +- Availability: v5 +- Mode: Application + +### app_params + +Fields + +| Index | Name | Type | Notes | +| - | ------ | -- | --------- | +| 0 | AppApprovalProgram | []byte | Bytecode of Approval Program | +| 1 | AppClearStateProgram | []byte | Bytecode of Clear State Program | +| 2 | AppGlobalNumUint | uint64 | Number of uint64 values allowed in Global State | +| 3 | AppGlobalNumByteSlice | uint64 | Number of byte array values allowed in Global State | +| 4 | AppLocalNumUint | uint64 | Number of uint64 values allowed in Local State | +| 5 | AppLocalNumByteSlice | uint64 | Number of byte array values allowed in Local State | +| 6 | AppExtraProgramPages | uint64 | Number of Extra Program Pages of code space | +| 7 | AppCreator | address | Creator address | +| 8 | AppAddress | address | Address for which this application has authority | + + +params: Txn.ForeignApps offset or an _available_ app id. Return: did_exist flag (1 if the application existed and 0 otherwise), value. + +## acct_params_get + +- Syntax: `acct_params_get F` where F: [acct_params](#field-group-acct_params) +- Bytecode: 0x73 {uint8} +- Stack: ..., A → ..., X: any, Y: bool +- X is field F from account A. Y is 1 if A owns positive algos, else 0 +- Availability: v6 +- Mode: Application + +### acct_params + +Fields + +| Index | Name | Type | In | Notes | +| - | ------ | -- | - | --------- | +| 0 | AcctBalance | uint64 | | Account balance in microalgos | +| 1 | AcctMinBalance | uint64 | | Minimum required balance for account, in microalgos | +| 2 | AcctAuthAddr | address | | Address the account is rekeyed to. | +| 3 | AcctTotalNumUint | uint64 | v8 | The total number of uint64 values allocated by this account in Global and Local States. | +| 4 | AcctTotalNumByteSlice | uint64 | v8 | The total number of byte array values allocated by this account in Global and Local States. | +| 5 | AcctTotalExtraAppPages | uint64 | v8 | The number of extra app code pages used by this account. 
+| 9 | AcctTotalAssets | uint64 | v8 | The number of ASAs held by this account (including ASAs this account created). |
When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change. +- Availability: v3 +- Mode: Application + +params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value. + +## pushbytes + +- Syntax: `pushbytes BYTES` where BYTES: a byte constant +- Bytecode: 0x80 {varuint length, bytes} +- Stack: ... → ..., []byte +- immediate BYTES +- Availability: v3 + +pushbytes args are not added to the bytecblock during assembly processes + +## pushint + +- Syntax: `pushint UINT` where UINT: an int constant +- Bytecode: 0x81 {varuint} +- Stack: ... → ..., uint64 +- immediate UINT +- Availability: v3 + +pushint args are not added to the intcblock during assembly processes + +## pushbytess + +- Syntax: `pushbytess BYTES ...` where BYTES ...: a list of byte constants +- Bytecode: 0x82 {varuint count, [varuint length, bytes ...]} +- Stack: ... → ..., [N items] +- push sequences of immediate byte arrays to stack (first byte array being deepest) +- Availability: v8 + +pushbytess args are not added to the bytecblock during assembly processes + +## pushints + +- Syntax: `pushints UINT ...` where UINT ...: a list of int constants +- Bytecode: 0x83 {varuint count, [varuint ...]} +- Stack: ... 
→ ..., [N items] +- push sequence of immediate uints to stack in the order they appear (first uint being deepest) +- Availability: v8 + +pushints args are not added to the intcblock during assembly processes + +## ed25519verify_bare + +- Bytecode: 0x84 +- Stack: ..., A: []byte, B: [64]byte, C: [32]byte → ..., bool +- for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1} +- **Cost**: 1900 +- Availability: v7 + +## falcon_verify + +- Bytecode: 0x85 +- Stack: ..., A: []byte, B: [1232]byte, C: [1793]byte → ..., bool +- for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey +- **Cost**: 1700 +- Availability: v11 + +## sumhash512 + +- Bytecode: 0x86 +- Stack: ..., A: []byte → ..., [64]byte +- sumhash512 of value A, yields [64]byte +- **Cost**: 150 + 7 per 4 bytes of A +- Availability: v11 + +## callsub + +- Syntax: `callsub TARGET` where TARGET: branch offset +- Bytecode: 0x88 {int16 (big-endian)} +- Stack: ... → ... +- branch unconditionally to TARGET, saving the next instruction on the call stack +- Availability: v4 + +The call stack is separate from the data stack. Only `callsub`, `retsub`, and `proto` manipulate it. + +## retsub + +- Bytecode: 0x89 +- Stack: ... → ... +- pop the top instruction from the call stack and branch to it +- Availability: v4 + +If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values. + +## proto + +- Syntax: `proto A R` where A: number of arguments, R: number of return values +- Bytecode: 0x8a {uint8}, {uint8} +- Stack: ... → ... +- Prepare top call frame for a retsub that will assume A args and R return values. +- Availability: v8 + +Fails unless the last instruction executed was a `callsub`. + +## frame_dig + +- Syntax: `frame_dig I` where I: frame slot +- Bytecode: 0x8b {int8} +- Stack: ... 
→ ..., any +- Nth (signed) value from the frame pointer. +- Availability: v8 + +## frame_bury + +- Syntax: `frame_bury I` where I: frame slot +- Bytecode: 0x8c {int8} +- Stack: ..., A → ... +- replace the Nth (signed) value from the frame pointer in the stack with A +- Availability: v8 + +## switch + +- Syntax: `switch TARGET ...` where TARGET ...: list of labels +- Bytecode: 0x8d {varuint count, [int16 (big-endian) ...]} +- Stack: ..., A: uint64 → ... +- branch to the Ath label. Continue at following instruction if index A exceeds the number of labels. +- Availability: v8 + +## match + +- Syntax: `match TARGET ...` where TARGET ...: list of labels +- Bytecode: 0x8e {varuint count, [int16 (big-endian) ...]} +- Stack: ..., [A1, A2, ..., AN], B → ... +- given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found. +- Availability: v8 + +`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction. + +## shl + +- Bytecode: 0x90 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A times 2^B, modulo 2^64 +- Availability: v4 + +## shr + +- Bytecode: 0x91 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A divided by 2^B +- Availability: v4 + +## sqrt + +- Bytecode: 0x92 +- Stack: ..., A: uint64 → ..., uint64 +- The largest integer I such that I^2 <= A +- **Cost**: 4 +- Availability: v4 + +## bitlen + +- Bytecode: 0x93 +- Stack: ..., A → ..., uint64 +- The highest set bit in A. If A is a byte-array, it is interpreted as a big-endian unsigned integer. 
bitlen of 0 is 0, bitlen of 8 is 4 +- Availability: v4 + +bitlen interprets arrays as big-endian integers, unlike setbit/getbit + +## exp + +- Bytecode: 0x94 +- Stack: ..., A: uint64, B: uint64 → ..., uint64 +- A raised to the Bth power. Fail if A == B == 0 and on overflow +- Availability: v4 + +## expw + +- Bytecode: 0x95 +- Stack: ..., A: uint64, B: uint64 → ..., X: uint64, Y: uint64 +- A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. Fail if A == B == 0 or if the results exceeds 2^128-1 +- **Cost**: 10 +- Availability: v4 + +## bsqrt + +- Bytecode: 0x96 +- Stack: ..., A: bigint → ..., bigint +- The largest integer I such that I^2 <= A. A and I are interpreted as big-endian unsigned integers +- **Cost**: 40 +- Availability: v6 + +## divw + +- Bytecode: 0x97 +- Stack: ..., A: uint64, B: uint64, C: uint64 → ..., uint64 +- A,B / C. Fail if C == 0 or if result overflows. +- Availability: v6 + +The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low. + +## sha3_256 + +- Bytecode: 0x98 +- Stack: ..., A: []byte → ..., [32]byte +- SHA3_256 hash of value A, yields [32]byte +- **Cost**: 130 +- Availability: v7 + +## b+ + +- Bytecode: 0xa0 +- Stack: ..., A: bigint, B: bigint → ..., []byte +- A plus B. A and B are interpreted as big-endian unsigned integers +- **Cost**: 10 +- Availability: v4 + +## b- + +- Bytecode: 0xa1 +- Stack: ..., A: bigint, B: bigint → ..., bigint +- A minus B. A and B are interpreted as big-endian unsigned integers. Fail on underflow. +- **Cost**: 10 +- Availability: v4 + +## b/ + +- Bytecode: 0xa2 +- Stack: ..., A: bigint, B: bigint → ..., bigint +- A divided by B (truncated division). A and B are interpreted as big-endian unsigned integers. Fail if B is zero. +- **Cost**: 20 +- Availability: v4 + +## b* + +- Bytecode: 0xa3 +- Stack: ..., A: bigint, B: bigint → ..., []byte +- A times B. A and B are interpreted as big-endian unsigned integers. 
+- **Cost**: 20 +- Availability: v4 + +## b< + +- Bytecode: 0xa4 +- Stack: ..., A: bigint, B: bigint → ..., bool +- 1 if A is less than B, else 0. A and B are interpreted as big-endian unsigned integers +- Availability: v4 + +## b> + +- Bytecode: 0xa5 +- Stack: ..., A: bigint, B: bigint → ..., bool +- 1 if A is greater than B, else 0. A and B are interpreted as big-endian unsigned integers +- Availability: v4 + +## b<= + +- Bytecode: 0xa6 +- Stack: ..., A: bigint, B: bigint → ..., bool +- 1 if A is less than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers +- Availability: v4 + +## b>= + +- Bytecode: 0xa7 +- Stack: ..., A: bigint, B: bigint → ..., bool +- 1 if A is greater than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers +- Availability: v4 + +## b== + +- Bytecode: 0xa8 +- Stack: ..., A: bigint, B: bigint → ..., bool +- 1 if A is equal to B, else 0. A and B are interpreted as big-endian unsigned integers +- Availability: v4 + +## b!= + +- Bytecode: 0xa9 +- Stack: ..., A: bigint, B: bigint → ..., bool +- 0 if A is equal to B, else 1. A and B are interpreted as big-endian unsigned integers +- Availability: v4 + +## b% + +- Bytecode: 0xaa +- Stack: ..., A: bigint, B: bigint → ..., bigint +- A modulo B. A and B are interpreted as big-endian unsigned integers. Fail if B is zero. +- **Cost**: 20 +- Availability: v4 + +## b| + +- Bytecode: 0xab +- Stack: ..., A: []byte, B: []byte → ..., []byte +- A bitwise-or B. A and B are zero-left extended to the greater of their lengths +- **Cost**: 6 +- Availability: v4 + +## b& + +- Bytecode: 0xac +- Stack: ..., A: []byte, B: []byte → ..., []byte +- A bitwise-and B. A and B are zero-left extended to the greater of their lengths +- **Cost**: 6 +- Availability: v4 + +## b^ + +- Bytecode: 0xad +- Stack: ..., A: []byte, B: []byte → ..., []byte +- A bitwise-xor B. 
`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made to extend an array field beyond the limit imposed by consensus parameters. (Addresses set into asset params of acfg transactions need not be _available_.)
+- Availability: v5 +- Mode: Application + +`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction. + +## itxn + +- Syntax: `itxn F` where F: [txn](#field-group-txn) +- Bytecode: 0xb4 {uint8} +- Stack: ... → ..., any +- field F of the last inner transaction +- Availability: v5 +- Mode: Application + +## itxna + +- Syntax: `itxna F I` where F: [txna](#field-group-txna), I: a transaction field array index +- Bytecode: 0xb5 {uint8}, {uint8} +- Stack: ... → ..., any +- Ith value of the array field F of the last inner transaction +- Availability: v5 +- Mode: Application + +## itxn_next + +- Bytecode: 0xb6 +- Stack: ... → ... +- begin preparation of a new inner transaction in the same transaction group +- Availability: v6 +- Mode: Application + +`itxn_next` initializes the transaction exactly as `itxn_begin` does + +## gitxn + +- Syntax: `gitxn T F` where T: transaction group index, F: [txn](#field-group-txn) +- Bytecode: 0xb7 {uint8}, {uint8} +- Stack: ... → ..., any +- field F of the Tth transaction in the last inner group submitted +- Availability: v6 +- Mode: Application + +## gitxna + +- Syntax: `gitxna T F I` where T: transaction group index, F: [txna](#field-group-txna), I: transaction field array index +- Bytecode: 0xb8 {uint8}, {uint8}, {uint8} +- Stack: ... → ..., any +- Ith value of the array field F from the Tth transaction in the last inner group submitted +- Availability: v6 +- Mode: Application + +## box_create + +- Bytecode: 0xb9 +- Stack: ..., A: boxName, B: uint64 → ..., bool +- create a box named A, of length B. Fail if the name A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1 +- Availability: v8 +- Mode: Application + +Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`. 
+ +## box_extract + +- Bytecode: 0xba +- Stack: ..., A: boxName, B: uint64, C: uint64 → ..., []byte +- read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size. +- Availability: v8 +- Mode: Application + +## box_replace + +- Bytecode: 0xbb +- Stack: ..., A: boxName, B: uint64, C: []byte → ... +- write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size. +- Availability: v8 +- Mode: Application + +## box_del + +- Bytecode: 0xbc +- Stack: ..., A: boxName → ..., bool +- delete box named A if it exists. Return 1 if A existed, 0 otherwise +- Availability: v8 +- Mode: Application + +## box_len + +- Bytecode: 0xbd +- Stack: ..., A: boxName → ..., X: uint64, Y: bool +- X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0. +- Availability: v8 +- Mode: Application + +## box_get + +- Bytecode: 0xbe +- Stack: ..., A: boxName → ..., X: []byte, Y: bool +- X is the contents of box A if A exists, else ''. Y is 1 if A exists, else 0. +- Availability: v8 +- Mode: Application + +For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace` + +## box_put + +- Bytecode: 0xbf +- Stack: ..., A: boxName, B: []byte → ... +- replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). 
Creates A if it does not exist +- Availability: v8 +- Mode: Application + +For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace` + +## txnas + +- Syntax: `txnas F` where F: [txna](#field-group-txna) +- Bytecode: 0xc0 {uint8} +- Stack: ..., A: uint64 → ..., any +- Ath value of the array field F of the current transaction +- Availability: v5 + +## gtxnas + +- Syntax: `gtxnas T F` where T: transaction group index, F: [txna](#field-group-txna) +- Bytecode: 0xc1 {uint8}, {uint8} +- Stack: ..., A: uint64 → ..., any +- Ath value of the array field F from the Tth transaction in the current group +- Availability: v5 + +## gtxnsas + +- Syntax: `gtxnsas F` where F: [txna](#field-group-txna) +- Bytecode: 0xc2 {uint8} +- Stack: ..., A: uint64, B: uint64 → ..., any +- Bth value of the array field F from the Ath transaction in the current group +- Availability: v5 + +## args + +- Bytecode: 0xc3 +- Stack: ..., A: uint64 → ..., []byte +- Ath LogicSig argument +- Availability: v5 +- Mode: Signature + +## gloadss + +- Bytecode: 0xc4 +- Stack: ..., A: uint64, B: uint64 → ..., any +- Bth scratch space value of the Ath transaction in the current group +- Availability: v6 +- Mode: Application + +## itxnas + +- Syntax: `itxnas F` where F: [txna](#field-group-txna) +- Bytecode: 0xc5 {uint8} +- Stack: ..., A: uint64 → ..., any +- Ath value of the array field F of the last inner transaction +- Availability: v6 +- Mode: Application + +## gitxnas + +- Syntax: `gitxnas T F` where T: transaction group index, F: [txna](#field-group-txna) +- Bytecode: 0xc6 {uint8}, {uint8} +- Stack: ..., A: uint64 → ..., any +- Ath value of the array field F from the Tth transaction in the last inner group submitted +- Availability: v6 +- Mode: Application + +## vrf_verify + +- Syntax: `vrf_verify S` where S: [vrf_verify](#field-group-vrf_verify) +- Bytecode: 0xd0 {uint8} +- Stack: ..., A: []byte, B: [80]byte, C: [32]byte → ..., X: [64]byte, Y: bool +- Verify the proof B of 
message A against pubkey C. Returns vrf output and verification flag. +- **Cost**: 5700 +- Availability: v7 + +### vrf_verify + +Standards + +| Index | Name | Notes | +| - | ------ | --------- | +| 0 | VrfAlgorand | | + + +`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/). + +## block + +- Syntax: `block F` where F: [block](#field-group-block) +- Bytecode: 0xd1 {uint8} +- Stack: ..., A: uint64 → ..., any +- field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive) +- Availability: v7 + +### block + +Fields + +| Index | Name | Type | In | Notes | +| - | ------ | -- | - | --------- | +| 0 | BlkSeed | [32]byte | | | +| 1 | BlkTimestamp | uint64 | | | +| 2 | BlkProposer | address | v11 | | +| 3 | BlkFeesCollected | uint64 | v11 | | +| 4 | BlkBonus | uint64 | v11 | | +| 5 | BlkBranch | [32]byte | v11 | | +| 6 | BlkFeeSink | address | v11 | | +| 7 | BlkProtocol | []byte | v11 | | +| 8 | BlkTxnCounter | uint64 | v11 | | +| 9 | BlkProposerPayout | uint64 | v11 | | + + +## box_splice + +- Bytecode: 0xd2 +- Stack: ..., A: boxName, B: uint64, C: uint64, D: []byte → ... +- set box A to contain its previous bytes up to index B, followed by D, followed by the original bytes of A that began at index B+C. +- Availability: v10 +- Mode: Application + +Boxes are of constant length. If C < len(D), then len(D)-C bytes will be removed from the end. If C > len(D), zero bytes will be appended to the end to reach the box length. + +## box_resize + +- Bytecode: 0xd3 +- Stack: ..., A: boxName, B: uint64 → ... +- change the size of box named A to be of length B, adding zero bytes to end or removing bytes from the end, as needed. Fail if the name A is empty, A is not an existing box, or B exceeds 32,768. 
+- Availability: v10 +- Mode: Application + +## ec_add + +- Syntax: `ec_add G` where G: [EC](#field-group-ec) +- Bytecode: 0xe0 {uint8} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- for curve points A and B, return the curve point A + B +- **Cost**: BN254g1=125; BN254g2=170; BLS12_381g1=205; BLS12_381g2=290 +- Availability: v10 + +### EC + +Groups + +| Index | Name | Notes | +| - | ------ | --------- | +| 0 | BN254g1 | G1 of the BN254 curve. Points encoded as 32 byte X following by 32 byte Y | +| 1 | BN254g2 | G2 of the BN254 curve. Points encoded as 64 byte X following by 64 byte Y | +| 2 | BLS12_381g1 | G1 of the BLS 12-381 curve. Points encoded as 48 byte X following by 48 byte Y | +| 3 | BLS12_381g2 | G2 of the BLS 12-381 curve. Points encoded as 96 byte X following by 96 byte Y | + + +A and B are curve points in affine representation: field element X concatenated with field element Y. Field element `Z` is encoded as follows. +For the base field elements (Fp), `Z` is encoded as a big-endian number and must be lower than the field modulus. +For the quadratic field extension (Fp2), `Z` is encoded as the concatenation of the individual encoding of the coefficients. For an Fp2 element of the form `Z = Z0 + Z1 i`, where `i` is a formal quadratic non-residue, the encoding of Z is the concatenation of the encoding of `Z0` and `Z1` in this order. (`Z0` and `Z1` must be less than the field modulus). + +The point at infinity is encoded as `(X,Y) = (0,0)`. +Groups G1 and G2 are denoted additively. + +Fails if A or B is not in G. +A and/or B are allowed to be the point at infinity. +Does _not_ check if A and B are in the main prime-order subgroup. + +## ec_scalar_mul + +- Syntax: `ec_scalar_mul G` where G: [EC](#field-group-ec) +- Bytecode: 0xe1 {uint8} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B. 
+- **Cost**: BN254g1=1810; BN254g2=3430; BLS12_381g1=2950; BLS12_381g2=6530 +- Availability: v10 + +A is a curve point encoded and checked as described in `ec_add`. Scalar B is interpreted as a big-endian unsigned integer. Fails if B exceeds 32 bytes. + +## ec_pairing_check + +- Syntax: `ec_pairing_check G` where G: [EC](#field-group-ec) +- Bytecode: 0xe2 {uint8} +- Stack: ..., A: []byte, B: []byte → ..., bool +- 1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0 +- **Cost**: BN254g1=8000 + 7400 per 64 bytes of B; BN254g2=8000 + 7400 per 128 bytes of B; BLS12_381g1=13000 + 10000 per 96 bytes of B; BLS12_381g2=13000 + 10000 per 192 bytes of B +- Availability: v10 + +A and B are concatenated points, encoded and checked as described in `ec_add`. A contains points of the group G, B contains points of the associated group (G2 if G is G1, and vice versa). Fails if A and B have a different number of points, or if any point is not in its described group or outside the main prime-order subgroup - a stronger condition than other opcodes. AVM values are limited to 4096 bytes, so `ec_pairing_check` is limited by the size of the points in the groups being operated upon. + +## ec_multi_scalar_mul + +- Syntax: `ec_multi_scalar_mul G` where G: [EC](#field-group-ec) +- Bytecode: 0xe3 {uint8} +- Stack: ..., A: []byte, B: []byte → ..., []byte +- for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn +- **Cost**: BN254g1=3600 + 90 per 32 bytes of B; BN254g2=7200 + 270 per 32 bytes of B; BLS12_381g1=6500 + 95 per 32 bytes of B; BLS12_381g2=14850 + 485 per 32 bytes of B +- Availability: v10 + +A is a list of concatenated points, encoded and checked as described in `ec_add`. B is a list of concatenated scalars which, unlike ec_scalar_mul, must all be exactly 32 bytes long. 
+The name `ec_multi_scalar_mul` was chosen to reflect common usage; this operation is widely known as multi-scalar multiplication (MSM). AVM values are limited to 4096 bytes, so `ec_multi_scalar_mul` is limited by the size of the points in the group being operated upon.
+
+## ec_subgroup_check
+
+- Syntax: `ec_subgroup_check G` where G: [EC](#field-group-ec)
+- Bytecode: 0xe4 {uint8}
+- Stack: ..., A: []byte → ..., bool
+- 1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. Program fails if A is not in G at all.
+- **Cost**: BN254g1=20; BN254g2=3100; BLS12_381g1=1850; BLS12_381g2=2340
+- Availability: v10
+
+## ec_map_to
+
+- Syntax: `ec_map_to G` where G: [EC](#field-group-ec)
+- Bytecode: 0xe5 {uint8}
+- Stack: ..., A: []byte → ..., []byte
+- maps field element A to group G
+- **Cost**: BN254g1=630; BN254g2=3300; BLS12_381g1=1950; BLS12_381g2=8150
+- Availability: v10
+
+BN254 points are mapped by the SVDW map. BLS12-381 points are mapped by the SSWU map.
+G1 element inputs are base field elements and G2 element inputs are quadratic field elements, with nearly the same encoding rules (for field elements) as defined in `ec_add`. There is one difference in the encoding rules: G1 element inputs do not need to be 0-padded if they fit in less than 32 bytes for BN254 and less than 48 bytes for BLS12-381. (As usual, the empty byte array represents 0.) G2 element inputs always need to have the required size.
+
+## mimc
+
+- Syntax: `mimc C` where C: [Mimc Configurations](#field-group-mimc-configurations)
+- Bytecode: 0xe6 {uint8}
+- Stack: ..., A: []byte → ..., [32]byte
+- MiMC hash of scalars A, using curve and parameters specified by configuration C
+- **Cost**: BN254Mp110=10 + 550 per 32 bytes of A; BLS12_381Mp111=10 + 550 per 32 bytes of A
+- Availability: v11
+
+### Mimc Configurations
+
+Parameters
+
+| Index | Name | Notes |
+| - | ------ | --------- |
+| 0 | BN254Mp110 | MiMC configuration for the BN254 curve with Miyaguchi-Preneel mode, 110 rounds, exponent 5, seed "seed" |
+| 1 | BLS12_381Mp111 | MiMC configuration for the BLS12-381 curve with Miyaguchi-Preneel mode, 111 rounds, exponent 5, seed "seed" |
+
+
+A is a list of concatenated 32 byte big-endian unsigned integer scalars. Fail if A's length is not a multiple of 32 or any element exceeds the curve modulus.
+
+The MiMC hash function has known collisions since any input which is a multiple of the elliptic curve modulus will hash to the same value. MiMC is thus not a general purpose hash function, but meant to be used in zero knowledge applications to match a zk-circuit implementation.
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index d1ebeb3b4a..34b2ed1995 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -439,6 +439,11 @@ dup; dup falcon_verify ` +const mimcNonsense = ` +pushbytes 0x11223344556677889900aabbccddeeff11223344556677889900aabbccddeeff +mimc BLS12_381Mp111 +` + const v8Nonsense = v7Nonsense + switchNonsense + frameNonsense + matchNonsense + boxNonsense const v9Nonsense = v8Nonsense @@ -450,7 +455,7 @@ const spliceNonsence = ` const v10Nonsense = v9Nonsense + pairingNonsense + spliceNonsence -const v11Nonsense = v10Nonsense + incentiveNonsense + stateProofNonsense +const v11Nonsense = v10Nonsense + incentiveNonsense + stateProofNonsense + mimcNonsense const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a" @@ -475,8 +480,9 @@ 
const v10Compiled = v9Compiled + pairingCompiled + spliceCompiled const incentiveCompiled = "757401" const stateProofCompiled = "80070123456789abcd86494985" +const mimcCompiled = "802011223344556677889900aabbccddeeff11223344556677889900aabbccddeeffe601" -const V11Compiled = v10Compiled + incentiveCompiled + stateProofCompiled +const V11Compiled = v10Compiled + incentiveCompiled + stateProofCompiled + mimcCompiled var nonsense = map[uint64]string{ 1: v1Nonsense, @@ -3226,7 +3232,7 @@ func TestMacros(t *testing.T) { #define ==? ==; bnz pushint 1; pushint 2; ==? label1 err - label1: + label1: pushint 1`, ) @@ -3264,19 +3270,19 @@ func TestMacros(t *testing.T) { pushbytes 0xddf2554d txna ApplicationArgs 0 == - bnz kickstart - pushbytes 0x903f4535 + bnz kickstart + pushbytes 0x903f4535 txna ApplicationArgs 0 == - bnz portal_transfer + bnz portal_transfer kickstart: pushint 1 portal_transfer: pushint 1 `, ` - #define abi-route txna ApplicationArgs 0; ==; bnz - method "kickstart(account)void"; abi-route kickstart - method "portal_transfer(byte[])byte[]"; abi-route portal_transfer + #define abi-route txna ApplicationArgs 0; ==; bnz + method "kickstart(account)void"; abi-route kickstart + method "portal_transfer(byte[])byte[]"; abi-route portal_transfer kickstart: pushint 1 portal_transfer: @@ -3332,7 +3338,7 @@ add: extract_uint32 stores - load 1; load 2; + + load 1; load 2; + store 255 int 255 @@ -3356,11 +3362,11 @@ add: #define abi-decode-uint32 ;int 0; extract_uint32; #define abi-encode-uint32 ;itob;extract 4 0; -#define abi-encode-bytes ;dup; len; abi-encode-uint16; swap; concat; +#define abi-encode-bytes ;dup; len; abi-encode-uint16; swap; concat; #define abi-decode-bytes ;extract 2 0; -// abi method handling -#define abi-route ;txna ApplicationArgs 0; ==; bnz +// abi method handling +#define abi-route ;txna ApplicationArgs 0; ==; bnz #define abi-return ;pushbytes 0x151f7c75; swap; concat; log; int 1; return; // stanza: "set $var from-{type}" @@ -3389,15 +3395,15 @@ 
echo: // add handler -method "add(uint32,uint32)uint32"; abi-route add +method "add(uint32,uint32)uint32"; abi-route add add: #define x 1 - parse x from-uint32 + parse x from-uint32 #define y 2 parse y from-uint32 - #define sum 255 + #define sum 255 load x; load y; +; store sum returns sum as-uint32 diff --git a/data/transactions/logic/crypto.go b/data/transactions/logic/crypto.go index c5c39b654c..043f670ee5 100644 --- a/data/transactions/logic/crypto.go +++ b/data/transactions/logic/crypto.go @@ -23,6 +23,7 @@ import ( "crypto/sha512" "errors" "fmt" + "hash" "math/big" "github.com/algorand/go-algorand/crypto" @@ -30,8 +31,52 @@ import ( "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-sumhash" "golang.org/x/crypto/sha3" + + bls12_381mimc "github.com/consensys/gnark-crypto/ecc/bls12-381/fr/mimc" + bn254mimc "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc" ) +// mimc is implemented for compatibility with zk circuits, +// matching the implementation in circuits generated by gnark +func opMimc(cx *EvalContext) error { + config := MimcConfig(cx.program[cx.pc+1]) + fs, ok := mimcConfigSpecByField(config) + if !ok { // no version check yet, all configs appeared at once + return fmt.Errorf("invalid mimc config %s", config) + } + + last := len(cx.Stack) - 1 + data := cx.Stack[last].Bytes + if len(data) == 0 { + return fmt.Errorf("the input data cannot be empty") + } + if len(data)%32 != 0 { + return fmt.Errorf("the input data must be a multiple of 32 bytes") + } + + var mimc hash.Hash + + switch fs.field { + case BN254Mp110: + mimc = bn254mimc.NewMiMC() + case BLS12_381Mp111: + mimc = bls12_381mimc.NewMiMC() + default: + return fmt.Errorf("invalid mimc group %s", config) + } + + // unlike most hash.Hash objects, a mimc hasher has strict requirements, + // therefore Write() can return an error. The input must be a multiple of + // the curve's encoded element size, and no element may exceed the curve + // modulus. 
+ if _, err := mimc.Write(cx.Stack[last].Bytes); err != nil { + return fmt.Errorf("invalid mimc input %w", err) + } + + cx.Stack[last].Bytes = mimc.Sum(nil) + return nil +} + func opSHA256(cx *EvalContext) error { last := len(cx.Stack) - 1 hash := sha256.Sum256(cx.Stack[last].Bytes) diff --git a/data/transactions/logic/crypto_test.go b/data/transactions/logic/crypto_test.go index 70283be7e3..c0ffd76242 100644 --- a/data/transactions/logic/crypto_test.go +++ b/data/transactions/logic/crypto_test.go @@ -117,6 +117,84 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A testAccepts(t, progText, 1) } +func TestMimc(t *testing.T) { + // We created test vectors for the MiMC hash function by defining a set of preimages for different + // input sizes and calling gnark-crypto's MiMC implementation to compute the expected hash values. + // E.g.: + // import "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc" + // hasher := mimc.NewMiMC() + // hasher.Write(inputBytes) + // hashBytes := hasher.Sum(nil) + // Since we are hardcoding the expected hash values, we are also testing that gnark-crypto's MiMC + // output does not change under the hood with new versions. + // + // We test that malformed inputs panic, in particular we test malfornmed inputs of: + // 0 length, lenghts not multiple of 32 bytes, chunks representing values greater than the modulus. + // We test that well formed inputs hash correctly, testing both single chunk inputs (32-byte) and + // multiple chunk inputs (96 bytes). 
+ partitiontest.PartitionTest(t) + t.Parallel() + + type PreImageTestVector struct { + PreImage string + ShouldSucceed bool + } + preImageTestVectors := []PreImageTestVector{ + {"0x", + false}, // zero-length input + {"0x23a950068dd3d1e21cee48e7919be7ae32cdef70311fc486336ea9d4b5042535", + true}, // 32 bytes, less than modulus + {"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000002", + false}, // 32 bytes, more than modulus + {"0xdeadf00d", + false}, // less than 32 byte + {"0x183de351a72141d79c51a27d10405549c98302cb2536c5968deeb3cba635121723a950068dd3d1e21cee48e7919be7ae32cdef70311fc486336ea9d4b504253530644e72e131a029b85045b68181585d2833e84879b9709143e1f593ef676981", + true}, // 32 bytes, less than modulus | 32 bytes, less than modulus | 32 bytes, less than modulus + {"0x183de351a72141d79c51a27d10405549c98302cb2536c5968deeb3cba635121723a950068dd3d1e21cee48e7919be7ae32cdef70311fc486336ea9d4b504253573eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000002", + false}, // 32 bytes, less than modulus | 32 bytes, less than modulus | 32 bytes, more than modulus + {"0x183de351a72141d79c51a27d10405549c98302cb2536c5968deeb3cba635121723a950068dd3d1e21cee48e7919be7ae32cdef70311fc486336ea9d4b5042535abba", + false}, // 32 bytes, less than modulus | 32 bytes, less than modulus | less than 32 bytes + } + + circuitHashTestVectors := map[string][]string{ + "BN254Mp110": { + "20104241803663641422577121134203490505137011783614913652735802145961801733870", + "12886436712380113721405259596386800092738845035233065858332878701083870690753", + "19565877911319815535452130675266047290072088868113536892077808700068649624391", + "1037254799353855871006189384309576393135431139055333626960622147300727796413", + "6040222623731283351958201178122781676432899642144860863024149088913741383362", + "21691351735381703396517600859480938764038501053226864452091917666642352837076", + "10501393540371963307040960561318023073151272109639330842515119353134949995409", + }, + 
"BLS12_381Mp111": { + "17991912493598890696181760734961918471863781118188078948205844982816313445306", + "8791766422525455185980675814845076441443662947059416063736889106252015893524", + "35137972692771717943992759113612269767581262500164574105059686144346651628747", + "15039173432183897369859775531867817848264266283034981501223857291379142522368", + "12964111614552580241101202600014316932811348627866250816177200046290462797607", + "21773894974440411325489312534417904228129169539217646609523079291104496302656", + "9873666029497961930790892458408217321483390383568592297687427911011295910871", + }, + } + + for _, config := range []string{"BN254Mp110", "BLS12_381Mp111"} { + for i, preImageTestVector := range preImageTestVectors { + var n big.Int + n.SetString(circuitHashTestVectors[config][i], 10) + circuitHash := n.Bytes() + progText := fmt.Sprintf(`byte %s +mimc %s +byte 0x%x +==`, preImageTestVector.PreImage, config, circuitHash) + if preImageTestVector.ShouldSucceed { + testAccepts(t, progText, 11) + } else { + testPanics(t, progText, 11) + } + } + } +} + // This is patterned off vrf_test.go, but we don't create proofs here, we only // check that the output is correct, given the proof. 
func testVrfApp(pubkey, proof, data string, output string) string { @@ -713,8 +791,11 @@ int ` + fmt.Sprintf("%d", testLogicBudget-2500-8) + ` } func BenchmarkHashes(b *testing.B) { - for _, hash := range []string{"sha256", "keccak256" /* skip, same as keccak "sha3_256", */, "sha512_256", "sumhash512"} { + for _, hash := range []string{"sha256", "keccak256" /* skip, same as keccak "sha3_256", */, "sha512_256", "sumhash512", "mimc BN254Mp110", "mimc BLS12_381Mp111"} { for _, size := range []int{0, 32, 128, 512, 1024, 4096} { + if size == 0 && (hash == "mimc BN254Mp110" || hash == "mimc BLS12_381Mp111") { + continue + } b.Run(hash+"-"+strconv.Itoa(size), func(b *testing.B) { benchmarkOperation(b, "", fmt.Sprintf("int %d; bzero; %s; pop", size, hash), "int 1") }) diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 3bab156561..2b2f88b850 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -41,6 +41,13 @@ var opDescByName = map[string]OpDesc{ "sumhash512": {"sumhash512 of value A, yields [64]byte", "", nil}, "falcon_verify": {"for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey", "", nil}, + "mimc": {"MiMC hash of scalars A, using curve and parameters specified by configuration C", "" + + "A is a list of concatenated 32 byte big-endian unsigned integer scalars. Fail if A's length is not a multiple of 32 or any element exceeds the curve modulus.\n\n" + + "The MiMC hash function has known collisions since any input which is a multiple of the elliptic curve modulus will hash to the same value. 
" + + "MiMC is thus not a general purpose hash function, but meant to be used in zero knowledge applications to match a zk-circuit implementation.", + []string{"configuration index"}, + }, + "ed25519verify": {"for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey => {0 or 1}", "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.", nil}, "ed25519verify_bare": {"for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1}", "", nil}, "ecdsa_verify": {"for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1}", "The 32 byte Y-component of a public key is the last element on the stack, preceded by X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. 
The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.", []string{"curve index"}}, @@ -354,7 +361,7 @@ var OpGroups = map[string][]string{ "Byte Array Manipulation": {"getbit", "setbit", "getbyte", "setbyte", "concat", "len", "substring", "substring3", "extract", "extract3", "extract_uint16", "extract_uint32", "extract_uint64", "replace2", "replace3", "base64_decode", "json_ref"}, "Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"}, "Byte Array Logic": {"b|", "b&", "b^", "b~"}, - "Cryptography": {"sha256", "keccak256", "sha512_256", "sha3_256", "sumhash512", "falcon_verify", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_scalar_mul", "ec_subgroup_check", "ec_map_to"}, + "Cryptography": {"sha256", "keccak256", "sha512_256", "sha3_256", "sumhash512", "falcon_verify", "ed25519verify", "ed25519verify_bare", "ecdsa_verify", "ecdsa_pk_recover", "ecdsa_pk_decompress", "vrf_verify", "ec_add", "ec_scalar_mul", "ec_pairing_check", "ec_multi_scalar_mul", "ec_subgroup_check", "ec_map_to", "mimc"}, "Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "pushints", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "pushbytess", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"}, "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "popn", "dup", "dup2", "dupn", "dig", "bury", "cover", "uncover", "frame_dig", "frame_bury", "swap", "select", "assert", "callsub", "proto", "retsub", "switch", "match"}, "State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", 
"app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "voter_params_get", "online_stake", "log", "block"}, diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index 270925a84f..5f85e8d012 100644 --- a/data/transactions/logic/evalStateful_test.go +++ b/data/transactions/logic/evalStateful_test.go @@ -3250,6 +3250,9 @@ func TestReturnTypes(t *testing.T) { "box_create": "int 9; +; box_create", // make the size match the 10 in CreateBox "box_put": "byte 0x010203040506; concat; box_put", // make the 4 byte arg into a 10 + + // mimc requires an input size multiple of 32 bytes. + "mimc": ": byte 0x0000000000000000000000000000000000000000000000000000000000000001; mimc BN254Mp110", } /* Make sure the specialCmd tests the opcode in question */ diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index b2f384c259..b4d3ca53c9 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -23,7 +23,7 @@ import ( "github.com/algorand/go-algorand/protocol" ) -//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,EcGroup,Base64Encoding,JSONRefType,VoterParamsField,VrfStandard,BlockField -output=fields_string.go +//go:generate stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,EcGroup,MimcConfig,Base64Encoding,JSONRefType,VoterParamsField,VrfStandard,BlockField -output=fields_string.go // FieldSpec unifies the various specs for assembly, disassembly, and doc generation. 
type FieldSpec interface { @@ -785,6 +785,68 @@ var EcGroups = FieldGroup{ ecGroupSpecByName, } +// MimcConfig is an enum for the `mimc` opcode +type MimcConfig int + +const ( + // BN254Mp110 is the default MiMC configuration for the BN254 curve with Miyaguchi-Preneel mode, 110 rounds, exponent 5, seed "seed" + BN254Mp110 MimcConfig = iota + // BLS12_381Mp111 is the default MiMC configuration for the BLS12-381 curve with Miyaguchi-Preneel mode, 111 rounds, exponent 5, seed "seed" + BLS12_381Mp111 + invalidMimcConfig // compile-time constant for number of fields +) + +var mimcConfigNames [invalidMimcConfig]string + +type mimcConfigSpec struct { + field MimcConfig + doc string +} + +func (fs mimcConfigSpec) Field() byte { + return byte(fs.field) +} +func (fs mimcConfigSpec) Type() StackType { + return StackNone // Will not show, since all are untyped +} +func (fs mimcConfigSpec) OpVersion() uint64 { + return mimcVersion +} +func (fs mimcConfigSpec) Version() uint64 { + return mimcVersion +} +func (fs mimcConfigSpec) Note() string { + return fs.doc +} + +var mimcConfigSpecs = [...]mimcConfigSpec{ + {BN254Mp110, "MiMC configuration for the BN254 curve with Miyaguchi-Preneel mode, 110 rounds, exponent 5, seed \"seed\""}, + {BLS12_381Mp111, "MiMC configuration for the BLS12-381 curve with Miyaguchi-Preneel mode, 111 rounds, exponent 5, seed \"seed\""}, +} + +func mimcConfigSpecByField(c MimcConfig) (mimcConfigSpec, bool) { + if int(c) >= len(mimcConfigSpecs) { + return mimcConfigSpec{}, false + } + return mimcConfigSpecs[c], true +} + +var mimcConfigSpecByName = make(mimcConfigNameSpecMap, len(mimcConfigNames)) + +type mimcConfigNameSpecMap map[string]mimcConfigSpec + +func (s mimcConfigNameSpecMap) get(name string) (FieldSpec, bool) { + fs, ok := s[name] + return fs, ok +} + +// MimcConfigs collects details about the constants used to describe MimcConfigs +var MimcConfigs = FieldGroup{ + "Mimc Configurations", "Parameters", + mimcConfigNames[:], + mimcConfigSpecByName, 
+} + // Base64Encoding is an enum for the `base64decode` opcode type Base64Encoding int @@ -1548,6 +1610,13 @@ func init() { ecGroupSpecByName[s.field.String()] = s } + equal(len(mimcConfigSpecs), len(mimcConfigNames)) + for i, s := range mimcConfigSpecs { + equal(int(s.field), i) + mimcConfigNames[s.field] = s.field.String() + mimcConfigSpecByName[s.field.String()] = s + } + equal(len(base64EncodingSpecs), len(base64EncodingNames)) for i, s := range base64EncodingSpecs { equal(int(s.field), i) diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go index df9922abf2..81f1b33d29 100644 --- a/data/transactions/logic/fields_string.go +++ b/data/transactions/logic/fields_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,EcGroup,Base64Encoding,JSONRefType,VoterParamsField,VrfStandard,BlockField -output=fields_string.go"; DO NOT EDIT. +// Code generated by "stringer -type=TxnField,GlobalField,AssetParamsField,AppParamsField,AcctParamsField,AssetHoldingField,OnCompletionConstType,EcdsaCurve,EcGroup,MimcConfig,Base64Encoding,JSONRefType,VoterParamsField,VrfStandard,BlockField -output=fields_string.go"; DO NOT EDIT. package logic @@ -297,6 +297,25 @@ func (i EcGroup) String() string { } return _EcGroup_name[_EcGroup_index[i]:_EcGroup_index[i+1]] } +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[BN254Mp110-0] + _ = x[BLS12_381Mp111-1] + _ = x[invalidMimcConfig-2] +} + +const _MimcConfig_name = "BN254Mp110BLS12_381Mp111invalidMimcConfig" + +var _MimcConfig_index = [...]uint8{0, 10, 24, 41} + +func (i MimcConfig) String() string { + if i < 0 || i >= MimcConfig(len(_MimcConfig_index)-1) { + return "MimcConfig(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MimcConfig_name[_MimcConfig_index[i]:_MimcConfig_index[i+1]] +} func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. diff --git a/data/transactions/logic/langspec_v11.json b/data/transactions/logic/langspec_v11.json new file mode 100644 index 0000000000..cb054ebcd8 --- /dev/null +++ b/data/transactions/logic/langspec_v11.json @@ -0,0 +1,4949 @@ +{ + "Version": 11, + "LogicSigVersion": 10, + "NamedTypes": [ + { + "Name": "[]byte", + "Abbreviation": "b", + "Bound": [ + 0, + 4096 + ], + "AVMType": "[]byte" + }, + { + "Name": "address", + "Abbreviation": "A", + "Bound": [ + 32, + 32 + ], + "AVMType": "[]byte" + }, + { + "Name": "any", + "Abbreviation": "a", + "Bound": [ + 0, + 0 + ], + "AVMType": "any" + }, + { + "Name": "bigint", + "Abbreviation": "I", + "Bound": [ + 0, + 64 + ], + "AVMType": "[]byte" + }, + { + "Name": "bool", + "Abbreviation": "T", + "Bound": [ + 0, + 1 + ], + "AVMType": "uint64" + }, + { + "Name": "boxName", + "Abbreviation": "N", + "Bound": [ + 1, + 64 + ], + "AVMType": "[]byte" + }, + { + "Name": "method", + "Abbreviation": "M", + "Bound": [ + 4, + 4 + ], + "AVMType": "[]byte" + }, + { + "Name": "none", + "Abbreviation": "x", + "Bound": [ + 0, + 0 + ], + "AVMType": "none" + }, + { + "Name": "stateKey", + "Abbreviation": "K", + "Bound": [ + 0, + 64 + ], + "AVMType": "[]byte" + }, + { + "Name": "uint64", + "Abbreviation": "i", + "Bound": [ + 0, + 18446744073709551615 + ], + "AVMType": "uint64" + } + ], + "Ops": [ + { + "Opcode": 0, + "Name": "err", + 
"Size": 1, + "DocCost": "1", + "Doc": "Fail immediately.", + "IntroducedVersion": 1, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 1, + "Name": "sha256", + "Args": [ + "[]byte" + ], + "Returns": [ + "[32]byte" + ], + "Size": 1, + "DocCost": "35", + "Doc": "SHA256 hash of value A, yields [32]byte", + "IntroducedVersion": 1, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 2, + "Name": "keccak256", + "Args": [ + "[]byte" + ], + "Returns": [ + "[32]byte" + ], + "Size": 1, + "DocCost": "130", + "Doc": "Keccak256 hash of value A, yields [32]byte", + "IntroducedVersion": 1, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 3, + "Name": "sha512_256", + "Args": [ + "[]byte" + ], + "Returns": [ + "[32]byte" + ], + "Size": 1, + "DocCost": "45", + "Doc": "SHA512_256 hash of value A, yields [32]byte", + "IntroducedVersion": 1, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 4, + "Name": "ed25519verify", + "Args": [ + "[]byte", + "[64]byte", + "[32]byte" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1900", + "Doc": "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey =\u003e {0 or 1}", + "DocExtra": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.", + "IntroducedVersion": 1, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 5, + "Name": "ecdsa_verify", + "Args": [ + "[32]byte", + "[32]byte", + "[32]byte", + "[32]byte", + "[32]byte" + ], + "Returns": [ + "bool" + ], + "Size": 2, + "ArgEnum": [ + "Secp256k1", + "Secp256r1" + ], + "DocCost": "Secp256k1=1700; Secp256r1=2500", + "Doc": "for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey =\u003e {0 or 1}", + "DocExtra": "The 32 byte Y-component of a public key is the last element on the stack, preceded by 
X-component of a pubkey, preceded by S and R components of a signature, preceded by the data that is fifth element on the stack. All values are big-endian encoded. The signed data must be 32 bytes long, and signatures in lower-S form are only accepted.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "V", + "Reference": "ECDSA" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 6, + "Name": "ecdsa_pk_decompress", + "Args": [ + "[33]byte" + ], + "Returns": [ + "[32]byte", + "[32]byte" + ], + "Size": 2, + "ArgEnum": [ + "Secp256k1", + "Secp256r1" + ], + "DocCost": "Secp256k1=650; Secp256r1=2400", + "Doc": "decompress pubkey A into components X, Y", + "DocExtra": "The 33 byte public key in a compressed form to be decompressed into X and Y (top) components. All values are big-endian encoded.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "V", + "Reference": "ECDSA" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 7, + "Name": "ecdsa_pk_recover", + "Args": [ + "[32]byte", + "uint64", + "[32]byte", + "[32]byte" + ], + "Returns": [ + "[32]byte", + "[32]byte" + ], + "Size": 2, + "ArgEnum": [ + "Secp256k1", + "Secp256r1" + ], + "DocCost": "2000", + "Doc": "for (data A, recovery id B, signature C, D) recover a public key", + "DocExtra": "S (top) and R elements of a signature, recovery id and data (bottom) are expected on the stack and used to deriver a public key. All values are big-endian encoded. The signed data must be 32 bytes long.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "V", + "Reference": "ECDSA" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 8, + "Name": "+", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A plus B. 
Fail on overflow.", + "DocExtra": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 9, + "Name": "-", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A minus B. Fail if B \u003e A.", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 10, + "Name": "/", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A divided by B (truncated division). Fail if B == 0.", + "DocExtra": "`divmodw` is available to divide the two-element values produced by `mulw` and `addw`.", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 11, + "Name": "*", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A times B. Fail on overflow.", + "DocExtra": "Overflow is an error condition which halts execution and fails the transaction. 
Full precision is available from `mulw`.", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 12, + "Name": "\u003c", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A less than B =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 13, + "Name": "\u003e", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A greater than B =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 14, + "Name": "\u003c=", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A less than or equal to B =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 15, + "Name": "\u003e=", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A greater than or equal to B =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 16, + "Name": "\u0026\u0026", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A is not zero and B is not zero =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 17, + "Name": "||", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A is not zero or B is not zero =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 18, + "Name": "==", + "Args": [ + "any", + "any" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A is equal to B =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 19, + "Name": "!=", + "Args": [ + "any", + "any" + ], + "Returns": [ + "bool" + ], + 
"Size": 1, + "DocCost": "1", + "Doc": "A is not equal to B =\u003e {0 or 1}", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 20, + "Name": "!", + "Args": [ + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A == 0 yields 1; else 0", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 21, + "Name": "len", + "Args": [ + "[]byte" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "yields length of byte value A", + "IntroducedVersion": 1, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 22, + "Name": "itob", + "Args": [ + "uint64" + ], + "Returns": [ + "[8]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "converts uint64 A to big-endian byte array, always of length 8", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 23, + "Name": "btoi", + "Args": [ + "[]byte" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "converts big-endian byte array A to uint64. Fails if len(A) \u003e 8. Padded by leading 0s if len(A) \u003c 8.", + "DocExtra": "`btoi` fails if the input is longer than 8 bytes.", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 24, + "Name": "%", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A modulo B. 
Fail if B == 0.", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 25, + "Name": "|", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A bitwise-or B", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 26, + "Name": "\u0026", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A bitwise-and B", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 27, + "Name": "^", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A bitwise-xor B", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 28, + "Name": "~", + "Args": [ + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "bitwise invert value A", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 29, + "Name": "mulw", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64", + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A times B as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low", + "IntroducedVersion": 1, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 30, + "Name": "addw", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64", + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A plus B as a 128-bit result. 
X is the carry-bit, Y is the low-order 64 bits.", + "IntroducedVersion": 2, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 31, + "Name": "divmodw", + "Args": [ + "uint64", + "uint64", + "uint64", + "uint64" + ], + "Returns": [ + "uint64", + "uint64", + "uint64", + "uint64" + ], + "Size": 1, + "DocCost": "20", + "Doc": "W,X = (A,B / C,D); Y,Z = (A,B modulo C,D)", + "DocExtra": "The notation J,K indicates that two uint64 values J and K are interpreted as a uint128 value, with J as the high uint64 and K the low.", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 32, + "Name": "intcblock", + "Size": 0, + "DocCost": "1", + "Doc": "prepare block of uint64 constants for use by intc", + "DocExtra": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.", + "ImmediateNote": [ + { + "Comment": "a block of int constant values", + "Encoding": "varuint count, [varuint ...]", + "Name": "UINT ..." 
+ } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 33, + "Name": "intc", + "Returns": [ + "uint64" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Ith constant from intcblock", + "ImmediateNote": [ + { + "Comment": "an index in the intcblock", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 34, + "Name": "intc_0", + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 0 from intcblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 35, + "Name": "intc_1", + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 1 from intcblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 36, + "Name": "intc_2", + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 2 from intcblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 37, + "Name": "intc_3", + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 3 from intcblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 38, + "Name": "bytecblock", + "Size": 0, + "DocCost": "1", + "Doc": "prepare block of byte-array constants for use by bytec", + "DocExtra": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.", + "ImmediateNote": [ + { + "Comment": "a block of byte constant values", + "Encoding": "varuint count, [varuint length, bytes ...]", + "Name": "BYTES ..." 
+ } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 39, + "Name": "bytec", + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Ith constant from bytecblock", + "ImmediateNote": [ + { + "Comment": "an index in the bytecblock", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 40, + "Name": "bytec_0", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 0 from bytecblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 41, + "Name": "bytec_1", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 1 from bytecblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 42, + "Name": "bytec_2", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 2 from bytecblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 43, + "Name": "bytec_3", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "constant 3 from bytecblock", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 44, + "Name": "arg", + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Nth LogicSig argument", + "ImmediateNote": [ + { + "Comment": "an arg index", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 45, + "Name": "arg_0", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "LogicSig argument 0", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 46, + "Name": "arg_1", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "LogicSig argument 1", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 47, + "Name": "arg_2", + "Returns": [ + 
"[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "LogicSig argument 2", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 48, + "Name": "arg_3", + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "LogicSig argument 3", + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 49, + "Name": "txn", + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "Sender", + "Fee", + "FirstValid", + "FirstValidTime", + "LastValid", + "Note", + "Lease", + "Receiver", + "Amount", + "CloseRemainderTo", + "VotePK", + "SelectionPK", + "VoteFirst", + "VoteLast", + "VoteKeyDilution", + "Type", + "TypeEnum", + "XferAsset", + "AssetAmount", + "AssetSender", + "AssetReceiver", + "AssetCloseTo", + "GroupIndex", + "TxID", + "ApplicationID", + "OnCompletion", + "ApplicationArgs", + "NumAppArgs", + "Accounts", + "NumAccounts", + "ApprovalProgram", + "ClearStateProgram", + "RekeyTo", + "ConfigAsset", + "ConfigAssetTotal", + "ConfigAssetDecimals", + "ConfigAssetDefaultFrozen", + "ConfigAssetUnitName", + "ConfigAssetName", + "ConfigAssetURL", + "ConfigAssetMetadataHash", + "ConfigAssetManager", + "ConfigAssetReserve", + "ConfigAssetFreeze", + "ConfigAssetClawback", + "FreezeAsset", + "FreezeAssetAccount", + "FreezeAssetFrozen", + "Assets", + "NumAssets", + "Applications", + "NumApplications", + "GlobalNumUint", + "GlobalNumByteSlice", + "LocalNumUint", + "LocalNumByteSlice", + "ExtraProgramPages", + "Nonparticipation", + "Logs", + "NumLogs", + "CreatedAssetID", + "CreatedApplicationID", + "LastLog", + "StateProofPK", + "ApprovalProgramPages", + "NumApprovalProgramPages", + "ClearStateProgramPages", + "NumClearStateProgramPages" + ], + "ArgEnumTypes": [ + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "[]byte", + "[32]byte", + "address", + "uint64", + "address", + "[32]byte", + "[32]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "uint64", + "uint64", + "uint64", + "address", + 
"address", + "address", + "uint64", + "[32]byte", + "uint64", + "uint64", + "[]byte", + "uint64", + "address", + "uint64", + "[]byte", + "[]byte", + "address", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "uint64", + "address", + "bool", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte", + "uint64", + "[]byte", + "uint64" + ], + "DocCost": "1", + "Doc": "field F of current transaction", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txn" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 50, + "Name": "global", + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "MinTxnFee", + "MinBalance", + "MaxTxnLife", + "ZeroAddress", + "GroupSize", + "LogicSigVersion", + "Round", + "LatestTimestamp", + "CurrentApplicationID", + "CreatorAddress", + "CurrentApplicationAddress", + "GroupID", + "OpcodeBudget", + "CallerApplicationID", + "CallerApplicationAddress", + "AssetCreateMinBalance", + "AssetOptInMinBalance", + "GenesisHash", + "PayoutsEnabled", + "PayoutsGoOnlineFee", + "PayoutsPercent", + "PayoutsMinBalance", + "PayoutsMaxBalance" + ], + "ArgEnumTypes": [ + "uint64", + "uint64", + "uint64", + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "address", + "address", + "[32]byte", + "uint64", + "uint64", + "address", + "uint64", + "uint64", + "[32]byte", + "bool", + "uint64", + "uint64", + "uint64", + "uint64" + ], + "DocCost": "1", + "Doc": "global field F", + "ImmediateNote": [ + { + "Comment": "a global field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "global" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 51, + 
"Name": "gtxn", + "Returns": [ + "any" + ], + "Size": 3, + "ArgEnum": [ + "Sender", + "Fee", + "FirstValid", + "FirstValidTime", + "LastValid", + "Note", + "Lease", + "Receiver", + "Amount", + "CloseRemainderTo", + "VotePK", + "SelectionPK", + "VoteFirst", + "VoteLast", + "VoteKeyDilution", + "Type", + "TypeEnum", + "XferAsset", + "AssetAmount", + "AssetSender", + "AssetReceiver", + "AssetCloseTo", + "GroupIndex", + "TxID", + "ApplicationID", + "OnCompletion", + "ApplicationArgs", + "NumAppArgs", + "Accounts", + "NumAccounts", + "ApprovalProgram", + "ClearStateProgram", + "RekeyTo", + "ConfigAsset", + "ConfigAssetTotal", + "ConfigAssetDecimals", + "ConfigAssetDefaultFrozen", + "ConfigAssetUnitName", + "ConfigAssetName", + "ConfigAssetURL", + "ConfigAssetMetadataHash", + "ConfigAssetManager", + "ConfigAssetReserve", + "ConfigAssetFreeze", + "ConfigAssetClawback", + "FreezeAsset", + "FreezeAssetAccount", + "FreezeAssetFrozen", + "Assets", + "NumAssets", + "Applications", + "NumApplications", + "GlobalNumUint", + "GlobalNumByteSlice", + "LocalNumUint", + "LocalNumByteSlice", + "ExtraProgramPages", + "Nonparticipation", + "Logs", + "NumLogs", + "CreatedAssetID", + "CreatedApplicationID", + "LastLog", + "StateProofPK", + "ApprovalProgramPages", + "NumApprovalProgramPages", + "ClearStateProgramPages", + "NumClearStateProgramPages" + ], + "ArgEnumTypes": [ + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "[]byte", + "[32]byte", + "address", + "uint64", + "address", + "[32]byte", + "[32]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "uint64", + "uint64", + "uint64", + "address", + "address", + "address", + "uint64", + "[32]byte", + "uint64", + "uint64", + "[]byte", + "uint64", + "address", + "uint64", + "[]byte", + "[]byte", + "address", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "uint64", + "address", + "bool", + "uint64", + "uint64", + 
"uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte", + "uint64", + "[]byte", + "uint64" + ], + "DocCost": "1", + "Doc": "field F of the Tth transaction in the current group", + "DocExtra": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txn" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 52, + "Name": "load", + "Returns": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Ith scratch space value. All scratch spaces are 0 at program start.", + "ImmediateNote": [ + { + "Comment": "position in scratch space to load from", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 53, + "Name": "store", + "Args": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "store A to the Ith scratch space", + "ImmediateNote": [ + { + "Comment": "position in scratch space to store to", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 54, + "Name": "txna", + "Returns": [ + "any" + ], + "Size": 3, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ith value of the array field F of the current transaction\n`txna` can be called using `txn` with 2 immediates.", + "ImmediateNote": [ + { + "Comment": "transaction field index", + 
"Encoding": "uint8", + "Name": "F", + "Reference": "txna" + }, + { + "Comment": "transaction field array index", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 55, + "Name": "gtxna", + "Returns": [ + "any" + ], + "Size": 4, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ith value of the array field F from the Tth transaction in the current group\n`gtxna` can be called using `gtxn` with 3 immediates.", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + }, + { + "Comment": "transaction field array index", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 56, + "Name": "gtxns", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "Sender", + "Fee", + "FirstValid", + "FirstValidTime", + "LastValid", + "Note", + "Lease", + "Receiver", + "Amount", + "CloseRemainderTo", + "VotePK", + "SelectionPK", + "VoteFirst", + "VoteLast", + "VoteKeyDilution", + "Type", + "TypeEnum", + "XferAsset", + "AssetAmount", + "AssetSender", + "AssetReceiver", + "AssetCloseTo", + "GroupIndex", + "TxID", + "ApplicationID", + "OnCompletion", + "ApplicationArgs", + "NumAppArgs", + "Accounts", + "NumAccounts", + "ApprovalProgram", + "ClearStateProgram", + "RekeyTo", + "ConfigAsset", + "ConfigAssetTotal", + "ConfigAssetDecimals", + "ConfigAssetDefaultFrozen", + "ConfigAssetUnitName", + "ConfigAssetName", + "ConfigAssetURL", + "ConfigAssetMetadataHash", + "ConfigAssetManager", + "ConfigAssetReserve", + 
"ConfigAssetFreeze", + "ConfigAssetClawback", + "FreezeAsset", + "FreezeAssetAccount", + "FreezeAssetFrozen", + "Assets", + "NumAssets", + "Applications", + "NumApplications", + "GlobalNumUint", + "GlobalNumByteSlice", + "LocalNumUint", + "LocalNumByteSlice", + "ExtraProgramPages", + "Nonparticipation", + "Logs", + "NumLogs", + "CreatedAssetID", + "CreatedApplicationID", + "LastLog", + "StateProofPK", + "ApprovalProgramPages", + "NumApprovalProgramPages", + "ClearStateProgramPages", + "NumClearStateProgramPages" + ], + "ArgEnumTypes": [ + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "[]byte", + "[32]byte", + "address", + "uint64", + "address", + "[32]byte", + "[32]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "uint64", + "uint64", + "uint64", + "address", + "address", + "address", + "uint64", + "[32]byte", + "uint64", + "uint64", + "[]byte", + "uint64", + "address", + "uint64", + "[]byte", + "[]byte", + "address", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "uint64", + "address", + "bool", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte", + "uint64", + "[]byte", + "uint64" + ], + "DocCost": "1", + "Doc": "field F of the Ath transaction in the current group", + "DocExtra": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. 
gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txn" + } + ], + "IntroducedVersion": 3, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 57, + "Name": "gtxnsa", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 3, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ith value of the array field F from the Ath transaction in the current group\n`gtxnsa` can be called using `gtxns` with 2 immediates.", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + }, + { + "Comment": "transaction field array index", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 3, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 58, + "Name": "gload", + "Returns": [ + "any" + ], + "Size": 3, + "DocCost": "1", + "Doc": "Ith scratch space value of the Tth transaction in the current group", + "DocExtra": "`gload` fails unless the requested transaction is an ApplicationCall and T \u003c GroupIndex.", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, + { + "Comment": "position in scratch space to load from", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 4, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 59, + "Name": "gloads", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Ith scratch space value of the Ath transaction in the current group", + "DocExtra": "`gloads` fails unless the requested transaction is an ApplicationCall and A \u003c 
GroupIndex.", + "ImmediateNote": [ + { + "Comment": "position in scratch space to load from", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 4, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 60, + "Name": "gaid", + "Returns": [ + "uint64" + ], + "Size": 2, + "DocCost": "1", + "Doc": "ID of the asset or application created in the Tth transaction of the current group", + "DocExtra": "`gaid` fails unless the requested transaction created an asset or application and T \u003c GroupIndex.", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + } + ], + "IntroducedVersion": 4, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 61, + "Name": "gaids", + "Args": [ + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "ID of the asset or application created in the Ath transaction of the current group", + "DocExtra": "`gaids` fails unless the requested transaction created an asset or application and A \u003c GroupIndex.", + "IntroducedVersion": 4, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 62, + "Name": "loads", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Ath scratch space value. All scratch spaces are 0 at program start.", + "IntroducedVersion": 5, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 63, + "Name": "stores", + "Args": [ + "uint64", + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "store B to the Ath scratch space", + "IntroducedVersion": 5, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 64, + "Name": "bnz", + "Args": [ + "uint64" + ], + "Size": 3, + "DocCost": "1", + "Doc": "branch to TARGET if value A is not zero", + "DocExtra": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. 
For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Starting at v4, the offset is treated as a signed 16 bit integer allowing for backward branches and looping. In prior version (v1 to v3), branch offsets are limited to forward branches only, 0-0x7fff.\n\nAt v2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before v2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)", + "ImmediateNote": [ + { + "Comment": "branch offset", + "Encoding": "int16 (big-endian)", + "Name": "TARGET" + } + ], + "IntroducedVersion": 1, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 65, + "Name": "bz", + "Args": [ + "uint64" + ], + "Size": 3, + "DocCost": "1", + "Doc": "branch to TARGET if value A is zero", + "DocExtra": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.", + "ImmediateNote": [ + { + "Comment": "branch offset", + "Encoding": "int16 (big-endian)", + "Name": "TARGET" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 66, + "Name": "b", + "Size": 3, + "DocCost": "1", + "Doc": "branch unconditionally to TARGET", + "DocExtra": "See `bnz` for details on how branches work. 
`b` always jumps to the offset.", + "ImmediateNote": [ + { + "Comment": "branch offset", + "Encoding": "int16 (big-endian)", + "Name": "TARGET" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 67, + "Name": "return", + "Args": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "use A as success value; end", + "IntroducedVersion": 2, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 68, + "Name": "assert", + "Args": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "immediately fail unless A is a non-zero number", + "IntroducedVersion": 3, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 69, + "Name": "bury", + "Args": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "replace the Nth value from the top of the stack with A. bury 0 fails.", + "ImmediateNote": [ + { + "Comment": "depth", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 70, + "Name": "popn", + "Size": 2, + "DocCost": "1", + "Doc": "remove N values from the top of the stack", + "ImmediateNote": [ + { + "Comment": "stack depth", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 71, + "Name": "dupn", + "Args": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "duplicate A, N times", + "ImmediateNote": [ + { + "Comment": "copy count", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 72, + "Name": "pop", + "Args": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "discard A", + "IntroducedVersion": 1, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 73, + "Name": "dup", + "Args": [ + "any" + ], + "Returns": [ + "any", + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "duplicate A", + "IntroducedVersion": 1, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 74, + "Name": 
"dup2", + "Args": [ + "any", + "any" + ], + "Returns": [ + "any", + "any", + "any", + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "duplicate A and B", + "IntroducedVersion": 2, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 75, + "Name": "dig", + "Args": [ + "any" + ], + "Returns": [ + "any", + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Nth value from the top of the stack. dig 0 is equivalent to dup", + "ImmediateNote": [ + { + "Comment": "depth", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 3, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 76, + "Name": "swap", + "Args": [ + "any", + "any" + ], + "Returns": [ + "any", + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "swaps A and B on stack", + "IntroducedVersion": 3, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 77, + "Name": "select", + "Args": [ + "any", + "any", + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "selects one of two values based on top-of-stack: B if C != 0, else A", + "IntroducedVersion": 3, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 78, + "Name": "cover", + "Args": [ + "any" + ], + "Returns": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "remove top of stack, and place it deeper in the stack such that N elements are above it. Fails if stack depth \u003c= N.", + "ImmediateNote": [ + { + "Comment": "depth", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 79, + "Name": "uncover", + "Args": [ + "any" + ], + "Returns": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "remove the value at depth N in the stack and shift above items down so the Nth deep value is on top of the stack. 
Fails if stack depth \u003c= N.", + "ImmediateNote": [ + { + "Comment": "depth", + "Encoding": "uint8", + "Name": "N" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 80, + "Name": "concat", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "join A and B", + "DocExtra": "`concat` fails if the result would be greater than 4096 bytes.", + "IntroducedVersion": 2, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 81, + "Name": "substring", + "Args": [ + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 3, + "DocCost": "1", + "Doc": "A range of bytes from A starting at S up to but not including E. If E \u003c S, or either is larger than the array length, the program fails", + "ImmediateNote": [ + { + "Comment": "start position", + "Encoding": "uint8", + "Name": "S" + }, + { + "Comment": "end position", + "Encoding": "uint8", + "Name": "E" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 82, + "Name": "substring3", + "Args": [ + "[]byte", + "uint64", + "uint64" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A range of bytes from A starting at B up to but not including C. If C \u003c B, or either is larger than the array length, the program fails", + "IntroducedVersion": 2, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 83, + "Name": "getbit", + "Args": [ + "any", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Bth bit of (byte-array or integer) A. 
If B is greater than or equal to the bit length of the value (8*byte length), the program fails", + "DocExtra": "see explanation of bit ordering in setbit", + "IntroducedVersion": 3, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 84, + "Name": "setbit", + "Args": [ + "any", + "uint64", + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Copy of (byte-array or integer) A, with the Bth bit set to (0 or 1) C. If B is greater than or equal to the bit length of the value (8*byte length), the program fails", + "DocExtra": "When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on the integer 0 yields 8, or 2^3. When A is a byte array, index 0 is the leftmost bit of the leftmost byte. Setting bits 0 through 11 to 1 in a 4-byte-array of 0s yields the byte array 0xfff00000. Setting bit 3 to 1 on the 1-byte-array 0x00 yields the byte array 0x10.", + "IntroducedVersion": 3, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 85, + "Name": "getbyte", + "Args": [ + "[]byte", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Bth byte of A, as an integer. If B is greater than or equal to the array length, the program fails", + "IntroducedVersion": 3, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 86, + "Name": "setbyte", + "Args": [ + "[]byte", + "uint64", + "uint64" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Copy of A with the Bth byte set to small integer (between 0..255) C. If B is greater than or equal to the array length, the program fails", + "IntroducedVersion": 3, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 87, + "Name": "extract", + "Args": [ + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 3, + "DocCost": "1", + "Doc": "A range of bytes from A starting at S up to but not including S+L. If L is 0, then extract to the end of the string. 
If S or S+L is larger than the array length, the program fails", + "ImmediateNote": [ + { + "Comment": "start position", + "Encoding": "uint8", + "Name": "S" + }, + { + "Comment": "length", + "Encoding": "uint8", + "Name": "L" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 88, + "Name": "extract3", + "Args": [ + "[]byte", + "uint64", + "uint64" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A range of bytes from A starting at B up to but not including B+C. If B+C is larger than the array length, the program fails\n`extract3` can be called using `extract` with no immediates.", + "IntroducedVersion": 5, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 89, + "Name": "extract_uint16", + "Args": [ + "[]byte", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A uint16 formed from a range of big-endian bytes from A starting at B up to but not including B+2. If B+2 is larger than the array length, the program fails", + "IntroducedVersion": 5, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 90, + "Name": "extract_uint32", + "Args": [ + "[]byte", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A uint32 formed from a range of big-endian bytes from A starting at B up to but not including B+4. If B+4 is larger than the array length, the program fails", + "IntroducedVersion": 5, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 91, + "Name": "extract_uint64", + "Args": [ + "[]byte", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A uint64 formed from a range of big-endian bytes from A starting at B up to but not including B+8. 
If B+8 is larger than the array length, the program fails", + "IntroducedVersion": 5, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 92, + "Name": "replace2", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Copy of A with the bytes starting at S replaced by the bytes of B. Fails if S+len(B) exceeds len(A)\n`replace2` can be called using `replace` with 1 immediate.", + "ImmediateNote": [ + { + "Comment": "start position", + "Encoding": "uint8", + "Name": "S" + } + ], + "IntroducedVersion": 7, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 93, + "Name": "replace3", + "Args": [ + "[]byte", + "uint64", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Copy of A with the bytes starting at B replaced by the bytes of C. Fails if B+len(C) exceeds len(A)\n`replace3` can be called using `replace` with no immediates.", + "IntroducedVersion": 7, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 94, + "Name": "base64_decode", + "Args": [ + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 2, + "ArgEnum": [ + "URLEncoding", + "StdEncoding" + ], + "ArgEnumTypes": [ + "any", + "any" + ], + "DocCost": "1 + 1 per 16 bytes of A", + "Doc": "decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E", + "DocExtra": "*Warning*: Usage should be restricted to very rare use cases. In almost all cases, smart contracts should directly handle non-encoded byte-strings.\tThis opcode should only be used in cases where base64 is the only available option, e.g. interoperability with a third-party that only signs base64 strings.\n\n Decodes A using the base64 encoding E. Specify the encoding with an immediate arg either as URL and Filename Safe (`URLEncoding`) or Standard (`StdEncoding`). See [RFC 4648 sections 4 and 5](https://rfc-editor.org/rfc/rfc4648.html#section-4). 
It is assumed that the encoding ends with the exact number of `=` padding characters as required by the RFC. When padding occurs, any unused pad bits in the encoding must be set to zero or the decoding will fail. The special cases of `\\n` and `\\r` are allowed but completely ignored. An error will result when attempting to decode a string with a character that is not in the encoding alphabet or not one of `=`, `\\r`, or `\\n`.", + "ImmediateNote": [ + { + "Comment": "encoding index", + "Encoding": "uint8", + "Name": "E", + "Reference": "base64" + } + ], + "IntroducedVersion": 7, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 95, + "Name": "json_ref", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "JSONString", + "JSONUint64", + "JSONObject" + ], + "ArgEnumTypes": [ + "[]byte", + "uint64", + "[]byte" + ], + "DocCost": "25 + 2 per 7 bytes of A", + "Doc": "key B's value, of type R, from a [valid](jsonspec.md) utf-8 encoded json object A", + "DocExtra": "*Warning*: Usage should be restricted to very rare use cases, as JSON decoding is expensive and quite limited. In addition, JSON objects are large and not optimized for size.\n\nAlmost all smart contracts should use simpler and smaller methods (such as the [ABI](https://arc.algorand.foundation/ARCs/arc-0004)). This opcode should only be used in cases where JSON is the only available option, e.g. when a third-party only signs JSON.", + "ImmediateNote": [ + { + "Comment": "return type index", + "Encoding": "uint8", + "Name": "R", + "Reference": "json_ref" + } + ], + "IntroducedVersion": 7, + "Groups": [ + "Byte Array Manipulation" + ] + }, + { + "Opcode": 96, + "Name": "balance", + "Args": [ + "any" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "balance for account A, in microalgos. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted. 
Changes caused by inner transactions are observable immediately following `itxn_submit`", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: value.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 97, + "Name": "app_opted_in", + "Args": [ + "any", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "1 if account A is opted in to application B, else 0", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). Return: 1 if opted in and 0 otherwise.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 98, + "Name": "app_local_get", + "Args": [ + "any", + "stateKey" + ], + "Returns": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "local state of the key B in the current application in account A", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key. Return: value. The value is zero (of type uint64) if the key does not exist.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 99, + "Name": "app_local_get_ex", + "Args": [ + "any", + "uint64", + "stateKey" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "X is the local state of application B, key C in account A. Y is 1 if key existed, else 0", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. 
The value is zero (of type uint64) if the key does not exist.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 100, + "Name": "app_global_get", + "Args": [ + "stateKey" + ], + "Returns": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "global state of the key A in the current application", + "DocExtra": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 101, + "Name": "app_global_get_ex", + "Args": [ + "uint64", + "stateKey" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "X is the global state of application A, key B. Y is 1 if key existed, else 0", + "DocExtra": "params: Txn.ForeignApps offset (or, since v4, an _available_ application id), state key. Return: did_exist flag (top of the stack, 1 if the application and key existed and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 102, + "Name": "app_local_put", + "Args": [ + "any", + "stateKey", + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "write C to key B in account A's local state of the current application", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), state key, value.", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 103, + "Name": "app_global_put", + "Args": [ + "stateKey", + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "write B to key A in the global state of the current application", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 104, + "Name": "app_local_del", + "Args": [ + "any", + "stateKey" + ], + "Size": 1, + "DocCost": "1", + "Doc": "delete key B from account A's local state of the current application", + "DocExtra": "params: Txn.Accounts offset (or, 
since v4, an _available_ account address), state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 105, + "Name": "app_global_del", + "Args": [ + "stateKey" + ], + "Size": 1, + "DocCost": "1", + "Doc": "delete key A from the global state of the current application", + "DocExtra": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)", + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 112, + "Name": "asset_holding_get", + "Args": [ + "any", + "uint64" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 2, + "ArgEnum": [ + "AssetBalance", + "AssetFrozen" + ], + "ArgEnumTypes": [ + "uint64", + "bool" + ], + "DocCost": "1", + "Doc": "X is field F from account A's holding of asset B. Y is 1 if A is opted into B, else 0", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ address), asset id (or, since v4, a Txn.ForeignAssets offset). 
Return: did_exist flag (1 if the asset existed and 0 otherwise), value.", + "ImmediateNote": [ + { + "Comment": "asset holding field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "asset_holding" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 113, + "Name": "asset_params_get", + "Args": [ + "uint64" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 2, + "ArgEnum": [ + "AssetTotal", + "AssetDecimals", + "AssetDefaultFrozen", + "AssetUnitName", + "AssetName", + "AssetURL", + "AssetMetadataHash", + "AssetManager", + "AssetReserve", + "AssetFreeze", + "AssetClawback", + "AssetCreator" + ], + "ArgEnumTypes": [ + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "address" + ], + "DocCost": "1", + "Doc": "X is field F from asset A. Y is 1 if A exists, else 0", + "DocExtra": "params: Txn.ForeignAssets offset (or, since v4, an _available_ asset id). Return: did_exist flag (1 if the asset existed and 0 otherwise), value.", + "ImmediateNote": [ + { + "Comment": "asset params field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "asset_params" + } + ], + "IntroducedVersion": 2, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 114, + "Name": "app_params_get", + "Args": [ + "uint64" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 2, + "ArgEnum": [ + "AppApprovalProgram", + "AppClearStateProgram", + "AppGlobalNumUint", + "AppGlobalNumByteSlice", + "AppLocalNumUint", + "AppLocalNumByteSlice", + "AppExtraProgramPages", + "AppCreator", + "AppAddress" + ], + "ArgEnumTypes": [ + "[]byte", + "[]byte", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "address", + "address" + ], + "DocCost": "1", + "Doc": "X is field F from app A. Y is 1 if A exists, else 0", + "DocExtra": "params: Txn.ForeignApps offset or an _available_ app id. 
Return: did_exist flag (1 if the application existed and 0 otherwise), value.", + "ImmediateNote": [ + { + "Comment": "app params field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "app_params" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 115, + "Name": "acct_params_get", + "Args": [ + "any" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 2, + "ArgEnum": [ + "AcctBalance", + "AcctMinBalance", + "AcctAuthAddr", + "AcctTotalNumUint", + "AcctTotalNumByteSlice", + "AcctTotalExtraAppPages", + "AcctTotalAppsCreated", + "AcctTotalAppsOptedIn", + "AcctTotalAssetsCreated", + "AcctTotalAssets", + "AcctTotalBoxes", + "AcctTotalBoxBytes", + "AcctIncentiveEligible", + "AcctLastProposed", + "AcctLastHeartbeat" + ], + "ArgEnumTypes": [ + "uint64", + "uint64", + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "uint64", + "uint64" + ], + "DocCost": "1", + "Doc": "X is field F from account A. Y is 1 if A owns positive algos, else 0", + "ImmediateNote": [ + { + "Comment": "account params field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "acct_params" + } + ], + "IntroducedVersion": 6, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 116, + "Name": "voter_params_get", + "Args": [ + "any" + ], + "Returns": [ + "any", + "bool" + ], + "Size": 2, + "DocCost": "1", + "Doc": "X is field F from online account A as of the balance round: 320 rounds before the current round. 
Y is 1 if A had positive algos online in the agreement round, else Y is 0 and X is a type specific zero-value", + "ImmediateNote": [ + { + "Comment": "voter params field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "voter_params" + } + ], + "IntroducedVersion": 11, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 117, + "Name": "online_stake", + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "the total online stake in the agreement round", + "IntroducedVersion": 11, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 120, + "Name": "min_balance", + "Args": [ + "any" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "minimum required balance for account A, in microalgos. Required balance is affected by ASA, App, and Box usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes. Changes caused by inner transactions or box usage are observable immediately following the opcode effecting the change.", + "DocExtra": "params: Txn.Accounts offset (or, since v4, an _available_ account address), _available_ application id (or, since v4, a Txn.ForeignApps offset). 
Return: value.", + "IntroducedVersion": 3, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 128, + "Name": "pushbytes", + "Returns": [ + "[]byte" + ], + "Size": 0, + "DocCost": "1", + "Doc": "immediate BYTES", + "DocExtra": "pushbytes args are not added to the bytecblock during assembly processes", + "ImmediateNote": [ + { + "Comment": "a byte constant", + "Encoding": "varuint length, bytes", + "Name": "BYTES" + } + ], + "IntroducedVersion": 3, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 129, + "Name": "pushint", + "Returns": [ + "uint64" + ], + "Size": 0, + "DocCost": "1", + "Doc": "immediate UINT", + "DocExtra": "pushint args are not added to the intcblock during assembly processes", + "ImmediateNote": [ + { + "Comment": "an int constant", + "Encoding": "varuint", + "Name": "UINT" + } + ], + "IntroducedVersion": 3, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 130, + "Name": "pushbytess", + "Size": 0, + "DocCost": "1", + "Doc": "push sequences of immediate byte arrays to stack (first byte array being deepest)", + "DocExtra": "pushbytess args are not added to the bytecblock during assembly processes", + "ImmediateNote": [ + { + "Comment": "a list of byte constants", + "Encoding": "varuint count, [varuint length, bytes ...]", + "Name": "BYTES ..." + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 131, + "Name": "pushints", + "Size": 0, + "DocCost": "1", + "Doc": "push sequence of immediate uints to stack in the order they appear (first uint being deepest)", + "DocExtra": "pushints args are not added to the intcblock during assembly processes", + "ImmediateNote": [ + { + "Comment": "a list of int constants", + "Encoding": "varuint count, [varuint ...]", + "Name": "UINT ..." 
+ } + ], + "IntroducedVersion": 8, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 132, + "Name": "ed25519verify_bare", + "Args": [ + "[]byte", + "[64]byte", + "[32]byte" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1900", + "Doc": "for (data A, signature B, pubkey C) verify the signature of the data against the pubkey =\u003e {0 or 1}", + "IntroducedVersion": 7, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 133, + "Name": "falcon_verify", + "Args": [ + "[]byte", + "[1232]byte", + "[1793]byte" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1700", + "Doc": "for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey", + "IntroducedVersion": 11, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 134, + "Name": "sumhash512", + "Args": [ + "[]byte" + ], + "Returns": [ + "[64]byte" + ], + "Size": 1, + "DocCost": "150 + 7 per 4 bytes of A", + "Doc": "sumhash512 of value A, yields [64]byte", + "IntroducedVersion": 11, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 136, + "Name": "callsub", + "Size": 3, + "DocCost": "1", + "Doc": "branch unconditionally to TARGET, saving the next instruction on the call stack", + "DocExtra": "The call stack is separate from the data stack. 
Only `callsub`, `retsub`, and `proto` manipulate it.", + "ImmediateNote": [ + { + "Comment": "branch offset", + "Encoding": "int16 (big-endian)", + "Name": "TARGET" + } + ], + "IntroducedVersion": 4, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 137, + "Name": "retsub", + "Size": 1, + "DocCost": "1", + "Doc": "pop the top instruction from the call stack and branch to it", + "DocExtra": "If the current frame was prepared by `proto A R`, `retsub` will remove the 'A' arguments from the stack, move the `R` return values down, and pop any stack locations above the relocated return values.", + "IntroducedVersion": 4, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 138, + "Name": "proto", + "Size": 3, + "DocCost": "1", + "Doc": "Prepare top call frame for a retsub that will assume A args and R return values.", + "DocExtra": "Fails unless the last instruction executed was a `callsub`.", + "ImmediateNote": [ + { + "Comment": "number of arguments", + "Encoding": "uint8", + "Name": "A" + }, + { + "Comment": "number of return values", + "Encoding": "uint8", + "Name": "R" + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 139, + "Name": "frame_dig", + "Returns": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Nth (signed) value from the frame pointer.", + "ImmediateNote": [ + { + "Comment": "frame slot", + "Encoding": "int8", + "Name": "I" + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 140, + "Name": "frame_bury", + "Args": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "replace the Nth (signed) value from the frame pointer in the stack with A", + "ImmediateNote": [ + { + "Comment": "frame slot", + "Encoding": "int8", + "Name": "I" + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 141, + "Name": "switch", + "Args": [ + "uint64" + ], + "Size": 0, + "DocCost": "1", + "Doc": "branch to the Ath label. 
Continue at following instruction if index A exceeds the number of labels.", + "ImmediateNote": [ + { + "Comment": "list of labels", + "Encoding": "varuint count, [int16 (big-endian) ...]", + "Name": "TARGET ..." + } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 142, + "Name": "match", + "Size": 0, + "DocCost": "1", + "Doc": "given match cases from A[1] to A[N], branch to the Ith label where A[I] = B. Continue to the following instruction if no matches are found.", + "DocExtra": "`match` consumes N+1 values from the stack. Let the top stack value be B. The following N values represent an ordered list of match cases/constants (A), where the first value (A[0]) is the deepest in the stack. The immediate arguments are an ordered list of N labels (T). `match` will branch to target T[I], where A[I] = B. If there are no matches then execution continues on to the next instruction.", + "ImmediateNote": [ + { + "Comment": "list of labels", + "Encoding": "varuint count, [int16 (big-endian) ...]", + "Name": "TARGET ..." 
+ } + ], + "IntroducedVersion": 8, + "Groups": [ + "Flow Control" + ] + }, + { + "Opcode": 144, + "Name": "shl", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A times 2^B, modulo 2^64", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 145, + "Name": "shr", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A divided by 2^B", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 146, + "Name": "sqrt", + "Args": [ + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "4", + "Doc": "The largest integer I such that I^2 \u003c= A", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 147, + "Name": "bitlen", + "Args": [ + "any" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "The highest set bit in A. If A is a byte-array, it is interpreted as a big-endian unsigned integer. bitlen of 0 is 0, bitlen of 8 is 4", + "DocExtra": "bitlen interprets arrays as big-endian integers, unlike setbit/getbit", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 148, + "Name": "exp", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A raised to the Bth power. Fail if A == B == 0 and on overflow", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 149, + "Name": "expw", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "uint64", + "uint64" + ], + "Size": 1, + "DocCost": "10", + "Doc": "A raised to the Bth power as a 128-bit result in two uint64s. X is the high 64 bits, Y is the low. 
Fail if A == B == 0 or if the result exceeds 2^128-1", + "IntroducedVersion": 4, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 150, + "Name": "bsqrt", + "Args": [ + "bigint" + ], + "Returns": [ + "bigint" + ], + "Size": 1, + "DocCost": "40", + "Doc": "The largest integer I such that I^2 \u003c= A. A and I are interpreted as big-endian unsigned integers", + "IntroducedVersion": 6, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 151, + "Name": "divw", + "Args": [ + "uint64", + "uint64", + "uint64" + ], + "Returns": [ + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "A,B / C. Fail if C == 0 or if result overflows.", + "DocExtra": "The notation A,B indicates that A and B are interpreted as a uint128 value, with A as the high uint64 and B the low.", + "IntroducedVersion": 6, + "Groups": [ + "Arithmetic" + ] + }, + { + "Opcode": 152, + "Name": "sha3_256", + "Args": [ + "[]byte" + ], + "Returns": [ + "[32]byte" + ], + "Size": 1, + "DocCost": "130", + "Doc": "SHA3_256 hash of value A, yields [32]byte", + "IntroducedVersion": 7, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 160, + "Name": "b+", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "10", + "Doc": "A plus B. A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 161, + "Name": "b-", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bigint" + ], + "Size": 1, + "DocCost": "10", + "Doc": "A minus B. A and B are interpreted as big-endian unsigned integers. Fail on underflow.", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 162, + "Name": "b/", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bigint" + ], + "Size": 1, + "DocCost": "20", + "Doc": "A divided by B (truncated division). A and B are interpreted as big-endian unsigned integers. 
Fail if B is zero.", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 163, + "Name": "b*", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "20", + "Doc": "A times B. A and B are interpreted as big-endian unsigned integers.", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 164, + "Name": "b\u003c", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "1 if A is less than B, else 0. A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 165, + "Name": "b\u003e", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "1 if A is greater than B, else 0. A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 166, + "Name": "b\u003c=", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "1 if A is less than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 167, + "Name": "b\u003e=", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "1 if A is greater than or equal to B, else 0. A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 168, + "Name": "b==", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "1 if A is equal to B, else 0. 
A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 169, + "Name": "b!=", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "0 if A is equal to B, else 1. A and B are interpreted as big-endian unsigned integers", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 170, + "Name": "b%", + "Args": [ + "bigint", + "bigint" + ], + "Returns": [ + "bigint" + ], + "Size": 1, + "DocCost": "20", + "Doc": "A modulo B. A and B are interpreted as big-endian unsigned integers. Fail if B is zero.", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Arithmetic" + ] + }, + { + "Opcode": 171, + "Name": "b|", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "6", + "Doc": "A bitwise-or B. A and B are zero-left extended to the greater of their lengths", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Logic" + ] + }, + { + "Opcode": 172, + "Name": "b\u0026", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "6", + "Doc": "A bitwise-and B. A and B are zero-left extended to the greater of their lengths", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Logic" + ] + }, + { + "Opcode": 173, + "Name": "b^", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "6", + "Doc": "A bitwise-xor B. 
A and B are zero-left extended to the greater of their lengths", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Logic" + ] + }, + { + "Opcode": 174, + "Name": "b~", + "Args": [ + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "4", + "Doc": "A with all bits inverted", + "IntroducedVersion": 4, + "Groups": [ + "Byte Array Logic" + ] + }, + { + "Opcode": 175, + "Name": "bzero", + "Args": [ + "uint64" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "zero filled byte-array of length A", + "IntroducedVersion": 4, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 176, + "Name": "log", + "Args": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "write A to log state of the current application", + "DocExtra": "`log` fails if called more than MaxLogCalls times in a program, or if the sum of logged bytes exceeds 1024 bytes.", + "IntroducedVersion": 5, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 177, + "Name": "itxn_begin", + "Size": 1, + "DocCost": "1", + "Doc": "begin preparation of a new inner transaction in a new transaction group", + "DocExtra": "`itxn_begin` initializes Sender to the application address; Fee to the minimum allowable, taking into account MinTxnFee and credit from overpaying in earlier transactions; FirstValid/LastValid to the values in the invoking transaction, and all other fields to zero or empty values.", + "IntroducedVersion": 5, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 178, + "Name": "itxn_field", + "Args": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "Sender", + "Fee", + "Note", + "Receiver", + "Amount", + "CloseRemainderTo", + "VotePK", + "SelectionPK", + "VoteFirst", + "VoteLast", + "VoteKeyDilution", + "Type", + "TypeEnum", + "XferAsset", + "AssetAmount", + "AssetSender", + "AssetReceiver", + "AssetCloseTo", + "ApplicationID", + "OnCompletion", + "ApplicationArgs", + "Accounts", + "ApprovalProgram", + "ClearStateProgram", + "RekeyTo", + 
"ConfigAsset", + "ConfigAssetTotal", + "ConfigAssetDecimals", + "ConfigAssetDefaultFrozen", + "ConfigAssetUnitName", + "ConfigAssetName", + "ConfigAssetURL", + "ConfigAssetMetadataHash", + "ConfigAssetManager", + "ConfigAssetReserve", + "ConfigAssetFreeze", + "ConfigAssetClawback", + "FreezeAsset", + "FreezeAssetAccount", + "FreezeAssetFrozen", + "Assets", + "Applications", + "GlobalNumUint", + "GlobalNumByteSlice", + "LocalNumUint", + "LocalNumByteSlice", + "ExtraProgramPages", + "Nonparticipation", + "StateProofPK", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "address", + "uint64", + "[]byte", + "address", + "uint64", + "address", + "[32]byte", + "[32]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "uint64", + "uint64", + "uint64", + "address", + "address", + "address", + "uint64", + "uint64", + "[]byte", + "address", + "[]byte", + "[]byte", + "address", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "uint64", + "address", + "bool", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "set field F of the current inner transaction to A", + "DocExtra": "`itxn_field` fails if A is of the wrong type for F, including a byte array of the wrong size for use as an address when F is an address field. `itxn_field` also fails if A is an account, asset, or app that is not _available_, or an attempt is made extend an array field beyond the limit imposed by consensus parameters. 
(Addresses set into asset params of acfg transactions need not be _available_.)", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txn" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 179, + "Name": "itxn_submit", + "Size": 1, + "DocCost": "1", + "Doc": "execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.", + "DocExtra": "`itxn_submit` resets the current transaction so that it can not be resubmitted. A new `itxn_begin` is required to prepare another inner transaction.", + "IntroducedVersion": 5, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 180, + "Name": "itxn", + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "Sender", + "Fee", + "FirstValid", + "FirstValidTime", + "LastValid", + "Note", + "Lease", + "Receiver", + "Amount", + "CloseRemainderTo", + "VotePK", + "SelectionPK", + "VoteFirst", + "VoteLast", + "VoteKeyDilution", + "Type", + "TypeEnum", + "XferAsset", + "AssetAmount", + "AssetSender", + "AssetReceiver", + "AssetCloseTo", + "GroupIndex", + "TxID", + "ApplicationID", + "OnCompletion", + "ApplicationArgs", + "NumAppArgs", + "Accounts", + "NumAccounts", + "ApprovalProgram", + "ClearStateProgram", + "RekeyTo", + "ConfigAsset", + "ConfigAssetTotal", + "ConfigAssetDecimals", + "ConfigAssetDefaultFrozen", + "ConfigAssetUnitName", + "ConfigAssetName", + "ConfigAssetURL", + "ConfigAssetMetadataHash", + "ConfigAssetManager", + "ConfigAssetReserve", + "ConfigAssetFreeze", + "ConfigAssetClawback", + "FreezeAsset", + "FreezeAssetAccount", + "FreezeAssetFrozen", + "Assets", + "NumAssets", + "Applications", + "NumApplications", + "GlobalNumUint", + "GlobalNumByteSlice", + "LocalNumUint", + "LocalNumByteSlice", + "ExtraProgramPages", + "Nonparticipation", + "Logs", + "NumLogs", + "CreatedAssetID", + "CreatedApplicationID", + 
"LastLog", + "StateProofPK", + "ApprovalProgramPages", + "NumApprovalProgramPages", + "ClearStateProgramPages", + "NumClearStateProgramPages" + ], + "ArgEnumTypes": [ + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "[]byte", + "[32]byte", + "address", + "uint64", + "address", + "[32]byte", + "[32]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "uint64", + "uint64", + "uint64", + "address", + "address", + "address", + "uint64", + "[32]byte", + "uint64", + "uint64", + "[]byte", + "uint64", + "address", + "uint64", + "[]byte", + "[]byte", + "address", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "uint64", + "address", + "bool", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte", + "uint64", + "[]byte", + "uint64" + ], + "DocCost": "1", + "Doc": "field F of the last inner transaction", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txn" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 181, + "Name": "itxna", + "Returns": [ + "any" + ], + "Size": 3, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ith value of the array field F of the last inner transaction", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + }, + { + "Comment": "a transaction field array index", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Inner Transactions" + 
] + }, + { + "Opcode": 182, + "Name": "itxn_next", + "Size": 1, + "DocCost": "1", + "Doc": "begin preparation of a new inner transaction in the same transaction group", + "DocExtra": "`itxn_next` initializes the transaction exactly as `itxn_begin` does", + "IntroducedVersion": 6, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 183, + "Name": "gitxn", + "Returns": [ + "any" + ], + "Size": 3, + "ArgEnum": [ + "Sender", + "Fee", + "FirstValid", + "FirstValidTime", + "LastValid", + "Note", + "Lease", + "Receiver", + "Amount", + "CloseRemainderTo", + "VotePK", + "SelectionPK", + "VoteFirst", + "VoteLast", + "VoteKeyDilution", + "Type", + "TypeEnum", + "XferAsset", + "AssetAmount", + "AssetSender", + "AssetReceiver", + "AssetCloseTo", + "GroupIndex", + "TxID", + "ApplicationID", + "OnCompletion", + "ApplicationArgs", + "NumAppArgs", + "Accounts", + "NumAccounts", + "ApprovalProgram", + "ClearStateProgram", + "RekeyTo", + "ConfigAsset", + "ConfigAssetTotal", + "ConfigAssetDecimals", + "ConfigAssetDefaultFrozen", + "ConfigAssetUnitName", + "ConfigAssetName", + "ConfigAssetURL", + "ConfigAssetMetadataHash", + "ConfigAssetManager", + "ConfigAssetReserve", + "ConfigAssetFreeze", + "ConfigAssetClawback", + "FreezeAsset", + "FreezeAssetAccount", + "FreezeAssetFrozen", + "Assets", + "NumAssets", + "Applications", + "NumApplications", + "GlobalNumUint", + "GlobalNumByteSlice", + "LocalNumUint", + "LocalNumByteSlice", + "ExtraProgramPages", + "Nonparticipation", + "Logs", + "NumLogs", + "CreatedAssetID", + "CreatedApplicationID", + "LastLog", + "StateProofPK", + "ApprovalProgramPages", + "NumApprovalProgramPages", + "ClearStateProgramPages", + "NumClearStateProgramPages" + ], + "ArgEnumTypes": [ + "address", + "uint64", + "uint64", + "uint64", + "uint64", + "[]byte", + "[32]byte", + "address", + "uint64", + "address", + "[32]byte", + "[32]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "uint64", + "uint64", + "uint64", + "address", + "address", + "address", 
+ "uint64", + "[32]byte", + "uint64", + "uint64", + "[]byte", + "uint64", + "address", + "uint64", + "[]byte", + "[]byte", + "address", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "[]byte", + "[]byte", + "[32]byte", + "address", + "address", + "address", + "address", + "uint64", + "address", + "bool", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "uint64", + "bool", + "[]byte", + "uint64", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte", + "uint64", + "[]byte", + "uint64" + ], + "DocCost": "1", + "Doc": "field F of the Tth transaction in the last inner group submitted", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txn" + } + ], + "IntroducedVersion": 6, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 184, + "Name": "gitxna", + "Returns": [ + "any" + ], + "Size": 4, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ith value of the array field F from the Tth transaction in the last inner group submitted", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + }, + { + "Comment": "transaction field array index", + "Encoding": "uint8", + "Name": "I" + } + ], + "IntroducedVersion": 6, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 185, + "Name": "box_create", + "Args": [ + "boxName", + "uint64" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "create a box named A, of length B. 
Fail if the name A is empty or B exceeds 32,768. Returns 0 if A already existed, else 1", + "DocExtra": "Newly created boxes are filled with 0 bytes. `box_create` will fail if the referenced box already exists with a different size. Otherwise, existing boxes are unchanged by `box_create`.", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 186, + "Name": "box_extract", + "Args": [ + "boxName", + "uint64", + "uint64" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "read C bytes from box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 187, + "Name": "box_replace", + "Args": [ + "boxName", + "uint64", + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "write byte-array C into box A, starting at offset B. Fail if A does not exist, or the byte range is outside A's size.", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 188, + "Name": "box_del", + "Args": [ + "boxName" + ], + "Returns": [ + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "delete box named A if it exists. Return 1 if A existed, 0 otherwise", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 189, + "Name": "box_len", + "Args": [ + "boxName" + ], + "Returns": [ + "uint64", + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "X is the length of box A if A exists, else 0. Y is 1 if A exists, else 0.", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 190, + "Name": "box_get", + "Args": [ + "boxName" + ], + "Returns": [ + "[]byte", + "bool" + ], + "Size": 1, + "DocCost": "1", + "Doc": "X is the contents of box A if A exists, else ''. 
Y is 1 if A exists, else 0.", + "DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 191, + "Name": "box_put", + "Args": [ + "boxName", + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "replaces the contents of box A with byte-array B. Fails if A exists and len(B) != len(box A). Creates A if it does not exist", + "DocExtra": "For boxes that exceed 4,096 bytes, consider `box_create`, `box_extract`, and `box_replace`", + "IntroducedVersion": 8, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 192, + "Name": "txnas", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ath value of the array field F of the current transaction", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 193, + "Name": "gtxnas", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 3, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Ath value of the array field F from the Tth transaction in the current group", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + } + ], + "IntroducedVersion": 
5, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 194, + "Name": "gtxnsas", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "ApplicationArgs", + "Accounts", + "Assets", + "Applications", + "Logs", + "ApprovalProgramPages", + "ClearStateProgramPages" + ], + "ArgEnumTypes": [ + "[]byte", + "address", + "uint64", + "uint64", + "[]byte", + "[]byte", + "[]byte" + ], + "DocCost": "1", + "Doc": "Bth value of the array field F from the Ath transaction in the current group", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + } + ], + "IntroducedVersion": 5, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 195, + "Name": "args", + "Args": [ + "uint64" + ], + "Returns": [ + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Ath LogicSig argument", + "IntroducedVersion": 5, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 196, + "Name": "gloadss", + "Args": [ + "uint64", + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 1, + "DocCost": "1", + "Doc": "Bth scratch space value of the Ath transaction in the current group", + "IntroducedVersion": 6, + "Groups": [ + "Loading Values" + ] + }, + { + "Opcode": 197, + "Name": "itxnas", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 2, + "DocCost": "1", + "Doc": "Ath value of the array field F of the last inner transaction", + "ImmediateNote": [ + { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + } + ], + "IntroducedVersion": 6, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 198, + "Name": "gitxnas", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 3, + "DocCost": "1", + "Doc": "Ath value of the array field F from the Tth transaction in the last inner group submitted", + "ImmediateNote": [ + { + "Comment": "transaction group index", + "Encoding": "uint8", + "Name": "T" + }, 
+ { + "Comment": "transaction field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "txna" + } + ], + "IntroducedVersion": 6, + "Groups": [ + "Inner Transactions" + ] + }, + { + "Opcode": 208, + "Name": "vrf_verify", + "Args": [ + "[]byte", + "[80]byte", + "[32]byte" + ], + "Returns": [ + "[64]byte", + "bool" + ], + "Size": 2, + "ArgEnum": [ + "VrfAlgorand" + ], + "DocCost": "5700", + "Doc": "Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.", + "DocExtra": "`VrfAlgorand` is the VRF used in Algorand. It is ECVRF-ED25519-SHA512-Elligator2, specified in the IETF internet draft [draft-irtf-cfrg-vrf-03](https://datatracker.ietf.org/doc/draft-irtf-cfrg-vrf/03/).", + "ImmediateNote": [ + { + "Comment": " parameters index", + "Encoding": "uint8", + "Name": "S", + "Reference": "vrf_verify" + } + ], + "IntroducedVersion": 7, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 209, + "Name": "block", + "Args": [ + "uint64" + ], + "Returns": [ + "any" + ], + "Size": 2, + "ArgEnum": [ + "BlkSeed", + "BlkTimestamp", + "BlkProposer", + "BlkFeesCollected", + "BlkBonus", + "BlkBranch", + "BlkFeeSink", + "BlkProtocol", + "BlkTxnCounter", + "BlkProposerPayout" + ], + "ArgEnumTypes": [ + "[32]byte", + "uint64", + "address", + "uint64", + "uint64", + "[32]byte", + "address", + "[]byte", + "uint64", + "uint64" + ], + "DocCost": "1", + "Doc": "field F of block A. 
Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)", + "ImmediateNote": [ + { + "Comment": " block field index", + "Encoding": "uint8", + "Name": "F", + "Reference": "block" + } + ], + "IntroducedVersion": 7, + "Groups": [ + "State Access" + ] + }, + { + "Opcode": 210, + "Name": "box_splice", + "Args": [ + "boxName", + "uint64", + "uint64", + "[]byte" + ], + "Size": 1, + "DocCost": "1", + "Doc": "set box A to contain its previous bytes up to index B, followed by D, followed by the original bytes of A that began at index B+C.", + "DocExtra": "Boxes are of constant length. If C \u003c len(D), then len(D)-C bytes will be removed from the end. If C \u003e len(D), zero bytes will be appended to the end to reach the box length.", + "IntroducedVersion": 10, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 211, + "Name": "box_resize", + "Args": [ + "boxName", + "uint64" + ], + "Size": 1, + "DocCost": "1", + "Doc": "change the size of box named A to be of length B, adding zero bytes to end or removing bytes from the end, as needed. Fail if the name A is empty, A is not an existing box, or B exceeds 32,768.", + "IntroducedVersion": 10, + "Groups": [ + "Box Access" + ] + }, + { + "Opcode": 224, + "Name": "ec_add", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "BN254g1=125; BN254g2=170; BLS12_381g1=205; BLS12_381g2=290", + "Doc": "for curve points A and B, return the curve point A + B", + "DocExtra": "A and B are curve points in affine representation: field element X concatenated with field element Y. Field element `Z` is encoded as follows.\nFor the base field elements (Fp), `Z` is encoded as a big-endian number and must be lower than the field modulus.\nFor the quadratic field extension (Fp2), `Z` is encoded as the concatenation of the individual encoding of the coefficients. 
For an Fp2 element of the form `Z = Z0 + Z1 i`, where `i` is a formal quadratic non-residue, the encoding of Z is the concatenation of the encoding of `Z0` and `Z1` in this order. (`Z0` and `Z1` must be less than the field modulus).\n\nThe point at infinity is encoded as `(X,Y) = (0,0)`.\nGroups G1 and G2 are denoted additively.\n\nFails if A or B is not in G.\nA and/or B are allowed to be the point at infinity.\nDoes _not_ check if A and B are in the main prime-order subgroup.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "G", + "Reference": "EC" + } + ], + "IntroducedVersion": 10, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 225, + "Name": "ec_scalar_mul", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "BN254g1=1810; BN254g2=3430; BLS12_381g1=2950; BLS12_381g2=6530", + "Doc": "for curve point A and scalar B, return the curve point BA, the point A multiplied by the scalar B.", + "DocExtra": "A is a curve point encoded and checked as described in `ec_add`. Scalar B is interpreted as a big-endian unsigned integer. Fails if B exceeds 32 bytes.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "G", + "Reference": "EC" + } + ], + "IntroducedVersion": 10, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 226, + "Name": "ec_pairing_check", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "bool" + ], + "Size": 2, + "DocCost": "BN254g1=8000 + 7400 per 64 bytes of B; BN254g2=8000 + 7400 per 128 bytes of B; BLS12_381g1=13000 + 10000 per 96 bytes of B; BLS12_381g2=13000 + 10000 per 192 bytes of B", + "Doc": "1 if the product of the pairing of each point in A with its respective point in B is equal to the identity element of the target group Gt, else 0", + "DocExtra": "A and B are concatenated points, encoded and checked as described in `ec_add`. 
A contains points of the group G, B contains points of the associated group (G2 if G is G1, and vice versa). Fails if A and B have a different number of points, or if any point is not in its described group or outside the main prime-order subgroup - a stronger condition than other opcodes. AVM values are limited to 4096 bytes, so `ec_pairing_check` is limited by the size of the points in the groups being operated upon.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "G", + "Reference": "EC" + } + ], + "IntroducedVersion": 10, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 227, + "Name": "ec_multi_scalar_mul", + "Args": [ + "[]byte", + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "BN254g1=3600 + 90 per 32 bytes of B; BN254g2=7200 + 270 per 32 bytes of B; BLS12_381g1=6500 + 95 per 32 bytes of B; BLS12_381g2=14850 + 485 per 32 bytes of B", + "Doc": "for curve points A and scalars B, return curve point B0A0 + B1A1 + B2A2 + ... + BnAn", + "DocExtra": "A is a list of concatenated points, encoded and checked as described in `ec_add`. B is a list of concatenated scalars which, unlike ec_scalar_mul, must all be exactly 32 bytes long.\nThe name `ec_multi_scalar_mul` was chosen to reflect common usage, but a more consistent name would be `ec_multi_scalar_mul`. AVM values are limited to 4096 bytes, so `ec_multi_scalar_mul` is limited by the size of the points in the group being operated upon.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "G", + "Reference": "EC" + } + ], + "IntroducedVersion": 10, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 228, + "Name": "ec_subgroup_check", + "Args": [ + "[]byte" + ], + "Returns": [ + "bool" + ], + "Size": 2, + "DocCost": "BN254g1=20; BN254g2=3100; BLS12_381g1=1850; BLS12_381g2=2340", + "Doc": "1 if A is in the main prime-order subgroup of G (including the point at infinity) else 0. 
Program fails if A is not in G at all.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "G", + "Reference": "EC" + } + ], + "IntroducedVersion": 10, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 229, + "Name": "ec_map_to", + "Args": [ + "[]byte" + ], + "Returns": [ + "[]byte" + ], + "Size": 2, + "DocCost": "BN254g1=630; BN254g2=3300; BLS12_381g1=1950; BLS12_381g2=8150", + "Doc": "maps field element A to group G", + "DocExtra": "BN254 points are mapped by the SVDW map. BLS12-381 points are mapped by the SSWU map.\nG1 element inputs are base field elements and G2 element inputs are quadratic field elements, with nearly the same encoding rules (for field elements) as defined in `ec_add`. There is one difference of encoding rule: G1 element inputs do not need to be 0-padded if they fit in less than 32 bytes for BN254 and less than 48 bytes for BLS12-381. (As usual, the empty byte array represents 0.) G2 elements inputs need to be always have the required size.", + "ImmediateNote": [ + { + "Comment": "curve index", + "Encoding": "uint8", + "Name": "G", + "Reference": "EC" + } + ], + "IntroducedVersion": 10, + "Groups": [ + "Cryptography" + ] + }, + { + "Opcode": 230, + "Name": "mimc", + "Args": [ + "[]byte" + ], + "Returns": [ + "[32]byte" + ], + "Size": 2, + "DocCost": "BN254Mp110=10 + 550 per 32 bytes of A; BLS12_381Mp111=10 + 550 per 32 bytes of A", + "Doc": "MiMC hash of scalars A, using curve and parameters specified by configuration C", + "DocExtra": "A is a list of concatenated 32 byte big-endian unsigned integer scalars. Fail if A's length is not a multiple of 32 or any element exceeds the curve modulus.\n\nThe MiMC hash function has known collisions since any input which is a multiple of the elliptic curve modulus will hash to the same value. 
MiMC is thus not a general purpose hash function, but meant to be used in zero knowledge applications to match a zk-circuit implementation.", + "ImmediateNote": [ + { + "Comment": "configuration index", + "Encoding": "uint8", + "Name": "C", + "Reference": "Mimc Configurations" + } + ], + "IntroducedVersion": 11, + "Groups": [ + "Cryptography" + ] + } + ] +} diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 419e937874..b283c49656 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -81,6 +81,7 @@ const spliceVersion = 10 // box splicing/resizing const incentiveVersion = 11 // block fields, heartbeat const spOpcodesVersion = 11 // falcon_verify, sumhash512 +const mimcVersion = 11 // Unlimited Global Storage opcodes const boxVersion = 8 // box_* @@ -798,6 +799,17 @@ var OpSpecs = []OpSpec{ costByField("g", &EcGroups, []int{ BN254g1: 630, BN254g2: 3_300, BLS12_381g1: 1_950, BLS12_381g2: 8_150})}, + {0xe6, "mimc", opMimc, proto("b:b{32}"), mimcVersion, costByFieldAndLength("c", &MimcConfigs, []linearCost{ + BN254Mp110: { + baseCost: 10, + chunkCost: 550, + chunkSize: 32, + }, + BLS12_381Mp111: { + baseCost: 10, + chunkCost: 550, + chunkSize: 32, + }})}, } // OpcodesByVersion returns list of opcodes available in a specific version of TEAL diff --git a/data/transactions/logic/pairing.go b/data/transactions/logic/pairing.go index fc61996e66..13df35c570 100644 --- a/data/transactions/logic/pairing.go +++ b/data/transactions/logic/pairing.go @@ -477,20 +477,18 @@ func bls12381G1MultiMulLarge(points []bls12381.G1Affine, scalarBytes []byte) ([] func bls12381G1MultiMulSmall(points []bls12381.G1Affine, scalarBytes []byte) ([]byte, error) { // There must be at least one point. Start with it, rather than the identity. 
k := new(big.Int).SetBytes(scalarBytes[:scalarSize]) - var sum bls12381.G1Jac - sum.ScalarMultiplicationAffine(&points[0], k) + var sum bls12381.G1Affine + sum.ScalarMultiplication(&points[0], k) for i := range points { if i == 0 { continue } k.SetBytes(scalarBytes[i*scalarSize : (i+1)*scalarSize]) - var prod bls12381.G1Jac - prod.ScalarMultiplicationAffine(&points[i], k) - sum.AddAssign(&prod) + var prod bls12381.G1Affine + prod.ScalarMultiplication(&points[i], k) + sum.Add(&sum, &prod) } - var res bls12381.G1Affine - res.FromJacobian(&sum) - return bls12381G1ToBytes(&res), nil + return bls12381G1ToBytes(&sum), nil } const bls12381G2MultiMulThreshold = 2 // determined by BenchmarkFindMultiMulCutoff @@ -794,20 +792,18 @@ func bn254G1MultiMulLarge(points []bn254.G1Affine, scalarBytes []byte) ([]byte, func bn254G1MultiMulSmall(points []bn254.G1Affine, scalarBytes []byte) ([]byte, error) { // There must be at least one point. Start with it, rather than the identity. k := new(big.Int).SetBytes(scalarBytes[:scalarSize]) - var sum bn254.G1Jac - sum.ScalarMultiplicationAffine(&points[0], k) + var sum bn254.G1Affine + sum.ScalarMultiplication(&points[0], k) for i := range points { if i == 0 { continue } k.SetBytes(scalarBytes[i*scalarSize : (i+1)*scalarSize]) - var prod bn254.G1Jac - prod.ScalarMultiplicationAffine(&points[i], k) - sum.AddAssign(&prod) + var prod bn254.G1Affine + prod.ScalarMultiplication(&points[i], k) + sum.Add(&sum, &prod) } - var res bn254.G1Affine - res.FromJacobian(&sum) - return bn254G1ToBytes(&res), nil + return bn254G1ToBytes(&sum), nil } const bn254G2MultiMulThreshold = 2 // determined by BenchmarkFindMultiMulCutoff diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json index 53984e8dd2..1a86c280e8 100644 --- a/data/transactions/logic/teal.tmLanguage.json +++ b/data/transactions/logic/teal.tmLanguage.json @@ -76,7 +76,7 @@ }, { "name": "keyword.operator.teal", - "match": 
"^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|divmodw|divw|exp|expw|itob|mulw|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|concat|extract|extract3|extract_uint16|extract_uint32|extract_uint64|getbit|getbyte|json_ref|len|replace2|replace3|setbit|setbyte|substring|substring3|ec_add|ec_map_to|ec_multi_scalar_mul|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|falcon_verify|keccak256|sha256|sha3_256|sha512_256|sumhash512|vrf_verify|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b" + "match": "^(\\!|\\!\\=|%|\u0026|\u0026\u0026|\\*|\\+|\\-|/|\\\u003c|\\\u003c\\=|\\=\\=|\\\u003e|\\\u003e\\=|\\^|addw|bitlen|btoi|divmodw|divw|exp|expw|itob|mulw|shl|shr|sqrt|\\||\\|\\||\\~|b\\!\\=|b%|b\\*|b\\+|b\\-|b/|b\\\u003c|b\\\u003c\\=|b\\=\\=|b\\\u003e|b\\\u003e\\=|bsqrt|b\u0026|b\\^|b\\||b\\~|base64_decode|concat|extract|extract3|extract_uint16|extract_uint32|extract_uint64|getbit|getbyte|json_ref|len|replace2|replace3|setbit|setbyte|substring|substring3|ec_add|ec_map_to|ec_multi_scalar_mul|ec_pairing_check|ec_scalar_mul|ec_subgroup_check|ecdsa_pk_decompress|ecdsa_pk_recover|ecdsa_verify|ed25519verify|ed25519verify_bare|falcon_verify|keccak256|mimc|sha256|sha3_256|sha512_256|sumhash512|vrf_verify|gitxn|gitxna|gitxnas|itxn|itxn_begin|itxn_field|itxn_next|itxn_submit|itxna|itxnas)\\b" } ] }, @@ -112,7 +112,7 @@ }, { "name": "variable.parameter.teal", - "match": 
"\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|AssetCreateMinBalance|AssetOptInMinBalance|GenesisHash|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|AcctTotalNumUint|AcctTotalNumByteSlice|AcctTotalExtraAppPages|AcctTotalAppsCreated|AcctTotalAppsOptedIn|AcctTotalAssetsCreated|AcctTotalAssets|AcctTotalBoxes|AcctTotalBoxBytes|VrfAlgorand|BlkSeed|BlkTimestamp|BN254g1
|BN254g2|BLS12_381g1|BLS12_381g2)\\b" + "match": "\\b(unknown|pay|keyreg|acfg|axfer|afrz|appl|NoOp|OptIn|CloseOut|ClearState|UpdateApplication|DeleteApplication|Secp256k1|Secp256r1|Sender|Fee|FirstValid|FirstValidTime|LastValid|Note|Lease|Receiver|Amount|CloseRemainderTo|VotePK|SelectionPK|VoteFirst|VoteLast|VoteKeyDilution|Type|TypeEnum|XferAsset|AssetAmount|AssetSender|AssetReceiver|AssetCloseTo|GroupIndex|TxID|ApplicationID|OnCompletion|NumAppArgs|NumAccounts|ApprovalProgram|ClearStateProgram|RekeyTo|ConfigAsset|ConfigAssetTotal|ConfigAssetDecimals|ConfigAssetDefaultFrozen|ConfigAssetUnitName|ConfigAssetName|ConfigAssetURL|ConfigAssetMetadataHash|ConfigAssetManager|ConfigAssetReserve|ConfigAssetFreeze|ConfigAssetClawback|FreezeAsset|FreezeAssetAccount|FreezeAssetFrozen|NumAssets|NumApplications|GlobalNumUint|GlobalNumByteSlice|LocalNumUint|LocalNumByteSlice|ExtraProgramPages|Nonparticipation|NumLogs|CreatedAssetID|CreatedApplicationID|LastLog|StateProofPK|NumApprovalProgramPages|NumClearStateProgramPages|MinTxnFee|MinBalance|MaxTxnLife|ZeroAddress|GroupSize|LogicSigVersion|Round|LatestTimestamp|CurrentApplicationID|CreatorAddress|CurrentApplicationAddress|GroupID|OpcodeBudget|CallerApplicationID|CallerApplicationAddress|AssetCreateMinBalance|AssetOptInMinBalance|GenesisHash|PayoutsEnabled|PayoutsGoOnlineFee|PayoutsPercent|PayoutsMinBalance|PayoutsMaxBalance|ApplicationArgs|Accounts|Assets|Applications|Logs|ApprovalProgramPages|ClearStateProgramPages|URLEncoding|StdEncoding|JSONString|JSONUint64|JSONObject|AssetBalance|AssetFrozen|AssetTotal|AssetDecimals|AssetDefaultFrozen|AssetUnitName|AssetName|AssetURL|AssetMetadataHash|AssetManager|AssetReserve|AssetFreeze|AssetClawback|AssetCreator|AppApprovalProgram|AppClearStateProgram|AppGlobalNumUint|AppGlobalNumByteSlice|AppLocalNumUint|AppLocalNumByteSlice|AppExtraProgramPages|AppCreator|AppAddress|AcctBalance|AcctMinBalance|AcctAuthAddr|AcctTotalNumUint|AcctTotalNumByteSlice|AcctTotalExtraAppPages|AcctTotalAppsCreated
|AcctTotalAppsOptedIn|AcctTotalAssetsCreated|AcctTotalAssets|AcctTotalBoxes|AcctTotalBoxBytes|AcctIncentiveEligible|AcctLastProposed|AcctLastHeartbeat|VoterBalance|VoterIncentiveEligible|VrfAlgorand|BlkSeed|BlkTimestamp|BlkProposer|BlkFeesCollected|BlkBonus|BlkBranch|BlkFeeSink|BlkProtocol|BlkTxnCounter|BlkProposerPayout|BN254g1|BN254g2|BLS12_381g1|BLS12_381g2|BN254Mp110|BLS12_381Mp111)\\b" } ] }, From a625d0327613e536f238ee19188e6022879dca3c Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Thu, 19 Dec 2024 15:52:00 -0500 Subject: [PATCH 08/15] CI: update reviewdog config for deprecated fail_on_error (#6196) --- .github/workflows/reviewdog.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index f967bbd916..ee70063ce3 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -19,7 +19,7 @@ jobs: - name: Make libsodium.a run: sudo mv /usr/bin/go /usr/bin/go.bak && make crypto/libs/linux/amd64/lib/libsodium.a && sudo mv /usr/bin/go.bak /usr/bin/go - name: reviewdog-golangci-lint - uses: reviewdog/action-golangci-lint@v2.6.2 + uses: reviewdog/action-golangci-lint@v2.7.0 with: go_version_file: go.mod golangci_lint_version: ${{ env.GOLANGCI_LINT_VERSION }} @@ -27,7 +27,7 @@ jobs: reporter: "github-pr-check" tool_name: "Lint Errors" level: "error" - fail_on_error: true + fail_level: any filter_mode: "nofilter" # Non-Blocking Warnings Section reviewdog-warnings: @@ -71,7 +71,7 @@ jobs: cd ../../ - name: Install reviewdog run: | - curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.20.2/install.sh | sh -s -- v0.20.2 + curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.20.3/install.sh | sh -s -- v0.20.3 reviewdog --version - name: Build custom linters run: | @@ -96,7 +96,7 @@ jobs: -name="Lint Warnings" \ -reporter=github-pr-check \ -filter-mode=added \ - -fail-on-error=true \ + 
-fail-level=any \ -level=warning - name: Slack Notification env: @@ -109,11 +109,11 @@ jobs: steps: - uses: actions/checkout@v4 - name: shellcheck - uses: reviewdog/action-shellcheck@v1 + uses: reviewdog/action-shellcheck@v1.28.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} reporter: "github-pr-check" shellcheck_flags: "-e SC2034,SC2046,SC2053,SC2207,SC2145 -S warning" - fail_on_error: true + fail_level: any path: | test/scripts/e2e_subs From fcad0bbcc035a8d253cac08e4f90c9c813c40668 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 19 Dec 2024 16:02:31 -0500 Subject: [PATCH 09/15] Eval: Feature/heartbeats (#6189) Co-authored-by: cce <51567+cce@users.noreply.github.com> Co-authored-by: Gary Malouf <982483+gmalouf@users.noreply.github.com> --- Makefile | 3 + agreement/gossip/networkFull_test.go | 2 +- agreement/selector.go | 8 +- catchup/universalFetcher.go | 2 +- cmd/goal/clerk.go | 3 +- cmd/loadgenerator/main.go | 11 +- cmd/tealdbg/localLedger.go | 4 + config/consensus.go | 11 +- config/consensus_test.go | 5 + crypto/msgp_gen.go | 236 +++++++++++ crypto/msgp_gen_test.go | 60 +++ crypto/onetimesig.go | 50 +++ daemon/algod/api/algod.oas2.json | 2 +- daemon/algod/api/algod.oas3.yml | 2 +- daemon/algod/api/client/restClient.go | 69 +++- daemon/algod/api/server/v2/dryrun.go | 4 + .../nonparticipating/public/routes.go | 190 ++++----- data/basics/userBalance.go | 15 +- data/bookkeeping/block.go | 4 +- data/bookkeeping/block_test.go | 24 +- data/committee/common_test.go | 67 +-- data/committee/credential_test.go | 21 +- data/transactions/heartbeat.go | 49 +++ data/transactions/logic/assembler.go | 10 + data/transactions/logic/crypto_test.go | 18 +- data/transactions/logic/eval_test.go | 18 +- data/transactions/logic/fields.go | 2 +- data/transactions/logic/ledger_test.go | 31 +- data/transactions/msgp_gen.go | 358 ++++++++++++++-- data/transactions/msgp_gen_test.go | 60 +++ data/transactions/stateproof.go | 8 - data/transactions/transaction.go | 57 ++- 
data/transactions/transaction_test.go | 150 ++++++- data/transactions/verify/txn.go | 19 +- data/transactions/verify/txn_test.go | 102 ++++- .../verify/verifiedTxnCache_test.go | 2 +- data/txntest/txn.go | 19 + heartbeat/README.md | 180 ++++++++ heartbeat/abstractions.go | 56 +++ heartbeat/service.go | 196 +++++++++ heartbeat/service_test.go | 300 ++++++++++++++ ledger/acctonline.go | 5 - ledger/apply/apply.go | 7 +- ledger/apply/challenge.go | 114 ++++++ ledger/apply/challenge_test.go | 121 ++++++ ledger/apply/heartbeat.go | 102 +++++ ledger/apply/heartbeat_test.go | 208 ++++++++++ ledger/apply/keyreg.go | 4 +- ledger/apply/mockBalances_test.go | 32 ++ ledger/apptxn_test.go | 6 +- ledger/eval/eval.go | 285 ++++++++----- ledger/eval/eval_test.go | 315 ++++++-------- ledger/eval/prefetcher/prefetcher.go | 4 +- .../prefetcher/prefetcher_alignment_test.go | 66 +++ ledger/eval_simple_test.go | 385 +++++++++++------- ledger/ledger.go | 49 ++- ledger/ledger_perf_test.go | 11 +- ledger/ledger_test.go | 29 ++ ledger/ledgercore/accountdata.go | 9 +- ledger/ledgercore/onlineacct.go | 2 +- ledger/ledgercore/votersForRound.go | 8 + ledger/onlineaccountscache_test.go | 9 + ledger/simple_test.go | 7 +- ledger/store/trackerdb/data.go | 11 +- ledger/store/trackerdb/data_test.go | 4 +- ledger/store/trackerdb/msgp_gen.go | 66 ++- ledger/tracker.go | 7 +- ledger/voters.go | 25 +- ledger/voters_test.go | 82 ++++ libgoal/libgoal.go | 51 +-- network/connPerfMon_test.go | 4 +- node/node.go | 9 + protocol/txntype.go | 3 + stateproof/builder.go | 2 +- stateproof/worker.go | 4 +- .../features/accountPerf/sixMillion_test.go | 5 +- .../features/catchup/basicCatchup_test.go | 19 +- .../catchup/catchpointCatchup_test.go | 18 +- .../catchup/stateproofsCatchup_test.go | 12 +- .../features/followernode/syncDeltas_test.go | 6 +- .../features/followernode/syncRestart_test.go | 2 +- .../features/incentives/challenge_test.go | 222 ++++++++++ .../features/incentives/payouts_test.go | 17 +- 
.../features/incentives/suspension_test.go | 72 +--- .../features/incentives/whalejoin_test.go | 324 +++++++++++++++ .../onlineOfflineParticipation_test.go | 4 +- .../partitionRecovery_test.go | 9 +- .../features/stateproofs/stateproofs_test.go | 1 + test/e2e-go/restAPI/other/misc_test.go | 2 +- .../restAPI/simulate/simulateRestAPI_test.go | 6 +- .../upgrades/application_support_test.go | 4 +- test/e2e-go/upgrades/rekey_support_test.go | 2 +- test/framework/fixtures/libgoalFixture.go | 126 ++---- test/framework/fixtures/restClientFixture.go | 92 ++--- test/testdata/nettemplates/Challenges.json | 60 +++ .../x-repo-types/typeAnalyzer/typeAnalyzer.go | 9 +- util/db/dbutil.go | 2 +- util/execpool/stream.go | 2 +- util/set.go | 36 ++ util/set_test.go | 75 ++++ 100 files changed, 4519 insertions(+), 1082 deletions(-) create mode 100644 data/transactions/heartbeat.go create mode 100644 heartbeat/README.md create mode 100644 heartbeat/abstractions.go create mode 100644 heartbeat/service.go create mode 100644 heartbeat/service_test.go create mode 100644 ledger/apply/challenge.go create mode 100644 ledger/apply/challenge_test.go create mode 100644 ledger/apply/heartbeat.go create mode 100644 ledger/apply/heartbeat_test.go create mode 100644 test/e2e-go/features/incentives/challenge_test.go create mode 100644 test/e2e-go/features/incentives/whalejoin_test.go create mode 100644 test/testdata/nettemplates/Challenges.json create mode 100644 util/set_test.go diff --git a/Makefile b/Makefile index b6823e665a..60606cf4c6 100644 --- a/Makefile +++ b/Makefile @@ -293,6 +293,9 @@ $(GOPATH1)/bin/%: test: build $(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 1h -coverprofile=coverage.txt -covermode=atomic +testc: + echo $(UNIT_TEST_SOURCES) | xargs -P8 -n1 go test -c + benchcheck: build $(GOTESTCOMMAND) $(GOTAGS) -race $(UNIT_TEST_SOURCES) -run ^NOTHING -bench Benchmark -benchtime 1x -timeout 1h diff --git a/agreement/gossip/networkFull_test.go 
b/agreement/gossip/networkFull_test.go index d2971a17c5..fa0133d1a7 100644 --- a/agreement/gossip/networkFull_test.go +++ b/agreement/gossip/networkFull_test.go @@ -103,7 +103,7 @@ func spinNetwork(t *testing.T, nodesCount int, cfg config.Local) ([]*networkImpl break } } - log.Infof("network established, %d nodes connected in %s", nodesCount, time.Now().Sub(start).String()) + log.Infof("network established, %d nodes connected in %s", nodesCount, time.Since(start).String()) return networkImpls, msgCounters } diff --git a/agreement/selector.go b/agreement/selector.go index 2d0f980ac3..1496027bd6 100644 --- a/agreement/selector.go +++ b/agreement/selector.go @@ -51,7 +51,13 @@ func (sel selector) CommitteeSize(proto config.ConsensusParams) uint64 { // looking at online stake (and status and key material). It is exported so that // AVM can provide opcodes that return the same data. func BalanceRound(r basics.Round, cparams config.ConsensusParams) basics.Round { - return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback)) + return r.SubSaturate(BalanceLookback(cparams)) +} + +// BalanceLookback is how far back agreement looks when considering balances for +// voting stake. 
+func BalanceLookback(cparams config.ConsensusParams) basics.Round { + return basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback) } func seedRound(r basics.Round, cparams config.ConsensusParams) basics.Round { diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index c7a8a9a4cf..fd99bcc612 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -88,7 +88,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro } else { return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } - downloadDuration = time.Now().Sub(blockDownloadStartTime) + downloadDuration = time.Since(blockDownloadStartTime) block, cert, err := processBlockBytes(fetchedBuf, round, address) if err != nil { return nil, nil, time.Duration(0), err diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index c1b534a722..9387918881 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -221,8 +221,7 @@ func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound } reportInfof(infoTxPending, txid, stat.LastRound) - // WaitForRound waits until round "stat.LastRound+1" is committed - stat, err = client.WaitForRound(stat.LastRound) + stat, err = client.WaitForRound(stat.LastRound + 1) if err != nil { return model.PendingTransactionResponse{}, fmt.Errorf(errorRequestFail, err) } diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go index 2474081a0e..00b3a96727 100644 --- a/cmd/loadgenerator/main.go +++ b/cmd/loadgenerator/main.go @@ -200,22 +200,23 @@ func waitForRound(restClient client.RestClient, cfg config, spendingRound bool) time.Sleep(1 * time.Second) continue } - if isSpendRound(cfg, nodeStatus.LastRound) == spendingRound { + lastRound := nodeStatus.LastRound + if isSpendRound(cfg, lastRound) == spendingRound { // time to send transactions. 
return } if spendingRound { - fmt.Printf("Last round %d, waiting for spending round %d\n", nodeStatus.LastRound, nextSpendRound(cfg, nodeStatus.LastRound)) + fmt.Printf("Last round %d, waiting for spending round %d\n", lastRound, nextSpendRound(cfg, nodeStatus.LastRound)) } for { // wait for the next round. - nodeStatus, err = restClient.WaitForBlock(basics.Round(nodeStatus.LastRound)) + err = restClient.WaitForRoundWithTimeout(lastRound + 1) if err != nil { fmt.Fprintf(os.Stderr, "unable to wait for next round node status : %v", err) - time.Sleep(1 * time.Second) break } - if isSpendRound(cfg, nodeStatus.LastRound) == spendingRound { + lastRound++ + if isSpendRound(cfg, lastRound) == spendingRound { // time to send transactions. return } diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go index d495fbb328..16f28fd904 100644 --- a/cmd/tealdbg/localLedger.go +++ b/cmd/tealdbg/localLedger.go @@ -359,6 +359,10 @@ func (l *localLedger) LookupAgreement(rnd basics.Round, addr basics.Address) (ba }, nil } +func (l *localLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, nil +} + func (l *localLedger) OnlineCirculation(rnd basics.Round, voteRound basics.Round) (basics.MicroAlgos, error) { // A constant is fine for tealdbg return basics.Algos(1_000_000_000), nil // 1B diff --git a/config/consensus.go b/config/consensus.go index 7e111ecc89..b153848230 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -544,6 +544,9 @@ type ConsensusParams struct { // occur, extra funds need to be put into the FeeSink. The bonus amount // decays exponentially. Bonus BonusPlan + + // Heartbeat support + Heartbeat bool } // ProposerPayoutRules puts several related consensus parameters in one place. The same @@ -603,7 +606,7 @@ type ProposerPayoutRules struct { // // BaseAmount: 0, DecayInterval: XXX // -// by using a zero baseAmount, the amount not affected. 
+// by using a zero baseAmount, the amount is not affected. // For a bigger change, we'd use a plan like: // // BaseRound: , BaseAmount: , DecayInterval: @@ -1519,7 +1522,7 @@ func initConsensusProtocols() { vFuture.EnableLogicSigSizePooling = true vFuture.Payouts.Enabled = true - vFuture.Payouts.Percent = 75 + vFuture.Payouts.Percent = 50 vFuture.Payouts.GoOnlineFee = 2_000_000 // 2 algos vFuture.Payouts.MinBalance = 30_000_000_000 // 30,000 algos vFuture.Payouts.MaxBalance = 70_000_000_000_000 // 70M algos @@ -1530,7 +1533,9 @@ func initConsensusProtocols() { vFuture.Bonus.BaseAmount = 10_000_000 // 10 Algos // 2.9 sec rounds gives about 10.8M rounds per year. - vFuture.Bonus.DecayInterval = 250_000 // .99^(10.8/0.25) ~ .648. So 35% decay per year + vFuture.Bonus.DecayInterval = 1_000_000 // .99^(10.8M/1M) ~ .897. So ~10% decay per year + + vFuture.Heartbeat = true Consensus[protocol.ConsensusFuture] = vFuture diff --git a/config/consensus_test.go b/config/consensus_test.go index c0d079cdf0..6bc8d45c45 100644 --- a/config/consensus_test.go +++ b/config/consensus_test.go @@ -37,6 +37,11 @@ func TestConsensusParams(t *testing.T) { if params.ApplyData && params.PaysetCommit == PaysetCommitUnsupported { t.Errorf("Protocol %s: ApplyData with PaysetCommitUnsupported", proto) } + + // To figure out challenges, nodes must be able to lookup headers up to two GracePeriods back + if 2*params.Payouts.ChallengeGracePeriod > params.MaxTxnLife+params.DeeperBlockHeaderHistory { + t.Errorf("Protocol %s: Grace period is too long", proto) + } } } diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go index ab5bdceb88..fc279029a0 100644 --- a/crypto/msgp_gen.go +++ b/crypto/msgp_gen.go @@ -111,6 +111,16 @@ import ( // |-----> MsgIsZero // |-----> HashTypeMaxSize() // +// HeartbeatProof +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) 
MsgIsZero +// |-----> HeartbeatProofMaxSize() +// // MasterDerivationKey // |-----> (*) MarshalMsg // |-----> (*) CanMarshalMsg @@ -1169,6 +1179,232 @@ func HashTypeMaxSize() (s int) { return } +// MarshalMsg implements msgp.Marshaler +func (z *HeartbeatProof) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0006Len := uint32(5) + var zb0006Mask uint8 /* 6 bits */ + if (*z).PK == (ed25519PublicKey{}) { + zb0006Len-- + zb0006Mask |= 0x2 + } + if (*z).PK1Sig == (ed25519Signature{}) { + zb0006Len-- + zb0006Mask |= 0x4 + } + if (*z).PK2 == (ed25519PublicKey{}) { + zb0006Len-- + zb0006Mask |= 0x8 + } + if (*z).PK2Sig == (ed25519Signature{}) { + zb0006Len-- + zb0006Mask |= 0x10 + } + if (*z).Sig == (ed25519Signature{}) { + zb0006Len-- + zb0006Mask |= 0x20 + } + // variable map header, size zb0006Len + o = append(o, 0x80|uint8(zb0006Len)) + if zb0006Len != 0 { + if (zb0006Mask & 0x2) == 0 { // if not empty + // string "p" + o = append(o, 0xa1, 0x70) + o = msgp.AppendBytes(o, ((*z).PK)[:]) + } + if (zb0006Mask & 0x4) == 0 { // if not empty + // string "p1s" + o = append(o, 0xa3, 0x70, 0x31, 0x73) + o = msgp.AppendBytes(o, ((*z).PK1Sig)[:]) + } + if (zb0006Mask & 0x8) == 0 { // if not empty + // string "p2" + o = append(o, 0xa2, 0x70, 0x32) + o = msgp.AppendBytes(o, ((*z).PK2)[:]) + } + if (zb0006Mask & 0x10) == 0 { // if not empty + // string "p2s" + o = append(o, 0xa3, 0x70, 0x32, 0x73) + o = msgp.AppendBytes(o, ((*z).PK2Sig)[:]) + } + if (zb0006Mask & 0x20) == 0 { // if not empty + // string "s" + o = append(o, 0xa1, 0x73) + o = msgp.AppendBytes(o, ((*z).Sig)[:]) + } + } + return +} + +func (_ *HeartbeatProof) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatProof) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *HeartbeatProof) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} 
+ return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0006 int + var zb0007 bool + zb0006, zb0007, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sig") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK2") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK1Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK1Sig") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK2Sig") + return + } + } + if zb0006 > 0 { + err = msgp.ErrTooManyArrayFields(zb0006) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0007 { + (*z) = HeartbeatProof{} + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "s": + bts, err = msgp.ReadExactBytes(bts, ((*z).Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "Sig") + return + } + case "p": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK)[:]) + if err != nil { + err = msgp.WrapError(err, "PK") + return + } + case "p2": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2)[:]) + if err != nil { + err = msgp.WrapError(err, "PK2") + return + 
} + case "p1s": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK1Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "PK1Sig") + return + } + case "p2s": + bts, err = msgp.ReadExactBytes(bts, ((*z).PK2Sig)[:]) + if err != nil { + err = msgp.WrapError(err, "PK2Sig") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *HeartbeatProof) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *HeartbeatProof) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatProof) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *HeartbeatProof) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 2 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + 4 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize)) + return +} + +// MsgIsZero returns whether this is a zero value +func (z *HeartbeatProof) MsgIsZero() bool { + return ((*z).Sig == (ed25519Signature{})) && ((*z).PK == (ed25519PublicKey{})) && ((*z).PK2 == (ed25519PublicKey{})) && ((*z).PK1Sig == (ed25519Signature{})) && ((*z).PK2Sig == (ed25519Signature{})) +} + +// MaxSize returns a maximum valid message size for this message type +func HeartbeatProofMaxSize() (s int) { + s = 1 + 2 + // Calculating size of array: z.Sig + s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) + s += 2 + // Calculating size of array: z.PK + s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) + s += 3 + // Calculating size of array: z.PK2 + s += msgp.ArrayHeaderSize + ((32) * (msgp.ByteSize)) + s += 4 + // Calculating size of array: z.PK1Sig + s += msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) + s += 4 + // Calculating size of array: z.PK2Sig + s += 
msgp.ArrayHeaderSize + ((64) * (msgp.ByteSize)) + return +} + // MarshalMsg implements msgp.Marshaler func (z *MasterDerivationKey) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) diff --git a/crypto/msgp_gen_test.go b/crypto/msgp_gen_test.go index b3fb95150b..0105a58f1d 100644 --- a/crypto/msgp_gen_test.go +++ b/crypto/msgp_gen_test.go @@ -434,6 +434,66 @@ func BenchmarkUnmarshalHashFactory(b *testing.B) { } } +func TestMarshalUnmarshalHeartbeatProof(t *testing.T) { + partitiontest.PartitionTest(t) + v := HeartbeatProof{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingHeartbeatProof(t *testing.T) { + protocol.RunEncodingTest(t, &HeartbeatProof{}) +} + +func BenchmarkMarshalMsgHeartbeatProof(b *testing.B) { + v := HeartbeatProof{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHeartbeatProof(b *testing.B) { + v := HeartbeatProof{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHeartbeatProof(b *testing.B) { + v := HeartbeatProof{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalMasterDerivationKey(t *testing.T) { partitiontest.PartitionTest(t) v := MasterDerivationKey{} diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go index d05ccaa961..aba2385f0f 100644 --- a/crypto/onetimesig.go 
+++ b/crypto/onetimesig.go @@ -57,6 +57,56 @@ type OneTimeSignature struct { PK2Sig ed25519Signature `codec:"p2s"` } +// A HeartbeatProof is functionally equivalent to a OneTimeSignature, but it has +// been cleaned up for use as a transaction field in heartbeat transactions. +type HeartbeatProof struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // Sig is a signature of msg under the key PK. + Sig ed25519Signature `codec:"s"` + PK ed25519PublicKey `codec:"p"` + + // PK2 is used to verify a two-level ephemeral signature. + PK2 ed25519PublicKey `codec:"p2"` + // PK1Sig is a signature of OneTimeSignatureSubkeyOffsetID(PK, Batch, Offset) under the key PK2. + PK1Sig ed25519Signature `codec:"p1s"` + // PK2Sig is a signature of OneTimeSignatureSubkeyBatchID(PK2, Batch) under the master key (OneTimeSignatureVerifier). + PK2Sig ed25519Signature `codec:"p2s"` +} + +// ToOneTimeSignature converts a HeartbeatProof to a OneTimeSignature. +func (hbp HeartbeatProof) ToOneTimeSignature() OneTimeSignature { + return OneTimeSignature{ + Sig: hbp.Sig, + PK: hbp.PK, + PK2: hbp.PK2, + PK1Sig: hbp.PK1Sig, + PK2Sig: hbp.PK2Sig, + } +} + +// ToHeartbeatProof converts a OneTimeSignature to a HeartbeatProof. +func (ots OneTimeSignature) ToHeartbeatProof() HeartbeatProof { + return HeartbeatProof{ + Sig: ots.Sig, + PK: ots.PK, + PK2: ots.PK2, + PK1Sig: ots.PK1Sig, + PK2Sig: ots.PK2Sig, + } +} + +// BatchPrep enqueues the necessary checks into the batch. The caller must call +// batchVerifier.verify() to verify it. 
+func (hbp HeartbeatProof) BatchPrep(voteID OneTimeSignatureVerifier, id OneTimeSignatureIdentifier, msg Hashable, batchVerifier BatchVerifier) { + offsetID := OneTimeSignatureSubkeyOffsetID{SubKeyPK: hbp.PK, Batch: id.Batch, Offset: id.Offset} + batchID := OneTimeSignatureSubkeyBatchID{SubKeyPK: hbp.PK2, Batch: id.Batch} + batchVerifier.EnqueueSignature(PublicKey(voteID), batchID, Signature(hbp.PK2Sig)) + batchVerifier.EnqueueSignature(PublicKey(batchID.SubKeyPK), offsetID, Signature(hbp.PK1Sig)) + batchVerifier.EnqueueSignature(PublicKey(offsetID.SubKeyPK), msg, Signature(hbp.Sig)) + +} + // A OneTimeSignatureSubkeyBatchID identifies an ephemeralSubkey of a batch // for the purposes of signing it with the top-level master key. type OneTimeSignatureSubkeyBatchID struct { diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index e4990fe779..51cd801803 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -245,7 +245,7 @@ }, "/v2/accounts/{address}": { "get": { - "description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts", + "description": "Given a specific account public key, this call returns the account's status, balance and spendable amounts", "tags": [ "public", "nonparticipating" diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index c2c39a5372..34832fcda3 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -2999,7 +2999,7 @@ }, "/v2/accounts/{address}": { "get": { - "description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts", + "description": "Given a specific account public key, this call returns the account's status, balance and spendable amounts", "operationId": "AccountInformation", "parameters": [ { diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go index 
c349d3ecbf..6a44a32eaa 100644 --- a/daemon/algod/api/client/restClient.go +++ b/daemon/algod/api/client/restClient.go @@ -26,6 +26,7 @@ import ( "net/http" "net/url" "strings" + "time" "github.com/google/go-querystring/query" @@ -39,6 +40,8 @@ import ( "github.com/algorand/go-algorand/ledger/eval" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/rpcs" + "github.com/algorand/go-algorand/test/e2e-go/globals" ) const ( @@ -283,12 +286,56 @@ func (client RestClient) Status() (response model.NodeStatusResponse, err error) return } -// WaitForBlock returns the node status after waiting for the given round. -func (client RestClient) WaitForBlock(round basics.Round) (response model.NodeStatusResponse, err error) { +// WaitForBlockAfter returns the node status after trying to wait for the given +// round+1. This REST API has the documented misfeatures of returning after 1 +// minute, regardless of whether the given block has been reached. +func (client RestClient) WaitForBlockAfter(round basics.Round) (response model.NodeStatusResponse, err error) { err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d/", round), nil) return } +// WaitForRound returns the node status after waiting for the given round. It +// waits no more than waitTime in TOTAL, and returns an error if the round has +// not been reached. 
+func (client RestClient) WaitForRound(round uint64, waitTime time.Duration) (status model.NodeStatusResponse, err error) { + timeout := time.After(waitTime) + for { + status, err = client.Status() + if err != nil { + return + } + + if status.LastRound >= round { + return + } + select { + case <-timeout: + return model.NodeStatusResponse{}, fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound) + case <-time.After(200 * time.Millisecond): + } + } +} + +const singleRoundMaxTime = globals.MaxTimePerRound * 40 + +// WaitForRoundWithTimeout waits for a given round to be reached. As it +// waits, it returns early with an error if the wait time for any round exceeds +// singleRoundMaxTime so we can alert when we're getting "hung" waiting. +func (client RestClient) WaitForRoundWithTimeout(roundToWaitFor uint64) error { + status, err := client.Status() + if err != nil { + return err + } + + for lastRound := status.LastRound; lastRound < roundToWaitFor; lastRound = status.LastRound { + status, err = client.WaitForRound(lastRound+1, singleRoundMaxTime) + if err != nil { + return fmt.Errorf("client.WaitForRound took too long between round %d and %d", lastRound, lastRound+1) + } + } + return nil +} + // HealthCheck does a health check on the potentially running node, // returning an error if the API is down func (client RestClient) HealthCheck() error { @@ -301,14 +348,6 @@ func (client RestClient) ReadyCheck() error { return client.get(nil, "/ready", nil) } -// StatusAfterBlock waits for a block to occur then returns the StatusResponse after that block -// blocks on the node end -// Not supported -func (client RestClient) StatusAfterBlock(blockNum uint64) (response model.NodeStatusResponse, err error) { - err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil) - return -} - type pendingTransactionsParams struct { Max uint64 `url:"max"` Format string `url:"format"` @@ -557,6 +596,16 @@ func (client 
RestClient) RawBlock(round uint64) (response []byte, err error) { return } +// EncodedBlockCert takes a round and returns its parsed block and certificate +func (client RestClient) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) { + resp, err := client.RawBlock(round) + if err != nil { + return + } + err = protocol.Decode(resp, &blockCert) + return +} + // Shutdown requests the node to shut itself down func (client RestClient) Shutdown() (err error) { response := 1 diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go index d3924eaf1d..25b3365f4a 100644 --- a/daemon/algod/api/server/v2/dryrun.go +++ b/daemon/algod/api/server/v2/dryrun.go @@ -329,6 +329,10 @@ func (dl *dryrunLedger) LookupAgreement(rnd basics.Round, addr basics.Address) ( }, nil } +func (dl *dryrunLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, nil +} + func (dl *dryrunLedger) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) { // dryrun doesn't support setting the global online stake, so we'll just return a constant return basics.Algos(1_000_000_000), nil // 1B diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index 600972e9bb..6f44c1afad 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -985,101 +985,101 @@ var swaggerSpec = []string{ "1jihBfsJzN7gXXkmsK+lQWqKJxGWlOWjJ/6n/9efsEkqlvXw/teR6880WmhdqCdHRxcXF5Pwk6M5pv4n", "WpTp4sjPg90GG/rKm5MqRt/G4eCO1tZj3FRHCsf47O3z0zNy/OZkUhPM6MnoweTB5KFrbc1pwUZPRl/h", "T3h6FrjvR1hf80i50vlHVa7Wx3HnWVHYwvrmkaNR99cCaI4FdswfS9CSpf6RBJpt3P/VBZ3PQU4we8P+", - "tHp05LWRow+ucsJHA1jUbWjrrAfFtX0gYlFOc5b6GmVMWfuxDbBXYXNZa1gv1ZhMbfthH8PLM4xQssUI", - 
"VNiC+yQzeLafn9S8zndQRrfy6MlvkWpWPvHDN/YNY86CaLT/c/rzayIkcbeiNzQ9r5JefJZTndkVJjmZ", - "Lyee7P9dgtzUZOkY5nikqu7gwMul4T0ue2ap5kWzsGutjMWMRR1c+5kNNQXnoapzUvM7tAwGkNTc23Dk", - "B8m37z98/Y+PowGAYNEdBdjn8Q+a539Y6xqsMbC2FXgz7guJGtd1M/CDeifHaMiqngaf1+8066H/wQWH", - "P/q2wQEW3Qea5+ZFwSG2B++xEyESCx7VRw8eeP7ktP8AuiN3poJZBrUAsM6FahRPEpcYqMvH7KO3VWlM", - "SQt7Fo99+PCmqNw79qWJYVePD7jQZgHPKy+3PVxn0d/TjEiXvoxLefjFLuWE21BQI4+s3Pw4Hn39Be/N", - "CTc8h+YE3wza/HYFzS/8nIsL7t80OlO5XFK5QY1IV7yw3ZeGzhX6VJFF2rMdVF/j89H7j71S7yiMeTz6", - "0CidlF1JJlonS6Or0w4xeUf1cU4cyyaluR/uHhcFhnyeVs+Pi8J2DccwAmAo/WDNlFb3JuSH8OuGb8RC", - "Yl0jjZwA30Tbt+ZuuMqDdpxRod0oSnArvz+t/D5u2khYBlyzGUN9PQZM4xRshakTrHRVAdrNEQpKJO0b", - "D12Vx3aqReJarw0cwzXhP1xfwQGVUexM72M3yJ2M+hZ3PbjrU5MCeCuNqW5qeDOs2VfarSRJQ2RcI+P+", - "wpW+VzQ3dBIst9XR5uTZrTL4t1IGq4qcc6udFcUB1EOfuLHrlaMPrsrkIbRGvB4P0hfDm3fwbRB7f7fF", - "ce5NyHH7ncuxFVelc6cmaN671QE/Bx3Qljndpf05Ov6kel+Y9rVPFlZDYTG/D/r4C1f0/sbI6tXsDKS7", - "dbpLsM+OvuaY9bWx1b+knuaQdquh/a01tKp29pV0tDD09chVIQg0tisZ+NoGPKYrTaxZPz3gbFhuBPPx", - "7REe12H+hsXY+GUXuazG/vKIjlp7r7SbNe5cLbsq1g8Q3mG/35w826VdfUGmoMFtkCNSIL43181Lo56J", - "tzfjmRjGmx4/eHxzEIS78Fpo8gKl+DVzyGtlaXGy2peFbeNIR1Ox3sWVeIstVQXqzKFt8KiqDuk4eG7e", - "tvEfdzHjt9k4696EfO9erauAuIz2uTCMymeKUTm3HxleZ5BB7vg/n+D4dybkBeY/ajXGMDZMrMAXGddP", - "Hj766rF7RdILGyXWfm/6zeMnx999514rJOMaQwbsPafzutLyyQLyXLgPnIzojmsePPnP//rvyWRyZydb", - "FevvN69tp93PhbeOYxUPKwLo260vfJNit3XXAXkn6m7Ew/+9WEelgFjfSqFPJoUM9v8S0mfaJCN3Ea2M", - "nY1ePAeURvaY7COPxk7+YBJHJUwm5LVwbdHKnEpbHwZL6CoyL6mkXANkE0+pmIGnbCG7NGdYOkASBXIF", - "MlGsKlVdSqiKmBQSVhh9Xxd5bUCwm9FjjO5ny+Rf0XWQNj+txLQWbslo9lzSNcE+H5oo0GNbQW1NvvuO", - "PBjXt5c8NwMkFWJizHVJ16MbtPpVxDa0LNAzhx0hd4f+4thDLEi19lPVl6yvGn93zv3Fau6W3N3GHohz", - "7u34qR07oR3BNR/bakGwip3GasiqLIp8U9fBNVqeV6HiLM7MMNQ48Bn7CHaapqOX0DZ6bw/xrRHgSqyk", - "TVB7sg3MZ1VHH/BeHvKMzrnFfLy/l7s08B1JsfTOI0FmoNOFSwVuoT7CnqRLR+znTUvG2dJA+WB87VoN", - "7mK3/nHY+zmjNgF/SHuxIEsTHXggI0T8M/6H5lhTj81saXff8MNXM0TXlKuOXTVctZdv24LZhfz7jOGC", - 
"NhrI7obyaT15VyFDtBzC/3mL4P0Q3GGOz121A3u83CL+CkkB/iqZkNeiTki3N6i/pOvxOiX7dS/oteBg", - "fexG87W0eOtOrdQOwzgsUnwlEnt/qdtrXVYFOfIVfLbqIT+al3boIkOkN1YD+hJF+I/ROkcNKWPWNtlZ", - "ZqEebQhzNi/afghhIZTJp7zFfBJ++hlebT4Fx7oZFoOH1PMZpxbwAzMdq2DtZDs+r/zqjMee0WtnPeO/", - "1A3tOhhptfPXobBHma19drDLxue0gi6jvkkxcavF32rxt1r8pUSs5RLXK2Sxgp6d6ajw5Q775O1L83LA", - "iWxRwcGSV4sq1hsipfvIFHLB5+rz1Pe30UccLxE6sYUibe+yzvonf0MF+alrLKZdbQ9XrlExngJRYgko", - "JI3m47o+WAj/cXMQaraEjIgSa04GNSQ+sQr/9YOvbm76U5ArlgI5g2UhJJUs35BfeNVA7Cr8ThHq9jx0", - "uUaYA+MY0tEs65mGNQivwATFfEsIi3MO14WJlb1DiFKDtCVpW30iWYdJx5yuyDBemqkPcHfJxfxLs5l4", - "rA/tpPCU5jmia1ckBw48KBUoz+1+wpJpXfdlCqUreU7TRbW34/qCVnXP9Q08xq2Szziya6Vq6+koMPus", - "gQSrCVwCIGEmsC0iSMCWSFMgyzLXrMib31TtpbHdXiTW19Jm2Knn5JlfnY2AErN66Db9+nYfbvCJmds9", - "wpm5sIujEpB3Vw6MVgfHSQNo23jS5zgF7QJd00NXTZjJVnnnOkC1KIDK+mNL+XcLCYkbQtIVSEXxsLYW", - "de/WHvZ52MPWrp/AZ2INiwYCXZXXX14UNVKVPug1yz7u1suDkvx7quSMByp5yC7sWbu8Lr7b6HXWmvHk", - "WZgNKqqilV5B6AHFoGjPhOj/NRoYZ4CV0MTMGTtLbgH1daSdxupSNcVsXCVDmBuumD0h7/h9ohbUtzlw", - "fz76+pseO5yZx5V/7Vri6oHMYzvMkICJW+NipXFU+H1y07u93yaORyxbR3q58AzWQfuw6uiE8vCOIgXd", - "+LTJTjnjIt7SoLqYhsMuwYgptWDFzZfNV5pN431DvLvrFDstnq35Cf++8nra2u5Gayg+Rbn08UhLgAwK", - "vdjZRQHfqncTXD8FplznO1vrfkzYBCa2FHzdoTSbgxNMlORAZ1WrUSGGJMsHfMYQmqeKAOvhQoZo0lH6", - "QZ0XifLmnZF1UrkVdB55baX4kyph+lMpYUlLC2ui5dPpZNgzaRyENxdSaJGK3OYqlEUhpK5Ot5oMsjxA", - "n6LXMDz0Ee6VlLk1y9ROB+YZvnUAG0CTstUXEzdx5tEUc1PFFnXJ2u71XENY2pkoiL3gt0D4pHzt9lIZ", - "42ctf9KXHmKhe0nvwM6glOp0URZHH/A/WNv+Y10YA7t+qSO95kfY5/now9YUFmSpudFNpG0Y1jDpdrpG", - "RxNRXuLndXOyF0IGl9sfzHc7U1RaSBu3hb7tWY25LhH2eD23yb/1JWyr66y14Vd31kZG7JzXqu5T0Om2", - "ot2g5Z0v5WT7XEdI+Da44PNaUO1PnDGeERpsY8vWJGTNCK7Zp3jdi/4ULsqbj6j4+gs+Z6+FJifLIocl", - "cA3Z1bLLSJvDeemxVdzupxg40d9NQevK/FDi+8TZShfZKeD3uPcEpQLBT0cl1u4zsvo2VvPvKMmfVt7W", - "kAxv5fKXI5elT/e9FcGfvwj+6otdzTXGMA0UyZdwDjfFcH0T31Mgd5QBZ8NqGQ62+ZXx6t1epXohpG/s", - "eivFv1CnqN3JwYFYQyw0uyyxbspDZFt8VtAPszPkecTS0HdQx1WsF8OiyCJl2ALvJFNjF1RmjRPuFN8q", - 
"Pp+14hPs9a3ec2t6+MJMDz1ajrv15/kQRWNfBWi1FBl4x6qYzVwTgj7tp9l12ZCn0nRZEPvlpDcO+4wt", - "4dS8+bOd4qAitga7pRa1wDPIUpAKnqkBURxu1MvKIXQ09QNw457Nagc8LK484eTSJPs2qHHcoQTSRr7C", - "btm+GYNDRgYrYghwcgCyPfpg/0VzWiFUZDWnnoA7G3PXbYvtLmHHbQBI3qASattU+K/EjDywTSZKjpVk", - "Fsy12cdYVi03RlH1NXUl0JykjQoSFRzdk3Pae3J2XgU6q+tZU/wuIOoTesgIhlb1np9u/AA8pdyRfBdB", - "WhBKOMypZivwLv/JbcXHS0szV29xCwMcE5pl9jTWmwArkBuiyqkyug5v5ijdUc3zsgfDgHUBkhkRTfPa", - "AW+vCUe2nOO2OKJT+8YVhVaLF9kikrIZteglqysxKWbkFUulOM7nooqFVxulYWnDCgMp6D79vacpkDck", - "dGNWBc8Zh2QpOGwiJxWfvsKHsa+xJGbfx2fmYd+3LXnbhL8FVnOeITL5qvj9TE7/lQJdWquVUAhpbrfT", - "jc2/QPrf8yj5Q7PhafckbXgaOLXcw2AgxFfs5yOfjlC3lel780PjT1f21b2pFqXOxEUwC9oAbDjjkIqP", - "qHzvmeRR29ya2ZNMXa/V7Tq9TQEeYmerehrpc18/7G91/zdNwnbOmZBIXE7jCqRqXeRuM7H/UpnYg/d9", - "L25shizVLo5WqsPqLq9FBnbcOh3XHP1YpzEuMiDKA9FSWaqwyHjKkJdf9XutJI6UlvOFJmVBtIili9Qf", - "JjS1TDaxF6H4hEFtf3tdwukWdAWE5hJoZi6vwImYmkXXkhQXSRV2V/A5Jy74M6o0BXAVUqSgFGSJ76y2", - "CzT/ng1V11vwhIAjwNUsRAkyo/LKwJ6vdsJ5DpsEL8OK3P3pV3O1vnF4rdK4HbG2pnsEve206y7Uw6bf", - "RnDtyUOyswndlmoxRU4sixxcklwEhXvhpHf/2hB1dvHqaMEsMnbNFO8nuRoBVaBeM71fFdqySIz87oL4", - "1D49Y0vUxDjlwlsgY4PlVOlkF1s2L4VrUWYFASeMcWIcuOdq+pIq/dblS2dYS9mKE5zH6thmin6AjRS1", - "d4vIyL/ah7GxUyMPuSoVcSP4HCjIYmvgsN4y12tYV3Nh7RQ/dpVkZW2Bu0buw1IwvkNW0F6OUB34/c1w", - "kcWhpZI6U0YXlQ0gakRsA+TUvxVgN3T49wDCVI1oSzjYLieknKkQOVBuc1VFURhuoZOSV9/1oenUvn2s", - "f6nf7RKXrYVh5XYmQIUJcA7yC4tZhabcBVXEwUGW9NzlyM1du/AuzOYwJlhmKdlG+WjcNW+FR2DnIS2L", - "uaQZJBnkNGJ0+cU+JvbxtgFwxz15JiuhIZlijZT4pteULHuNSdXQAsdTMeWR4BOSmiNoLs81gbivd4yc", - "AY4dY06Oju5UQ+Fc0S3y4+Gy7Vb3GLDMGGbHHT0gyI6jDwG4Bw/V0JdHBX6c1OaD9hT/BcpNUOkR+0+y", - "AdW3hHr8vRbQNvyFAqwhKVrsvcWBo2yzl43t4CN9RzZmavwi3QLtKKdrTLJrmlqDC+DkMpfbowvKdDIT", - "0irSCZ1pkDtD5/9JmXec+/Rd4aquEBzByU03DjL5sGmr4yIWBOLEhSERV0nKyDBKHpIl46W2T0Spx7bH", - "hASaLozSHtpg7UjYdt8VaZIwpzLLsSX7rJKbQtqiT7ol4BHoSD5i88Zv1v1CyEGda5qlIynTpOSa5UH3", - "vure/vlZL28tErcWiVuLxK1F4tYicWuRuLVI3Fokbi0StxaJW4vErUXi72uR+FRlkhKvcfiKjVzwpB1M", - 
"eRtL+ZeqKl+JKm8gQevEBWXIloIqBf12iz0MQRpojjhgOfRHd9ug07Pnxy+JEqVMgaQGQsZJkVNzNYC1", - "9v33yZQq+OaxTzW0opMuyXRjeIeRr+aFrx6R0x+PfcXRhauM2Xz37rGNVyNKb3K453qPAs+sJuqbkAI3", - "SHc9SKkXCanLk7QGihnLMTJekef49jNYQS4KkLaYIdGyhK7F5wxo/tThZofB559mchdq+4cZ7Y9xw+jl", - "0LakhVfz/VqpItRmXJJnQQ7mHzOaK/ijLw3TjrekxShSu7gSfNYUhMzke5FtWifE7NoRbmDzbNR1Rxmn", - "chOpEtVNgWiThhaGXTnC6tqyPh68Om6XaLtktovCYtq6LYMfH72PyqNlYasN6wxlE3VnLToZxXJM27VQ", - "RxWAgwoDYpqE3RPy1n73acsAIkTuiNXM/LOJYmy+WTENfNdcIhzr+VJzCTzio6cXz/7YEHZWpkCYVsQX", - "2N0tXsajdWJGmgNPHANKpiLbJA32NWpIoYwpqhQsp7slUcg/8cRVwsc82S6nPo0YeRYsbhtPDolmnTgG", - "3MOdNxoG8+YKWziiY88Bxq+bRfex0RAE4vhTzKjU4n37Mr16ms0t47tlfMFpbGkEjLuC5G0mMrlGxic3", - "suT9PO/5GtLSABee5LtonUeXHKx1w8mawbScz81toeujwzY6OB4T/BOxQrvcoVxwPwqyg7/1MfZXTVJv", - "D9flLkHe+F1fmfEebgflG3RmLAvKN97lC4liyzK3OLRtVA/LaG3N8FiJ6dr212fVfuNNfoHt1ona5u8W", - "LeSCKmL3FzJS8sxlPHVqW6/58DonduizNa/Z9NaaJna9kdW5eYeICL/LzVRzRQqQiV5ze6Aah8l1MLAn", - "95PW0r4VGzcnNmyiOvQw2G41/pohHEh6yICvofgIei7ViXmNTky0mU7YeIYWjf4Ul7A5k33zoIElneGb", - "8SW1ucX5TyEvCCVpztC7KrjSskz1O07RfxMsbNKNPfGG6n7e99S/EnchRjx8bqh3nGKQUeXVifLAGURc", - "GC8APItV5XwOyvDRkIBmAO+4e4txUnJzCxMzsmSpFIlNrTXny+guE/vmkm7IDCuaCPInSEGmRuoHu25t", - "yUqzPHfBLmYaImbvONUkB6o0ecUMBzbD+XIKVcgZ6AshzyssxHv1zIGDYiqJG2Z+sE+xHY5bvjcAojHT", - "Pq7bWNxsHxwPO8t6IT95hjFqWI05Zyrsv9iG/cZ840vGkyiRnS2AuHCxNm2Ru1gDzhHQvabjSC/gHTfS", - "TwuCHJ/qy5FD2wPUOYv2dLSoprERLUeRX+ug699BuAyJMJlbt8tfKIU0oAPv2cSNt/X1W3u/p4ulIXIB", - "W4P2CWT71LVP7HnJXSAaRrJWgRv3xlkD5K3+iy+/rOTh75IejQe7TXYH7LKrZoM8xJvf8DGhueBzW1fR", - "3C4F7hPjRakxAPw6DXiwonkiViAly0ANXCkT/PmK5j9Xn30cj2ANaaIlTSGxFoWhWDsz31g6xUaDnGlG", - "8wRv1UMBghP71an9aIc8DrqNLpeQMaoh35BCQgqZLUTGFKnv8xNboIGkC8rnKLqlKOcL+5od5wIkVI0Z", - "zRW6PUS8EMyaJ7YoXRfGY9eoOazbCzRdRBrHoIAzd3ZPUFmjJ9XAPWiUHO27pI9HvYq2QeqqDp2zyGmy", - "mQFaREMfCPBTT3yIGq23RH9L9F860cdKKiLqZi1rhcVXuC3XbNa67gKiN2gl+yTVhW9L9P/VS/R7DqQI", - "JZI27iDx3nBUEabJBZZFmgIx8qtE67xruOfu65hpFxx1V2lTufZ86YIy7mrqVHkNCIe5Ei+XTGvfnvZa", - 
"DJuWmaFF06AD0lIyvcFbCy3Y7+dg/v/eqP0K5MpfaEqZj56MFloXT46OcpHSfCGUPhp9HIfPVOvh+wr+", - "D/4uUki2Mverjwi2kGzOuJG5F3Q+B1mbEEePJg9GH/9vAAAA//+jyUunb8cBAA==", + "tHp05LWRow+ucsJHA1jUbWjrrAfFtX0gYlFOc5b6GmVMWfuxDbBXYXNZZ1kv1ZhMbf9hH8TLMwxRstUI", + "VNiD+yQziLbfn9TMzrdQRr/y6MlvkXJWPvPDd/YNg86CcLT/c/rzayIkcdeiNzQ9r7JefJpTndoVZjmZ", + "Lyee7v9dgtzUdOk45nikqvbgwMulYT4ufWap5kWzsmutjcWsRR1k+5kNOQUHoip0UjM8NA0GkNTs27Dk", + "B8m37z98/Y+PowGAYNUdBdjo8Q+a539Y8xqsMbK2FXkz7ouJGteFM/CDeifHaMmqngaf1+80C6L/wQWH", + "P/q2wQEW3Qea5+ZFwSG2B++xFSESC57VRw8eeAbl1P8AuiN3qIJZBvUAsN6FahRPEpcYqMvI7KO3VW1M", + "SQt7GI99/PCmqPw79qWJ4VePD7jQZgXPKy+3PVxn0d/TjEiXv4xLefjFLuWE21hQI5Cs4Pw4Hn39Be/N", + "CTc8h+YE3wz6/HYlzS/8nIsL7t80SlO5XFK5QZVIV7yw3ZiGzhU6VZFF2rMdlF/j89H7j71i7ygMejz6", + "0KidlF1JKFovS6Ot02452cM5cSybleZ+uHtcFBjzeVo9Py4K2zYc4wiAofSDNVNa3ZuQH8KvG84RC4n1", + "jTSSAnwXbd+bu+ErD/pxRoV2oyrBrfz+tPL7uGkkYRlwzWYMFfYYMI1TsBWmTrTSVQVoN0koqJG0b0B0", + "VR/bqRaJ6702cAzXhf9wjQUHlEaxM72PXSF3Mupb3PXgrk9NCuCtNKa6q+HNsGZfareSJA2RcY2M+wtX", + "+l7R3NBJsNxWS5uTZ7fK4N9KGaxKcs6tdlYUB1APfebGrleOPrgyk4fQGvF6PEhfDG/ewbdB8P3dFse5", + "NyHH7Xcux1Zcmc6dmqB571YH/Bx0QFvndJf25+j4k+p9Yd7XPmlYDYXF/D7o4y9c0fsbI6tXszOQ7tbp", + "LsE+O/qaY9bXxlb/knqaQ9qthva31tCq4tlX0tHC2NcjV4Yg0NiuZOBrG/CYrjSxZgH1gLNhvRFMyLdH", + "eFzH+RsWYwOYXeiyGvvLI3pq7b3Sbta4c7Xsqlg/QHiH/X5z8myXdvUFmYIG90GOSIH43lw3L416Jt7e", + "jGdiGG96/ODxzUEQ7sJrockLlOLXzCGvlaXFyWpfFraNIx1NxXoXV+IttlRVqDOHtsGjqkKk4+C5edsG", + "gNzFlN9m56x7E/K9e7UuA+JS2ufCMCqfKkbl3H5keJ1BBrnj/3yC49+ZkBeYAKnVGOPYMLMCX2RcP3n4", + "6KvH7hVJL2yYWPu96TePnxx/9517rZCMawwZsPeczutKyycLyHPhPnAyojuuefDkP//rvyeTyZ2dbFWs", + "v9+8tq12PxfeOo6VPKwIoG+3vvBNit3WXQvknai7EQ//92IdlQJifSuFPpkUMtj/S0ifaZOM3EW0MnY2", + "mvEcUBrZY7KPPBo7+YNZHJUwmZDXwvVFK3MqbYEYrKGryLykknINkE08pWIKnrKV7NKcYe0ASRTIFchE", + "sapWdSmhqmJSSFhh+H1d5bUBwW5Gj0G6ny2Tf0XXQd78tBLTWrglo9lzSdcEG31ookCPbQm1NfnuO/Jg", + "XN9e8twMkFSIiTHXJV2PbtDqVxHb0LpAzxx2hNwd+4tjD7Eg1dpPVWCyvmr83Tn3F6u5W3J3G3sgzrm3", + 
"46d27IR2BNd9bKsFwSp2Gsshq7Io8k1dCNdoeV6FirM4M8NQ48Bn7CPYaZqOXkLb6L09xLdGgCuxkjZB", + "7ck2MKFVHX3Ae3nIMzrnFhPy/l7u0sB3JMXSO48EmYFOFy4XuIX6CHuSLh+xnzctGWdLA+WD8bVrNbiL", + "3QLIYfPnjNoM/CH9xYI0TXTggYwQ8c/4H5pjUT02s7XdfccPX84QXVOuPHbVcdVevm0PZhfy71OGC9ro", + "ILsbyqf15F2FDNFyCP/nLYL3Q3CHOT535Q7s8XKL+CskBfirZEJeizoj3d6g/pKux+uU7Ne9oNeCg/Wx", + "G83X0uKtO7VSOwzjsEjxpUjs/aXur3VZFeTIl/DZqof8aF7aoYsMkd5YDuhLFOE/RgsdNaSMWdtkZ52F", + "erQhzNm8aBsihJVQJp/yFvNJ+OlneLX5FBzrZlgMHlLPZ5xawA/MdKyCtZPt+MTyqzMee0avnfWM/1I3", + "tOtgpNXOX4fCHmW29tnBLhuf0wq6jPomxcStFn+rxd9q8ZcSsZZLXK+QxRJ6dqajwtc77JO3L83LASey", + "VQUHS14tqlhviNTuI1PIBZ+rz1Pf30YfcbxE6MRWirTNyzrrn/wNFeSnrrOYdrU9XL1GxXgKRIkloJA0", + "mo9r+2Ah/MfNQajZEjIiSiw6GdSQ+MQq/NcPvrq56U9BrlgK5AyWhZBUsnxDfuFVB7Gr8DtFqNvz0OUa", + "YQ6MY0hHs65nGhYhvAITFPMtISzOOVxXJlb2DiFKDdLWpG01imQdJh1zuiLDeGmmPsDdJRfzL81m4rE+", + "tJXCU5rniK5dkRw48KBUoDy3+wlLpnXdmCmUruQ5TRfV3o7rC1rVPtd38Bi3aj7jyK6Xqq2no8DsswYS", + "rCZwCYCEmcC+iCABeyJNgSzLXLMib35T9ZfGfnuRWF9Lm2GrnpNnfnU2AkrM6qHb9Ov7fbjBJ2Zu9whn", + "5sIujkpA3l05MFotHCcNoG3nSZ/jFPQLdF0PXTlhJlv1nesA1aIAKuuPLeXfLSQkbghJVyAVxcPaWtS9", + "W3vY52EPW7uGAp+JNSwaCHRVXn95UdRIVfqg1yz7uFsvD2ry76mSMx6o5CG7sGft8rr4bqPXWWvGk2dh", + "NqioqlZ6BaEHFIOiPROi/9doYJwBVkITM2fsLLkF1BeSdhqrS9UUs3GVDGFuuGL2hLzj94laUN/nwP35", + "6OtveuxwZh5X/7VriasHMo/tMEMCJm6Ni5XGUeH3yU3v9n6bOB6xbB1p5sIzWAf9w6qjE8rDO4oUdOPT", + "Jjv1jIt4T4PqYhoOuwQjptSCFTdfN19pNo03DvHurlNstXi25if8+8rraYu7G62h+BT10scjLQEyKPRi", + "ZxsFfKveTXANFZhyre9ssfsxYROY2FrwdYvSbA5OMFGSA51VvUaFGJIsH/AZQ2ieKgKshwsZoklH6Qd1", + "XiTKm3dG1knlVtB55LWV4k+qhOlPpYQlLS2siZZPp5Nh06RxEN5cSKFFKnKbq1AWhZC6Ot1qMsjyAH2K", + "XsPw0Ee4V1Lm1ixTOx2YZ/jWAWwATcpWX0zcxJlHU8xNFVvUJYu713MNYWlnoiD2gt8C4ZPytdtLZYyf", + "tfxJX3qIhe4lvQM7g1Kq00VZHH3A/2Bx+491YQxs+6WO9JofYaPnow9bU1iQpeZGN5G2Y1jDpNtpGx1N", + "RHmJn9fdyV4IGVxufzDf7UxRaSFt3Bb6tmk15rpE2OP13Cb/1pewra6z1oZf3VkbGbFzXqu6T0Gr24p2", + "g553vpSTbXQdIeHb4ILPa0G1P3HGeEZosI0tW5OQNSO4Zp/idS/6U7gobz6i4usv+Jy9FpqcLIsclsA1", + 
"ZFfLLiNtDuelx1Zxu59i4ER/NwWtK/NDie8TZytdZKeA3+PeE5QKBD8dlVi7z8jq21jNv6Mkf1p5W0My", + "vJXLX45clj7d91YEf/4i+KsvdjXXGMM0UCRfwjncFMP1TXxPgdxRBpwNq2U42OZXxqt3e5XqhZC+s+ut", + "FP9CnaJ2JwcHYg2x0OyyxLopD5Ft8VlBP8zOkOcRS0PfQR1XsV4MiyKLlGELvJNMjV1QmTVOuFN8q/h8", + "1opPsNe3es+t6eELMz30aDnu1p/nQxSNfRWg1VJk4B2rYjZzTQj6tJ9m22VDnkrTZUHsl5PeOOwztoRT", + "8+bPdoqDitga7JZa1ALPIEtBKnimBkRxuFEvK4fQ0dQPwI17Nqsd8LC48oSTS5Ps26DGcYcSSBv5Cttl", + "+2YMDhkZrIghwMkByPbog/0XzWmFUJHVnHoC7mzMXbcttruEHbcBIHmDSqhtU+G/EjPywDaZKDlWklkw", + "12cfY1m13BhF1dfUlUBzkjYqSFRwdE/Oae/J2XkV6KyuZ03xu4CoT+ghIxha1Xt+uvED8JRyR/JdBGlB", + "KOEwp5qtwLv8J7cVHy8tzVy9xS0McExoltnTWG8CrEBuiCqnyug6vJmjdEc1z8seDAPWBUhmRDTNawe8", + "vSYc2XKO2+KITu0bVxRaLV5ki0jKZtSil6yuxKSYkVcsleI4n4sqFl5tlIalDSsMpKD79PeepkDekNCN", + "WRU8ZxySpeCwiZxUfPoKH8a+xpKYfR+fmYd937bkbRP+FljNeYbI5Kvi9zM5/VcKdGmtVkIhpLndTjc2", + "/wLpf8+j5A/Nhqfdk7ThaeDUcg+DgRBfsZ+PfDpC3Vam780PjT9d2Vf3plqUOhMXwSxoA7DhjEMqPqLy", + "vWeSR21za2ZPMnW9Vrfr9DYFeIidrepppM99/bC/1f3fNAnbOWdCInE5jSuQqnWRu83E/ktlYg/e9724", + "sRmyVLs4WqkOq7u8FhnYcet0XHP0Y53GuMiAKA9ES2WpwiLjKUNeftXvtZI4UlrOF5qUBdEili5Sf5jQ", + "1DLZxF6E4hMGtf3tdQmnW9AVEJpLoJm5vAInYmoWXUtSXCRV2F3B55y44M+o0hTAVUiRglKQJb6z2i7Q", + "/Hs2VF1vwRMCjgBXsxAlyIzKKwN7vtoJ5zlsErwMK3L3p1/N1frG4bVK43bE2pruEfS20667UA+bfhvB", + "tScPyc4mdFuqxRQ5sSxycElyERTuhZPe/WtD1NnFq6MFs8jYNVO8n+RqBFSBes30flVoyyIx8rsL4lP7", + "9IwtURPjlAtvgYwNllOlk11s2bwUrkWZFQScMMaJceCeq+lLqvRbly+dYS1lK05wHqtjmyn6ATZS1N4t", + "IiP/ah/Gxk6NPOSqVMSN4HOgIIutgcN6y1yvYV3NhbVT/NhVkpW1Be4auQ9LwfgOWUF7OUJ14Pc3w0UW", + "h5ZK6kwZXVQ2gKgRsQ2QU/9WgN3Q4d8DCFM1oi3hYLuckHKmQuRAuc1VFUVhuIVOSl5914emU/v2sf6l", + "frdLXLYWhpXbmQAVJsA5yC8sZhWachdUEQcHWdJzlyM3d+3CuzCbw5hgmaVkG+Wjcde8FR6BnYe0LOaS", + "ZpBkkNOI0eUX+5jYx9sGwB335JmshIZkijVS4pteU7LsNSZVQwscT8WUR4JPSGqOoLk81wTivt4xcgY4", + "dow5OTq6Uw2Fc0W3yI+Hy7Zb3WPAMmOYHXf0gCA7jj4E4B48VENfHhX4cVKbD9pT/BcoN0GlR+w/yQZU", + "3xLq8fdaQNvwFwqwhqRosfcWB46yzV42toOP9B3ZmKnxi3QLtKOcrjHJrmlqDS6Ak8tcbo8uKNPJTEir", + 
"SCd0pkHuDJ3/J2Xece7Td4WrukJwBCc33TjI5MOmrY6LWBCIExeGRFwlKSPDKHlIloyX2j4RpR7bHhMS", + "aLowSntog7UjYdt9V6RJwpzKLMeW7LNKbgppiz7ploBHoCP5iM0bv1n3CyEHda5plo6kTJOSa5YH3fuq", + "e/vnZ728tUjcWiRuLRK3Folbi8StReLWInFrkbi1SNxaJG4tErcWib+vReJTlUlKvMbhKzZywZN2MOVt", + "LOVfqqp8Jaq8gQStExeUIVsKqhT02y32MARpoDnigOXQH91tg07Pnh+/JEqUMgWSGggZJ0VOzdUA1tr3", + "3ydTquCbxz7V0IpOuiTTjeEdRr6aF756RE5/PPYVRxeuMmbz3bvHNl6NKL3J4Z7rPQo8s5qob0IK3CDd", + "9SClXiSkLk/SGihmLMfIeEWe49vPYAW5KEDaYoZEyxK6Fp8zoPlTh5sdBp9/msldqO0fZrQ/xg2jl0Pb", + "khZezfdrpYpQm3FJngU5mH/MaK7gj740TDvekhajSO3iSvBZUxAyk+9FtmmdELNrR7iBzbNR1x1lnMpN", + "pEpUNwWiTRpaGHblCKtry/p48Oq4XaLtktkuCotp67YMfnz0PiqPloWtNqwzlE3UnbXoZBTLMW3XQh1V", + "AA4qDIhpEnZPyFv73actA4gQuSNWM/PPJoqx+WbFNPBdc4lwrOdLzSXwiI+eXjz7Y0PYWZkCYVoRX2B3", + "t3gZj9aJGWkOPHEMKJmKbJM02NeoIYUypqhSsJzulkQh/8QTVwkf82S7nPo0YuRZsLhtPDkkmnXiGHAP", + "d95oGMybK2zhiI49Bxi/bhbdx0ZDEIjjTzGjUov37cv06mk2t4zvlvEFp7GlETDuCpK3mcjkGhmf3MiS", + "9/O852tISwNceJLvonUeXXKw1g0nawbTcj43t4Wujw7b6OB4TPBPxArtcodywf0oyA7+1sfYXzVJvT1c", + "l7sEeeN3fWXGe7gdlG/QmbEsKN94ly8kii3L3OLQtlE9LKO1NcNjJaZr21+fVfuNN/kFtlsnapu/W7SQ", + "C6qI3V/ISMkzl/HUqW295sPrnNihz9a8ZtNba5rY9UZW5+YdIiL8LjdTzRUpQCZ6ze2Bahwm18HAntxP", + "Wkv7VmzcnNiwierQw2C71fhrhnAg6SEDvobiI+i5VCfmNTox0WY6YeMZWjT6U1zC5kz2zYMGlnSGb8aX", + "1OYW5z+FvCCUpDlD76rgSssy1e84Rf9NsLBJN/bEG6r7ed9T/0rchRjx8Lmh3nGKQUaVVyfKA2cQcWG8", + "APAsVpXzOSjDR0MCmgG84+4txknJzS1MzMiSpVIkNrXWnC+ju0zsm0u6ITOsaCLInyAFmRqpH+y6tSUr", + "zfLcBbuYaYiYveNUkxyo0uQVMxzYDOfLKVQhZ6AvhDyvsBDv1TMHDoqpJG6Y+cE+xXY4bvneAIjGTPu4", + "bmNxs31wPOws64X85BnGqGE15pypsP9iG/Yb840vGU+iRHa2AOLCxdq0Re5iDThHQPeajiO9gHfcSD8t", + "CHJ8qi9HDm0PUOcs2tPRoprGRrQcRX6tg65/B+EyJMJkbt0uf6EU0oAOvGcTN97W12/t/Z4ulobIBWwN", + "2ieQ7VPXPrHnJXeBaBjJWgVu3BtnDZC3+i++/LKSh79LejQe7DbZHbDLrpoN8hBvfsPHhOaCz21dRXO7", + "FLhPjBelxgDw6zTgwYrmiViBlCwDNXClTPDnK5r/XH32cTyCNaSJljSFxFoUhmLtzHxj6RQbDXKmGc0T", + "vFUPBQhO7Fen9qMd8jjoNrpcQsaohnxDCgkpZLYQGVOkvs9PbIEGki4on6PolqKcL+xrdpwLkFA1ZjRX", + 
"6PYQ8UIwa57YonRdGI9do+awbi/QdBFpHIMCztzZPUFljZ5UA/egUXK075I+HvUq2gapqzp0ziKnyWYG", + "aBENfSDATz3xIWq03hL9LdF/6UQfK6mIqJu1rBUWX+G2XLNZ67oLiN6gleyTVBe+LdH/Vy/R7zmQIpRI", + "2riDxHvDUUWYJhdYFmkKxMivEq3zruGeu69jpl1w1F2lTeXa86ULyrirqVPlNSAc5kq8XDKtfXvaazFs", + "WmaGFk2DDkhLyfQGby20YL+fg/n/e6P2K5Arf6EpZT56MlpoXTw5OspFSvOFUPpo9HEcPlOth+8r+D/4", + "u0gh2crcrz4i2EKyOeNG5l7Q+RxkbUIcPZo8GH38vwEAAP//fOeJ6HDHAQA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go index 3bccd3f4ed..a9efeca1cb 100644 --- a/data/basics/userBalance.go +++ b/data/basics/userBalance.go @@ -19,7 +19,6 @@ package basics import ( "encoding/binary" "fmt" - "reflect" "slices" "github.com/algorand/go-algorand/config" @@ -111,7 +110,10 @@ type VotingData struct { type OnlineAccountData struct { MicroAlgosWithRewards MicroAlgos VotingData + IncentiveEligible bool + LastProposed Round + LastHeartbeat Round } // AccountData contains the data associated with a given address. @@ -561,6 +563,8 @@ func (u AccountData) OnlineAccountData() OnlineAccountData { VoteKeyDilution: u.VoteKeyDilution, }, IncentiveEligible: u.IncentiveEligible, + LastProposed: u.LastProposed, + LastHeartbeat: u.LastHeartbeat, } } @@ -581,15 +585,6 @@ func (u OnlineAccountData) KeyDilution(proto config.ConsensusParams) uint64 { return proto.DefaultKeyDilution } -// IsZero checks if an AccountData value is the same as its zero value. -func (u AccountData) IsZero() bool { - if u.Assets != nil && len(u.Assets) == 0 { - u.Assets = nil - } - - return reflect.DeepEqual(u, AccountData{}) -} - // NormalizedOnlineBalance returns a “normalized” balance for this account. 
// // The normalization compensates for rewards that have not yet been applied, diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go index 7f2632f3f0..af2068a1fe 100644 --- a/data/bookkeeping/block.go +++ b/data/bookkeeping/block.go @@ -72,8 +72,8 @@ type ( // begins as a consensus parameter value, and decays periodically. Bonus basics.MicroAlgos `codec:"bi"` - // ProposerPayout is the amount that should be moved from the FeeSink to - // the Proposer at the start of the next block. It is basically the + // ProposerPayout is the amount that is moved from the FeeSink to + // the Proposer in this block. It is basically the // bonus + the payouts percent of FeesCollected, but may be zero'd by // proposer ineligibility. ProposerPayout basics.MicroAlgos `codec:"pp"` diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go index 3c305b3c3b..bc8aec6a7a 100644 --- a/data/bookkeeping/block_test.go +++ b/data/bookkeeping/block_test.go @@ -1013,11 +1013,11 @@ func TestFirstYearsBonus(t *testing.T) { fmt.Printf("paid %d algos\n", suma) fmt.Printf("bonus start: %d end: %d\n", plan.BaseAmount, bonus) - // pays about 88M algos - a.InDelta(88_500_000, suma, 100_000) + // pays about 103.5M algos + a.InDelta(103_500_000, suma, 100_000) - // decline about 35% - a.InDelta(0.65, float64(bonus)/float64(plan.BaseAmount), 0.01) + // decline about 10% + a.InDelta(0.90, float64(bonus)/float64(plan.BaseAmount), 0.01) // year 2 for i := 0; i < yearRounds; i++ { @@ -1033,11 +1033,11 @@ func TestFirstYearsBonus(t *testing.T) { fmt.Printf("paid %d algos after 2 years\n", sum2) fmt.Printf("bonus end: %d\n", bonus) - // pays about 146M algos (total for 2 years) - a.InDelta(145_700_000, sum2, 100_000) + // pays about 196M algos (total for 2 years) + a.InDelta(196_300_000, sum2, 100_000) - // decline about 58% - a.InDelta(0.42, float64(bonus)/float64(plan.BaseAmount), 0.01) + // decline to about 81% + a.InDelta(0.81, float64(bonus)/float64(plan.BaseAmount), 0.01) // 
year 3 for i := 0; i < yearRounds; i++ { @@ -1053,9 +1053,9 @@ func TestFirstYearsBonus(t *testing.T) { fmt.Printf("paid %d algos after 3 years\n", sum3) fmt.Printf("bonus end: %d\n", bonus) - // pays about 182M algos (total for 3 years) - a.InDelta(182_600_000, sum3, 100_000) + // pays about 279M algos (total for 3 years) + a.InDelta(279_500_000, sum3, 100_000) - // declined to about 27% (but foundation funding probably gone anyway) - a.InDelta(0.27, float64(bonus)/float64(plan.BaseAmount), 0.01) + // declined to about 72% (but foundation funding probably gone anyway) + a.InDelta(0.72, float64(bonus)/float64(plan.BaseAmount), 0.01) } diff --git a/data/committee/common_test.go b/data/committee/common_test.go index 1f7e7bd373..8566a9cd2a 100644 --- a/data/committee/common_test.go +++ b/data/committee/common_test.go @@ -24,7 +24,6 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/protocol" ) @@ -33,40 +32,33 @@ type selectionParameterListFn func(addr []basics.Address) (bool, []BalanceRecord var proto = config.Consensus[protocol.ConsensusCurrentVersion] -func newAccount(t testing.TB, gen io.Reader, latest basics.Round, keyBatchesForward uint) (basics.Address, *crypto.SignatureSecrets, *crypto.VrfPrivkey, *crypto.OneTimeSignatureSecrets) { +func newAccount(t testing.TB, gen io.Reader) (basics.Address, *crypto.SignatureSecrets, *crypto.VrfPrivkey) { var seed crypto.Seed gen.Read(seed[:]) s := crypto.GenerateSignatureSecrets(seed) _, v := crypto.VrfKeygenFromSeed(seed) - o := crypto.GenerateOneTimeSignatureSecrets(basics.OneTimeIDForRound(latest, proto.DefaultKeyDilution).Batch, uint64(keyBatchesForward)) addr := basics.Address(s.SignatureVerifier) - return addr, s, &v, o + return addr, s, &v } -func signTx(s *crypto.SignatureSecrets, t transactions.Transaction) 
transactions.SignedTxn { - return t.Sign(s) +// testingenv creates a random set of participating accounts and the associated +// selection parameters for use testing committee membership and credential +// validation. seedGen is provided as an external source of randomness for the +// selection seed; if the caller persists seedGen between calls to testingenv, +// each iteration that calls testingenv will exercise a new selection seed. +// formerly, testingenv, generated transactions and one-time secrets as well, +// but they were not used by the tests. +func testingenv(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey) { + return testingenvMoreKeys(t, numAccounts, numTxs, seedGen) } -// testingenv creates a random set of participating accounts and random transactions between them, and -// the associated selection parameters for use testing committee membership and credential validation. -// seedGen is provided as an external source of randomness for the selection seed and transaction notes; -// if the caller persists seedGen between calls to testingenv, each iteration that calls testingenv will -// exercise a new selection seed. 
-func testingenv(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) { - return testingenvMoreKeys(t, numAccounts, numTxs, uint(5), seedGen) -} - -func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward uint, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey, []*crypto.OneTimeSignatureSecrets, []transactions.SignedTxn) { +func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, seedGen io.Reader) (selectionParameterFn, selectionParameterListFn, basics.Round, []basics.Address, []*crypto.SignatureSecrets, []*crypto.VrfPrivkey) { if seedGen == nil { seedGen = rand.New(rand.NewSource(1)) // same source as setting GODEBUG=randautoseed=0, same as pre-Go 1.20 default seed } P := numAccounts // n accounts - TXs := numTxs // n txns maxMoneyAtStart := 100000 // max money start minMoneyAtStart := 10000 // max money start - transferredMoney := 100 // max money/txn - maxFee := 10 // max maxFee/txn - E := basics.Round(50) // max round // generate accounts genesis := make(map[basics.Address]basics.AccountData) @@ -74,16 +66,14 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward addrs := make([]basics.Address, P) secrets := make([]*crypto.SignatureSecrets, P) vrfSecrets := make([]*crypto.VrfPrivkey, P) - otSecrets := make([]*crypto.OneTimeSignatureSecrets, P) proto := config.Consensus[protocol.ConsensusCurrentVersion] lookback := basics.Round(2*proto.SeedRefreshInterval + proto.SeedLookback + 1) var total basics.MicroAlgos for i := 0; i < P; i++ { - addr, sigSec, vrfSec, otSec := newAccount(t, gen, lookback, keyBatchesForward) + addr, sigSec, vrfSec := newAccount(t, gen) addrs[i] = addr secrets[i] = sigSec vrfSecrets[i] = 
vrfSec - otSecrets[i] = otSec startamt := uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart))) short := addr @@ -91,7 +81,6 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward Status: basics.Online, MicroAlgos: basics.MicroAlgos{Raw: startamt}, SelectionID: vrfSec.Pubkey(), - VoteID: otSec.OneTimeSignatureVerifier, } total.Raw += startamt } @@ -99,32 +88,8 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward var seed Seed seedGen.Read(seed[:]) - tx := make([]transactions.SignedTxn, TXs) - for i := 0; i < TXs; i++ { - send := gen.Int() % P - recv := gen.Int() % P - - saddr := addrs[send] - raddr := addrs[recv] - amt := basics.MicroAlgos{Raw: uint64(gen.Int() % transferredMoney)} - fee := basics.MicroAlgos{Raw: uint64(gen.Int() % maxFee)} - - t := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: saddr, - Fee: fee, - FirstValid: 0, - LastValid: E, - Note: make([]byte, 4), - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: raddr, - Amount: amt, - }, - } - seedGen.Read(t.Note) // to match output from previous versions, which shared global RNG for seed & note - tx[i] = t.Sign(secrets[send]) + for i := 0; i < numTxs; i++ { + seedGen.Read(make([]byte, 4)) // to match output from previous versions, which shared global RNG for seed & note } selParams := func(addr basics.Address) (bool, BalanceRecord, Seed, basics.MicroAlgos) { @@ -149,7 +114,7 @@ func testingenvMoreKeys(t testing.TB, numAccounts, numTxs int, keyBatchesForward return } - return selParams, selParamsList, lookback, addrs, secrets, vrfSecrets, otSecrets, tx + return selParams, selParamsList, lookback, addrs, secrets, vrfSecrets } /* TODO deprecate these types after they have been removed successfully */ diff --git a/data/committee/credential_test.go b/data/committee/credential_test.go index da2be625cd..bbabac62e9 100644 --- a/data/committee/credential_test.go +++ 
b/data/committee/credential_test.go @@ -35,7 +35,7 @@ func TestAccountSelected(t *testing.T) { seedGen := rand.New(rand.NewSource(1)) N := 1 for i := 0; i < N; i++ { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, seedGen) period := Period(0) leaders := uint64(0) @@ -98,7 +98,7 @@ func TestAccountSelected(t *testing.T) { func TestRichAccountSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 10, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 10, 2000, nil) period := Period(0) ok, record, selectionSeed, _ := selParams(addresses[0]) @@ -159,7 +159,7 @@ func TestPoorAccountSelectedLeaders(t *testing.T) { failsLeaders := 0 leaders := make([]uint64, N) for i := 0; i < N; i++ { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, seedGen) period := Period(0) for j := range addresses { ok, record, selectionSeed, _ := selParams(addresses[j]) @@ -207,7 +207,7 @@ func TestPoorAccountSelectedCommittee(t *testing.T) { N := 1 committee := uint64(0) for i := 0; i < N; i++ { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, seedGen) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, seedGen) period := Period(0) step := Cert @@ -250,10 +250,9 @@ func TestNoMoneyAccountNotSelected(t *testing.T) { seedGen := rand.New(rand.NewSource(1)) N := 1 for i := 0; i < N; i++ { - selParams, _, round, addresses, _, _, _, _ := testingenv(t, 10, 2000, seedGen) - lookback := basics.Round(2*proto.SeedRefreshInterval + proto.SeedLookback + 1) + selParams, _, round, addresses, _, _ := testingenv(t, 10, 2000, seedGen) gen := rand.New(rand.NewSource(2)) - _, _, zeroVRFSecret, _ := newAccount(t, gen, lookback, 5) + 
_, _, zeroVRFSecret := newAccount(t, gen) period := Period(0) ok, record, selectionSeed, _ := selParams(addresses[i]) if !ok { @@ -281,7 +280,7 @@ func TestNoMoneyAccountNotSelected(t *testing.T) { func TestLeadersSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, nil) period := Period(0) step := Propose @@ -313,7 +312,7 @@ func TestLeadersSelected(t *testing.T) { func TestCommitteeSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, nil) period := Period(0) step := Soft @@ -345,7 +344,7 @@ func TestCommitteeSelected(t *testing.T) { func TestAccountNotSelected(t *testing.T) { partitiontest.PartitionTest(t) - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(t, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(t, 100, 2000, nil) period := Period(0) leaders := uint64(0) for i := range addresses { @@ -375,7 +374,7 @@ func TestAccountNotSelected(t *testing.T) { // TODO update to remove VRF verification overhead func BenchmarkSortition(b *testing.B) { - selParams, _, round, addresses, _, vrfSecrets, _, _ := testingenv(b, 100, 2000, nil) + selParams, _, round, addresses, _, vrfSecrets := testingenv(b, 100, 2000, nil) period := Period(0) step := Soft diff --git a/data/transactions/heartbeat.go b/data/transactions/heartbeat.go new file mode 100644 index 0000000000..2c3120f1a1 --- /dev/null +++ b/data/transactions/heartbeat.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package transactions + +import ( + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" +) + +// HeartbeatTxnFields captures the fields used for an account to prove it is +// online (really, it proves that an entity with the account's part keys is able +// to submit transactions, so it should be able to propose/vote.) +type HeartbeatTxnFields struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // HbAddress is the account this txn is proving onlineness for. + HbAddress basics.Address `codec:"a"` + + // HbProof is a signature using HeartbeatAddress's partkey, thereby showing it is online. + HbProof crypto.HeartbeatProof `codec:"prf"` + + // The final three fields are included to allow early, concurrent check of + // the HbProof. + + // HbSeed must be the block seed for the this transaction's firstValid + // block. It is the message that must be signed with HbAddress's part key. + HbSeed committee.Seed `codec:"sd"` + + // HbVoteID must match the HbAddress account's current VoteID. + HbVoteID crypto.OneTimeSignatureVerifier `codec:"vid"` + + // HbKeyDilution must match HbAddress account's current KeyDilution. 
+ HbKeyDilution uint64 `codec:"kd"` +} diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 9ba52138ec..cc8034fdfc 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -2738,6 +2738,16 @@ func AssembleString(text string) (*OpStream, error) { return AssembleStringWithVersion(text, assemblerNoVersion) } +// MustAssemble assembles a program and panics on error. It is useful for +// defining globals. +func MustAssemble(text string) []byte { + ops, err := AssembleString(text) + if err != nil { + panic(err) + } + return ops.Program +} + // AssembleStringWithVersion takes an entire program in a string and // assembles it to bytecode using the assembler version specified. If // version is assemblerNoVersion it uses #pragma version or fallsback diff --git a/data/transactions/logic/crypto_test.go b/data/transactions/logic/crypto_test.go index c0ffd76242..5c14e23049 100644 --- a/data/transactions/logic/crypto_test.go +++ b/data/transactions/logic/crypto_test.go @@ -295,13 +295,17 @@ pop // output`, "int 1"}, } } +func randSeed() crypto.Seed { + var s crypto.Seed + crypto.RandBytes(s[:]) + return s +} + func TestEd25519verify(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - var s crypto.Seed - crypto.RandBytes(s[:]) - c := crypto.GenerateSignatureSecrets(s) + c := crypto.GenerateSignatureSecrets(randSeed()) msg := "62fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd" data, err := hex.DecodeString(msg) require.NoError(t, err) @@ -340,9 +344,7 @@ func TestEd25519VerifyBare(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - var s crypto.Seed - crypto.RandBytes(s[:]) - c := crypto.GenerateSignatureSecrets(s) + c := crypto.GenerateSignatureSecrets(randSeed()) msg := "62fdfc072182654f163f5f0f9a621d729566c74d0aa413bf009c9800418c19cd" data, err := hex.DecodeString(msg) require.NoError(t, err) @@ -824,9 +826,7 @@ func BenchmarkEd25519Verifyx1(b *testing.B) { 
crypto.RandBytes(buffer[:]) data = append(data, buffer) - var s crypto.Seed //generate programs and signatures - crypto.RandBytes(s[:]) - secret := crypto.GenerateSignatureSecrets(s) + secret := crypto.GenerateSignatureSecrets(randSeed()) //generate programs and signatures pk := basics.Address(secret.SignatureVerifier) pkStr := pk.String() ops, err := AssembleStringWithVersion(fmt.Sprintf(`arg 0 diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 550a464d56..27a64f1b82 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -433,7 +433,7 @@ func TestBlankStackSufficient(t *testing.T) { spec := opsByOpcode[v][i] argLen := len(spec.Arg.Types) blankStackLen := len(blankStack) - require.GreaterOrEqual(t, blankStackLen, argLen) + require.GreaterOrEqual(t, blankStackLen, argLen, spec.Name) } }) } @@ -3232,7 +3232,21 @@ func TestIllegalOp(t *testing.T) { } } -func TestShortProgram(t *testing.T) { +func TestShortSimple(t *testing.T) { + partitiontest.PartitionTest(t) + + t.Parallel() + for v := uint64(1); v <= AssemblerMaxVersion; v++ { + t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { + ops := testProg(t, `int 8; store 7`, v) + testLogicBytes(t, ops.Program[:len(ops.Program)-1], nil, + "program ends short of immediate values", + "program ends short of immediate values") + }) + } +} + +func TestShortBranch(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index b4d3ca53c9..3ec054919a 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -1548,7 +1548,7 @@ func (fs voterParamsFieldSpec) Note() string { } var voterParamsFieldSpecs = [...]voterParamsFieldSpec{ - {VoterBalance, StackUint64, 6, "Online stake in microalgos"}, + {VoterBalance, StackUint64, incentiveVersion, "Online stake in microalgos"}, {VoterIncentiveEligible, StackBoolean, incentiveVersion, "Had 
this account opted into block payouts"}, } diff --git a/data/transactions/logic/ledger_test.go b/data/transactions/logic/ledger_test.go index 3dcead5e51..8b75f40855 100644 --- a/data/transactions/logic/ledger_test.go +++ b/data/transactions/logic/ledger_test.go @@ -46,9 +46,14 @@ import ( ) type balanceRecord struct { - addr basics.Address - auth basics.Address - balance uint64 + addr basics.Address + auth basics.Address + balance uint64 + voting basics.VotingData + + proposed basics.Round // The last round that this account proposed the accepted block + heartbeat basics.Round // The last round that this account sent a heartbeat to show it was online. + locals map[basics.AppIndex]basics.TealKeyValue holdings map[basics.AssetIndex]basics.AssetHolding mods map[basics.AppIndex]map[string]basics.ValueDelta @@ -312,7 +317,11 @@ func (l *Ledger) AccountData(addr basics.Address) (ledgercore.AccountData, error TotalBoxes: uint64(boxesTotal), TotalBoxBytes: uint64(boxBytesTotal), + + LastProposed: br.proposed, + LastHeartbeat: br.heartbeat, }, + VotingData: br.voting, }, nil } @@ -329,6 +338,9 @@ func (l *Ledger) AgreementData(addr basics.Address) (basics.OnlineAccountData, e // paid. Here, we ignore that for simple tests. return basics.OnlineAccountData{ MicroAlgosWithRewards: ad.MicroAlgos, + // VotingData is not exposed to `voter_params_get`, the thinking is that + // we don't want them used as "free" storage. And thus far, we don't + // have compelling reasons to examine them in AVM. VotingData: basics.VotingData{ VoteID: ad.VoteID, SelectionID: ad.SelectionID, @@ -940,7 +952,7 @@ func (l *Ledger) Perform(gi int, ep *EvalParams) error { } // Get returns the AccountData of an address. This test ledger does -// not handle rewards, so the pening rewards flag is ignored. +// not handle rewards, so withPendingRewards is ignored. 
func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) { br, ok := l.balances[addr] if !ok { @@ -952,6 +964,17 @@ func (l *Ledger) Get(addr basics.Address, withPendingRewards bool) (basics.Accou Assets: map[basics.AssetIndex]basics.AssetHolding{}, AppLocalStates: map[basics.AppIndex]basics.AppLocalState{}, AppParams: map[basics.AppIndex]basics.AppParams{}, + LastProposed: br.proposed, + LastHeartbeat: br.heartbeat, + // The fields below are not exposed to `acct_params_get`, the thinking + // is that we don't want them used as "free" storage. And thus far, we + // don't have compelling reasons to examine them in AVM. + VoteID: br.voting.VoteID, + SelectionID: br.voting.SelectionID, + StateProofID: br.voting.StateProofID, + VoteFirstValid: br.voting.VoteFirstValid, + VoteLastValid: br.voting.VoteLastValid, + VoteKeyDilution: br.voting.VoteKeyDilution, }, nil } diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go index 15cd34ef2d..ceb12375be 100644 --- a/data/transactions/msgp_gen.go +++ b/data/transactions/msgp_gen.go @@ -12,6 +12,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/protocol" ) @@ -97,6 +98,16 @@ import ( // |-----> (*) MsgIsZero // |-----> HeaderMaxSize() // +// HeartbeatTxnFields +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> HeartbeatTxnFieldsMaxSize() +// // KeyregTxnFields // |-----> (*) MarshalMsg // |-----> (*) CanMarshalMsg @@ -2907,6 +2918,218 @@ func HeaderMaxSize() (s int) { return } +// MarshalMsg implements msgp.Marshaler +func (z 
*HeartbeatTxnFields) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 6 bits */ + if (*z).HbAddress.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).HbKeyDilution == 0 { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).HbProof.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x8 + } + if (*z).HbSeed.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x10 + } + if (*z).HbVoteID.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "a" + o = append(o, 0xa1, 0x61) + o = (*z).HbAddress.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "kd" + o = append(o, 0xa2, 0x6b, 0x64) + o = msgp.AppendUint64(o, (*z).HbKeyDilution) + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "prf" + o = append(o, 0xa3, 0x70, 0x72, 0x66) + o = (*z).HbProof.MarshalMsg(o) + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "sd" + o = append(o, 0xa2, 0x73, 0x64) + o = (*z).HbSeed.MarshalMsg(o) + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "vid" + o = append(o, 0xa3, 0x76, 0x69, 0x64) + o = (*z).HbVoteID.MarshalMsg(o) + } + } + return +} + +func (_ *HeartbeatTxnFields) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatTxnFields) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *HeartbeatTxnFields) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = 
msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbAddress.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbAddress") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbProof.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbProof") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbSeed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbSeed") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).HbVoteID.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbVoteID") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).HbKeyDilution, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HbKeyDilution") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = HeartbeatTxnFields{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "a": + bts, err = (*z).HbAddress.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbAddress") + return + } + case "prf": + bts, err = (*z).HbProof.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbProof") + return + } + case "sd": + bts, err = (*z).HbSeed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbSeed") + return + } + case "vid": + bts, err = (*z).HbVoteID.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HbVoteID") + return + } + case "kd": + (*z).HbKeyDilution, bts, err = 
msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "HbKeyDilution") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *HeartbeatTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *HeartbeatTxnFields) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*HeartbeatTxnFields) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *HeartbeatTxnFields) Msgsize() (s int) { + s = 1 + 2 + (*z).HbAddress.Msgsize() + 4 + (*z).HbProof.Msgsize() + 3 + (*z).HbSeed.Msgsize() + 4 + (*z).HbVoteID.Msgsize() + 3 + msgp.Uint64Size + return +} + +// MsgIsZero returns whether this is a zero value +func (z *HeartbeatTxnFields) MsgIsZero() bool { + return ((*z).HbAddress.MsgIsZero()) && ((*z).HbProof.MsgIsZero()) && ((*z).HbSeed.MsgIsZero()) && ((*z).HbVoteID.MsgIsZero()) && ((*z).HbKeyDilution == 0) +} + +// MaxSize returns a maximum valid message size for this message type +func HeartbeatTxnFieldsMaxSize() (s int) { + s = 1 + 2 + basics.AddressMaxSize() + 4 + crypto.HeartbeatProofMaxSize() + 3 + committee.SeedMaxSize() + 4 + crypto.OneTimeSignatureVerifierMaxSize() + 3 + msgp.Uint64Size + return +} + // MarshalMsg implements msgp.Marshaler func (z *KeyregTxnFields) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) @@ -4982,8 +5205,8 @@ func StateProofTxnFieldsMaxSize() (s int) { func (z *Transaction) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0007Len := uint32(46) - var zb0007Mask uint64 /* 55 bits */ + zb0007Len := uint32(47) + var zb0007Mask uint64 /* 56 bits */ if (*z).AssetTransferTxnFields.AssetAmount == 0 { zb0007Len-- zb0007Mask |= 0x200 @@ -5096,78 +5319,82 @@ func (z *Transaction) MarshalMsg(b []byte) 
(o []byte) { zb0007Len-- zb0007Mask |= 0x1000000000 } - if (*z).Header.LastValid.MsgIsZero() { + if (*z).HeartbeatTxnFields == nil { zb0007Len-- zb0007Mask |= 0x2000000000 } - if (*z).Header.Lease == ([32]byte{}) { + if (*z).Header.LastValid.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x4000000000 } - if (*z).KeyregTxnFields.Nonparticipation == false { + if (*z).Header.Lease == ([32]byte{}) { zb0007Len-- zb0007Mask |= 0x8000000000 } - if len((*z).Header.Note) == 0 { + if (*z).KeyregTxnFields.Nonparticipation == false { zb0007Len-- zb0007Mask |= 0x10000000000 } - if (*z).PaymentTxnFields.Receiver.MsgIsZero() { + if len((*z).Header.Note) == 0 { zb0007Len-- zb0007Mask |= 0x20000000000 } - if (*z).Header.RekeyTo.MsgIsZero() { + if (*z).PaymentTxnFields.Receiver.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x40000000000 } - if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() { + if (*z).Header.RekeyTo.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x80000000000 } - if (*z).Header.Sender.MsgIsZero() { + if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x100000000000 } - if (*z).StateProofTxnFields.StateProof.MsgIsZero() { + if (*z).Header.Sender.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x200000000000 } - if (*z).StateProofTxnFields.Message.MsgIsZero() { + if (*z).StateProofTxnFields.StateProof.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x400000000000 } - if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() { + if (*z).StateProofTxnFields.Message.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x800000000000 } - if (*z).StateProofTxnFields.StateProofType.MsgIsZero() { + if (*z).KeyregTxnFields.StateProofPK.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x1000000000000 } - if (*z).Type.MsgIsZero() { + if (*z).StateProofTxnFields.StateProofType.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x2000000000000 } - if (*z).KeyregTxnFields.VoteFirst.MsgIsZero() { + if (*z).Type.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x4000000000000 } - if (*z).KeyregTxnFields.VoteKeyDilution == 0 { + if 
(*z).KeyregTxnFields.VoteFirst.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x8000000000000 } - if (*z).KeyregTxnFields.VotePK.MsgIsZero() { + if (*z).KeyregTxnFields.VoteKeyDilution == 0 { zb0007Len-- zb0007Mask |= 0x10000000000000 } - if (*z).KeyregTxnFields.VoteLast.MsgIsZero() { + if (*z).KeyregTxnFields.VotePK.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x20000000000000 } - if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() { + if (*z).KeyregTxnFields.VoteLast.MsgIsZero() { zb0007Len-- zb0007Mask |= 0x40000000000000 } + if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() { + zb0007Len-- + zb0007Mask |= 0x80000000000000 + } // variable map header, size zb0007Len o = msgp.AppendMapHeader(o, zb0007Len) if zb0007Len != 0 { @@ -5369,91 +5596,100 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte) { o = (*z).Header.Group.MarshalMsg(o) } if (zb0007Mask & 0x2000000000) == 0 { // if not empty + // string "hb" + o = append(o, 0xa2, 0x68, 0x62) + if (*z).HeartbeatTxnFields == nil { + o = msgp.AppendNil(o) + } else { + o = (*z).HeartbeatTxnFields.MarshalMsg(o) + } + } + if (zb0007Mask & 0x4000000000) == 0 { // if not empty // string "lv" o = append(o, 0xa2, 0x6c, 0x76) o = (*z).Header.LastValid.MarshalMsg(o) } - if (zb0007Mask & 0x4000000000) == 0 { // if not empty + if (zb0007Mask & 0x8000000000) == 0 { // if not empty // string "lx" o = append(o, 0xa2, 0x6c, 0x78) o = msgp.AppendBytes(o, ((*z).Header.Lease)[:]) } - if (zb0007Mask & 0x8000000000) == 0 { // if not empty + if (zb0007Mask & 0x10000000000) == 0 { // if not empty // string "nonpart" o = append(o, 0xa7, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74) o = msgp.AppendBool(o, (*z).KeyregTxnFields.Nonparticipation) } - if (zb0007Mask & 0x10000000000) == 0 { // if not empty + if (zb0007Mask & 0x20000000000) == 0 { // if not empty // string "note" o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65) o = msgp.AppendBytes(o, (*z).Header.Note) } - if (zb0007Mask & 0x20000000000) == 0 { // if not empty + if (zb0007Mask & 0x40000000000) 
== 0 { // if not empty // string "rcv" o = append(o, 0xa3, 0x72, 0x63, 0x76) o = (*z).PaymentTxnFields.Receiver.MarshalMsg(o) } - if (zb0007Mask & 0x40000000000) == 0 { // if not empty + if (zb0007Mask & 0x80000000000) == 0 { // if not empty // string "rekey" o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79) o = (*z).Header.RekeyTo.MarshalMsg(o) } - if (zb0007Mask & 0x80000000000) == 0 { // if not empty + if (zb0007Mask & 0x100000000000) == 0 { // if not empty // string "selkey" o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79) o = (*z).KeyregTxnFields.SelectionPK.MarshalMsg(o) } - if (zb0007Mask & 0x100000000000) == 0 { // if not empty + if (zb0007Mask & 0x200000000000) == 0 { // if not empty // string "snd" o = append(o, 0xa3, 0x73, 0x6e, 0x64) o = (*z).Header.Sender.MarshalMsg(o) } - if (zb0007Mask & 0x200000000000) == 0 { // if not empty + if (zb0007Mask & 0x400000000000) == 0 { // if not empty // string "sp" o = append(o, 0xa2, 0x73, 0x70) o = (*z).StateProofTxnFields.StateProof.MarshalMsg(o) } - if (zb0007Mask & 0x400000000000) == 0 { // if not empty + if (zb0007Mask & 0x800000000000) == 0 { // if not empty // string "spmsg" o = append(o, 0xa5, 0x73, 0x70, 0x6d, 0x73, 0x67) o = (*z).StateProofTxnFields.Message.MarshalMsg(o) } - if (zb0007Mask & 0x800000000000) == 0 { // if not empty + if (zb0007Mask & 0x1000000000000) == 0 { // if not empty // string "sprfkey" o = append(o, 0xa7, 0x73, 0x70, 0x72, 0x66, 0x6b, 0x65, 0x79) o = (*z).KeyregTxnFields.StateProofPK.MarshalMsg(o) } - if (zb0007Mask & 0x1000000000000) == 0 { // if not empty + if (zb0007Mask & 0x2000000000000) == 0 { // if not empty // string "sptype" o = append(o, 0xa6, 0x73, 0x70, 0x74, 0x79, 0x70, 0x65) o = (*z).StateProofTxnFields.StateProofType.MarshalMsg(o) } - if (zb0007Mask & 0x2000000000000) == 0 { // if not empty + if (zb0007Mask & 0x4000000000000) == 0 { // if not empty // string "type" o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) o = (*z).Type.MarshalMsg(o) } - if (zb0007Mask & 
0x4000000000000) == 0 { // if not empty + if (zb0007Mask & 0x8000000000000) == 0 { // if not empty // string "votefst" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74) o = (*z).KeyregTxnFields.VoteFirst.MarshalMsg(o) } - if (zb0007Mask & 0x8000000000000) == 0 { // if not empty + if (zb0007Mask & 0x10000000000000) == 0 { // if not empty // string "votekd" o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64) o = msgp.AppendUint64(o, (*z).KeyregTxnFields.VoteKeyDilution) } - if (zb0007Mask & 0x10000000000000) == 0 { // if not empty + if (zb0007Mask & 0x20000000000000) == 0 { // if not empty // string "votekey" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79) o = (*z).KeyregTxnFields.VotePK.MarshalMsg(o) } - if (zb0007Mask & 0x20000000000000) == 0 { // if not empty + if (zb0007Mask & 0x40000000000000) == 0 { // if not empty // string "votelst" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74) o = (*z).KeyregTxnFields.VoteLast.MarshalMsg(o) } - if (zb0007Mask & 0x40000000000000) == 0 { // if not empty + if (zb0007Mask & 0x80000000000000) == 0 { // if not empty // string "xaid" o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64) o = (*z).AssetTransferTxnFields.XferAsset.MarshalMsg(o) @@ -6086,6 +6322,25 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) return } } + if zb0007 > 0 { + zb0007-- + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z).HeartbeatTxnFields = nil + } else { + if (*z).HeartbeatTxnFields == nil { + (*z).HeartbeatTxnFields = new(HeartbeatTxnFields) + } + bts, err = (*z).HeartbeatTxnFields.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "HeartbeatTxnFields") + return + } + } + } if zb0007 > 0 { err = msgp.ErrTooManyArrayFields(zb0007) if err != nil { @@ -6618,6 +6873,23 @@ func (z *Transaction) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) err = msgp.WrapError(err, "Message") return } 
+ case "hb": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + (*z).HeartbeatTxnFields = nil + } else { + if (*z).HeartbeatTxnFields == nil { + (*z).HeartbeatTxnFields = new(HeartbeatTxnFields) + } + bts, err = (*z).HeartbeatTxnFields.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "HeartbeatTxnFields") + return + } + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -6661,13 +6933,18 @@ func (z *Transaction) Msgsize() (s int) { for zb0006 := range (*z).ApplicationCallTxnFields.ForeignAssets { s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0006].Msgsize() } - s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize() + s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 5 + msgp.Uint32Size + 7 + (*z).StateProofTxnFields.StateProofType.Msgsize() + 3 + (*z).StateProofTxnFields.StateProof.Msgsize() + 6 + (*z).StateProofTxnFields.Message.Msgsize() + 3 + if (*z).HeartbeatTxnFields == nil { + s += msgp.NilSize + } else { + s += (*z).HeartbeatTxnFields.Msgsize() + } return } // MsgIsZero returns whether this is a zero value func (z *Transaction) MsgIsZero() bool { - return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && 
((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 
0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero()) + return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.StateProofPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.Boxes) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) 
&& ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && (len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).ApplicationCallTxnFields.ExtraProgramPages == 0) && ((*z).StateProofTxnFields.StateProofType.MsgIsZero()) && ((*z).StateProofTxnFields.StateProof.MsgIsZero()) && ((*z).StateProofTxnFields.Message.MsgIsZero()) && ((*z).HeartbeatTxnFields == nil) } // MaxSize returns a maximum valid message size for this message type @@ -6689,7 +6966,8 @@ func TransactionMaxSize() (s int) { s += 5 // Calculating size of slice: z.ApplicationCallTxnFields.ForeignAssets s += msgp.ArrayHeaderSize + ((encodedMaxForeignAssets) * (basics.AssetIndexMaxSize())) - s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize() + s += 5 + basics.StateSchemaMaxSize() + 5 + basics.StateSchemaMaxSize() + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.BytesPrefixSize + config.MaxAvailableAppProgramLen + 5 + msgp.Uint32Size + 7 + protocol.StateProofTypeMaxSize() + 3 + stateproof.StateProofMaxSize() + 6 + stateproofmsg.MessageMaxSize() + 3 + s += HeartbeatTxnFieldsMaxSize() return } diff --git a/data/transactions/msgp_gen_test.go b/data/transactions/msgp_gen_test.go index 0ce6b29c38..49ed14f6e3 100644 --- a/data/transactions/msgp_gen_test.go +++ b/data/transactions/msgp_gen_test.go @@ -494,6 +494,66 @@ func BenchmarkUnmarshalHeader(b *testing.B) { } } +func TestMarshalUnmarshalHeartbeatTxnFields(t *testing.T) { + partitiontest.PartitionTest(t) + v := HeartbeatTxnFields{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + 
t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingHeartbeatTxnFields(t *testing.T) { + protocol.RunEncodingTest(t, &HeartbeatTxnFields{}) +} + +func BenchmarkMarshalMsgHeartbeatTxnFields(b *testing.B) { + v := HeartbeatTxnFields{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgHeartbeatTxnFields(b *testing.B) { + v := HeartbeatTxnFields{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalHeartbeatTxnFields(b *testing.B) { + v := HeartbeatTxnFields{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalKeyregTxnFields(t *testing.T) { partitiontest.PartitionTest(t) v := KeyregTxnFields{} diff --git a/data/transactions/stateproof.go b/data/transactions/stateproof.go index 7d24526851..ed23420a1c 100644 --- a/data/transactions/stateproof.go +++ b/data/transactions/stateproof.go @@ -33,14 +33,6 @@ type StateProofTxnFields struct { Message stateproofmsg.Message `codec:"spmsg"` } -// Empty returns whether the StateProofTxnFields are all zero, -// in the sense of being omitted in a msgpack encoding. -func (sp StateProofTxnFields) Empty() bool { - return sp.StateProofType == protocol.StateProofBasic && - sp.StateProof.MsgIsZero() && - sp.Message.MsgIsZero() -} - // specialAddr is used to form a unique address that will send out state proofs. 
// //msgp:ignore specialAddr diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go index a8226654b5..f71aaa3744 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -27,6 +27,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/protocol" ) @@ -100,6 +101,11 @@ type Transaction struct { AssetFreezeTxnFields ApplicationCallTxnFields StateProofTxnFields + + // By making HeartbeatTxnFields a pointer we save a ton of space of the + // Transaction object. Unlike other txn types, the fields will be + // embedded under a named field in the transaction encoding. + *HeartbeatTxnFields `codec:"hb"` } // ApplyData contains information about the transaction's execution. @@ -324,7 +330,7 @@ func (tx Header) Alive(tc TxnContext) error { // MatchAddress checks if the transaction touches a given address. func (tx Transaction) MatchAddress(addr basics.Address, spec SpecialAddresses) bool { - return slices.Contains(tx.RelevantAddrs(spec), addr) + return slices.Contains(tx.relevantAddrs(spec), addr) } var errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound = errors.New("transaction first voting round need to be less than its last voting round") @@ -565,6 +571,42 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa return errLeaseMustBeZeroInStateproofTxn } + case protocol.HeartbeatTx: + if !proto.Heartbeat { + return fmt.Errorf("heartbeat transaction not supported") + } + + // If this is a free/cheap heartbeat, it must be very simple. 
+ if tx.Fee.Raw < proto.MinTxnFee && tx.Group.IsZero() { + kind := "free" + if tx.Fee.Raw > 0 { + kind = "cheap" + } + + if len(tx.Note) > 0 { + return fmt.Errorf("tx.Note is set in %s heartbeat", kind) + } + if tx.Lease != [32]byte{} { + return fmt.Errorf("tx.Lease is set in %s heartbeat", kind) + } + if !tx.RekeyTo.IsZero() { + return fmt.Errorf("tx.RekeyTo is set in %s heartbeat", kind) + } + } + + if (tx.HbProof == crypto.HeartbeatProof{}) { + return errors.New("tx.HbProof is empty") + } + if (tx.HbSeed == committee.Seed{}) { + return errors.New("tx.HbSeed is empty") + } + if tx.HbVoteID.IsEmpty() { + return errors.New("tx.HbVoteID is empty") + } + if tx.HbKeyDilution == 0 { + return errors.New("tx.HbKeyDilution is zero") + } + default: return fmt.Errorf("unknown tx type %v", tx.Type) } @@ -594,10 +636,14 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa nonZeroFields[protocol.ApplicationCallTx] = true } - if !tx.StateProofTxnFields.Empty() { + if !tx.StateProofTxnFields.MsgIsZero() { nonZeroFields[protocol.StateProofTx] = true } + if tx.HeartbeatTxnFields != nil { + nonZeroFields[protocol.HeartbeatTx] = true + } + for t, nonZero := range nonZeroFields { if nonZero && t != tx.Type { return fmt.Errorf("transaction of type %v has non-zero fields for type %v", tx.Type, t) @@ -704,9 +750,8 @@ func (tx Header) Last() basics.Round { return tx.LastValid } -// RelevantAddrs returns the addresses whose balance records this transaction will need to access. -// The header's default is to return just the sender and the fee sink. -func (tx Transaction) RelevantAddrs(spec SpecialAddresses) []basics.Address { +// relevantAddrs returns the addresses whose balance records this transaction will need to access. 
+func (tx Transaction) relevantAddrs(spec SpecialAddresses) []basics.Address { addrs := []basics.Address{tx.Sender, spec.FeeSink} switch tx.Type { @@ -723,6 +768,8 @@ func (tx Transaction) RelevantAddrs(spec SpecialAddresses) []basics.Address { if !tx.AssetTransferTxnFields.AssetSender.IsZero() { addrs = append(addrs, tx.AssetTransferTxnFields.AssetSender) } + case protocol.HeartbeatTx: + addrs = append(addrs, tx.HeartbeatTxnFields.HbAddress) } return addrs diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go index 08dd145a8c..183ebdc760 100644 --- a/data/transactions/transaction_test.go +++ b/data/transactions/transaction_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" @@ -29,6 +30,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" @@ -591,10 +593,156 @@ func TestWellFormedErrors(t *testing.T) { proto: protoV36, expectedError: nil, }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + }, + proto: protoV36, + expectedError: fmt.Errorf("heartbeat transaction not supported"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbProof is empty"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + 
Sig: [64]byte{0x01}, + }, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbSeed is empty"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbVoteID is empty"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.HbKeyDilution is zero"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: okHeader, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: Header{ + Sender: addr1, + Fee: basics.MicroAlgos{Raw: 100}, + LastValid: 105, + FirstValid: 100, + Note: []byte{0x01}, + }, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.Note is set in cheap heartbeat"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: Header{ + Sender: addr1, + Fee: basics.MicroAlgos{Raw: 100}, + LastValid: 105, + FirstValid: 100, + Lease: [32]byte{0x01}, + }, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: 
committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.Lease is set in cheap heartbeat"), + }, + { + tx: Transaction{ + Type: protocol.HeartbeatTx, + Header: Header{ + Sender: addr1, + LastValid: 105, + FirstValid: 100, + RekeyTo: [32]byte{0x01}, + }, + HeartbeatTxnFields: &HeartbeatTxnFields{ + HbProof: crypto.HeartbeatProof{ + Sig: [64]byte{0x01}, + }, + HbSeed: committee.Seed{0x02}, + HbVoteID: crypto.OneTimeSignatureVerifier{0x03}, + HbKeyDilution: 10, + }, + }, + proto: futureProto, + expectedError: fmt.Errorf("tx.RekeyTo is set in free heartbeat"), + }, } for _, usecase := range usecases { err := usecase.tx.WellFormed(SpecialAddresses{}, usecase.proto) - require.Equal(t, usecase.expectedError, err) + assert.Equal(t, usecase.expectedError, err) } } diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 46d3c4cf7e..f01727831f 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -221,11 +221,19 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl prepErr.err = fmt.Errorf("transaction %+v invalid : %w", stxn, prepErr.err) return nil, prepErr } - if stxn.Txn.Type != protocol.StateProofTx { - minFeeCount++ - } feesPaid = basics.AddSaturate(feesPaid, stxn.Txn.Fee.Raw) lSigPooledSize += stxn.Lsig.Len() + if stxn.Txn.Type == protocol.StateProofTx { + // State proofs are free, bail before incrementing + continue + } + if stxn.Txn.Type == protocol.HeartbeatTx && stxn.Txn.Group.IsZero() { + // In apply.Heartbeat, we further confirm that the heartbeat is for + // a challenged account. 
Such heartbeats are free, bail before + // incrementing + continue + } + minFeeCount++ } if groupCtx.consensusParams.EnableLogicSigSizePooling { lSigMaxPooledSize := len(stxs) * int(groupCtx.consensusParams.LogicSigMaxSize) @@ -305,6 +313,11 @@ func stxnCoreChecks(gi int, groupCtx *GroupContext, batchVerifier crypto.BatchVe return err } + if s.Txn.Type == protocol.HeartbeatTx { + id := basics.OneTimeIDForRound(s.Txn.LastValid, s.Txn.HbKeyDilution) + s.Txn.HbProof.BatchPrep(s.Txn.HbVoteID, id, s.Txn.HbSeed, batchVerifier) + } + switch sigType { case regularSig: batchVerifier.EnqueueSignature(crypto.SignatureVerifier(s.Authorizer()), s.Txn, s.Sig) diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go index 7578d0f9da..1e7f39101f 100644 --- a/data/transactions/verify/txn_test.go +++ b/data/transactions/verify/txn_test.go @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/data/transactions/logic/mocktracer" @@ -94,6 +95,41 @@ func keypair() *crypto.SignatureSecrets { return s } +func createHeartbeatTxn(fv basics.Round, t *testing.T) transactions.SignedTxn { + secrets, addrs, _ := generateAccounts(1) + + kd := uint64(111) + lv := fv + 15 + firstID := basics.OneTimeIDForRound(fv, kd) + lastID := basics.OneTimeIDForRound(lv, kd) + numBatches := lastID.Batch - firstID.Batch + 1 + id := basics.OneTimeIDForRound(lv, kd) + + seed := committee.Seed{0x33} + otss := crypto.GenerateOneTimeSignatureSecrets(firstID.Batch, numBatches) + + txn := transactions.Transaction{ + Type: "hb", + Header: transactions.Header{ + Sender: addrs[0], + FirstValid: fv, + LastValid: lv, + }, + HeartbeatTxnFields: 
&transactions.HeartbeatTxnFields{ + HbProof: otss.Sign(id, seed).ToHeartbeatProof(), + HbSeed: seed, + HbVoteID: otss.OneTimeSignatureVerifier, + HbKeyDilution: kd, + }, + } + + hb := transactions.SignedTxn{ + Sig: secrets[0].Sign(txn), + Txn: txn, + } + return hb +} + func generateMultiSigTxn(numTxs, numAccs int, blockRound basics.Round, t *testing.T) ([]transactions.Transaction, []transactions.SignedTxn, []*crypto.SignatureSecrets, []basics.Address) { secrets, addresses, pks, multiAddress := generateMultiSigAccounts(t, numAccs) @@ -574,7 +610,7 @@ func TestPaysetGroups(t *testing.T) { startPaysetGroupsTime := time.Now() err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000), nil) require.NoError(t, err) - paysetGroupDuration := time.Now().Sub(startPaysetGroupsTime) + paysetGroupDuration := time.Since(startPaysetGroupsTime) // break the signature and see if it fails. txnGroups[0][0].Sig[0] = txnGroups[0][0].Sig[0] + 1 @@ -608,7 +644,7 @@ func TestPaysetGroups(t *testing.T) { // channel is closed without a return require.Failf(t, "Channel got closed ?!", "") } else { - actualDuration := time.Now().Sub(startPaysetGroupsTime) + actualDuration := time.Since(startPaysetGroupsTime) if err == nil { if actualDuration > 4*time.Second { // it took at least 2.5 seconds more than it should have had! @@ -864,6 +900,38 @@ func TestTxnGroupCacheUpdateMultiSig(t *testing.T) { verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, crypto.ErrBatchHasFailedSigs.Error()) } +// TestTxnHeartbeat makes sure that a heartbeat transaction is valid (and added +// to the cache) only if the normal outer signature is valid AND the inner +// HbProof is valid. 
+func TestTxnHeartbeat(t *testing.T) { + partitiontest.PartitionTest(t) + + blkHdr := createDummyBlockHeader(protocol.ConsensusFuture) + + txnGroups := make([][]transactions.SignedTxn, 2) // verifyGroup requires at least 2 + for i := 0; i < len(txnGroups); i++ { + txnGroups[i] = make([]transactions.SignedTxn, 1) + txnGroups[i][0] = createHeartbeatTxn(blkHdr.Round-1, t) + } + breakSignatureFunc := func(txn *transactions.SignedTxn) { + txn.Sig[0]++ + } + restoreSignatureFunc := func(txn *transactions.SignedTxn) { + txn.Sig[0]-- + } + // This shows the outer signature must be correct + verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, crypto.ErrBatchHasFailedSigs.Error()) + + breakHbProofFunc := func(txn *transactions.SignedTxn) { + txn.Txn.HeartbeatTxnFields.HbProof.Sig[0]++ + } + restoreHbProofFunc := func(txn *transactions.SignedTxn) { + txn.Txn.HeartbeatTxnFields.HbProof.Sig[0]-- + } + // This shows the inner signature must be correct + verifyGroup(t, txnGroups, &blkHdr, breakHbProofFunc, restoreHbProofFunc, crypto.ErrBatchHasFailedSigs.Error()) +} + // TestTxnGroupCacheUpdateFailLogic test makes sure that a payment transaction contains a logic (and no signature) // is valid (and added to the cache) only if logic passes func TestTxnGroupCacheUpdateFailLogic(t *testing.T) { @@ -1028,12 +1096,18 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= verifyGroup(t, txnGroups, &blkHdr, breakSignatureFunc, restoreSignatureFunc, "rejected by logic") } -func createDummyBlockHeader() bookkeeping.BlockHeader { +func createDummyBlockHeader(optVer ...protocol.ConsensusVersion) bookkeeping.BlockHeader { + // Most tests in this file were written to use current. Future is probably + // the better test, but I don't want to make that choice now, so optVer. 
+ proto := protocol.ConsensusCurrentVersion + if len(optVer) > 0 { + proto = optVer[0] + } return bookkeeping.BlockHeader{ Round: 50, GenesisHash: crypto.Hash([]byte{1, 2, 3, 4, 5}), UpgradeState: bookkeeping.UpgradeState{ - CurrentProtocol: protocol.ConsensusCurrentVersion, + CurrentProtocol: proto, }, RewardsState: bookkeeping.RewardsState{ FeeSink: feeSink, @@ -1067,32 +1141,32 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo breakSig(&txnGroups[0][0]) - dummeyLedger := DummyLedgerForSignature{} - _, err := TxnGroup(txnGroups[0], blkHdr, cache, &dummeyLedger) + dummyLedger := DummyLedgerForSignature{} + _, err := TxnGroup(txnGroups[0], blkHdr, cache, &dummyLedger) require.Error(t, err) require.Contains(t, err.Error(), errorString) // The txns should not be in the cache - unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups[:1], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups := cache.GetUnverifiedTransactionGroups(txnGroups[:1], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 1) - unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 2) - _, err = TxnGroup(txnGroups[1], blkHdr, cache, &dummeyLedger) + _, err = TxnGroup(txnGroups[1], blkHdr, cache, &dummyLedger) require.NoError(t, err) // Only the second txn should be in the cache - unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 1) restoreSig(&txnGroups[0][0]) - _, err = TxnGroup(txnGroups[0], blkHdr, cache, &dummeyLedger) + _, err = TxnGroup(txnGroups[0], blkHdr, cache, &dummyLedger) require.NoError(t, err) // Both transactions should be in the cache - 
unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups[:2], spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 0) cache = MakeVerifiedTransactionCache(1000) @@ -1105,7 +1179,7 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo // Add them to the cache by verifying them for _, txng := range txnGroups { - _, err = TxnGroup(txng, blkHdr, cache, &dummeyLedger) + _, err = TxnGroup(txng, blkHdr, cache, &dummyLedger) if err != nil { require.Error(t, err) require.Contains(t, err.Error(), errorString) @@ -1115,7 +1189,7 @@ func verifyGroup(t *testing.T, txnGroups [][]transactions.SignedTxn, blkHdr *boo require.Equal(t, 1, numFailed) // Only one transaction should not be in cache - unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion) + unverifiedGroups = cache.GetUnverifiedTransactionGroups(txnGroups, spec, blkHdr.CurrentProtocol) require.Len(t, unverifiedGroups, 1) require.Equal(t, unverifiedGroups[0], txnGroups[txgIdx]) diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go index d27510fe6a..03f5cac288 100644 --- a/data/transactions/verify/verifiedTxnCache_test.go +++ b/data/transactions/verify/verifiedTxnCache_test.go @@ -127,7 +127,7 @@ func BenchmarkGetUnverifiedTransactionGroups50(b *testing.B) { for i := 0; i < measuringMultipler; i++ { impl.GetUnverifiedTransactionGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion) } - duration := time.Now().Sub(startTime) + duration := time.Since(startTime) // calculate time per 10K verified entries: t := int(duration*10000) / (measuringMultipler * b.N) b.ReportMetric(float64(t)/float64(time.Millisecond), "ms/10K_cache_compares") diff --git a/data/txntest/txn.go b/data/txntest/txn.go index aea4de005b..ed51c7ef40 100644 --- a/data/txntest/txn.go 
+++ b/data/txntest/txn.go @@ -26,6 +26,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" @@ -91,6 +92,12 @@ type Txn struct { StateProofType protocol.StateProofType StateProof stateproof.StateProof StateProofMsg stateproofmsg.Message + + HbAddress basics.Address + HbProof crypto.HeartbeatProof + HbSeed committee.Seed + HbVoteID crypto.OneTimeSignatureVerifier + HbKeyDilution uint64 } // internalCopy "finishes" a shallow copy done by a simple Go assignment by @@ -218,6 +225,17 @@ func (tx Txn) Txn() transactions.Transaction { case nil: tx.Fee = basics.MicroAlgos{} } + + hb := &transactions.HeartbeatTxnFields{ + HbAddress: tx.HbAddress, + HbProof: tx.HbProof, + HbSeed: tx.HbSeed, + HbVoteID: tx.HbVoteID, + HbKeyDilution: tx.HbKeyDilution, + } + if hb.MsgIsZero() { + hb = nil + } return transactions.Transaction{ Type: tx.Type, Header: transactions.Header{ @@ -281,6 +299,7 @@ func (tx Txn) Txn() transactions.Transaction { StateProof: tx.StateProof, Message: tx.StateProofMsg, }, + HeartbeatTxnFields: hb, } } diff --git a/heartbeat/README.md b/heartbeat/README.md new file mode 100644 index 0000000000..7293afd43f --- /dev/null +++ b/heartbeat/README.md @@ -0,0 +1,180 @@ +# Block Payouts, Suspensions, and Heartbeats + +Running a validator node on Algorand is a relatively lightweight operation. Therefore, participation +in consensus was not compensated. There was an expectation that financial motivated holders of Algos +would run nodes in order to help secure their holdings. 
+
+Although simple participation is not terribly resource intensive, running _any_ service with high
+uptime becomes expensive when one considers that it should be monitored for uptime, be somewhat
+over-provisioned to handle unexpected load spikes, and plans need to be in place to restart in the
+face of hardware failure (or the accounts should leave consensus properly).
+
+With those burdens in mind, fewer Algo holders chose to run participation nodes than would be
+preferred to provide security against well-financed bad actors. To alleviate this problem, a
+mechanism to reward block proposers has been created. With these _block payouts_ in place, large
+Algo holders are incentivized to run participation nodes in order to earn more Algos, increasing
+security for the entire Algorand network.
+
+With the financial incentive to run participation nodes comes the risk that some nodes may be
+operated without sufficient care. Therefore, a mechanism was created to _suspend_ nodes that appear to be
+performing poorly (or not at all). Appearances can be deceiving, however. Since Algorand is a
+probabilistic consensus protocol, pure chance might lead to a node appearing to be delinquent. A new
+transaction type, the _heartbeat_, allows a node to explicitly indicate that it is online even if it
+does not propose blocks due to "bad luck".
+
+# Payouts
+
+Payouts are made in every block, if the proposer has opted into receiving them, has an Algo balance
+in an appropriate range, and has not been suspended for poor behavior since opting-in. The size of
+the payout is indicated in the block header, and comes from the `FeeSink`. The block payout consists
+of two components. First, a portion of the block fees (currently 50%) is paid to the proposer.
+This component incentivizes fuller blocks which lead to larger payouts. Second, a _bonus_ payout is
+made according to an exponentially decaying formula. This bonus is (intentionally) unsustainable
+from protocol fees. 
It is expected that the Algorand Foundation will seed the `FeeSink` with
+sufficient funds to allow the bonuses to be paid out according to the formula for several years. If
+the `FeeSink` has insufficient funds for the sum of these components, the payout will be as high as
+possible while maintaining the `FeeSink`'s minimum balance. These calculations are performed in
+`endOfBlock` in `eval/eval.go`.
+
+To opt-in to receiving block payouts, an account includes an extra fee in the `keyreg`
+transaction. The amount is controlled by the consensus parameter `Payouts.GoOnlineFee`. When such a
+fee is included, a new account state bit, `IncentiveEligible`, is set to true.
+
+Even when an account is `IncentiveEligible`, there is a proposal-time check of the account's online
+stake. If the account has too much or too little, no payout is performed (though
+`IncentiveEligible` remains true). As explained below, this check occurs in `agreement` code in
+`payoutEligible()`. The balance check is performed on the _online_ stake, that is the stake from 320
+rounds earlier, so a clever proposer cannot move Algos in the round it proposes in order to receive
+the payout. Finally, in an interesting corner case, a proposing account could be closed at proposal
+time, since voting is based on the earlier balance. Such an account receives no payout, even if its
+balance was in the proper range 320 rounds ago.
+
+A surprising complication in the implementation of these payouts is that when a block is prepared by
+a node, it does not know which account is the proposer. Until now, `algod` could prepare a single
+block which would be used by any of the accounts it was participating for. The block would be
+handed off to `agreement` which would manipulate the block only to add the appropriate block seed
+(which depended upon the proposer). 
That interaction between `eval` and `agreement` was widened +(see `WithProposer()`) to allow `agreement` to modify the block to include the proper `Proposer`, +and to zero the `ProposerPayout` if the account that proposed was not actually eligible to receive a +payout. + +# Suspensions + +Accounts can be _suspended_ for poor behavior. There are two forms of poor behavior that can lead +to suspension. First, an account is considered _absent_ if it fails to propose as often as it +should. Second, an account can be suspended for failing to respond to a _challenge_ issued by the +network at random. + +## Absenteeism + +An account can be expected to propose once every `n = TotalOnlineStake/AccountOnlineStake` rounds. +For example, a node with 2% of online stake ought to propose once every 50 rounds. Of course the +actual proposer is chosen by random sortition. To make false positive suspensions unlikely, a node +is considered absent if it fails to produce a block over the course of `20n` rounds. + +The suspension mechanism is implemented in `generateKnockOfflineAccountsList` in `eval/eval.go`. It +is closely modeled on the mechanism that knocks accounts offline if their voting keys have expired. +An absent account is added to the `AbsentParticipationAccounts` list of the block header. When +evaluating a block, accounts in `AbsentParticipationAccounts` are suspended by changing their +`Status` to `Offline` and setting `IncentiveEligible` to false, but retaining their voting keys. + +### Keyreg and `LastHeartbeat` + +As described so far, 320 rounds after a `keyreg` to go online, an account suddenly is expected to +have proposed more recently than 20 times its new expected interval. That would be impossible, since +it was not online until that round. Therefore, when a `keyreg` is used to go online and become +`IncentiveEligible`, the account's `LastHeartbeat` field is set 320 rounds into the future. 
In
+effect, the account is treated as though it proposed in the first round it is online.
+
+### Large Algo increases and `LastHeartbeat`
+
+A similar problem can occur when an online account receives Algos. 320 rounds after receiving the
+new Algos, the account's expected proposal interval will shrink. If, for example, such an account
+increases by a factor of 10, then it is reasonably likely that it will not have proposed recently
+enough, and will be suspended immediately. To mitigate this risk, any time an online,
+`IncentiveEligible` account balance doubles from a single `Pay`, its `LastHeartbeat` is incremented
+to 320 rounds past the current round.
+
+## Challenges
+
+The absenteeism checks quickly suspend a high-value account if it becomes inoperative. For example,
+an account with 2% of stake can be marked absent after 500 rounds (about 24 minutes). After
+suspension, the effect on consensus is mitigated after 320 more rounds (about 15
+minutes). Therefore, the suspension mechanism makes Algorand significantly more robust in the face
+of operational errors.
+
+However, the absenteeism mechanism is very slow to notice small accounts. An account with 30,000
+Algos might represent 1/100,000 or less of total stake. It would only be considered absent after a
+million or more rounds without a proposal. At current network speeds, this is about a month. With such
+slow detection, a financially motivated entity might make the decision to run a node even if they lack
+the wherewithal to run the node with excellent uptime. A worst case scenario might be a node that is
+turned off daily, overnight. Such a node would generate profit for the runner, would probably never
+be marked offline by the absenteeism mechanism, yet would impact consensus negatively. Algorand
+can't make progress with 1/3 of nodes offline at any given time for a nightly rest.
+
+To combat this scenario, the network generates random _challenges_ periodically. 
Every +`Payouts.ChallengeInterval` rounds (currently 1000), a random selected portion (currently 1/32) of +all online accounts are challenged. They must _heartbeat_ within `Payouts.ChallengeGracePeriod` +rounds (currently 200), or they will be subject to suspension. With the current consensus +parameters, nodes can be expected to be challenged daily. When suspended, accounts must `keyreg` +with the `GoOnlineFee` in order to receive block payouts again, so it becomes unprofitable for +these low-stake nodes to operate with poor uptimes. + +# Heartbeats + +The absenteeism mechanism is subject to rare false positives. The challenge mechanism explicitly +requires an affirmative response from nodes to indicate they are operating properly on behalf of a +challenged account. Both of these needs are addressed by a new transaction type --- _Heartbeat_. A +Heartbeat transaction contains a signature (`HbProof`) of the blockseed (`HbSeed`) of the +transaction's FirstValid block under the participation key of the account (`HbAddress`) in +question. Note that the account being heartbeat for is _not_ the `Sender` of the transaction, which +can be any address. Signing a recent block seed makes it more difficult to pre-sign heartbeats that +another machine might send on your behalf. Signing the FirstValid's blockseed (rather than +FirstValid-1) simply enforces a best practice: emit a transaction with FirstValid set to a committed +round, not a future round, avoiding a race. The node you send transactions to might not have +committed your latest round yet. + +It is relatively easy for a bad actor to emit Heartbeats for its accounts without actually +participating. However, there is no financial incentive to do so. Pretending to be operational when +offline does not earn block payouts. Furthermore, running a server to monitor the block chain to +notice challenges and gather the recent blockseed is not significantly cheaper than simply running a +functional node. 
It is _already_ possible for malicious, well-resourced accounts to cause consensus
+difficulties by putting significant stake online without actually participating. Heartbeats do not
+mitigate that risk. But these mechanisms have been designed to avoid _motivating_ such behavior, so
+that they can accomplish their actual goal of noticing poor behavior stemming from _inadvertent_
+operational problems.
+
+## Free Heartbeats
+
+Challenges occur frequently, so it is important that `algod` can easily send Heartbeats as
+required. How should these transactions be paid for? Many accounts, especially high-value accounts,
+would not want to keep their spending keys available for automatic use by `algod`. Further, creating
+(and keeping funded) a low-value side account to pay for Heartbeats would be an annoying operational
+overhead. Therefore, when required by challenges, heartbeat transactions do not require a fee.
+Thus, any account, even an unfunded logicsig, can send heartbeats for an account under
+challenge.
+
+The conditions for a free Heartbeat are:
+
+1. The Heartbeat is not part of a larger group, and has a zero `GroupID`.
+1. The `HbAddress` is Online and under challenge with the grace period at least half over.
+1. The `HbAddress` is `IncentiveEligible`.
+1. There is no `Note`, `Lease`, or `RekeyTo`.
+
+## Heartbeat Service
+
+The Heartbeat Service (`heartbeat/service.go`) watches the state of all accounts for which `algod`
+has participation keys. If any of those accounts meets the requirements above, a heartbeat
+transaction is sent, starting with the round following half a grace period from the challenge. It
+uses the (presumably unfunded) logicsig that does nothing except preclude rekey operations.
+
+The heartbeat service does _not_ heartbeat if an account is unlucky and at risk of being considered
+absent. We presume such false positives to be so unlikely that, if they occur, the node must be
+brought back online manually. 
It would be reasonable to consider in the future: + +1. Making heartbeats free for accounts that are "nearly absent". + +or + +2. Allowing for paid heartbeats by the heartbeat service when configured with access to a funded + account's spending key. diff --git a/heartbeat/abstractions.go b/heartbeat/abstractions.go new file mode 100644 index 0000000000..a60f383669 --- /dev/null +++ b/heartbeat/abstractions.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package heartbeat + +import ( + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/ledgercore" +) + +// txnBroadcaster is an interface that captures the node's ability to broadcast +// a new transaction. 
+type txnBroadcaster interface { + BroadcastInternalSignedTxGroup([]transactions.SignedTxn) error +} + +// ledger represents the aspects of the "real" Ledger that the heartbeat service +// needs to interact with +type ledger interface { + // LastRound tells the round is ready for checking + LastRound() basics.Round + + // WaitMem allows the Service to wait for the results of a round to be available + WaitMem(r basics.Round) chan struct{} + + // BlockHdr allows the service access to consensus values + BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) + + // LookupAccount allows the Service to observe accounts for suspension + LookupAccount(round basics.Round, addr basics.Address) (data ledgercore.AccountData, validThrough basics.Round, withoutRewards basics.MicroAlgos, err error) + + LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) +} + +// participants captures the aspects of the AccountManager that are used by this +// package. Service must be able to find out which accounts to monitor and have +// access to their part keys to construct heartbeats. +type participants interface { + Keys(rnd basics.Round) []account.ParticipationRecordForRound +} diff --git a/heartbeat/service.go b/heartbeat/service.go new file mode 100644 index 0000000000..3e0a6cfa00 --- /dev/null +++ b/heartbeat/service.go @@ -0,0 +1,196 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package heartbeat + +import ( + "context" + "sync" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/apply" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" +) + +// Service emits keep-alive heartbeats for accts that are in danger of +// suspension. +type Service struct { + // addresses that should be monitored for suspension + accts participants + // current status and balances + ledger ledger + // where to send the heartbeats + bcast txnBroadcaster + + // infrastructure + ctx context.Context + shutdown context.CancelFunc + wg sync.WaitGroup + log logging.Logger +} + +// NewService creates a heartbeat service. It will need to know which accounts +// to emit heartbeats for, and how to create the heartbeats. +func NewService(accts participants, ledger ledger, bcast txnBroadcaster, log logging.Logger) *Service { + return &Service{ + accts: accts, + ledger: ledger, + bcast: bcast, + log: log.With("Context", "heartbeat"), + } +} + +// Start starts the goroutines for the Service. +func (s *Service) Start() { + s.ctx, s.shutdown = context.WithCancel(context.Background()) + s.wg.Add(1) + s.log.Info("starting heartbeat service") + go s.loop() +} + +// Stop any goroutines associated with this worker. +func (s *Service) Stop() { + s.log.Debug("heartbeat service is stopping") + defer s.log.Debug("heartbeat service has stopped") + s.shutdown() + s.wg.Wait() +} + +// findChallenged() returns a list of accounts that need a heartbeat because +// they have been challenged. 
+func (s *Service) findChallenged(rules config.ProposerPayoutRules, current basics.Round) []account.ParticipationRecordForRound { + ch := apply.FindChallenge(rules, current, s.ledger, apply.ChRisky) + if ch.IsZero() { + return nil + } + + var found []account.ParticipationRecordForRound + for _, pr := range s.accts.Keys(current) { // only look at accounts we have part keys for + acct, err := s.ledger.LookupAgreement(current, pr.Account) + if err != nil { + s.log.Errorf("error looking up %v: %v", pr.Account, err) + continue + } + // There can be more than one `pr` for a single Account in the case of + // overlapping partkey validity windows. Heartbeats are validated with + // the _current_ VoterID (see apply/heartbeat.go), so we only care about + // a ParticipationRecordForRound if it is for the VoterID in `acct`. + if acct.VoteID != pr.Voting.OneTimeSignatureVerifier { + continue + } + // We want to match the logic in generateKnockOfflineAccountsList, but + // don't need to check Online status because we obtained records from + // LookupAgreement, which only returns Online accounts (or empty, which + // will not be IncentiveEligible) If we ever decide to knockoff accounts + // that are not IncentiveEligoible, this code should remember to check + // acct.MicroAlgosWithRewards > 0 to ensure we need a heartbeat. + if acct.IncentiveEligible { + if ch.Failed(pr.Account, max(acct.LastHeartbeat, acct.LastProposed)) { + s.log.Infof(" %v needs a heartbeat\n", pr.Account) + found = append(found, pr) + } + } + } + return found +} + +// loop monitors for any of Service's participants being suspended. If they are, +// it tries to being them back online by emitting a heartbeat transaction. It +// could try to predict an upcoming suspension, which would prevent the +// suspension from ever occurring, but that would be considerably more complex +// both to avoid emitting repeated heartbeats, and to ensure the prediction and +// the suspension logic match. 
This feels like a cleaner end-to-end test, at +// the cost of lost couple rounds of participation. (Though suspension is +// designed to be extremely unlikely anyway.) +func (s *Service) loop() { + defer s.wg.Done() + suppress := make(map[basics.Address]basics.Round) + latest := s.ledger.LastRound() + for { + // exit if Done, else wait for next round + select { + case <-s.ctx.Done(): + return + case <-s.ledger.WaitMem(latest + 1): + } + + latest = s.ledger.LastRound() + + lastHdr, err := s.ledger.BlockHdr(latest) + if err != nil { + s.log.Errorf("heartbeat service could not fetch block header for round %d: %v", latest, err) + continue // Try again next round, I guess? + } + proto := config.Consensus[lastHdr.CurrentProtocol] + + for _, pr := range s.findChallenged(proto.Payouts, latest) { + if suppress[pr.Account] > latest { + continue + } + stxn := s.prepareHeartbeat(pr, lastHdr) + s.log.Infof("sending heartbeat %v for %v\n", stxn.Txn.HeartbeatTxnFields, pr.Account) + err = s.bcast.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn}) + if err != nil { + s.log.Errorf("error broadcasting heartbeat %v for %v: %v", stxn, pr.Account, err) + } else { + // Don't bother heartbeating again until the last one expires. + // If it is accepted, we won't need to (because we won't be + // under challenge any more). + suppress[pr.Account] = stxn.Txn.LastValid + } + } + } +} + +// acceptingByteCode is the byte code to a logic signature that will accept anything (except rekeying). +var acceptingByteCode = logic.MustAssemble(` +#pragma version 11 +txn RekeyTo; global ZeroAddress; == +`) +var acceptingSender = basics.Address(logic.HashProgram(acceptingByteCode)) + +// hbLifetime is somewhat short. It seems better to try several times during the +// grace period than to try a single time with a longer lifetime. 
+const hbLifetime = 10 + +func (s *Service) prepareHeartbeat(pr account.ParticipationRecordForRound, latest bookkeeping.BlockHeader) transactions.SignedTxn { + var stxn transactions.SignedTxn + stxn.Lsig = transactions.LogicSig{Logic: acceptingByteCode} + stxn.Txn.Type = protocol.HeartbeatTx + stxn.Txn.Header = transactions.Header{ + Sender: acceptingSender, + FirstValid: latest.Round, + LastValid: latest.Round + hbLifetime, + GenesisHash: latest.GenesisHash, + } + + id := basics.OneTimeIDForRound(stxn.Txn.LastValid, pr.KeyDilution) + stxn.Txn.HeartbeatTxnFields = &transactions.HeartbeatTxnFields{ + HbAddress: pr.Account, + HbProof: pr.Voting.Sign(id, latest.Seed).ToHeartbeatProof(), + HbSeed: latest.Seed, + HbVoteID: pr.Voting.OneTimeSignatureVerifier, + HbKeyDilution: pr.KeyDilution, + } + + return stxn +} diff --git a/heartbeat/service_test.go b/heartbeat/service_test.go new file mode 100644 index 0000000000..8fd3cb2865 --- /dev/null +++ b/heartbeat/service_test.go @@ -0,0 +1,300 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package heartbeat + +import ( + "fmt" + "testing" + "time" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/algorand/go-deadlock" + "github.com/stretchr/testify/require" +) + +type table map[basics.Address]ledgercore.AccountData + +type mockedLedger struct { + mu deadlock.Mutex + waiters map[basics.Round]chan struct{} + history []table + hdr bookkeeping.BlockHeader + t *testing.T +} + +func newMockedLedger(t *testing.T) mockedLedger { + return mockedLedger{ + waiters: make(map[basics.Round]chan struct{}), + history: []table{nil}, // some genesis accounts could go here + hdr: bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: protocol.ConsensusFuture, + }, + }, + } +} + +func (l *mockedLedger) LastRound() basics.Round { + l.mu.Lock() + defer l.mu.Unlock() + return l.lastRound() +} +func (l *mockedLedger) lastRound() basics.Round { + return basics.Round(len(l.history) - 1) +} + +func (l *mockedLedger) WaitMem(r basics.Round) chan struct{} { + l.mu.Lock() + defer l.mu.Unlock() + + if l.waiters[r] == nil { + l.waiters[r] = make(chan struct{}) + } + + // Return an already-closed channel if we already have the block. 
+ if r <= l.lastRound() { + close(l.waiters[r]) + retChan := l.waiters[r] + delete(l.waiters, r) + return retChan + } + + return l.waiters[r] +} + +// BlockHdr allows the service access to consensus values +func (l *mockedLedger) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + l.mu.Lock() + defer l.mu.Unlock() + + if r > l.lastRound() { + return bookkeeping.BlockHeader{}, fmt.Errorf("%d is beyond current block (%d)", r, l.LastRound()) + } + // return the template hdr, with round + hdr := l.hdr + hdr.Round = r + return hdr, nil +} + +// setSeed allows the mock to return a specific seed +func (l *mockedLedger) setSeed(seed committee.Seed) { + l.mu.Lock() + defer l.mu.Unlock() + + l.hdr.Seed = seed +} + +func (l *mockedLedger) addBlock(delta table) error { + l.mu.Lock() + defer l.mu.Unlock() + + l.history = append(l.history, delta) + + for r, ch := range l.waiters { + switch { + case r < l.lastRound(): + l.t.Logf("%d < %d", r, l.lastRound()) + panic("why is there a waiter for an old block?") + case r == l.lastRound(): + close(ch) + delete(l.waiters, r) + case r > l.lastRound(): + /* waiter keeps waiting */ + } + } + return nil +} + +func (l *mockedLedger) LookupAccount(round basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, basics.MicroAlgos, error) { + l.mu.Lock() + defer l.mu.Unlock() + + if round > l.lastRound() { + panic("mockedLedger.LookupAccount: future round") + } + + for r := round; r <= round; r-- { + if acct, ok := l.history[r][addr]; ok { + more := basics.MicroAlgos{Raw: acct.MicroAlgos.Raw + 1} + return acct, round, more, nil + } + } + return ledgercore.AccountData{}, round, basics.MicroAlgos{}, nil +} + +func (l *mockedLedger) LookupAgreement(round basics.Round, addr basics.Address) (basics.OnlineAccountData, error) { + l.mu.Lock() + defer l.mu.Unlock() + + if round > l.lastRound() { + panic("mockedLedger.LookupAgreement: future round") + } + + for r := round; r <= round; r-- { + if acct, ok := l.history[r][addr]; ok 
{ + oad := basics.OnlineAccountData{ + MicroAlgosWithRewards: acct.MicroAlgos, + VotingData: acct.VotingData, + IncentiveEligible: acct.IncentiveEligible, + LastProposed: acct.LastProposed, + LastHeartbeat: acct.LastHeartbeat, + } + return oad, nil + } + } + return basics.OnlineAccountData{}, nil +} + +// waitFor confirms that the Service made it through the last block in the +// ledger and is waiting for the next. The Service is written such that it +// operates properly without this sort of wait, but for testing, we often want +// to wait so that we can confirm that the Service *didn't* do something. +func (l *mockedLedger) waitFor(s *Service, a *require.Assertions) { + a.Eventually(func() bool { // delay and confirm that the service advances to wait for next block + _, ok := l.waiters[l.LastRound()+1] + return ok + }, time.Second, 10*time.Millisecond) +} + +type mockedAcctManager []account.ParticipationRecordForRound + +func (am *mockedAcctManager) Keys(rnd basics.Round) []account.ParticipationRecordForRound { + return *am +} + +func (am *mockedAcctManager) addParticipant(addr basics.Address, otss *crypto.OneTimeSignatureSecrets) { + *am = append(*am, account.ParticipationRecordForRound{ + ParticipationRecord: account.ParticipationRecord{ + ParticipationID: [32]byte{}, + Account: addr, + Voting: otss, + FirstValid: 0, + LastValid: 1_000_000, + KeyDilution: 7, + }, + }) +} + +type txnSink struct { + t *testing.T + txns [][]transactions.SignedTxn +} + +func (ts *txnSink) BroadcastInternalSignedTxGroup(group []transactions.SignedTxn) error { + ts.t.Logf("sinking %+v", group[0].Txn.Header) + ts.txns = append(ts.txns, group) + return nil +} + +func TestStartStop(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + a := require.New(t) + sink := txnSink{t: t} + ledger := newMockedLedger(t) + s := NewService(&mockedAcctManager{}, &ledger, &sink, logging.TestingLog(t)) + a.NotNil(s) + a.NoError(ledger.addBlock(nil)) + s.Start() + 
a.NoError(ledger.addBlock(nil)) + s.Stop() +} + +func makeBlock(r basics.Round) bookkeeping.Block { + return bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{Round: r}, + Payset: []transactions.SignedTxnInBlock{}, + } +} + +func TestHeartbeatOnlyWhenChallenged(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + a := require.New(t) + sink := txnSink{t: t} + ledger := newMockedLedger(t) + participants := &mockedAcctManager{} + s := NewService(participants, &ledger, &sink, logging.TestingLog(t)) + s.Start() + + joe := basics.Address{0xcc} // 0xcc will matter when we set the challenge + mary := basics.Address{0xaa} // 0xaa will matter when we set the challenge + + acct := ledgercore.AccountData{} + + a.NoError(ledger.addBlock(table{joe: acct})) + ledger.waitFor(s, a) + a.Empty(sink.txns) + + // make "part keys" and install them + kd := uint64(100) + startBatch := basics.OneTimeIDForRound(ledger.LastRound(), kd).Batch + const batches = 50 // gives 50 * kd rounds = 5000 + otss1 := crypto.GenerateOneTimeSignatureSecrets(startBatch, batches) + otss2 := crypto.GenerateOneTimeSignatureSecrets(startBatch, batches) + participants.addParticipant(joe, otss1) + participants.addParticipant(joe, otss2) // Simulate overlapping part keys, so Keys() returns both + participants.addParticipant(mary, otss1) + + // now they are online, but not challenged, so no heartbeat + acct.Status = basics.Online + acct.VoteKeyDilution = kd + acct.VoteID = otss1.OneTimeSignatureVerifier + a.NoError(ledger.addBlock(table{joe: acct, mary: acct})) // in effect, "keyreg" with otss1 + ledger.waitFor(s, a) + a.Empty(sink.txns) + + // now we have to make it seem like joe has been challenged. We obtain the + // payout rules to find the first challenge round, skip forward to it, then + // go forward half a grace period. 
Only then should the service heartbeat + ledger.setSeed(committee.Seed{0xc8}) // share 5 bits with 0xcc + hdr, err := ledger.BlockHdr(ledger.LastRound()) + a.NoError(err) + rules := config.Consensus[hdr.CurrentProtocol].Payouts + for ledger.LastRound() < basics.Round(rules.ChallengeInterval+rules.ChallengeGracePeriod/2) { + a.NoError(ledger.addBlock(table{})) + ledger.waitFor(s, a) + a.Empty(sink.txns) + } + + a.NoError(ledger.addBlock(table{joe: acct})) + ledger.waitFor(s, a) + a.Empty(sink.txns) // Just kidding, no heartbeat yet, joe isn't eligible + + acct.IncentiveEligible = true + a.NoError(ledger.addBlock(table{joe: acct})) + ledger.waitFor(s, a) + // challenge is already in place, it counts immediately, so service will heartbeat + a.Len(sink.txns, 1) // only one heartbeat (for joe) despite having two part records + a.Len(sink.txns[0], 1) + a.Equal(sink.txns[0][0].Txn.Type, protocol.HeartbeatTx) + a.Equal(sink.txns[0][0].Txn.HbAddress, joe) + + s.Stop() +} diff --git a/ledger/acctonline.go b/ledger/acctonline.go index 380ff45852..76e6ef13ee 100644 --- a/ledger/acctonline.go +++ b/ledger/acctonline.go @@ -612,11 +612,6 @@ func (ao *onlineAccounts) onlineTotals(rnd basics.Round) (basics.MicroAlgos, pro return basics.MicroAlgos{Raw: onlineRoundParams.OnlineSupply}, onlineRoundParams.CurrentProtocol, nil } -// LookupOnlineAccountData returns the online account data for a given address at a given round. -func (ao *onlineAccounts) LookupOnlineAccountData(rnd basics.Round, addr basics.Address) (data basics.OnlineAccountData, err error) { - return ao.lookupOnlineAccountData(rnd, addr) -} - // roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock would be taken. 
func (ao *onlineAccounts) roundOffset(rnd basics.Round) (offset uint64, err error) { if rnd < ao.cachedDBRoundOnline { diff --git a/ledger/apply/apply.go b/ledger/apply/apply.go index dfa61b2632..5bbe482f38 100644 --- a/ledger/apply/apply.go +++ b/ledger/apply/apply.go @@ -25,9 +25,14 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" ) +// hdrProvider allows fetching old block headers +type hdrProvider interface { + BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) +} + // StateProofsApplier allows fetching and updating state-proofs state on the ledger type StateProofsApplier interface { - BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) + hdrProvider GetStateProofNextRound() basics.Round SetStateProofNextRound(rnd basics.Round) GetStateProofVerificationContext(stateProofLastAttestedRound basics.Round) (*ledgercore.StateProofVerificationContext, error) diff --git a/ledger/apply/challenge.go b/ledger/apply/challenge.go new file mode 100644 index 0000000000..0de7c1208e --- /dev/null +++ b/ledger/apply/challenge.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package apply + +import ( + "bytes" + "math/bits" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/committee" +) + +// ChallengePeriod indicates which part of the challenge period is under discussion. +type ChallengePeriod int + +const ( + // ChRisky indicates that a challenge is in effect, and the initial grace period is running out. + ChRisky ChallengePeriod = iota + // ChActive indicates that a challenege is in effect, and the grace period + // has run out, so accounts can be suspended + ChActive +) + +type challenge struct { + // round is when the challenge occurred. 0 means this is not a challenge. + round basics.Round + // accounts that match the first `bits` of `seed` must propose or heartbeat to stay online + seed committee.Seed + bits int +} + +// FindChallenge returns the Challenge that was last issued if it's in the period requested. +func FindChallenge(rules config.ProposerPayoutRules, current basics.Round, headers hdrProvider, period ChallengePeriod) challenge { + // are challenges active? + interval := basics.Round(rules.ChallengeInterval) + if rules.ChallengeInterval == 0 || current < interval { + return challenge{} + } + lastChallenge := current - (current % interval) + grace := basics.Round(rules.ChallengeGracePeriod) + // FindChallenge is structured this way, instead of returning the challenge + // and letting the caller determine the period it cares about, to avoid + // using BlockHdr unnecessarily. 
+ switch period { + case ChRisky: + if current <= lastChallenge+grace/2 || current > lastChallenge+grace { + return challenge{} + } + case ChActive: + if current <= lastChallenge+grace || current > lastChallenge+2*grace { + return challenge{} + } + } + challengeHdr, err := headers.BlockHdr(lastChallenge) + if err != nil { + return challenge{} + } + challengeProto := config.Consensus[challengeHdr.CurrentProtocol] + // challenge is not considered if rules have changed since that round + if challengeProto.Payouts != rules { + return challenge{} + } + return challenge{lastChallenge, challengeHdr.Seed, rules.ChallengeBits} +} + +// IsZero returns true if the challenge is empty (used to indicate no challenege) +func (ch challenge) IsZero() bool { + return ch == challenge{} +} + +// Failed returns true iff ch is in effect, matches address, and lastSeen is +// before the challenge issue. When an address "Fails" in this way, the +// _meaning_ depends on how the challenged was obtained. If it was "risky" then +// it means the address is at risk, not that it should be suspended. It it's an +// "active" challenge, then the account should be suspended. +func (ch challenge) Failed(address basics.Address, lastSeen basics.Round) bool { + return ch.round != 0 && bitsMatch(ch.seed[:], address[:], ch.bits) && lastSeen < ch.round +} + +// bitsMatch checks if the first n bits of two byte slices match. Written to +// work on arbitrary slices, but we expect that n is small. Only user today +// calls with n=5. 
+func bitsMatch(a, b []byte, n int) bool { + // Ensure n is a valid number of bits to compare + if n < 0 || n > len(a)*8 || n > len(b)*8 { + return false + } + + // Compare entire bytes when we care about enough bits + if !bytes.Equal(a[:n/8], b[:n/8]) { + return false + } + + remaining := n % 8 + if remaining == 0 { + return true + } + return bits.LeadingZeros8(a[n/8]^b[n/8]) >= remaining +} diff --git a/ledger/apply/challenge_test.go b/ledger/apply/challenge_test.go new file mode 100644 index 0000000000..3114b6f935 --- /dev/null +++ b/ledger/apply/challenge_test.go @@ -0,0 +1,121 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package apply + +import ( + "testing" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBitsMatch(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + for b := 0; b <= 6; b++ { + require.True(t, bitsMatch([]byte{0x1}, []byte{0x2}, b), "%d", b) + } + require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 7)) + require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 8)) + require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 9)) + + for b := 0; b <= 12; b++ { + require.True(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, b), "%d", b) + } + require.False(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, 13)) + + // on a byte boundary + require.True(t, bitsMatch([]byte{0x1}, []byte{0x1}, 8)) + require.False(t, bitsMatch([]byte{0x1}, []byte{0x1}, 9)) + require.True(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 0x00}, 8)) + require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9)) +} + +func TestFailsChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + a := assert.New(t) + + // a valid challenge, with 4 matching bits, and an old last seen + a.True(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10)) + + // challenge isn't "on" + a.False(challenge{round: 0, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10)) + // node has appeared more recently + a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 12)) + // bits don't match + a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xcf, 0x34}, 10)) + // no enough bits match + a.False(challenge{round: 11, 
seed: [32]byte{0xb0, 0xb4}, bits: 5}.Failed(basics.Address{0xbf, 0x34}, 10)) +} + +type singleSource bookkeeping.BlockHeader + +func (ss singleSource) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + return bookkeeping.BlockHeader(ss), nil +} + +func TestActiveChallenge(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + a := assert.New(t) + + nowHeader := bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + // Here the rules are on, so they certainly differ from rules in oldHeader's params + CurrentProtocol: protocol.ConsensusFuture, + }, + } + rules := config.Consensus[nowHeader.CurrentProtocol].Payouts + + // simplest test. when interval=X and grace=G, X+G+1 is a challenge + inChallenge := basics.Round(rules.ChallengeInterval + rules.ChallengeGracePeriod + 1) + ch := FindChallenge(rules, inChallenge, singleSource(nowHeader), ChActive) + a.NotZero(ch.round) + + // all rounds before that have no challenge + for r := basics.Round(1); r < inChallenge; r++ { + ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive) + a.Zero(ch.round, r) + } + + // ChallengeGracePeriod rounds allow challenges starting with inChallenge + for r := inChallenge; r < inChallenge+basics.Round(rules.ChallengeGracePeriod); r++ { + ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive) + a.EqualValues(ch.round, rules.ChallengeInterval) + } + + // And the next round is again challenge-less + ch = FindChallenge(rules, inChallenge+basics.Round(rules.ChallengeGracePeriod), singleSource(nowHeader), ChActive) + a.Zero(ch.round) + + // ignore challenge if upgrade happened + oldHeader := bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + // We need a version from before payouts got turned on + CurrentProtocol: protocol.ConsensusV39, + }, + } + ch = FindChallenge(rules, inChallenge, singleSource(oldHeader), ChActive) + a.Zero(ch.round) +} diff --git a/ledger/apply/heartbeat.go b/ledger/apply/heartbeat.go new file mode 
100644 index 0000000000..a37c8238a4 --- /dev/null +++ b/ledger/apply/heartbeat.go @@ -0,0 +1,102 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package apply + +import ( + "fmt" + + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" +) + +// Heartbeat applies a Heartbeat transaction using the Balances interface. +func Heartbeat(hb transactions.HeartbeatTxnFields, header transactions.Header, balances Balances, provider hdrProvider, round basics.Round) error { + // Get the account's balance entry + account, err := balances.Get(hb.HbAddress, false) + if err != nil { + return err + } + + // In txnGroupBatchPrep, we do not charge for singleton (Group.IsZero) + // heartbeats. But we only _want_ to allow free heartbeats if the account is + // under challenge. If this is an underpaid singleton heartbeat, reject it + // unless the account is under challenge. 
+ + proto := balances.ConsensusParams() + if header.Fee.Raw < proto.MinTxnFee && header.Group.IsZero() { + kind := "free" + if header.Fee.Raw > 0 { + kind = "cheap" + } + + if account.Status != basics.Online { + return fmt.Errorf("%s heartbeat is not allowed for %s %+v", kind, account.Status, hb.HbAddress) + } + if !account.IncentiveEligible { + return fmt.Errorf("%s heartbeat is not allowed when not IncentiveEligible %+v", kind, hb.HbAddress) + } + ch := FindChallenge(proto.Payouts, round, provider, ChRisky) + if ch.IsZero() { + return fmt.Errorf("%s heartbeat for %s is not allowed with no challenge", kind, hb.HbAddress) + } + if !ch.Failed(hb.HbAddress, account.LastSeen()) { + return fmt.Errorf("%s heartbeat for %s is not challenged by %+v", kind, hb.HbAddress, ch) + } + } + + // Note the contrast with agreement. We require the account's _current_ + // partkey be used to sign the heartbeat. This is required because we can + // only look 320 rounds back for voting information. If a heartbeat was + // delayed a few rounds (even 1), we could not ask "what partkey was in + // effect at firstValid-320?" Using the current keys means that an account + // that changes keys would invalidate any heartbeats it has already sent out + // (that haven't been evaluated yet). Maybe more importantly, after going + // offline, an account can no longer heartbeat, since it has no _current_ + // keys. Yet it is still expected to vote for 320 rounds. Therefore, + // challenges do not apply to accounts that are offline (even if they should + // still be voting). + + // heartbeats sign a message consisting of the BlockSeed of the first-valid + // round, to discourage unsavory behaviour like presigning a bunch of + // heartbeats for later use keeping an unavailable account online. 
+ hdr, err := provider.BlockHdr(header.FirstValid) + if err != nil { + return err + } + if hdr.Seed != hb.HbSeed { + return fmt.Errorf("provided seed %v does not match round %d's seed %v", + hb.HbSeed, header.FirstValid, hdr.Seed) + } + if account.VotingData.VoteID != hb.HbVoteID { + return fmt.Errorf("provided voter ID %v does not match %v's voter ID %v", + hb.HbVoteID, hb.HbAddress, account.VotingData.VoteID) + } + if account.VotingData.VoteKeyDilution != hb.HbKeyDilution { + return fmt.Errorf("provided key dilution %d does not match %v's key dilution %d", + hb.HbKeyDilution, hb.HbAddress, account.VotingData.VoteKeyDilution) + } + + account.LastHeartbeat = round + + // Write the updated entry + err = balances.Put(hb.HbAddress, account) + if err != nil { + return err + } + + return nil +} diff --git a/ledger/apply/heartbeat_test.go b/ledger/apply/heartbeat_test.go new file mode 100644 index 0000000000..06a91bf156 --- /dev/null +++ b/ledger/apply/heartbeat_test.go @@ -0,0 +1,208 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package apply + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" + "github.com/algorand/go-algorand/data/txntest" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestHeartbeat(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // Creator + sender := basics.Address{0x01} + voter := basics.Address{0x02} + const keyDilution = 777 + + fv := basics.Round(100) + lv := basics.Round(1000) + + id := basics.OneTimeIDForRound(lv, keyDilution) + otss := crypto.GenerateOneTimeSignatureSecrets(1, 2) // This will cover rounds 1-2*777 + + mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{ + sender: { + MicroAlgos: basics.MicroAlgos{Raw: 10_000_000}, + }, + voter: { + Status: basics.Online, + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + VoteID: otss.OneTimeSignatureVerifier, + VoteKeyDilution: keyDilution, + IncentiveEligible: true, + }, + }) + + seed := committee.Seed{0x01, 0x02, 0x03} + mockHdr := makeMockHeaders(bookkeeping.BlockHeader{ + Round: fv, + Seed: seed, + }) + + test := txntest.Txn{ + Type: protocol.HeartbeatTx, + Sender: sender, + FirstValid: fv, + LastValid: lv, + HbAddress: voter, + HbProof: otss.Sign(id, seed).ToHeartbeatProof(), + } + + tx := test.Txn() + + rnd := basics.Round(150) + // no fee + err := Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "free heartbeat") + + // just as bad: cheap + tx.Fee = basics.MicroAlgos{Raw: 10} + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "cheap heartbeat") + + // address fee + tx.Fee = 
basics.MicroAlgos{Raw: 1000} + + // Seed is missing + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided seed") + + tx.HbSeed = seed + // VoterID is missing + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided voter ID") + + tx.HbVoteID = otss.OneTimeSignatureVerifier + // still no key dilution + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided key dilution 0") + + tx.HbKeyDilution = keyDilution + 1 + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.ErrorContains(t, err, "provided key dilution 778") + + tx.HbKeyDilution = keyDilution + err = Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd) + require.NoError(t, err) + after, err := mockBal.Get(voter, false) + require.NoError(t, err) + require.Equal(t, rnd, after.LastHeartbeat) + require.Zero(t, after.LastProposed) // unchanged +} + +// TestCheapRules ensures a heartbeat can only have a low fee if the account +// being heartbeat for is online, under risk of suspension by challenge, and +// incentive eligible. +func TestCheapRules(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + type tcase struct { + rnd basics.Round + addrStart byte + status basics.Status + incentiveEligble bool + note []byte + lease [32]byte + rekey [32]byte + err string + } + empty := [32]byte{} + // Grace period is 200. For the second half of the grace period (1101-1200), + // the heartbeat is free for online, incentive eligible, challenged accounts. 
+ const grace = 200 + const half = grace / 2 + cases := []tcase{ + // test of range + {1000 + half, 0x01, basics.Online, true, nil, empty, empty, "no challenge"}, + {1000 + half + 1, 0x01, basics.Online, true, nil, empty, empty, ""}, + {1000 + grace, 0x01, basics.Online, true, nil, empty, empty, ""}, + {1000 + grace + 1, 0x01, basics.Online, true, nil, empty, empty, "no challenge"}, + + // test of the other requirements + {1000 + half + 1, 0xf1, basics.Online, true, nil, empty, empty, "not challenged by"}, + {1000 + half + 1, 0x01, basics.Offline, true, nil, empty, empty, "not allowed for Offline"}, + {1000 + half + 1, 0x01, basics.Online, false, nil, empty, empty, "not allowed when not IncentiveEligible"}, + } + for _, tc := range cases { + const keyDilution = 777 + + lv := basics.Round(tc.rnd + 10) + + id := basics.OneTimeIDForRound(lv, keyDilution) + otss := crypto.GenerateOneTimeSignatureSecrets(1, 10) // This will cover rounds 1-10*777 + + sender := basics.Address{0x01} + voter := basics.Address{tc.addrStart} + mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{ + sender: { + MicroAlgos: basics.MicroAlgos{Raw: 10_000_000}, + }, + voter: { + Status: tc.status, + MicroAlgos: basics.MicroAlgos{Raw: 100_000_000}, + VoteID: otss.OneTimeSignatureVerifier, + VoteKeyDilution: keyDilution, + IncentiveEligible: tc.incentiveEligble, + }, + }) + + seed := committee.Seed{0x01, 0x02, 0x03} + mockHdr := makeMockHeaders() + mockHdr.setFallback(bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: protocol.ConsensusFuture, + }, + Seed: seed, + }) + txn := txntest.Txn{ + Type: protocol.HeartbeatTx, + Sender: sender, + Fee: basics.MicroAlgos{Raw: 1}, + FirstValid: tc.rnd - 10, + LastValid: tc.rnd + 10, + Lease: tc.lease, + Note: tc.note, + RekeyTo: tc.rekey, + HbAddress: voter, + HbProof: otss.Sign(id, seed).ToHeartbeatProof(), + HbSeed: seed, + HbVoteID: otss.OneTimeSignatureVerifier, + 
HbKeyDilution: keyDilution, + } + + tx := txn.Txn() + err := Heartbeat(*tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, tc.rnd) + if tc.err == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, tc.err, "%+v", tc) + } + } +} diff --git a/ledger/apply/keyreg.go b/ledger/apply/keyreg.go index f5326f8240..d883618685 100644 --- a/ledger/apply/keyreg.go +++ b/ledger/apply/keyreg.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" + "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" ) @@ -79,7 +80,8 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal } record.Status = basics.Online if params.Payouts.Enabled { - record.LastHeartbeat = header.FirstValid + lookback := agreement.BalanceLookback(balances.ConsensusParams()) + record.LastHeartbeat = round + lookback } record.VoteFirstValid = keyreg.VoteFirst record.VoteLastValid = keyreg.VoteLast diff --git a/ledger/apply/mockBalances_test.go b/ledger/apply/mockBalances_test.go index a02a2108fd..312f37e76d 100644 --- a/ledger/apply/mockBalances_test.go +++ b/ledger/apply/mockBalances_test.go @@ -17,10 +17,12 @@ package apply import ( + "fmt" "maps" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -271,3 +273,33 @@ func (b *mockCreatableBalances) HasAssetParams(addr basics.Address, aidx basics. _, ok = acct.AssetParams[aidx] return } + +type mockHeaders struct { + perRound map[basics.Round]bookkeeping.BlockHeader + fallback *bookkeeping.BlockHeader +} + +// makeMockHeaders takes a bunch of BlockHeaders and returns an hdrProvider for them.
+func makeMockHeaders(hdrs ...bookkeeping.BlockHeader) mockHeaders { + b := make(map[basics.Round]bookkeeping.BlockHeader) + for _, hdr := range hdrs { + b[hdr.Round] = hdr + } + return mockHeaders{perRound: b} +} + +func (m mockHeaders) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + if hdr, ok := m.perRound[r]; ok { + return hdr, nil + } + if m.fallback != nil { + copy := *m.fallback + copy.Round = r + return copy, nil + } + return bookkeeping.BlockHeader{}, fmt.Errorf("round %v is not present", r) +} + +func (m *mockHeaders) setFallback(hdr bookkeeping.BlockHeader) { + m.fallback = &hdr +} diff --git a/ledger/apptxn_test.go b/ledger/apptxn_test.go index a7b3b15214..fce41b00a3 100644 --- a/ledger/apptxn_test.go +++ b/ledger/apptxn_test.go @@ -104,9 +104,9 @@ func TestPayAction(t *testing.T) { dl.t.Log("postsink", postsink, "postprop", postprop) if ver >= payoutsVer { - bonus := 10_000_000 // config/consensus.go - assert.EqualValues(t, bonus-500, presink-postsink) // based on 75% in config/consensus.go - require.EqualValues(t, bonus+1500, postprop-preprop) + bonus := 10_000_000 // config/consensus.go + assert.EqualValues(t, bonus-1000, presink-postsink) // based on 50% in config/consensus.go + require.EqualValues(t, bonus+1000, postprop-preprop) } else { require.EqualValues(t, 2000, postsink-presink) // no payouts yet } diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go index 8bf7f8ce7d..4317fa1016 100644 --- a/ledger/eval/eval.go +++ b/ledger/eval/eval.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "math" - "math/bits" "sync" "github.com/algorand/go-algorand/agreement" @@ -29,7 +28,6 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" 
"github.com/algorand/go-algorand/data/transactions/verify" @@ -38,6 +36,7 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/execpool" ) @@ -48,6 +47,7 @@ type LedgerForCowBase interface { CheckDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error LookupWithoutRewards(basics.Round, basics.Address) (ledgercore.AccountData, basics.Round, error) LookupAgreement(basics.Round, basics.Address) (basics.OnlineAccountData, error) + GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) LookupAsset(basics.Round, basics.Address, basics.AssetIndex) (ledgercore.AssetResource, error) LookupApplication(basics.Round, basics.Address, basics.AppIndex) (ledgercore.AppResource, error) LookupKv(basics.Round, string) ([]byte, error) @@ -207,14 +207,16 @@ func (x *roundCowBase) lookup(addr basics.Address) (ledgercore.AccountData, erro } // balanceRound reproduces the way that the agreement package finds the round to -// consider for online accounts. +// consider for online accounts. It returns the round that would be considered +// while voting on the current round (which is x.rnd+1). func (x *roundCowBase) balanceRound() (basics.Round, error) { - phdr, err := x.BlockHdr(agreement.ParamsRound(x.rnd)) + current := x.rnd + 1 + phdr, err := x.BlockHdr(agreement.ParamsRound(current)) if err != nil { return 0, err } agreementParams := config.Consensus[phdr.CurrentProtocol] - return agreement.BalanceRound(x.rnd, agreementParams), nil + return agreement.BalanceRound(current, agreementParams), nil } // lookupAgreement returns the online accountdata for the provided account address. 
It uses an internal cache @@ -248,12 +250,12 @@ func (x *roundCowBase) onlineStake() (basics.MicroAlgos, error) { if err != nil { return basics.MicroAlgos{}, err } - total, err := x.l.OnlineCirculation(brnd, x.rnd) + total, err := x.l.OnlineCirculation(brnd, x.rnd+1) // x.rnd+1 is round being built if err != nil { return basics.MicroAlgos{}, err } x.totalOnline = total - return x.totalOnline, err + return x.totalOnline, nil } func (x *roundCowBase) updateAssetResourceCache(aa ledgercore.AccountAsset, r ledgercore.AssetResource) { @@ -604,6 +606,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics if overflowed { return fmt.Errorf("overspend (account %v, data %+v, tried to spend %v)", from, fromBal, amt) } + fromBalNew = cs.autoHeartbeat(fromBal, fromBalNew) err = cs.putAccount(from, fromBalNew) if err != nil { return err @@ -632,6 +635,7 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics if overflowed { return fmt.Errorf("balance overflow (account %v, data %+v, was going to receive %v)", to, toBal, amt) } + toBalNew = cs.autoHeartbeat(toBal, toBalNew) err = cs.putAccount(to, toBalNew) if err != nil { return err @@ -641,6 +645,24 @@ func (cs *roundCowState) Move(from basics.Address, to basics.Address, amt basics return nil } +// autoHeartbeat compares `before` and `after`, returning a new AccountData +// based on `after` but with an updated `LastHeartbeat` if `after` shows enough +// balance increase to risk a false positive suspension for absenteeism. 
+func (cs *roundCowState) autoHeartbeat(before, after ledgercore.AccountData) ledgercore.AccountData { + // No need to adjust unless account is suspendable + if after.Status != basics.Online || !after.IncentiveEligible { + return after + } + + // Adjust only if balance has doubled + twice, o := basics.OMul(before.MicroAlgos.Raw, 2) + if !o && after.MicroAlgos.Raw >= twice { + lookback := agreement.BalanceLookback(cs.ConsensusParams()) + after.LastHeartbeat = cs.Round() + lookback + } + return after +} + func (cs *roundCowState) ConsensusParams() config.ConsensusParams { return cs.proto } @@ -1285,6 +1307,9 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, cow *r // Validation of the StateProof transaction before applying will only occur in validate mode. err = apply.StateProof(tx.StateProofTxnFields, tx.Header.FirstValid, cow, eval.validate) + case protocol.HeartbeatTx: + err = apply.Heartbeat(*tx.HeartbeatTxnFields, tx.Header, cow, cow, cow.Round()) + default: err = fmt.Errorf("unknown transaction type %v", tx.Type) } @@ -1339,7 +1364,13 @@ func (eval *BlockEvaluator) TestingTxnCounter() uint64 { } // Call "endOfBlock" after all the block's rewards and transactions are processed. -func (eval *BlockEvaluator) endOfBlock() error { +// When generating a block, participating addresses are passed to prevent a +// proposer from suspending itself. 
+func (eval *BlockEvaluator) endOfBlock(participating ...basics.Address) error { + if participating != nil && !eval.generate { + panic("logic error: only pass partAddresses to endOfBlock when generating") + } + if eval.generate { var err error eval.block.TxnCommitments, err = eval.block.PaysetCommit() @@ -1364,7 +1395,7 @@ func (eval *BlockEvaluator) endOfBlock() error { } } - eval.generateKnockOfflineAccountsList() + eval.generateKnockOfflineAccountsList(participating) if eval.proto.StateProofInterval > 0 { var basicStateProof bookkeeping.StateProofTrackingData @@ -1579,6 +1610,10 @@ func (eval *BlockEvaluator) recordProposal() error { return nil } +// proposerPayout determines how much the proposer should be paid, assuming it +// gets paid at all. It may not examine the actual proposer because it is +// called before the proposer is known. Agreement might zero out this value +// when the actual proposer is decided, if that proposer is ineligible. func (eval *BlockEvaluator) proposerPayout() (basics.MicroAlgos, error) { incentive, _ := basics.NewPercent(eval.proto.Payouts.Percent).DivvyAlgos(eval.block.FeesCollected) total, o := basics.OAddA(incentive, eval.block.Bonus) @@ -1594,38 +1629,106 @@ func (eval *BlockEvaluator) proposerPayout() (basics.MicroAlgos, error) { return basics.MinA(total, available), nil } -type challenge struct { - // round is when the challenge occurred. 0 means this is not a challenge. - round basics.Round - // accounts that match the first `bits` of `seed` must propose or heartbeat to stay online - seed committee.Seed - bits int -} - // generateKnockOfflineAccountsList creates the lists of expired or absent -// participation accounts by traversing over the modified accounts in the state -// deltas and testing if any of them needs to be reset/suspended. Expiration -// takes precedence - if an account is expired, it should be knocked offline and -// key material deleted. If it is only suspended, the key material will remain. 
-func (eval *BlockEvaluator) generateKnockOfflineAccountsList() { +// participation accounts to be suspended. It examines the accounts that appear +// in the current block and high-stake accounts being tracked for state +// proofs. Expiration takes precedence - if an account is expired, it should be +// knocked offline and key material deleted. If it is only suspended, the key +// material will remain. +// +// Different nodes may propose different list of addresses based on node state, +// the protocol does not enforce which accounts must appear. Block validators +// only check whether ExpiredParticipationAccounts or +// AbsentParticipationAccounts meet the criteria for expiration or suspension, +// not whether the lists are complete. +// +// This function is passed a list of participating addresses so a node will not +// propose a block that suspends or expires itself. +func (eval *BlockEvaluator) generateKnockOfflineAccountsList(participating []basics.Address) { if !eval.generate { return } - current := eval.Round() + current := eval.Round() maxExpirations := eval.proto.MaxProposedExpiredOnlineAccounts maxSuspensions := eval.proto.Payouts.MaxMarkAbsent updates := &eval.block.ParticipationUpdates - ch := activeChallenge(&eval.proto, uint64(eval.Round()), eval.state) + ch := apply.FindChallenge(eval.proto.Payouts, current, eval.state, apply.ChActive) + onlineStake, err := eval.state.onlineStake() + if err != nil { + logging.Base().Errorf("unable to fetch online stake, no knockoffs: %v", err) + return + } + + // Make a set of candidate addresses to check for expired or absentee status. 
+ type candidateData struct { + VoteLastValid basics.Round + VoteID crypto.OneTimeSignatureVerifier + Status basics.Status + LastProposed basics.Round + LastHeartbeat basics.Round + MicroAlgosWithRewards basics.MicroAlgos + IncentiveEligible bool // currently unused below, but may be needed in the future + } + candidates := make(map[basics.Address]candidateData) + partAddrs := util.MakeSet(participating...) + // First, ask the ledger for the top N online accounts, with their latest + // online account data, current up to the previous round. + if maxSuspensions > 0 { + knockOfflineCandidates, err := eval.l.GetKnockOfflineCandidates(eval.prevHeader.Round, eval.proto) + if err != nil { + // Log an error and keep going; generating lists of absent and expired + // accounts is not required by block validation rules. + logging.Base().Warnf("error fetching knockOfflineCandidates: %v", err) + knockOfflineCandidates = nil + } + for accountAddr, acctData := range knockOfflineCandidates { + // acctData is from previous block: doesn't include any updates in mods + candidates[accountAddr] = candidateData{ + VoteLastValid: acctData.VoteLastValid, + VoteID: acctData.VoteID, + Status: basics.Online, // GetKnockOfflineCandidates only returns online accounts + LastProposed: acctData.LastProposed, + LastHeartbeat: acctData.LastHeartbeat, + MicroAlgosWithRewards: acctData.MicroAlgosWithRewards, + IncentiveEligible: acctData.IncentiveEligible, + } + } + } + + // Then add any accounts modified in this block, with their state at the + // end of the round. for _, accountAddr := range eval.state.modifiedAccounts() { acctData, found := eval.state.mods.Accts.GetData(accountAddr) if !found { continue } + // This will overwrite data from the knockOfflineCandidates list, if they were modified in the current block. 
+ candidates[accountAddr] = candidateData{ + VoteLastValid: acctData.VoteLastValid, + VoteID: acctData.VoteID, + Status: acctData.Status, + LastProposed: acctData.LastProposed, + LastHeartbeat: acctData.LastHeartbeat, + MicroAlgosWithRewards: acctData.WithUpdatedRewards(eval.proto, eval.state.rewardsLevel()).MicroAlgos, + IncentiveEligible: acctData.IncentiveEligible, + } + } + + // Now, check these candidate accounts to see if they are expired or absent. + for accountAddr, acctData := range candidates { + if acctData.MicroAlgosWithRewards.IsZero() { + continue // don't check accounts that are being closed + } + + if partAddrs.Contains(accountAddr) { + continue // don't check our own participation accounts + } + // Expired check: are this account's voting keys no longer valid? // Regardless of being online or suspended, if voting data exists, the // account can be expired to remove it. This means an offline account // can be expired (because it was already suspended). @@ -1637,18 +1740,25 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList() { updates.ExpiredParticipationAccounts, accountAddr, ) - continue // if marking expired, do not also suspend + continue // if marking expired, do not consider suspension } } + // Absent check: has it been too long since the last heartbeat/proposal, or + // has this online account failed a challenge? 
if len(updates.AbsentParticipationAccounts) >= maxSuspensions { continue // no more room (don't break the loop, since we may have more expiries) } - if acctData.Status == basics.Online { + if acctData.Status == basics.Online && acctData.IncentiveEligible { lastSeen := max(acctData.LastProposed, acctData.LastHeartbeat) - if isAbsent(eval.state.prevTotals.Online.Money, acctData.MicroAlgos, lastSeen, current) || - failsChallenge(ch, accountAddr, lastSeen) { + oad, lErr := eval.state.lookupAgreement(accountAddr) + if lErr != nil { + logging.Base().Errorf("unable to check account for absenteeism: %v", accountAddr) + continue + } + if isAbsent(onlineStake, oad.VotingStake(), lastSeen, current) || + ch.Failed(accountAddr, lastSeen) { updates.AbsentParticipationAccounts = append( updates.AbsentParticipationAccounts, accountAddr, @@ -1658,76 +1768,25 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList() { } } -// bitsMatch checks if the first n bits of two byte slices match. Written to -// work on arbitrary slices, but we expect that n is small. Only user today -// calls with n=5. -func bitsMatch(a, b []byte, n int) bool { - // Ensure n is a valid number of bits to compare - if n < 0 || n > len(a)*8 || n > len(b)*8 { - return false - } - - // Compare entire bytes when n is bigger than 8 - for i := 0; i < n/8; i++ { - if a[i] != b[i] { - return false - } - } - remaining := n % 8 - if remaining == 0 { - return true - } - return bits.LeadingZeros8(a[n/8]^b[n/8]) >= remaining -} +const absentFactor = 20 func isAbsent(totalOnlineStake basics.MicroAlgos, acctStake basics.MicroAlgos, lastSeen basics.Round, current basics.Round) bool { // Don't consider accounts that were online when payouts went into effect as // absent. They get noticed the next time they propose or keyreg, which // ought to be soon, if they are high stake or want to earn incentives. 
- if lastSeen == 0 { + if lastSeen == 0 || acctStake.Raw == 0 { return false } - // See if the account has exceeded 10x their expected observation interval. - allowableLag, o := basics.Muldiv(10, totalOnlineStake.Raw, acctStake.Raw) - if o { - // This can't happen with 10B total possible stake, but if we imagine - // another algorand network with huge possible stake, this seems reasonable. - allowableLag = math.MaxInt64 / acctStake.Raw - } - return lastSeen+basics.Round(allowableLag) < current -} - -type headerSource interface { - BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) -} - -func activeChallenge(proto *config.ConsensusParams, current uint64, headers headerSource) challenge { - rules := proto.Payouts - // are challenges active? - if rules.ChallengeInterval == 0 || current < rules.ChallengeInterval { - return challenge{} - } - lastChallenge := current - (current % rules.ChallengeInterval) - // challenge is in effect if we're after one grace period, but before the 2nd ends. - if current <= lastChallenge+rules.ChallengeGracePeriod || - current > lastChallenge+2*rules.ChallengeGracePeriod { - return challenge{} - } - round := basics.Round(lastChallenge) - challengeHdr, err := headers.BlockHdr(round) - if err != nil { - panic(err) - } - challengeProto := config.Consensus[challengeHdr.CurrentProtocol] - // challenge is not considered if rules have changed since that round - if challengeProto.Payouts != rules { - return challenge{} + // See if the account has exceeded their expected observation interval. + allowableLag, o := basics.Muldiv(absentFactor, totalOnlineStake.Raw, acctStake.Raw) + // just return false for overflow or a huge allowableLag. It implies the lag + // is longer than any network could be around, and computing with wraparound + // is annoying.
+ if o || allowableLag > math.MaxUint32 { + return false } - return challenge{round, challengeHdr.Seed, rules.ChallengeBits} -} -func failsChallenge(ch challenge, address basics.Address, lastSeen basics.Round) bool { - return ch.round != 0 && bitsMatch(ch.seed[:], address[:], ch.bits) && lastSeen < ch.round + return lastSeen+basics.Round(allowableLag) < current } // validateExpiredOnlineAccounts tests the expired online accounts specified in ExpiredParticipationAccounts, and verify @@ -1797,7 +1856,15 @@ func (eval *BlockEvaluator) validateAbsentOnlineAccounts() error { // For consistency with expired account handling, we preclude duplicates addressSet := make(map[basics.Address]bool, suspensionCount) - ch := activeChallenge(&eval.proto, uint64(eval.Round()), eval.state) + ch := apply.FindChallenge(eval.proto.Payouts, eval.Round(), eval.state, apply.ChActive) + totalOnlineStake, err := eval.state.onlineStake() + if err != nil { + logging.Base().Errorf("unable to fetch online stake, can't check knockoffs: %v", err) + // I suppose we can still return successfully if the absent list is empty. 
+ if suspensionCount > 0 { + return err + } + } for _, accountAddr := range eval.block.ParticipationUpdates.AbsentParticipationAccounts { if _, exists := addressSet[accountAddr]; exists { @@ -1813,12 +1880,21 @@ func (eval *BlockEvaluator) validateAbsentOnlineAccounts() error { if acctData.Status != basics.Online { return fmt.Errorf("proposed absent account %v was %v, not Online", accountAddr, acctData.Status) } + if acctData.MicroAlgos.IsZero() { + return fmt.Errorf("proposed absent account %v with zero algos", accountAddr) + } + if !acctData.IncentiveEligible { + return fmt.Errorf("proposed absent account %v not IncentiveEligible", accountAddr) + } - lastSeen := max(acctData.LastProposed, acctData.LastHeartbeat) - if isAbsent(eval.state.prevTotals.Online.Money, acctData.MicroAlgos, lastSeen, eval.Round()) { + oad, lErr := eval.state.lookupAgreement(accountAddr) + if lErr != nil { + return fmt.Errorf("unable to check absent account: %v", accountAddr) + } + if isAbsent(totalOnlineStake, oad.VotingStake(), acctData.LastSeen(), eval.Round()) { continue // ok. it's "normal absent" } - if failsChallenge(ch, accountAddr, lastSeen) { + if ch.Failed(accountAddr, acctData.LastSeen()) { continue // ok. it's "challenge absent" } return fmt.Errorf("proposed absent account %v is not absent in %d, %d", @@ -1882,7 +1958,16 @@ func (eval *BlockEvaluator) suspendAbsentAccounts() error { // After a call to GenerateBlock, the BlockEvaluator can still be used to // accept transactions. However, to guard against reuse, subsequent calls // to GenerateBlock on the same BlockEvaluator will fail. -func (eval *BlockEvaluator) GenerateBlock(addrs []basics.Address) (*ledgercore.UnfinishedBlock, error) { +// +// A list of participating addresses is passed to GenerateBlock. This lets +// the BlockEvaluator know which of this node's participating addresses might +// be proposing this block. 
This information is used when: +// - generating lists of absent accounts (don't suspend yourself) +// - preparing a ledgercore.UnfinishedBlock, which contains the end-of-block +// state of each potential proposer. This allows for a final check in +// UnfinishedBlock.FinishBlock to ensure the proposer hasn't closed its +// account before setting the ProposerPayout header. +func (eval *BlockEvaluator) GenerateBlock(participating []basics.Address) (*ledgercore.UnfinishedBlock, error) { if !eval.generate { logging.Base().Panicf("GenerateBlock() called but generate is false") } @@ -1891,19 +1976,19 @@ func (eval *BlockEvaluator) GenerateBlock(addrs []basics.Address) (*ledgercore.U return nil, fmt.Errorf("GenerateBlock already called on this BlockEvaluator") } - err := eval.endOfBlock() + err := eval.endOfBlock(participating...) if err != nil { return nil, err } - // look up set of participation accounts passed to GenerateBlock (possible proposers) - finalAccounts := make(map[basics.Address]ledgercore.AccountData, len(addrs)) - for i := range addrs { - acct, err := eval.state.lookup(addrs[i]) + // look up end-of-block state of possible proposers passed to GenerateBlock + finalAccounts := make(map[basics.Address]ledgercore.AccountData, len(participating)) + for i := range participating { + acct, err := eval.state.lookup(participating[i]) if err != nil { return nil, err } - finalAccounts[addrs[i]] = acct + finalAccounts[participating[i]] = acct } vb := ledgercore.MakeUnfinishedBlock(eval.block, eval.state.deltas(), finalAccounts) diff --git a/ledger/eval/eval_test.go b/ledger/eval/eval_test.go index 358ba6b430..5874c83325 100644 --- a/ledger/eval/eval_test.go +++ b/ledger/eval/eval_test.go @@ -792,9 +792,27 @@ func (ledger *evalTestLedger) LookupAgreement(rnd basics.Round, addr basics.Addr return convertToOnline(ad), err } -// OnlineCirculation just returns a deterministic value for a given round. 
+func (ledger *evalTestLedger) GetKnockOfflineCandidates(rnd basics.Round, _ config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + // simulate by returning all online accounts known by the test ledger + ret := make(map[basics.Address]basics.OnlineAccountData) + for addr, data := range ledger.roundBalances[rnd] { + if data.Status == basics.Online && !data.MicroAlgos.IsZero() { + ret[addr] = data.OnlineAccountData() + } + } + return ret, nil +} + +// OnlineCirculation add up the balances of all online accounts in rnd. It +// doesn't remove expired accounts. func (ledger *evalTestLedger) OnlineCirculation(rnd, voteRound basics.Round) (basics.MicroAlgos, error) { - return basics.MicroAlgos{Raw: uint64(rnd) * 1_000_000}, nil + circulation := basics.MicroAlgos{} + for _, data := range ledger.roundBalances[rnd] { + if data.Status == basics.Online { + circulation.Raw += data.MicroAlgos.Raw + } + } + return circulation, nil } func (ledger *evalTestLedger) LookupApplication(rnd basics.Round, addr basics.Address, aidx basics.AppIndex) (ledgercore.AppResource, error) { @@ -947,8 +965,8 @@ func (ledger *evalTestLedger) nextBlock(t testing.TB) *BlockEvaluator { } // endBlock completes the block being created, returns the ValidatedBlock for inspection -func (ledger *evalTestLedger) endBlock(t testing.TB, eval *BlockEvaluator) *ledgercore.ValidatedBlock { - unfinishedBlock, err := eval.GenerateBlock(nil) +func (ledger *evalTestLedger) endBlock(t testing.TB, eval *BlockEvaluator, proposers ...basics.Address) *ledgercore.ValidatedBlock { + unfinishedBlock, err := eval.GenerateBlock(proposers) require.NoError(t, err) // fake agreement's setting of header fields so later validates work. 
seed := committee.Seed{} @@ -1024,6 +1042,10 @@ func (l *testCowBaseLedger) LookupAgreement(rnd basics.Round, addr basics.Addres return basics.OnlineAccountData{}, errors.New("not implemented") } +func (l *testCowBaseLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, errors.New("not implemented") +} + func (l *testCowBaseLedger) OnlineCirculation(rnd, voteRnd basics.Round) (basics.MicroAlgos, error) { return basics.MicroAlgos{}, errors.New("not implemented") } @@ -1098,7 +1120,7 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) + genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) sendAddr := addrs[0] recvAddr := addrs[1] @@ -1144,11 +1166,11 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) { // Advance the evaluator a couple rounds, watching for lack of expiration for i := uint64(0); i < uint64(targetRound); i++ { - vb := l.endBlock(t, blkEval) + vb := l.endBlock(t, blkEval, recvAddr) blkEval = l.nextBlock(t) for _, acct := range vb.Block().ExpiredParticipationAccounts { if acct == recvAddr { - // won't happen, because recvAddr didn't appear in block + // won't happen, because recvAddr was proposer require.Fail(t, "premature expiration") } } @@ -1156,26 +1178,6 @@ func TestEvalFunctionForExpiredAccounts(t *testing.T) { require.Greater(t, uint64(blkEval.Round()), uint64(recvAddrLastValidRound)) - genHash := l.GenesisHash() - txn := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: sendAddr, - Fee: minFee, - FirstValid: newBlock.Round(), - LastValid: blkEval.Round(), - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: recvAddr, - Amount: basics.MicroAlgos{Raw: 100}, - }, - } - - st := txn.Sign(keys[0]) 
- err = blkEval.Transaction(st, transactions.ApplyData{}) - require.NoError(t, err) - // Make sure we validate our block as well blkEval.validate = true @@ -1250,7 +1252,7 @@ func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) + genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) sendAddr := addrs[0] recvAddr := addrs[1] @@ -1296,26 +1298,6 @@ func TestExpiredAccountGenerationWithDiskFailure(t *testing.T) { eval = l.nextBlock(t) } - genHash := l.GenesisHash() - txn := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: sendAddr, - Fee: minFee, - FirstValid: newBlock.Round(), - LastValid: eval.Round(), - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: recvAddr, - Amount: basics.MicroAlgos{Raw: 100}, - }, - } - - st := txn.Sign(keys[0]) - err = eval.Transaction(st, transactions.ApplyData{}) - require.NoError(t, err) - eval.validate = true eval.generate = false @@ -1353,19 +1335,46 @@ func TestAbsenteeChecks(t *testing.T) { crypto.RandBytes(tmp.StateProofID[:]) crypto.RandBytes(tmp.SelectionID[:]) crypto.RandBytes(tmp.VoteID[:]) + tmp.IncentiveEligible = true // make suspendable tmp.VoteFirstValid = 1 tmp.VoteLastValid = 1500 // large enough to avoid EXPIRATION, so we can see SUSPENSION - tmp.LastHeartbeat = 1 // non-zero allows suspensions switch i { case 1: - tmp.LastHeartbeat = 1150 // lie here so that addr[1] won't be suspended + tmp.LastHeartbeat = 1 // we want addrs[1] to be suspended earlier than others case 2: - tmp.LastProposed = 1150 // lie here so that addr[2] won't be suspended + tmp.LastProposed = 1 // we want addrs[2] to be suspended earlier than others + case 3: + tmp.LastProposed = 1 // we want addrs[3] to be a proposer, and never suspend itself + case 5: + tmp.LastHeartbeat = 1 
// like addr[1] but !IncentiveEligible, no suspend + tmp.IncentiveEligible = false + case 6: + tmp.LastProposed = 1 // like addr[2] but !IncentiveEligible, no suspend + tmp.IncentiveEligible = false + default: + if i < 10 { // make 0,3,4,7,8,9 unsuspendable + switch i % 3 { + case 0: + tmp.LastProposed = 1200 + case 1: + tmp.LastHeartbeat = 1200 + case 2: + tmp.IncentiveEligible = false + } + } else { + // ensure non-zero balance for the new accounts, but a small + // balance so they will not be absent, just challenged. + tmp.MicroAlgos = basics.MicroAlgos{Raw: 1_000_000} + tmp.LastHeartbeat = 1 // non-zero allows suspensions + } } genesisInitState.Accounts[addr] = tmp } + // pretend this node is participating on behalf of addrs[3] and addrs[4] + proposers := []basics.Address{addrs[3], addrs[4]} + l := newTestLedger(t, bookkeeping.GenesisBalances{ Balances: genesisInitState.Accounts, FeeSink: testSinkAddr, @@ -1377,15 +1386,21 @@ func TestAbsenteeChecks(t *testing.T) { blkEval, err := l.StartEvaluator(newBlock.BlockHeader, 0, 0, nil) require.NoError(t, err) - // Advance the evaluator, watching for lack of suspensions since we don't - // suspend until a txn with a suspendable account appears + // Advance the evaluator, watching for suspensions as they appear challenge := byte(0) - for i := uint64(0); i < uint64(1210); i++ { // A bit past one grace period (200) past challenge at 1000. - vb := l.endBlock(t, blkEval) + for i := uint64(0); i < uint64(1200); i++ { // Just before first suspension at 1171 + vb := l.endBlock(t, blkEval, proposers...) 
blkEval = l.nextBlock(t) - require.Zero(t, vb.Block().AbsentParticipationAccounts) - if vb.Block().Round() == 1000 { + + switch vb.Block().Round() { + case 202: // 2 out of 10 genesis accounts are now absent + require.Len(t, vb.Block().AbsentParticipationAccounts, 2, addrs) + require.Contains(t, vb.Block().AbsentParticipationAccounts, addrs[1]) + require.Contains(t, vb.Block().AbsentParticipationAccounts, addrs[2]) + case 1000: challenge = vb.Block().BlockHeader.Seed[0] + default: + require.Zero(t, vb.Block().AbsentParticipationAccounts, "round %v", vb.Block().Round()) } } challenged := basics.Address{(challenge >> 3) << 3, 0xaa} @@ -1421,26 +1436,32 @@ func TestAbsenteeChecks(t *testing.T) { // Make sure we validate our block as well blkEval.validate = true - unfinishedBlock, err := blkEval.GenerateBlock(nil) + unfinishedBlock, err := blkEval.GenerateBlock(proposers) require.NoError(t, err) // fake agreement's setting of header fields so later validates work validatedBlock := ledgercore.MakeValidatedBlock(unfinishedBlock.UnfinishedBlock().WithProposer(committee.Seed{}, testPoolAddr, true), unfinishedBlock.UnfinishedDeltas()) - require.Zero(t, validatedBlock.Block().ExpiredParticipationAccounts) - require.Contains(t, validatedBlock.Block().AbsentParticipationAccounts, addrs[0], addrs[0].String()) - require.NotContains(t, validatedBlock.Block().AbsentParticipationAccounts, addrs[1], addrs[1].String()) - require.NotContains(t, validatedBlock.Block().AbsentParticipationAccounts, addrs[2], addrs[2].String()) + require.Equal(t, basics.Round(1201), validatedBlock.Block().Round()) + require.Empty(t, validatedBlock.Block().ExpiredParticipationAccounts) // Of the 32 extra accounts, make sure only the one matching the challenge is suspended + require.Len(t, validatedBlock.Block().AbsentParticipationAccounts, 1) require.Contains(t, validatedBlock.Block().AbsentParticipationAccounts, challenged, challenged.String()) + foundChallenged := false for i := byte(0); i < 32; i++ { if 
i == challenge>>3 { + rnd := validatedBlock.Block().Round() + ad := basics.Address{i << 3, 0xaa} + t.Logf("extra account %d %s is challenged, balance rnd %d %d", i, ad, + rnd, l.roundBalances[rnd][ad].MicroAlgos.Raw) require.Equal(t, basics.Address{i << 3, 0xaa}, challenged) + foundChallenged = true continue } require.NotContains(t, validatedBlock.Block().AbsentParticipationAccounts, basics.Address{i << 3, 0xaa}) } + require.True(t, foundChallenged) _, err = Eval(context.Background(), l, validatedBlock.Block(), false, nil, nil, l.tracer) require.NoError(t, err) @@ -1458,7 +1479,7 @@ func TestAbsenteeChecks(t *testing.T) { // Introduce an address that shouldn't be suspended badBlock := goodBlock - badBlock.AbsentParticipationAccounts = append(badBlock.AbsentParticipationAccounts, addrs[1]) + badBlock.AbsentParticipationAccounts = append(badBlock.AbsentParticipationAccounts, addrs[9]) _, err = Eval(context.Background(), l, badBlock, true, verify.GetMockedCache(true), nil, l.tracer) require.ErrorContains(t, err, "not absent") @@ -1496,16 +1517,21 @@ func TestExpiredAccountGeneration(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - genesisInitState, addrs, keys := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) + genesisInitState, addrs, _ := ledgertesting.GenesisWithProto(10, protocol.ConsensusFuture) sendAddr := addrs[0] recvAddr := addrs[1] + propAddr := addrs[2] + otherPropAddr := addrs[3] // not expiring, but part of proposer addresses passed to GenerateBlock - // the last round that the recvAddr is valid for - recvAddrLastValidRound := basics.Round(2) + // pretend this node is participating on behalf of addrs[2] and addrs[3] + proposers := []basics.Address{propAddr, otherPropAddr} + + // the last round that the recvAddr and propAddr are valid for + testAddrLastValidRound := basics.Round(2) // the target round we want to advance the evaluator to - targetRound := basics.Round(4) + targetRound := basics.Round(2) // Set all to online except 
the sending address for _, addr := range addrs { @@ -1526,11 +1552,11 @@ func TestExpiredAccountGeneration(t *testing.T) { genesisInitState.Accounts[addr] = tmp } - // Choose recvAddr to have a last valid round less than genesis block round - { - tmp := genesisInitState.Accounts[recvAddr] - tmp.VoteLastValid = recvAddrLastValidRound - genesisInitState.Accounts[recvAddr] = tmp + // Choose recvAddr and propAddr to have a last valid round less than genesis block round + for _, addr := range []basics.Address{recvAddr, propAddr} { + tmp := genesisInitState.Accounts[addr] + tmp.VoteLastValid = testAddrLastValidRound + genesisInitState.Accounts[addr] = tmp } l := newTestLedger(t, bookkeeping.GenesisBalances{ @@ -1547,36 +1573,18 @@ func TestExpiredAccountGeneration(t *testing.T) { // Advance the evaluator a couple rounds... for i := uint64(0); i < uint64(targetRound); i++ { - l.endBlock(t, eval) + vb := l.endBlock(t, eval) eval = l.nextBlock(t) + require.Empty(t, vb.Block().ExpiredParticipationAccounts) } - require.Greater(t, uint64(eval.Round()), uint64(recvAddrLastValidRound)) - - genHash := l.GenesisHash() - txn := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: sendAddr, - Fee: minFee, - FirstValid: newBlock.Round(), - LastValid: eval.Round(), - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: recvAddr, - Amount: basics.MicroAlgos{Raw: 100}, - }, - } - - st := txn.Sign(keys[0]) - err = eval.Transaction(st, transactions.ApplyData{}) - require.NoError(t, err) + require.Greater(t, uint64(eval.Round()), uint64(testAddrLastValidRound)) // Make sure we validate our block as well eval.validate = true - unfinishedBlock, err := eval.GenerateBlock(nil) + // GenerateBlock will not mark its own proposer addresses as expired + unfinishedBlock, err := eval.GenerateBlock(proposers) require.NoError(t, err) listOfExpiredAccounts := 
unfinishedBlock.UnfinishedBlock().ParticipationUpdates.ExpiredParticipationAccounts @@ -1593,29 +1601,17 @@ func TestExpiredAccountGeneration(t *testing.T) { require.Zero(t, recvAcct.VoteID) require.Zero(t, recvAcct.SelectionID) require.Zero(t, recvAcct.StateProofID) -} - -func TestBitsMatch(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - for b := 0; b <= 6; b++ { - require.True(t, bitsMatch([]byte{0x1}, []byte{0x2}, b), "%d", b) - } - require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 7)) - require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 8)) - require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 9)) - - for b := 0; b <= 12; b++ { - require.True(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, b), "%d", b) - } - require.False(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, 13)) - // on a byte boundary - require.True(t, bitsMatch([]byte{0x1}, []byte{0x1}, 8)) - require.False(t, bitsMatch([]byte{0x1}, []byte{0x1}, 9)) - require.True(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 0x00}, 8)) - require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9)) + // propAddr not marked expired + propAcct, err := eval.state.lookup(propAddr) + require.NoError(t, err) + require.Equal(t, basics.Online, propAcct.Status) + require.NotZero(t, propAcct.VoteFirstValid) + require.NotZero(t, propAcct.VoteLastValid) + require.NotZero(t, propAcct.VoteKeyDilution) + require.NotZero(t, propAcct.VoteID) + require.NotZero(t, propAcct.SelectionID) + require.NotZero(t, propAcct.StateProofID) } func TestIsAbsent(t *testing.T) { @@ -1626,82 +1622,13 @@ func TestIsAbsent(t *testing.T) { var absent = func(total uint64, acct uint64, last uint64, current uint64) bool { return isAbsent(basics.Algos(total), basics.Algos(acct), basics.Round(last), basics.Round(current)) } - // 1% of stake, absent for 1000 rounds - a.False(absent(1000, 10, 5000, 6000)) - a.True(absent(1000, 10, 5000, 6001)) // longer - a.True(absent(1000, 11, 5000, 6001)) // more acct stake - 
a.False(absent(1000, 9, 5000, 6001)) // less acct stake - a.False(absent(1001, 10, 5000, 6001)) // more online stake + a.False(absent(1000, 10, 5000, 6000)) // 1% of stake, absent for 1000 rounds + a.False(absent(1000, 10, 5000, 7000)) // 1% of stake, absent for 2000 rounds + a.True(absent(1000, 10, 5000, 7001)) // 2001 + a.True(absent(1000, 11, 5000, 7000)) // more acct stake drives percent down, makes it absent + a.False(absent(1000, 9, 5000, 7001)) // less acct stake + a.False(absent(1001, 10, 5000, 7001)) // more online stake // not absent if never seen - a.False(absent(1000, 10, 0, 6000)) - a.False(absent(1000, 10, 0, 6001)) -} - -func TestFailsChallenge(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - a := assert.New(t) - - // a valid challenge, with 4 matching bits, and an old last seen - a.True(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xbf, 0x34}, 10)) - - // challenge isn't "on" - a.False(failsChallenge(challenge{round: 0, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xbf, 0x34}, 10)) - // node has appeared more recently - a.False(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xbf, 0x34}, 12)) - // bits don't match - a.False(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}, basics.Address{0xcf, 0x34}, 10)) - // no enough bits match - a.False(failsChallenge(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 5}, basics.Address{0xbf, 0x34}, 10)) -} - -type singleSource bookkeeping.BlockHeader - -func (ss singleSource) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { - return bookkeeping.BlockHeader(ss), nil -} - -func TestActiveChallenge(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - a := assert.New(t) - - nowHeader := bookkeeping.BlockHeader{ - UpgradeState: bookkeeping.UpgradeState{ - // Here the rules are on, so they certainly differ from rules in oldHeader's params - CurrentProtocol: 
protocol.ConsensusFuture, - }, - } - now := config.Consensus[nowHeader.CurrentProtocol] - - // simplest test. when interval=X and grace=G, X+G+1 is a challenge - inChallenge := now.Payouts.ChallengeInterval + now.Payouts.ChallengeGracePeriod + 1 - ch := activeChallenge(&now, inChallenge, singleSource(nowHeader)) - a.NotZero(ch.round) - - // all rounds before that have no challenge - for r := uint64(1); r < inChallenge; r++ { - ch := activeChallenge(&now, r, singleSource(nowHeader)) - a.Zero(ch.round, r) - } - - // ChallengeGracePeriod rounds allow challenges starting with inChallenge - for r := inChallenge; r < inChallenge+now.Payouts.ChallengeGracePeriod; r++ { - ch := activeChallenge(&now, r, singleSource(nowHeader)) - a.EqualValues(ch.round, now.Payouts.ChallengeInterval) - } - - // And the next round is again challenge-less - ch = activeChallenge(&now, inChallenge+now.Payouts.ChallengeGracePeriod, singleSource(nowHeader)) - a.Zero(ch.round) - - // ignore challenge if upgrade happened - oldHeader := bookkeeping.BlockHeader{ - UpgradeState: bookkeeping.UpgradeState{ - // We need a version from before payouts got turned on - CurrentProtocol: protocol.ConsensusV39, - }, - } - ch = activeChallenge(&now, inChallenge, singleSource(oldHeader)) - a.Zero(ch.round) + a.False(absent(1000, 10, 0, 2001)) + a.True(absent(1000, 10, 1, 2002)) } diff --git a/ledger/eval/prefetcher/prefetcher.go b/ledger/eval/prefetcher/prefetcher.go index 487d370524..765b6ea9c2 100644 --- a/ledger/eval/prefetcher/prefetcher.go +++ b/ledger/eval/prefetcher/prefetcher.go @@ -343,7 +343,9 @@ func (p *accountPrefetcher) prefetch(ctx context.Context) { // since they might be non-used arbitrary values case protocol.StateProofTx: - case protocol.KeyRegistrationTx: + case protocol.KeyRegistrationTx: // No extra accounts besides the sender + case protocol.HeartbeatTx: + loadAccountsAddAccountTask(&stxn.Txn.HbAddress, task, accountTasks, queue) } // If you add new addresses here, also add them in 
getTxnAddresses(). diff --git a/ledger/eval/prefetcher/prefetcher_alignment_test.go b/ledger/eval/prefetcher/prefetcher_alignment_test.go index 734d84a661..0c232aebf4 100644 --- a/ledger/eval/prefetcher/prefetcher_alignment_test.go +++ b/ledger/eval/prefetcher/prefetcher_alignment_test.go @@ -30,6 +30,7 @@ import ( "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/eval" @@ -119,6 +120,7 @@ func (l *prefetcherAlignmentTestLedger) LookupWithoutRewards(_ basics.Round, add } return ledgercore.AccountData{}, 0, nil } + func (l *prefetcherAlignmentTestLedger) LookupAgreement(_ basics.Round, addr basics.Address) (basics.OnlineAccountData, error) { // prefetch alignment tests do not check for prefetching of online account data // because it's quite different and can only occur in AVM opcodes, which @@ -126,9 +128,15 @@ func (l *prefetcherAlignmentTestLedger) LookupAgreement(_ basics.Round, addr bas // will be accessed in AVM.) 
return basics.OnlineAccountData{}, errors.New("not implemented") } + func (l *prefetcherAlignmentTestLedger) OnlineCirculation(rnd, voteRnd basics.Round) (basics.MicroAlgos, error) { return basics.MicroAlgos{}, errors.New("not implemented") } + +func (l *prefetcherAlignmentTestLedger) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + return nil, errors.New("not implemented") +} + func (l *prefetcherAlignmentTestLedger) LookupApplication(rnd basics.Round, addr basics.Address, aidx basics.AppIndex) (ledgercore.AppResource, error) { l.mu.Lock() if l.requestedApps == nil { @@ -144,6 +152,7 @@ func (l *prefetcherAlignmentTestLedger) LookupApplication(rnd basics.Round, addr return l.apps[addr][aidx], nil } + func (l *prefetcherAlignmentTestLedger) LookupAsset(rnd basics.Round, addr basics.Address, aidx basics.AssetIndex) (ledgercore.AssetResource, error) { l.mu.Lock() if l.requestedAssets == nil { @@ -159,9 +168,11 @@ func (l *prefetcherAlignmentTestLedger) LookupAsset(rnd basics.Round, addr basic return l.assets[addr][aidx], nil } + func (l *prefetcherAlignmentTestLedger) LookupKv(rnd basics.Round, key string) ([]byte, error) { panic("not implemented") } + func (l *prefetcherAlignmentTestLedger) GetCreatorForRound(_ basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) { l.mu.Lock() if l.requestedCreators == nil { @@ -175,6 +186,7 @@ func (l *prefetcherAlignmentTestLedger) GetCreatorForRound(_ basics.Round, cidx } return basics.Address{}, false, nil } + func (l *prefetcherAlignmentTestLedger) GenesisHash() crypto.Digest { return crypto.Digest{} } @@ -1411,3 +1423,57 @@ func TestEvaluatorPrefetcherAlignmentStateProof(t *testing.T) { prefetched.pretend(rewardsPool()) require.Equal(t, requested, prefetched) } + +func TestEvaluatorPrefetcherAlignmentHeartbeat(t *testing.T) { + partitiontest.PartitionTest(t) + + // We need valid part keys to evaluate the 
Heartbeat. + const kd = 10 + firstID := basics.OneTimeIDForRound(0, kd) + otss := crypto.GenerateOneTimeSignatureSecrets(firstID.Batch, 5) + + l := &prefetcherAlignmentTestLedger{ + balances: map[basics.Address]ledgercore.AccountData{ + rewardsPool(): { + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 1234567890}, + }, + }, + makeAddress(1): { + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 1000001}, + }, + }, + makeAddress(2): { + AccountBaseData: ledgercore.AccountBaseData{ + MicroAlgos: basics.MicroAlgos{Raw: 100_000}, + }, + VotingData: basics.VotingData{ + VoteID: otss.OneTimeSignatureVerifier, + VoteKeyDilution: 123, + }, + }, + }, + } + + txn := transactions.Transaction{ + Type: protocol.HeartbeatTx, + Header: transactions.Header{ + Sender: makeAddress(1), + GenesisHash: genesisHash(), + Fee: basics.Algos(1), // Heartbeat txn is unusual in that it checks fees a bit. + }, + HeartbeatTxnFields: &transactions.HeartbeatTxnFields{ + HbAddress: makeAddress(2), + HbProof: otss.Sign(firstID, committee.Seed(genesisHash())).ToHeartbeatProof(), + HbSeed: committee.Seed(genesisHash()), + HbVoteID: otss.OneTimeSignatureVerifier, + HbKeyDilution: 123, + }, + } + + requested, prefetched := run(t, l, txn) + + prefetched.pretend(rewardsPool()) + require.Equal(t, requested, prefetched) +} diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go index 972821c26c..6ec44b99f3 100644 --- a/ledger/eval_simple_test.go +++ b/ledger/eval_simple_test.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "fmt" "reflect" + "slices" "strings" "testing" @@ -280,15 +281,15 @@ func TestPayoutFees(t *testing.T) { // new fields are in the header require.EqualValues(t, 2000, vb.Block().FeesCollected.Raw) require.EqualValues(t, bonus1, vb.Block().Bonus.Raw) - require.EqualValues(t, bonus1+1_500, vb.Block().ProposerPayout().Raw) + require.EqualValues(t, bonus1+1_000, vb.Block().ProposerPayout().Raw) // This last one is 
really only testing the "fake" agreement that // happens in dl.endBlock(). require.EqualValues(t, proposer, vb.Block().Proposer()) // At the end of the block, part of the fees + bonus have been moved to // the proposer. - require.EqualValues(t, bonus1+1500, postprop-preprop) // based on 75% in config/consensus.go - require.EqualValues(t, bonus1-500, presink-postsink) + require.EqualValues(t, bonus1+1_000, postprop-preprop) // based on 75% in config/consensus.go + require.EqualValues(t, bonus1-1_000, presink-postsink) require.Equal(t, prp.LastProposed, dl.generator.Latest()) } else { require.False(t, dl.generator.GenesisProto().Payouts.Enabled) @@ -412,8 +413,34 @@ func TestAbsentTracking(t *testing.T) { int 0; voter_params_get VoterIncentiveEligible; itob; log; itob; log; int 1` + addrIndexes := make(map[basics.Address]int) + for i, addr := range addrs { + addrIndexes[addr] = i + } + prettyAddrs := func(inAddrs []basics.Address) []string { + ret := make([]string, len(inAddrs)) + for i, addr := range inAddrs { + if idx, ok := addrIndexes[addr]; ok { + ret[i] = fmt.Sprintf("addrs[%d]", idx) + } else { + ret[i] = addr.String() + } + } + return ret + } + + printAbsent := func(vb *ledgercore.ValidatedBlock) { + t.Helper() + absent := vb.Block().AbsentParticipationAccounts + expired := vb.Block().ExpiredParticipationAccounts + if len(expired) > 0 || len(absent) > 0 { + t.Logf("rnd %d: expired %d, absent %d (exp %v abs %v)", vb.Block().Round(), + len(expired), len(absent), prettyAddrs(expired), prettyAddrs(absent)) + } + } + checkingBegins := 40 - ledgertesting.TestConsensusRange(t, checkingBegins, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + runTest := func(t *testing.T, cv protocol.ConsensusVersion, cfg config.Local) { dl := NewDoubleLedger(t, genBalances, cv, cfg) defer dl.Close() @@ -456,13 +483,17 @@ func TestAbsentTracking(t *testing.T) { // have addrs[1] go online explicitly, which makes it eligible for suspension. 
// use a large fee, so we can see IncentiveEligible change - dl.txn(&txntest.Txn{ // #2 + vb := dl.fullBlock(&txntest.Txn{ // #2 Type: "keyreg", Fee: 10_000_000, Sender: addrs[1], VotePK: [32]byte{1}, SelectionPK: [32]byte{1}, }) + addr1Keyreg := vb.Block().Round() + require.EqualValues(t, 2, addr1Keyreg) // sanity check + const lookback = 320 // keyreg puts LastHeartbeat 320 rounds into the future + require.EqualValues(t, addr1Keyreg+lookback, lookup(t, dl.generator, addrs[1]).LastHeartbeat) // as configured above, only the first two accounts should be online require.True(t, lookup(t, dl.generator, addrs[0]).Status == basics.Online) @@ -480,7 +511,8 @@ func TestAbsentTracking(t *testing.T) { require.True(t, lookup(t, dl.generator, addrs[1]).IncentiveEligible) require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) - vb := dl.fullBlock() // #6 + vb = dl.fullBlock() // #6 + printAbsent(vb) totals, err := dl.generator.Totals(vb.Block().Round()) require.NoError(t, err) require.NotZero(t, totals.Online.Money.Raw) @@ -494,7 +526,7 @@ func TestAbsentTracking(t *testing.T) { Receiver: addrs[2], Amount: 100_000, }) - dl.endBlock(proposer) // #7 + printAbsent(dl.endBlock(proposer)) // #7 prp := lookup(t, dl.validator, proposer) require.Equal(t, prp.LastProposed, dl.validator.Latest()) @@ -508,7 +540,7 @@ func TestAbsentTracking(t *testing.T) { require.Equal(t, totals.Online.Money.Raw-100_000-1000, newtotals.Online.Money.Raw) totals = newtotals - dl.fullBlock() + printAbsent(dl.fullBlock()) // addrs[2] was already offline dl.txns(&txntest.Txn{Type: "keyreg", Sender: addrs[2]}) // OFFLINE keyreg #9 @@ -524,12 +556,13 @@ func TestAbsentTracking(t *testing.T) { require.Zero(t, regger.LastHeartbeat) // ONLINE keyreg without extra fee - dl.txns(&txntest.Txn{ + vb = dl.fullBlock(&txntest.Txn{ Type: "keyreg", Sender: addrs[2], VotePK: [32]byte{1}, SelectionPK: [32]byte{1}, }) // #10 + printAbsent(vb) // online totals have grown, addr[2] was added newtotals, err = 
dl.generator.Totals(dl.generator.Latest()) require.NoError(t, err) @@ -539,7 +572,7 @@ func TestAbsentTracking(t *testing.T) { require.Zero(t, regger.LastProposed) require.True(t, regger.Status == basics.Online) - // But nothing has changed, since we're not past 320 + // But nothing has changed for voter_params_get, since we're not past 320 checkState(addrs[0], true, false, 833_333_333_333_333) // #11 checkState(addrs[1], true, false, 833_333_333_333_333) // #12 checkState(addrs[2], false, false, 0) // #13 @@ -555,14 +588,16 @@ func TestAbsentTracking(t *testing.T) { VotePK: [32]byte{1}, SelectionPK: [32]byte{1}, }) // #14 - twoEligible := vb.Block().Round() - require.EqualValues(t, 14, twoEligible) // sanity check + printAbsent(vb) + addr2Eligible := vb.Block().Round() + require.EqualValues(t, 14, addr2Eligible) // sanity check regger = lookup(t, dl.validator, addrs[2]) require.True(t, regger.IncentiveEligible) + require.EqualValues(t, 14+320, regger.LastHeartbeat) for i := 0; i < 5; i++ { - dl.fullBlock() // #15-19 + printAbsent(dl.fullBlock()) // #15-19 require.True(t, lookup(t, dl.generator, addrs[0]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[1]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[2]).Status == basics.Online) @@ -573,107 +608,96 @@ func TestAbsentTracking(t *testing.T) { require.True(t, lookup(t, dl.generator, addrs[1]).Status == basics.Online) require.True(t, lookup(t, dl.generator, addrs[2]).Status == basics.Online) - for i := 0; i < 30; i++ { - dl.fullBlock() // #20-49 - } + var addr1off basics.Round + var addr2off basics.Round + // We're at 20, skip ahead by lookback + 60 to see the knockoffs + const absentFactor = 20 + skip := basics.Round(3) * absentFactor + for { + vb := dl.fullBlock() + printAbsent(vb) + + rnd := vb.Block().Round() + switch { + case slices.Contains(vb.Block().AbsentParticipationAccounts, addrs[1]): + addr1off = rnd + case 
slices.Contains(vb.Block().AbsentParticipationAccounts, addrs[2]): + addr2off = rnd + default: + require.Empty(t, vb.Block().AbsentParticipationAccounts) + } - // addrs 0-2 all have about 1/3 of stake, so seemingly (see next block - // of checks) become eligible for suspension after 30 rounds. We're at - // about 35. But, since blocks are empty, nobody's suspendible account - // is noticed. - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[1]).Status) - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[2]).Status) - require.True(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) + if rnd < 100 { + // `vote_params_get` sees no changes in the early going, because it looks back 320 + checkState(addrs[1], true, false, 833_333_333_333_333) // this also advances a round! + // to avoid complications from advancing an extra round, we only do this check for 100 rounds + } - // when 2 pays 0, they both get noticed but addr[0] is not considered - // absent because it is a genesis account - vb = dl.fullBlock(&txntest.Txn{ - Type: "pay", - Sender: addrs[2], - Receiver: addrs[0], - Amount: 0, - }) // #50 - require.Equal(t, vb.Block().AbsentParticipationAccounts, []basics.Address{addrs[2]}) - - twoPaysZero := vb.Block().Round() - require.EqualValues(t, 50, twoPaysZero) - // addr[0] has never proposed or heartbeat so it is not considered absent - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) - // addr[1] still hasn't been "noticed" - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[1]).Status) - require.Equal(t, basics.Offline, lookup(t, dl.generator, addrs[2]).Status) - require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) + // addr[1] spent 10A on a fee in rnd 1, so online stake and eligibility adjusted in 323 + if rnd == addr1Keyreg-2+lookback { + checkState(addrs[1], true, false, 833_333_333_333_333) // check occurs 
during reg+lookback-1 + checkState(addrs[1], true, true, 833_333_323_333_333) // check occurs during reg+lookback + } - // separate the payments by a few blocks so it will be easier to test - // when the changes go into effect - for i := 0; i < 4; i++ { - dl.fullBlock() // #51-54 + // watch the change across the round that addr2 becomes eligible (by spending 2A in keyreg) + if rnd == addr2Eligible-2+lookback { + checkState(addrs[2], true, false, 833_333_333_429_333) + checkState(addrs[2], true, true, 833_333_331_429_333) // after keyreg w/ 2A is effective + } + + if rnd > 20+lookback+skip { + break + } } - // now, when 2 pays 1, 1 gets suspended (unlike 0, we had 1 keyreg early on, so LastHeartbeat>0) - vb = dl.fullBlock(&txntest.Txn{ - Type: "pay", - Sender: addrs[2], - Receiver: addrs[1], - Amount: 0, - }) // #55 - twoPaysOne := vb.Block().Round() - require.EqualValues(t, 55, twoPaysOne) - require.Equal(t, vb.Block().AbsentParticipationAccounts, []basics.Address{addrs[1]}) - require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) + require.Equal(t, addr2Eligible+lookback+skip, addr2off) + require.Equal(t, addr1Keyreg+lookback+skip+1, addr1off) // addr1 paid out a little bit, extending its lag by 1 + + require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[0]).Status) // genesis account require.Equal(t, basics.Offline, lookup(t, dl.generator, addrs[1]).Status) - require.False(t, lookup(t, dl.generator, addrs[1]).IncentiveEligible) require.Equal(t, basics.Offline, lookup(t, dl.generator, addrs[2]).Status) require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) // now, addrs[2] proposes, so it gets back online, but stays ineligible dl.proposer = addrs[2] - dl.fullBlock() + printAbsent(dl.fullBlock()) require.Equal(t, basics.Online, lookup(t, dl.generator, addrs[2]).Status) require.False(t, lookup(t, dl.generator, addrs[2]).IncentiveEligible) - // "synchronize" so the loop below ends on 320 - for dl.fullBlock().Block().Round()%4 
!= 3 { - } - // keep in mind that each call to checkState also advances the round, so - // each loop advances by 4. - for rnd := dl.fullBlock().Block().Round(); rnd < 320; rnd = dl.fullBlock().Block().Round() { - // STILL nothing has changed, as we're under 320 - checkState(addrs[0], true, false, 833_333_333_333_333) - checkState(addrs[1], true, false, 833_333_333_333_333) - checkState(addrs[2], false, false, 0) - } - // rnd was 320 in the last fullBlock + // The knockoffs have happened, now skip through another lookback rounds + // to observe the changes with checkstate + addr1check, addr2check := false, false + for { + vb := dl.fullBlock() + printAbsent(vb) + rnd := vb.Block().Round() + + // observe addr1 stake going to zero 320 rounds after knockoff + if rnd == addr1off+lookback-2 { + checkState(addrs[1], true, true, 833_333_323_188_333) + checkState(addrs[1], false, false, 0) + addr1check = true + } - // We will soon see effects visible to `vote_params_get` - // In first block, addr[3] created an app. 
No effect on 0-2 - checkState(addrs[1], true, false, 833_333_333_333_333) // 321 - // in second block, the checkstate app was created - checkState(addrs[1], true, false, 833_333_333_333_333) // 322 - // addr[1] spent 10A on a fee in rnd 3, so online stake and eligibility adjusted in 323 - checkState(addrs[1], true, true, 833_333_323_333_333) // 323 + // observe addr2 stake going to zero 320 rounds after knockoff + if rnd == addr2off+lookback-2 { + checkState(addrs[2], true, true, 833_333_331_427_333) // still "online" + checkState(addrs[2], false, false, 0) + addr2check = true + } - for rnd := dl.fullBlock().Block().Round(); rnd < 320+twoEligible-1; rnd = dl.fullBlock().Block().Round() { + if rnd > 20+2*lookback+skip { + break + } } - checkState(addrs[2], true, false, 833_333_333_429_333) - checkState(addrs[2], true, true, 833_333_331_429_333) // after keyreg w/ 2A is effective + // sanity check that we didn't skip one because of checkstate advancing a round + require.True(t, addr1check) + require.True(t, addr2check) - for rnd := dl.fullBlock().Block().Round(); rnd < 320+twoPaysZero-1; rnd = dl.fullBlock().Block().Round() { - } - // we're at the round before two's suspension kicks in - checkState(addrs[2], true, true, 833_333_331_429_333) // still "online" - checkState(addrs[0], true, false, 833_333_333_331_333) // paid fee in #5 and #11, we're at ~371 - // 2 was noticed & suspended after paying 0, eligible and amount go to 0 - checkState(addrs[2], false, false, 0) checkState(addrs[0], true, false, 833_333_333_331_333) // addr 0 didn't get suspended (genesis) + } - // roughly the same check, except for addr 1, which was genesis, but - // after doing a keyreg, became susceptible to suspension - for rnd := dl.fullBlock().Block().Round(); rnd < 320+twoPaysOne-1; rnd = dl.fullBlock().Block().Round() { - } - checkState(addrs[1], true, true, 833_333_323_230_333) // still online, balance irrelevant - // 1 was noticed & suspended after being paid by 2, so eligible and 
amount go to 0 - checkState(addrs[1], false, false, 0) + ledgertesting.TestConsensusRange(t, checkingBegins, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + runTest(t, cv, cfg) }) } @@ -736,71 +760,138 @@ func TestAbsenteeChallenges(t *testing.T) { dl.beginBlock() dl.endBlock(seedAndProp) // This becomes the seed, which is used for the challenge - for vb := dl.fullBlock(); vb.Block().Round() < 1200; vb = dl.fullBlock() { - // advance through first grace period + for vb := dl.fullBlock(); vb.Block().Round() < 1199; vb = dl.fullBlock() { + // advance through first grace period: no one marked absent + require.Empty(t, vb.Block().AbsentParticipationAccounts) } + + // regguy keyregs before he's caught, which is a heartbeat, he stays on as well + vb := dl.fullBlock(&txntest.Txn{ + Type: "keyreg", // Does not pay extra fee, since he's still eligible + Sender: regguy, + VotePK: [32]byte{1}, + SelectionPK: [32]byte{1}, + }) + require.Equal(t, basics.Round(1200), vb.Block().Round()) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + acct := lookup(t, dl.generator, regguy) + require.Equal(t, basics.Online, acct.Status) + require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + dl.beginBlock() - dl.endBlock(propguy) // propose, which is a fine (though less likely) way to respond + vb = dl.endBlock(propguy) // propose, which is a fine (though less likely) way to respond - // All still online, unchanged eligibility + // propguy could be suspended in 1201 here, but won't, because they are proposer + require.Equal(t, basics.Round(1201), vb.Block().Round()) + + require.NotContains(t, vb.Block().AbsentParticipationAccounts, []basics.Address{propguy}) + require.NotContains(t, vb.Block().AbsentParticipationAccounts, regguy) + if ver >= checkingBegins { + // badguy and regguy will both be suspended in 1201 + require.Contains(t, vb.Block().AbsentParticipationAccounts, badguy) + } + + // propguy & regguy still online, badguy suspended 
(depending on consensus version) for _, guy := range []basics.Address{propguy, regguy, badguy} { acct := lookup(t, dl.generator, guy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible, guy) + switch guy { + case propguy, regguy: + require.Equal(t, basics.Online, acct.Status) + require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + require.False(t, acct.VoteID.IsEmpty()) + case badguy: + // if checking, badguy fails + require.Equal(t, ver >= checkingBegins, basics.Offline == acct.Status) + require.False(t, acct.IncentiveEligible) + } + // whether suspended or online, all still have VoteID + require.False(t, acct.VoteID.IsEmpty()) } - for vb := dl.fullBlock(); vb.Block().Round() < 1220; vb = dl.fullBlock() { - // advance into knockoff period. but no transactions means - // unresponsive accounts go unnoticed. + if ver < checkingBegins { + for vb := dl.fullBlock(); vb.Block().Round() < 1220; vb = dl.fullBlock() { + // advance into knockoff period. 
+ } + // All still online, same eligibility + for _, guy := range []basics.Address{propguy, regguy, badguy} { + acct := lookup(t, dl.generator, guy) + require.Equal(t, basics.Online, acct.Status) + require.False(t, acct.IncentiveEligible) + } } - // All still online, same eligibility - for _, guy := range []basics.Address{propguy, regguy, badguy} { - acct := lookup(t, dl.generator, guy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible, guy) + }) +} + +func TestDoubleLedgerGetKnockoffCandidates(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + const onlineCount = 5 + genBalances, addrs, _ := ledgertesting.NewTestGenesis(func(cfg *ledgertesting.GenesisCfg) { + cfg.OnlineCount = onlineCount + ledgertesting.TurnOffRewards(cfg) + }) + payoutsBegin := 40 + + // txn to send in round 1, to change the balances to be different from genesis + payTxn := &txntest.Txn{Type: "pay", Sender: addrs[1], Receiver: addrs[2], Amount: 1_000_000} + + checkAccts := func(l *Ledger, rnd basics.Round, cv protocol.ConsensusVersion) { + accts, err := l.GetKnockOfflineCandidates(rnd, config.Consensus[cv]) + require.NoError(t, err) + require.NotEmpty(t, accts) + + // get online genesis accounts + onlineCnt := 0 + genesisOnlineAccts := make(map[basics.Address]basics.OnlineAccountData) + afterPayTxnOnlineAccts := make(map[basics.Address]basics.OnlineAccountData) + for addr, ad := range genBalances.Balances { + if ad.Status == basics.Online { + onlineCnt++ + genesisOnlineAccts[addr] = ad.OnlineAccountData() + afterPayTxnOnlineAccts[addr] = ad.OnlineAccountData() + } } - // badguy never responded, he gets knocked off when paid - vb := dl.fullBlock(&txntest.Txn{ - Type: "pay", - Sender: addrs[0], - Receiver: badguy, - }) - if ver >= checkingBegins { - require.Equal(t, vb.Block().AbsentParticipationAccounts, []basics.Address{badguy}) + // calculate expected balances after applying payTxn + payTxnReceiver := 
afterPayTxnOnlineAccts[payTxn.Receiver] + payTxnReceiver.MicroAlgosWithRewards.Raw += payTxn.Amount + payTxnSender := afterPayTxnOnlineAccts[payTxn.Sender] + payTxnSender.MicroAlgosWithRewards.Raw -= (payTxn.Amount + config.Consensus[cv].MinTxnFee) + afterPayTxnOnlineAccts[payTxn.Receiver] = payTxnReceiver + afterPayTxnOnlineAccts[payTxn.Sender] = payTxnSender + + require.Equal(t, onlineCount, onlineCnt) + require.Len(t, accts, onlineCnt) + if rnd == 0 { + // balances should be same as genesis + require.Equal(t, genesisOnlineAccts, accts) + } else { + // balances > rnd 1 should reflect payTxn change + require.Equal(t, afterPayTxnOnlineAccts, accts, "rnd %d", rnd) } - acct := lookup(t, dl.generator, badguy) - require.Equal(t, ver >= checkingBegins, basics.Offline == acct.Status) // if checking, badguy fails - require.False(t, acct.IncentiveEligible) - // propguy proposed during the grace period, he stays on even when paid - dl.txns(&txntest.Txn{ - Type: "pay", - Sender: addrs[0], - Receiver: propguy, - }) - acct = lookup(t, dl.generator, propguy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + } - // regguy keyregs before he's caught, which is a heartbeat, he stays on as well - dl.txns(&txntest.Txn{ - Type: "keyreg", // Does not pay extra fee, since he's still eligible - Sender: regguy, - VotePK: [32]byte{1}, - SelectionPK: [32]byte{1}, - }) - acct = lookup(t, dl.generator, regguy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) - dl.txns(&txntest.Txn{ - Type: "pay", - Sender: addrs[0], - Receiver: regguy, - }) - acct = lookup(t, dl.generator, regguy) - require.Equal(t, basics.Online, acct.Status) - require.Equal(t, ver >= checkingBegins, acct.IncentiveEligible) + ledgertesting.TestConsensusRange(t, payoutsBegin-1, 0, func(t *testing.T, ver int, cv protocol.ConsensusVersion, cfg config.Local) { + dl := NewDoubleLedger(t, genBalances, 
cv, cfg) + defer dl.Close() + + checkAccts(dl.generator, basics.Round(0), cv) + checkAccts(dl.validator, basics.Round(0), cv) + + // change two accounts' balances to be different from genesis + payTxn.GenesisHash = crypto.Digest{} // clear if set from previous run + dl.fullBlock(payTxn) + + // run up to round 240 + proto := config.Consensus[cv] + upToRound := basics.Round(proto.StateProofInterval - proto.StateProofVotersLookback) + require.Equal(t, basics.Round(240), upToRound) + for rnd := dl.fullBlock().Block().Round(); rnd < upToRound; rnd = dl.fullBlock().Block().Round() { + checkAccts(dl.generator, rnd, cv) + checkAccts(dl.validator, rnd, cv) + } }) } diff --git a/ledger/ledger.go b/ledger/ledger.go index 2f10724fee..bb0dad21de 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -638,10 +638,55 @@ func (l *Ledger) LookupAgreement(rnd basics.Round, addr basics.Address) (basics. defer l.trackerMu.RUnlock() // Intentionally apply (pending) rewards up to rnd. - data, err := l.acctsOnline.LookupOnlineAccountData(rnd, addr) + data, err := l.acctsOnline.lookupOnlineAccountData(rnd, addr) return data, err } +// GetKnockOfflineCandidates retrieves a list of online accounts who will be +// checked to a recent proposal or heartbeat. Large accounts are the ones worth checking. 
+func (l *Ledger) GetKnockOfflineCandidates(rnd basics.Round, proto config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + l.trackerMu.RLock() + defer l.trackerMu.RUnlock() + + // get state proof worker's most recent list for top N addresses + if proto.StateProofInterval == 0 { + return nil, nil + } + + var addrs []basics.Address + + // special handling for rounds 0-240: return participating genesis accounts + if rnd < basics.Round(proto.StateProofInterval).SubSaturate(basics.Round(proto.StateProofVotersLookback)) { + for addr, data := range l.genesisAccounts { + if data.Status == basics.Online { + addrs = append(addrs, addr) + } + } + } else { + // get latest state proof voters information, up to rnd, without calling cond.Wait() + _, voters := l.acctsOnline.voters.LatestCompletedVotersUpTo(rnd) + if voters == nil { // no cached voters found < rnd + return nil, nil + } + addrs = make([]basics.Address, 0, len(voters.AddrToPos)) + for addr := range voters.AddrToPos { + addrs = append(addrs, addr) + } + } + + // fetch fresh data up to this round from online account cache. These accounts should all + // be in cache, as long as proto.StateProofTopVoters < onlineAccountsCacheMaxSize. + ret := make(map[basics.Address]basics.OnlineAccountData) + for _, addr := range addrs { + data, err := l.acctsOnline.lookupOnlineAccountData(rnd, addr) + if err != nil || data.MicroAlgosWithRewards.IsZero() { + continue // skip missing / not online accounts + } + ret[addr] = data + } + return ret, nil +} + // LookupWithoutRewards is like Lookup but does not apply pending rewards up // to the requested round rnd. 
func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.AccountData, basics.Round, error) { @@ -717,7 +762,7 @@ func (l *Ledger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) { func (l *Ledger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) { // Expected availability range in txTail.blockHeader is [Latest - MaxTxnLife, Latest] - // allowing (MaxTxnLife + 1) = 1001 rounds back loopback. + // allowing (MaxTxnLife + 1) = 1001 rounds lookback. // The depth besides the MaxTxnLife is controlled by DeeperBlockHeaderHistory parameter // and currently set to 1. // Explanation: diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go index b34877aed5..b2ea97b1ce 100644 --- a/ledger/ledger_perf_test.go +++ b/ledger/ledger_perf_test.go @@ -33,6 +33,7 @@ import ( "github.com/algorand/go-algorand/data/basics" basics_testing "github.com/algorand/go-algorand/data/basics/testing" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/data/transactions/verify" @@ -296,21 +297,23 @@ func benchmarkFullBlocks(params testParams, b *testing.B) { lvb, err := eval.GenerateBlock(nil) require.NoError(b, err) + fb := lvb.FinishBlock(committee.Seed{0x01}, basics.Address{0x01}, false) + // If this is the app creation block, add to both ledgers if i == 1 { - err = l0.AddBlock(lvb.UnfinishedBlock(), cert) + err = l0.AddBlock(fb, cert) require.NoError(b, err) - err = l1.AddBlock(lvb.UnfinishedBlock(), cert) + err = l1.AddBlock(fb, cert) require.NoError(b, err) continue } // For all other blocks, add just to the first ledger, and stash // away to be replayed in the second ledger while running timer - err = l0.AddBlock(lvb.UnfinishedBlock(), cert) + err = l0.AddBlock(fb, cert) require.NoError(b, err) - 
blocks = append(blocks, lvb.UnfinishedBlock()) + blocks = append(blocks, fb) } b.Logf("built %d blocks, each with %d txns", numBlocks, txPerBlock) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 9018d5d73b..8941452d4c 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -1979,6 +1979,35 @@ func TestLookupAgreement(t *testing.T) { require.Equal(t, oad, ad.OnlineAccountData()) } +func TestGetKnockOfflineCandidates(t *testing.T) { + partitiontest.PartitionTest(t) + + ver := protocol.ConsensusFuture + genesisInitState, _ := ledgertesting.GenerateInitState(t, ver, 1_000_000) + const inMem = true + log := logging.TestingLog(t) + cfg := config.GetDefaultLocal() + cfg.Archival = true + ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + defer ledger.Close() + + accts, err := ledger.GetKnockOfflineCandidates(0, config.Consensus[ver]) + require.NoError(t, err) + require.NotEmpty(t, accts) + // get online genesis accounts + onlineCnt := 0 + onlineAddrs := make(map[basics.Address]basics.OnlineAccountData) + for addr, ad := range genesisInitState.Accounts { + if ad.Status == basics.Online { + onlineCnt++ + onlineAddrs[addr] = ad.OnlineAccountData() + } + } + require.Len(t, accts, onlineCnt) + require.Equal(t, onlineAddrs, accts) +} + func BenchmarkLedgerStartup(b *testing.B) { log := logging.TestingLog(b) tmpDir := b.TempDir() diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go index 081fbffde6..ea7b150a6e 100644 --- a/ledger/ledgercore/accountdata.go +++ b/ledger/ledgercore/accountdata.go @@ -135,10 +135,15 @@ func (u *AccountData) Suspend() { } // Suspended returns true if the account is suspended (offline with keys) -func (u *AccountData) Suspended() bool { +func (u AccountData) Suspended() bool { return u.Status == basics.Offline && !u.VoteID.IsEmpty() } +// LastSeen returns the last round that the account was seen online +func (u AccountData) 
LastSeen() basics.Round { + return max(u.LastProposed, u.LastHeartbeat) +} + // MinBalance computes the minimum balance requirements for an account based on // some consensus parameters. MinBalance should correspond roughly to how much // storage the account is allowed to store on disk. @@ -187,6 +192,8 @@ func (u AccountData) OnlineAccountData(proto config.ConsensusParams, rewardsLeve MicroAlgosWithRewards: microAlgos, VotingData: u.VotingData, IncentiveEligible: u.IncentiveEligible, + LastProposed: u.LastProposed, + LastHeartbeat: u.LastHeartbeat, } } diff --git a/ledger/ledgercore/onlineacct.go b/ledger/ledgercore/onlineacct.go index 8a6b771aad..f5b29c789e 100644 --- a/ledger/ledgercore/onlineacct.go +++ b/ledger/ledgercore/onlineacct.go @@ -22,7 +22,7 @@ import ( ) // An OnlineAccount corresponds to an account whose AccountData.Status -// is Online. This is used for a Merkle tree commitment of online +// is Online. This is used for a Merkle tree commitment of online // accounts, which is subsequently used to validate participants for // a state proof. type OnlineAccount struct { diff --git a/ledger/ledgercore/votersForRound.go b/ledger/ledgercore/votersForRound.go index 7ab103dcd1..957ec08a52 100644 --- a/ledger/ledgercore/votersForRound.go +++ b/ledger/ledgercore/votersForRound.go @@ -183,3 +183,11 @@ func (tr *VotersForRound) Wait() error { } return nil } + +// Completed returns true if the tree has finished being constructed. +// If there was an error constructing the tree, the error is also returned. 
+func (tr *VotersForRound) Completed() (bool, error) { + tr.mu.Lock() + defer tr.mu.Unlock() + return tr.Tree != nil || tr.loadTreeError != nil, tr.loadTreeError +} diff --git a/ledger/onlineaccountscache_test.go b/ledger/onlineaccountscache_test.go index b64d18aabf..fa66d67a9f 100644 --- a/ledger/onlineaccountscache_test.go +++ b/ledger/onlineaccountscache_test.go @@ -189,6 +189,15 @@ func TestOnlineAccountsCacheMaxEntries(t *testing.T) { require.Equal(t, 2, oac.accounts[addr].Len()) } +// TestOnlineAccountsCacheSizeBiggerThanStateProofTopVoters asserts that the online accounts cache +// is bigger than the number of top online accounts tracked by the state proof system. +func TestOnlineAccountsCacheSizeBiggerThanStateProofTopVoters(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + require.Greater(t, uint64(onlineAccountsCacheMaxSize), config.Consensus[protocol.ConsensusFuture].StateProofTopVoters) +} + var benchmarkOnlineAccountsCacheReadResult cachedOnlineAccount func benchmarkOnlineAccountsCacheRead(b *testing.B, historyLength int) { diff --git a/ledger/simple_test.go b/ledger/simple_test.go index 8b4632d1de..3bc9f335c8 100644 --- a/ledger/simple_test.go +++ b/ledger/simple_test.go @@ -140,7 +140,7 @@ func txn(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txn *txntest.T } return } - require.True(t, len(problem) == 0 || problem[0] == "") + require.True(t, len(problem) == 0 || problem[0] == "", "Transaction did not fail. Expected: %v", problem) } func txgroup(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*txntest.Txn) error { @@ -157,10 +157,11 @@ func txgroup(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, txns ...*t // inspection. Proposer is optional - if unset, blocks will be finished with // ZeroAddress proposer. 
func endBlock(t testing.TB, ledger *Ledger, eval *eval.BlockEvaluator, proposer ...basics.Address) *ledgercore.ValidatedBlock { - ub, err := eval.GenerateBlock(nil) + // pass proposers to GenerateBlock, if provided + ub, err := eval.GenerateBlock(proposer) require.NoError(t, err) - // We fake some thigns that agreement would do, like setting proposer + // We fake some things that agreement would do, like setting proposer validatedBlock := ledgercore.MakeValidatedBlock(ub.UnfinishedBlock(), ub.UnfinishedDeltas()) gvb := &validatedBlock diff --git a/ledger/store/trackerdb/data.go b/ledger/store/trackerdb/data.go index 8e69f2fc69..1649d1f82d 100644 --- a/ledger/store/trackerdb/data.go +++ b/ledger/store/trackerdb/data.go @@ -152,6 +152,8 @@ type BaseOnlineAccountData struct { BaseVotingData + LastProposed basics.Round `codec:"V"` + LastHeartbeat basics.Round `codec:"W"` IncentiveEligible bool `codec:"X"` MicroAlgos basics.MicroAlgos `codec:"Y"` RewardsBase uint64 `codec:"Z"` @@ -456,7 +458,10 @@ func (bo *BaseOnlineAccountData) IsVotingEmpty() bool { func (bo *BaseOnlineAccountData) IsEmpty() bool { return bo.IsVotingEmpty() && bo.MicroAlgos.Raw == 0 && - bo.RewardsBase == 0 && !bo.IncentiveEligible + bo.RewardsBase == 0 && + bo.LastHeartbeat == 0 && + bo.LastProposed == 0 && + !bo.IncentiveEligible } // GetOnlineAccount returns ledgercore.OnlineAccount for top online accounts / voters @@ -491,6 +496,8 @@ func (bo *BaseOnlineAccountData) GetOnlineAccountData(proto config.ConsensusPara VoteKeyDilution: bo.VoteKeyDilution, }, IncentiveEligible: bo.IncentiveEligible, + LastProposed: bo.LastProposed, + LastHeartbeat: bo.LastHeartbeat, } } @@ -507,6 +514,8 @@ func (bo *BaseOnlineAccountData) SetCoreAccountData(ad *ledgercore.AccountData) bo.MicroAlgos = ad.MicroAlgos bo.RewardsBase = ad.RewardsBase bo.IncentiveEligible = ad.IncentiveEligible + bo.LastProposed = ad.LastProposed + bo.LastHeartbeat = ad.LastHeartbeat } // MakeResourcesData returns a new empty instance of 
resourcesData. diff --git a/ledger/store/trackerdb/data_test.go b/ledger/store/trackerdb/data_test.go index edc0d0dc9e..b256fa4e76 100644 --- a/ledger/store/trackerdb/data_test.go +++ b/ledger/store/trackerdb/data_test.go @@ -1152,7 +1152,7 @@ func TestBaseOnlineAccountDataIsEmpty(t *testing.T) { structureTesting := func(t *testing.T) { encoding, err := json.Marshal(&empty) zeros32 := "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0" - expectedEncoding := `{"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"IncentiveEligible":false,"MicroAlgos":{"Raw":0},"RewardsBase":0}` + expectedEncoding := `{"VoteID":[` + zeros32 + `],"SelectionID":[` + zeros32 + `],"VoteFirstValid":0,"VoteLastValid":0,"VoteKeyDilution":0,"StateProofID":[` + zeros32 + `,` + zeros32 + `],"LastProposed":0,"LastHeartbeat":0,"IncentiveEligible":false,"MicroAlgos":{"Raw":0},"RewardsBase":0}` require.NoError(t, err) require.Equal(t, expectedEncoding, string(encoding)) } @@ -1249,7 +1249,7 @@ func TestBaseOnlineAccountDataReflect(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - require.Equal(t, 5, reflect.TypeOf(BaseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count") + require.Equal(t, 7, reflect.TypeOf(BaseOnlineAccountData{}).NumField(), "update all getters and setters for baseOnlineAccountData and change the field count") } func TestBaseVotingDataReflect(t *testing.T) { diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go index 465248e93d..98f35bf519 100644 --- a/ledger/store/trackerdb/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -749,8 +749,8 @@ func BaseAccountDataMaxSize() (s int) { func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(9) - 
var zb0001Mask uint16 /* 11 bits */ + zb0001Len := uint32(11) + var zb0001Mask uint16 /* 13 bits */ if (*z).BaseVotingData.VoteID.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x1 @@ -775,18 +775,26 @@ func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x20 } - if (*z).IncentiveEligible == false { + if (*z).LastProposed.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x40 } - if (*z).MicroAlgos.MsgIsZero() { + if (*z).LastHeartbeat.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80 } - if (*z).RewardsBase == 0 { + if (*z).IncentiveEligible == false { zb0001Len-- zb0001Mask |= 0x100 } + if (*z).MicroAlgos.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x200 + } + if (*z).RewardsBase == 0 { + zb0001Len-- + zb0001Mask |= 0x400 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -821,16 +829,26 @@ func (z *BaseOnlineAccountData) MarshalMsg(b []byte) (o []byte) { o = (*z).BaseVotingData.StateProofID.MarshalMsg(o) } if (zb0001Mask & 0x40) == 0 { // if not empty + // string "V" + o = append(o, 0xa1, 0x56) + o = (*z).LastProposed.MarshalMsg(o) + } + if (zb0001Mask & 0x80) == 0 { // if not empty + // string "W" + o = append(o, 0xa1, 0x57) + o = (*z).LastHeartbeat.MarshalMsg(o) + } + if (zb0001Mask & 0x100) == 0 { // if not empty // string "X" o = append(o, 0xa1, 0x58) o = msgp.AppendBool(o, (*z).IncentiveEligible) } - if (zb0001Mask & 0x80) == 0 { // if not empty + if (zb0001Mask & 0x200) == 0 { // if not empty // string "Y" o = append(o, 0xa1, 0x59) o = (*z).MicroAlgos.MarshalMsg(o) } - if (zb0001Mask & 0x100) == 0 { // if not empty + if (zb0001Mask & 0x400) == 0 { // if not empty // string "Z" o = append(o, 0xa1, 0x5a) o = msgp.AppendUint64(o, (*z).RewardsBase) @@ -910,6 +928,22 @@ func (z *BaseOnlineAccountData) UnmarshalMsgWithState(bts []byte, st msgp.Unmars return } } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).LastProposed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = 
msgp.WrapError(err, "struct-from-array", "LastProposed") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).LastHeartbeat.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "LastHeartbeat") + return + } + } if zb0001 > 0 { zb0001-- (*z).IncentiveEligible, bts, err = msgp.ReadBoolBytes(bts) @@ -993,6 +1027,18 @@ func (z *BaseOnlineAccountData) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err, "StateProofID") return } + case "V": + bts, err = (*z).LastProposed.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "LastProposed") + return + } + case "W": + bts, err = (*z).LastHeartbeat.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "LastHeartbeat") + return + } case "X": (*z).IncentiveEligible, bts, err = msgp.ReadBoolBytes(bts) if err != nil { @@ -1034,18 +1080,18 @@ func (_ *BaseOnlineAccountData) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BaseOnlineAccountData) Msgsize() (s int) { - s = 1 + 2 + (*z).BaseVotingData.VoteID.Msgsize() + 2 + (*z).BaseVotingData.SelectionID.Msgsize() + 2 + (*z).BaseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).BaseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).BaseVotingData.StateProofID.Msgsize() + 2 + msgp.BoolSize + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size + s = 1 + 2 + (*z).BaseVotingData.VoteID.Msgsize() + 2 + (*z).BaseVotingData.SelectionID.Msgsize() + 2 + (*z).BaseVotingData.VoteFirstValid.Msgsize() + 2 + (*z).BaseVotingData.VoteLastValid.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).BaseVotingData.StateProofID.Msgsize() + 2 + (*z).LastProposed.Msgsize() + 2 + (*z).LastHeartbeat.Msgsize() + 2 + msgp.BoolSize + 2 + (*z).MicroAlgos.Msgsize() + 2 + msgp.Uint64Size return } // MsgIsZero returns whether this is a zero value func (z *BaseOnlineAccountData) MsgIsZero() 
bool { - return ((*z).BaseVotingData.VoteID.MsgIsZero()) && ((*z).BaseVotingData.SelectionID.MsgIsZero()) && ((*z).BaseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).BaseVotingData.VoteLastValid.MsgIsZero()) && ((*z).BaseVotingData.VoteKeyDilution == 0) && ((*z).BaseVotingData.StateProofID.MsgIsZero()) && ((*z).IncentiveEligible == false) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) + return ((*z).BaseVotingData.VoteID.MsgIsZero()) && ((*z).BaseVotingData.SelectionID.MsgIsZero()) && ((*z).BaseVotingData.VoteFirstValid.MsgIsZero()) && ((*z).BaseVotingData.VoteLastValid.MsgIsZero()) && ((*z).BaseVotingData.VoteKeyDilution == 0) && ((*z).BaseVotingData.StateProofID.MsgIsZero()) && ((*z).LastProposed.MsgIsZero()) && ((*z).LastHeartbeat.MsgIsZero()) && ((*z).IncentiveEligible == false) && ((*z).MicroAlgos.MsgIsZero()) && ((*z).RewardsBase == 0) } // MaxSize returns a maximum valid message size for this message type func BaseOnlineAccountDataMaxSize() (s int) { - s = 1 + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize() + 2 + msgp.BoolSize + 2 + basics.MicroAlgosMaxSize() + 2 + msgp.Uint64Size + s = 1 + 2 + crypto.OneTimeSignatureVerifierMaxSize() + 2 + crypto.VRFVerifierMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.Uint64Size + 2 + merklesignature.CommitmentMaxSize() + 2 + basics.RoundMaxSize() + 2 + basics.RoundMaxSize() + 2 + msgp.BoolSize + 2 + basics.MicroAlgosMaxSize() + 2 + msgp.Uint64Size return } diff --git a/ledger/tracker.go b/ledger/tracker.go index 1f7950a1c2..7f6b025d18 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -948,7 +948,12 @@ func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, } func (aul *accountUpdatesLedgerEvaluator) LookupAgreement(rnd basics.Round, addr basics.Address) (basics.OnlineAccountData, error) { - return 
aul.ao.LookupOnlineAccountData(rnd, addr) + return aul.ao.lookupOnlineAccountData(rnd, addr) +} + +func (aul *accountUpdatesLedgerEvaluator) GetKnockOfflineCandidates(basics.Round, config.ConsensusParams) (map[basics.Address]basics.OnlineAccountData, error) { + // This method is only used when generating blocks, so we don't need to implement it here. + return nil, fmt.Errorf("accountUpdatesLedgerEvaluator: GetKnockOfflineCandidates is not implemented and should not be called during replay") } func (aul *accountUpdatesLedgerEvaluator) OnlineCirculation(rnd basics.Round, voteRnd basics.Round) (basics.MicroAlgos, error) { diff --git a/ledger/voters.go b/ledger/voters.go index 63e0722a6f..49d7adf457 100644 --- a/ledger/voters.go +++ b/ledger/voters.go @@ -291,7 +291,30 @@ func (vt *votersTracker) lowestRound(base basics.Round) basics.Round { return minRound } -// VotersForStateProof returns the top online participants from round r. +// LatestCompletedVotersUpTo returns the highest round <= r for which information about the top online +// participants has already been collected, and the completed VotersForRound for that round. +// If none is found, it returns 0, nil. Unlike VotersForStateProof, this function does not wait. +func (vt *votersTracker) LatestCompletedVotersUpTo(r basics.Round) (basics.Round, *ledgercore.VotersForRound) { + vt.votersMu.RLock() + defer vt.votersMu.RUnlock() + + var latestRound basics.Round + var latestVoters *ledgercore.VotersForRound + + for round, voters := range vt.votersForRoundCache { + if round <= r && round > latestRound { + if completed, err := voters.Completed(); completed && err == nil { + latestRound = round + latestVoters = voters + } + } + } + + return latestRound, latestVoters +} + +// VotersForStateProof returns the top online participants from round r. If this data is still being +// constructed in another goroutine, this function will wait until it is ready. 
func (vt *votersTracker) VotersForStateProof(r basics.Round) (*ledgercore.VotersForRound, error) { tr, exists := vt.getVoters(r) if !exists { diff --git a/ledger/voters_test.go b/ledger/voters_test.go index a4913c4999..083492c610 100644 --- a/ledger/voters_test.go +++ b/ledger/voters_test.go @@ -17,6 +17,7 @@ package ledger import ( + "fmt" "testing" "github.com/algorand/go-algorand/config" @@ -273,3 +274,84 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) { a.Equal(merklesignature.NoKeysCommitment, top.Participants[j].PK.Commitment) } } + +// implements ledgercore.OnlineAccountsFetcher +type testOnlineAccountsFetcher struct { + topAccts []*ledgercore.OnlineAccount + totalStake basics.MicroAlgos + err error +} + +func (o testOnlineAccountsFetcher) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Round, n uint64, params *config.ConsensusParams, rewardsLevel uint64) (topOnlineAccounts []*ledgercore.OnlineAccount, totalOnlineStake basics.MicroAlgos, err error) { + return o.topAccts, o.totalStake, o.err +} + +func TestLatestCompletedVotersUpToWithError(t *testing.T) { + partitiontest.PartitionTest(t) + a := require.New(t) + + // Set up mock ledger with initial data + accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)} + ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusCurrentVersion, accts) + defer ml.Close() + + conf := config.GetDefaultLocal() + _, ao := newAcctUpdates(t, ml, conf) + + // Add several blocks + for i := uint64(1); i < 10; i++ { + addRandomBlock(t, ml) + } + commitAll(t, ml) + + // Populate votersForRoundCache with test data + for r := basics.Round(1); r <= 9; r += 2 { // simulate every odd round + vr := ledgercore.MakeVotersForRound() + if r%4 == 1 { // Simulate an error for rounds 1, 5, and 9 + vr.BroadcastError(fmt.Errorf("error loading data for round %d", r)) + } else { + // Simulate a successful load of voter data + hdr := bookkeeping.BlockHeader{Round: r} + oaf := testOnlineAccountsFetcher{nil, 
basics.MicroAlgos{Raw: 1_000_000}, nil} + require.NoError(t, vr.LoadTree(oaf, hdr)) + } + + ao.voters.setVoters(r, vr) + } + + // LastCompletedVotersUpTo retrieves the highest round less than or equal to + // the requested round where data is complete, ignoring rounds with errors. + for _, tc := range []struct { + reqRound, retRound uint64 + completed bool + }{ + {0, 0, false}, + {1, 0, false}, + {2, 0, false}, // requested 2, no completed rounds <= 2 + {3, 3, true}, + {4, 3, true}, + {5, 3, true}, // requested 5, got 3 (round 5 had error) + {6, 3, true}, + {7, 7, true}, // requested 7, got 7 (last completed <= 8) + {8, 7, true}, // requested 8, got 7 (last completed <= 8) + {9, 7, true}, // requested 9, got 7 (err at 9) + {10, 7, true}, + {11, 7, true}, + } { + completedRound, voters := ao.voters.LatestCompletedVotersUpTo(basics.Round(tc.reqRound)) + a.Equal(completedRound, basics.Round(tc.retRound)) // No completed rounds before 2 + a.Equal(voters != nil, tc.completed) + } + + // Test with errors in all rounds + ao.voters.votersForRoundCache = make(map[basics.Round]*ledgercore.VotersForRound) // reset map + for r := basics.Round(1); r <= 9; r += 2 { + vr := ledgercore.MakeVotersForRound() + vr.BroadcastError(fmt.Errorf("error loading data for round %d", r)) + ao.voters.setVoters(r, vr) + } + + completedRound, voters := ao.voters.LatestCompletedVotersUpTo(basics.Round(9)) + a.Equal(basics.Round(0), completedRound) // No completed rounds due to errors + a.Nil(voters) +} diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index f3f1c67192..e7739e085c 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -28,7 +28,6 @@ import ( v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" kmdclient "github.com/algorand/go-algorand/daemon/kmd/client" "github.com/algorand/go-algorand/ledger/ledgercore" - "github.com/algorand/go-algorand/rpcs" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" @@ -831,53 
+830,43 @@ func (c *Client) Block(round uint64) (resp v2.BlockResponseJSON, err error) { // RawBlock takes a round and returns its block func (c *Client) RawBlock(round uint64) (resp []byte, err error) { algod, err := c.ensureAlgodClient() - if err == nil { - resp, err = algod.RawBlock(round) - } - return -} - -// EncodedBlockCert takes a round and returns its parsed block and certificate -func (c *Client) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) { - algod, err := c.ensureAlgodClient() - if err == nil { - var resp []byte - resp, err = algod.RawBlock(round) - if err == nil { - err = protocol.Decode(resp, &blockCert) - if err != nil { - return - } - } + if err != nil { + return } - return + return algod.RawBlock(round) } // BookkeepingBlock takes a round and returns its block func (c *Client) BookkeepingBlock(round uint64) (block bookkeeping.Block, err error) { - blockCert, err := c.EncodedBlockCert(round) - if err == nil { - return blockCert.Block, nil + algod, err := c.ensureAlgodClient() + if err != nil { + return } - return + blockCert, err := algod.EncodedBlockCert(round) + if err != nil { + return + } + return blockCert.Block, nil } // HealthCheck returns an error if something is wrong func (c *Client) HealthCheck() error { algod, err := c.ensureAlgodClient() - if err == nil { - err = algod.HealthCheck() + if err != nil { + return err } - return err + return algod.HealthCheck() } -// WaitForRound takes a round, waits until it appears and returns its status. This function blocks. +// WaitForRound takes a round, waits up to one minute, for it to appear and +// returns the node status. This function blocks and fails if the block does not +// appear in one minute. 
func (c *Client) WaitForRound(round uint64) (resp model.NodeStatusResponse, err error) { algod, err := c.ensureAlgodClient() - if err == nil { - resp, err = algod.StatusAfterBlock(round) + if err != nil { + return } - return + return algod.WaitForRound(round, time.Minute) } // GetBalance takes an address and returns its total balance; if the address doesn't exist, it returns 0. diff --git a/network/connPerfMon_test.go b/network/connPerfMon_test.go index 560be72a96..4c2bc5f034 100644 --- a/network/connPerfMon_test.go +++ b/network/connPerfMon_test.go @@ -103,14 +103,14 @@ func TestConnMonitorStageTiming(t *testing.T) { startTestTime := time.Now().UnixNano() perfMonitor := makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag}) // measure measuring overhead. - measuringOverhead := time.Now().Sub(time.Now()) + measuringOverhead := time.Since(time.Now()) perfMonitor.Reset(peers) for msgIdx, msg := range msgPool { msg.Received += startTestTime beforeNotify := time.Now() beforeNotifyStage := perfMonitor.stage perfMonitor.Notify(&msg) - notifyTime := time.Now().Sub(beforeNotify) + notifyTime := time.Since(beforeNotify) stageTimings[beforeNotifyStage] += notifyTime stageNotifyCalls[beforeNotifyStage]++ if perfMonitor.GetPeersStatistics() != nil { diff --git a/node/node.go b/node/node.go index 44c8449d95..2d452afa90 100644 --- a/node/node.go +++ b/node/node.go @@ -43,6 +43,7 @@ import ( "github.com/algorand/go-algorand/data/pools" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/verify" + "github.com/algorand/go-algorand/heartbeat" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/ledger/simulation" @@ -155,6 +156,8 @@ type AlgorandFullNode struct { stateProofWorker *stateproof.Worker partHandles []db.Accessor + + heartbeatService *heartbeat.Service } // TxnWithStatus represents information about a single 
transaction, @@ -338,6 +341,8 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.stateProofWorker = stateproof.NewWorker(node.genesisDirs.StateproofGenesisDir, node.log, node.accountManager, node.ledger.Ledger, node.net, node) + node.heartbeatService = heartbeat.NewService(node.accountManager, node.ledger, node, node.log) + return node, err } @@ -380,6 +385,7 @@ func (node *AlgorandFullNode) Start() error { node.ledgerService.Start() node.txHandler.Start() node.stateProofWorker.Start() + node.heartbeatService.Start() err := startNetwork() if err != nil { return err @@ -459,6 +465,7 @@ func (node *AlgorandFullNode) Stop() { if node.catchpointCatchupService != nil { node.catchpointCatchupService.Stop() } else { + node.heartbeatService.Stop() node.stateProofWorker.Stop() node.txHandler.Stop() node.agreementService.Shutdown() @@ -1220,6 +1227,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo }() node.net.ClearHandlers() node.net.ClearValidatorHandlers() + node.heartbeatService.Stop() node.stateProofWorker.Stop() node.txHandler.Stop() node.agreementService.Shutdown() @@ -1248,6 +1256,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo node.ledgerService.Start() node.txHandler.Start() node.stateProofWorker.Start() + node.heartbeatService.Start() // Set up a context we can use to cancel goroutines on Stop() node.ctx, node.cancelCtx = context.WithCancel(context.Background()) diff --git a/protocol/txntype.go b/protocol/txntype.go index 76cb2dc406..ee2d085dcb 100644 --- a/protocol/txntype.go +++ b/protocol/txntype.go @@ -47,6 +47,9 @@ const ( // StateProofTx records a state proof StateProofTx TxType = "stpf" + // HeartbeatTx demonstrates the account is alive + HeartbeatTx TxType = "hb" + // UnknownTx signals an error UnknownTx TxType = "unknown" ) diff --git a/stateproof/builder.go b/stateproof/builder.go index 96ca279a4b..3f2e61d695 100644 --- a/stateproof/builder.go 
+++ b/stateproof/builder.go @@ -668,7 +668,7 @@ func (spw *Worker) tryBroadcast() { latestHeader, err := spw.ledger.BlockHdr(firstValid) if err != nil { - spw.log.Warnf("spw.tryBroadcast: could not fetch block header for round %d failed: %v", firstValid, err) + spw.log.Warnf("spw.tryBroadcast: could not fetch block header for round %d: %v", firstValid, err) break } diff --git a/stateproof/worker.go b/stateproof/worker.go index f74e118f58..163ec214e0 100644 --- a/stateproof/worker.go +++ b/stateproof/worker.go @@ -95,9 +95,7 @@ func NewWorker(genesisDir string, log logging.Logger, accts Accounts, ledger Led // Start starts the goroutines for the worker. func (spw *Worker) Start() { - ctx, cancel := context.WithCancel(context.Background()) - spw.ctx = ctx - spw.shutdown = cancel + spw.ctx, spw.shutdown = context.WithCancel(context.Background()) spw.signedCh = make(chan struct{}, 1) err := spw.initDb(spw.inMemory) diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go index 946d1b24b6..94feb3e9eb 100644 --- a/test/e2e-go/features/accountPerf/sixMillion_test.go +++ b/test/e2e-go/features/accountPerf/sixMillion_test.go @@ -1024,13 +1024,10 @@ func checkPoint(counter, firstValid, tLife uint64, force bool, fixture *fixtures if verbose { fmt.Printf("Waiting for round %d...", int(lastRound)) } - nodeStat, err := fixture.AlgodClient.WaitForBlock(basics.Round(lastRound - 1)) + nodeStat, err := fixture.AlgodClient.WaitForRound(lastRound, time.Minute) if err != nil { return 0, 0, fmt.Errorf("failed to wait for block %d : %w", lastRound, err) } - if nodeStat.LastRound < lastRound { - return 0, 0, fmt.Errorf("failed to wait for block %d : node is at round %d", lastRound, nodeStat.LastRound) - } return 0, nodeStat.LastRound + 1, nil } return counter, firstValid, nil diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index 2e3ac87943..adc8c43f18 100644 --- 
a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -56,9 +56,8 @@ func TestBasicCatchup(t *testing.T) { a.NoError(err) // Let the network make some progress - a.NoError(err) waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(3) a.NoError(err) // Now spin up third node @@ -71,7 +70,7 @@ func TestBasicCatchup(t *testing.T) { defer shutdownClonedNode(cloneDataDir, &fixture, t) // Now, catch up - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + _, err = cloneClient.WaitForRound(waitForRound) a.NoError(err) } @@ -155,7 +154,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, // Let the secondary make progress up to round 3, while the primary was never startred ( hence, it's on round = 0) waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // stop the secondary, which is on round 3 or more. @@ -167,7 +166,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) // Now, catch up - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(lg, waitForRound) + _, err = lg.WaitForRound(waitForRound) a.NoError(err) waitStart := time.Now() @@ -184,7 +183,7 @@ func runCatchupOverGossip(t fixtures.TestingTB, break } - if time.Now().Sub(waitStart) > time.Minute { + if time.Since(waitStart) > time.Minute { // it's taking too long. 
a.FailNow("Waiting too long for catchup to complete") } @@ -258,7 +257,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) { // Let the network make some progress a.NoError(err) waitForRound := uint64(3) // UpgradeVoteRounds + DefaultUpgradeWaitRounds - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Now spin up third node @@ -274,7 +273,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) { defer shutdownClonedNode(cloneDataDir, &fixture, t) // Now, catch up - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + _, err = cloneClient.WaitForRound(waitForRound) a.NoError(err) timeout := time.NewTimer(20 * time.Second) @@ -374,7 +373,7 @@ func TestBasicCatchupCompletes(t *testing.T) { a.NoError(err) // Wait for the network to make some progess. - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Start the third node to catchup. 
@@ -384,7 +383,7 @@ func TestBasicCatchupCompletes(t *testing.T) { defer shutdownClonedNode(cloneDataDir, &fixture, t) // Wait for it to catchup - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound) + _, err = cloneClient.WaitForRound(waitForRound) a.NoError(err) // Calculate the catchup time diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 3a1eefedc4..0a1d522cac 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -46,7 +46,7 @@ import ( const basicTestCatchpointInterval = 4 func waitForCatchpointGeneration(t *testing.T, fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) string { - err := fixture.ClientWaitForRoundWithTimeout(client, uint64(catchpointRound+1)) + err := client.WaitForRoundWithTimeout(uint64(catchpointRound + 1)) if err != nil { return "" } @@ -212,7 +212,7 @@ func startCatchpointGeneratingNode(a *require.Assertions, fixture *fixtures.Rest restClient := fixture.GetAlgodClientForController(nodeController) // We don't want to start using the node without it being properly initialized. - err = fixture.ClientWaitForRoundWithTimeout(restClient, 1) + err = restClient.WaitForRoundWithTimeout(1) a.NoError(err) return nodeController, restClient, &errorsCollector @@ -239,7 +239,7 @@ func startCatchpointUsingNode(a *require.Assertions, fixture *fixtures.RestClien restClient := fixture.GetAlgodClientForController(nodeController) // We don't want to start using the node without it being properly initialized. 
- err = fixture.ClientWaitForRoundWithTimeout(restClient, 1) + err = restClient.WaitForRoundWithTimeout(1) a.NoError(err) return nodeController, restClient, wp, &errorsCollector @@ -263,7 +263,7 @@ func startCatchpointNormalNode(a *require.Assertions, fixture *fixtures.RestClie restClient := fixture.GetAlgodClientForController(nodeController) // We don't want to start using the node without it being properly initialized. - err = fixture.ClientWaitForRoundWithTimeout(restClient, 1) + err = restClient.WaitForRoundWithTimeout(1) a.NoError(err) return nodeController, restClient, &errorsCollector @@ -365,7 +365,7 @@ func TestBasicCatchpointCatchup(t *testing.T) { _, err = usingNodeRestClient.Catchup(catchpointLabel, 0) a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1)) a.NoError(err) // ensure the raw block can be downloaded (including cert) @@ -438,7 +438,7 @@ func TestCatchpointLabelGeneration(t *testing.T) { primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) log.Infof("Building ledger history..") for { - err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second) + _, err = primaryNodeRestClient.WaitForRound(currentRound+1, 45*time.Second) a.NoError(err) if targetRound <= currentRound { break @@ -553,8 +553,7 @@ func TestNodeTxHandlerRestart(t *testing.T) { // Wait for the network to start making progress again primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) - err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound, - 10*catchpointCatchupProtocol.AgreementFilterTimeout) + _, err = primaryNodeRestClient.WaitForRound(targetRound, 10*catchpointCatchupProtocol.AgreementFilterTimeout) a.NoError(err) // let the 2nd client send a transaction @@ -674,8 +673,7 @@ func TestReadyEndpoint(t *testing.T) { // Wait for the network to start making 
progress again primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode) - err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound, - 10*catchpointCatchupProtocol.AgreementFilterTimeout) + _, err = primaryNodeRestClient.WaitForRound(targetRound, 10*catchpointCatchupProtocol.AgreementFilterTimeout) a.NoError(err) // The primary node has reached the target round, diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index 5dcbc11452..f9639abeb1 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -115,7 +115,7 @@ func TestStateProofInReplayCatchpoint(t *testing.T) { } // wait for fastcatchup to complete and the node is synced - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1)) a.NoError(err) primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode) @@ -174,7 +174,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) { roundAfterSPGeneration := targetCatchpointRound.RoundUpToMultipleOf(basics.Round(consensusParams.StateProofInterval)) + basics.Round(consensusParams.StateProofInterval/2) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(roundAfterSPGeneration)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(roundAfterSPGeneration)) a.NoError(err) primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode) @@ -234,14 +234,14 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { primaryNodeAddr, err := primaryNode.GetListeningAddress() a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(primaryNodeRestClient, 3) + err = primaryNodeRestClient.WaitForRoundWithTimeout(3) a.NoError(err) normalNode, normalNodeRestClient, normalNodeEC := startCatchpointNormalNode(a, &fixture, "Node1", 
primaryNodeAddr) defer normalNodeEC.Print() defer normalNode.StopAlgod() - err = fixture.ClientWaitForRoundWithTimeout(normalNodeRestClient, 3) + err = normalNodeRestClient.WaitForRoundWithTimeout(3) a.NoError(err) // at this point PrimaryNode and Node1 would pass round 3. Before running Node2 we remove block 2 from Primary database. @@ -267,7 +267,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { _, err = usingNodeRestClient.Catchup(catchpointLabel, 0) a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound)+1) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound) + 1) a.NoError(err) lastNormalRound, err := fixture.GetLibGoalClientFromNodeController(normalNode).CurrentRound() @@ -280,7 +280,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { lastNormalNodeSignedRound := basics.Round(lastNormalRound).RoundDownToMultipleOf(basics.Round(consensusParams.StateProofInterval)) lastNormalNextStateProofRound := lastNormalNodeSignedRound + basics.Round(consensusParams.StateProofInterval) targetRound := lastNormalNextStateProofRound + basics.Round(consensusParams.StateProofInterval*2) - err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetRound)) + err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetRound)) a.NoError(err) primaryClient := fixture.GetLibGoalClientFromNodeController(primaryNode) diff --git a/test/e2e-go/features/followernode/syncDeltas_test.go b/test/e2e-go/features/followernode/syncDeltas_test.go index af27c7dda7..d1458b7451 100644 --- a/test/e2e-go/features/followernode/syncDeltas_test.go +++ b/test/e2e-go/features/followernode/syncDeltas_test.go @@ -74,7 +74,7 @@ func TestBasicSyncMode(t *testing.T) { // Let the network make some progress waitForRound := uint64(5) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = 
fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Get the follower client, and exercise the sync/ledger functionality @@ -88,7 +88,7 @@ func TestBasicSyncMode(t *testing.T) { a.NoError(err) a.Equal(round, rResp.Round) // make some progress to round - err = fixture.ClientWaitForRoundWithTimeout(followClient, round) + err = followClient.WaitForRoundWithTimeout(round) a.NoError(err) // retrieve state delta gResp, err := followClient.GetLedgerStateDelta(round) @@ -113,6 +113,6 @@ func TestBasicSyncMode(t *testing.T) { err = followClient.SetSyncRound(round + 1) a.NoError(err) } - err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(fixture.LibGoalClient, waitForRound) + err = fixture.WaitForRoundWithTimeout(waitForRound) a.NoError(err) } diff --git a/test/e2e-go/features/followernode/syncRestart_test.go b/test/e2e-go/features/followernode/syncRestart_test.go index 589bb7b53c..1aa5b2560d 100644 --- a/test/e2e-go/features/followernode/syncRestart_test.go +++ b/test/e2e-go/features/followernode/syncRestart_test.go @@ -62,7 +62,7 @@ func TestSyncRestart(t *testing.T) { waitTill := func(node string, round uint64) { controller, err := fixture.GetNodeController(node) a.NoError(err) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(controller), round) + err = fixture.GetAlgodClientForController(controller).WaitForRoundWithTimeout(round) a.NoError(err) } diff --git a/test/e2e-go/features/incentives/challenge_test.go b/test/e2e-go/features/incentives/challenge_test.go new file mode 100644 index 0000000000..661bc7b40c --- /dev/null +++ b/test/e2e-go/features/incentives/challenge_test.go @@ -0,0 +1,222 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package suspension + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/libgoal" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/framework/fixtures" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/algorand/go-algorand/util" +) + +// eligible is just a dumb 50/50 choice of whether to mark an address +// incentiveELigible or not, so we get a diversity of testing. Ineligible +// accounts should not be challenged or try to heartbeat. 
+func eligible(address string) bool { + return address[0]&0x01 == 0 +} + +// TestChallenges ensures that accounts are knocked off if they don't respond to +// a challenge, and that algod responds for accounts it knows (keepign them online) +func TestChallenges(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + // Overview of this test: + // Use a consensus protocol with challenge interval=50, grace period=10, bits=2. + // Start a three-node network. One relay, two nodes with 4 accounts each + // At round 50, ~2 nodes will be challenged. + + const lookback = 32 + const interval = 50 + const grace = 10 + const mask = 0x80 + + var fixture fixtures.RestClientFixture + // Speed up rounds, keep lookback > 2 * grace period + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) + fixture.AlterConsensus(protocol.ConsensusFuture, + func(cp config.ConsensusParams) config.ConsensusParams { + cp.Payouts.ChallengeInterval = 50 + cp.Payouts.ChallengeGracePeriod = 10 + cp.Payouts.ChallengeBits = 1 // half of nodes should get challenged + return cp + }) + fixture.Setup(t, filepath.Join("nettemplates", "Challenges.json")) + defer fixture.Shutdown() + + clientAndAccounts := func(name string) (libgoal.Client, []model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 8) + fmt.Printf("Client %s has %v\n", name, accounts) + return c, accounts + } + + c1, accounts1 := clientAndAccounts("Node1") + c2, accounts2 := clientAndAccounts("Node2") + + err := fixture.WaitForRoundWithTimeout(interval - lookback) // Make all LastHeartbeats > interval, < 2*interval + a.NoError(err) + + // eligible accounts1 will get challenged with node offline, and suspended + for _, account := range accounts1 { + rekeyreg(&fixture, a, c1, account.Address, 
eligible(account.Address)) + } + // eligible accounts2 will get challenged, but node2 will heartbeat for them + for _, account := range accounts2 { + rekeyreg(&fixture, a, c2, account.Address, eligible(account.Address)) + } + + // turn off node 1, so it can't heartbeat + a.NoError(c1.FullStop()) + + current, err := c2.CurrentRound() + a.NoError(err) + // Get them all done so that their inflated LastHeartbeat comes before the + // next challenge. + a.Less(current+lookback, 2*uint64(interval)) + + // We need to wait for the first challenge that happens after the keyreg + // LastHeartbeat has passed. Example: current is 40, so the lastPossible + // LastHeartbeat is 72. Interval is 50, so challengeRound is 100. + + // 100 = 40 + 32 + (50-22) = 72 + 28 + lastPossible := current + lookback + challengeRound := lastPossible + (interval - lastPossible%interval) + + // Advance to challenge round, check the blockseed + err = fixture.WaitForRoundWithTimeout(challengeRound) + a.NoError(err) + blk, err := c2.BookkeepingBlock(challengeRound) + a.NoError(err) + challenge := blk.BlockHeader.Seed[0] & mask // high bit + + // match1 are the accounts from node1 that match the challenge, but only + // eligible ones are truly challenged and could be suspended. + match1 := util.MakeSet[basics.Address]() + eligible1 := util.MakeSet[basics.Address]() // matched AND eligible + for _, account := range accounts1 { + address, err := basics.UnmarshalChecksumAddress(account.Address) + a.NoError(err) + if address[0]&mask == challenge { + fmt.Printf("%v of node 1 was challenged %v by %v\n", address, address[0], challenge) + match1.Add(address) + if eligible(address.String()) { + eligible1.Add(address) + } + } + } + require.NotEmpty(t, match1, "rerun the test") // TODO: remove. 
+ + match2 := util.MakeSet[basics.Address]() + eligible2 := util.MakeSet[basics.Address]() // matched AND eligible + for _, account := range accounts2 { + address, err := basics.UnmarshalChecksumAddress(account.Address) + a.NoError(err) + if address[0]&mask == challenge { + fmt.Printf("%v of node 2 was challenged %v by %v\n", address, address[0], challenge) + match2.Add(address) + if eligible(address.String()) { + eligible2.Add(address) + } + } + } + require.NotEmpty(t, match2, "rerun the test") // TODO: remove. + + allMatches := util.Union(match1, match2) + + // All nodes are online to start + for address := range allMatches { + data, err := c2.AccountData(address.String()) + a.NoError(err) + a.Equal(basics.Online, data.Status, "%v %d", address.String(), data.LastHeartbeat) + a.NotZero(data.VoteID) + a.Equal(eligible(address.String()), data.IncentiveEligible) + } + + // Watch the first half grace period for proposals from challenged nodes, since they won't have to heartbeat. + lucky := util.MakeSet[basics.Address]() + fixture.WithEveryBlock(challengeRound, challengeRound+grace/2, func(block bookkeeping.Block) { + if eligible2.Contains(block.Proposer()) { + lucky.Add(block.Proposer()) + } + a.Empty(block.AbsentParticipationAccounts) // nobody suspended during grace + }) + + // In the second half of the grace period, Node 2 should heartbeat for its eligible accounts + beated := util.MakeSet[basics.Address]() + fixture.WithEveryBlock(challengeRound+grace/2, challengeRound+grace, func(block bookkeeping.Block) { + if eligible2.Contains(block.Proposer()) { + lucky.Add(block.Proposer()) + } + for i, txn := range block.Payset { + hb := txn.Txn.HeartbeatTxnFields + fmt.Printf("Heartbeat txn %v in position %d round %d\n", hb, i, block.Round()) + a.True(match2.Contains(hb.HbAddress)) // only Node 2 is alive + a.True(eligible2.Contains(hb.HbAddress)) // only eligible accounts get heartbeat + a.False(beated.Contains(hb.HbAddress)) // beat only once + beated.Add(hb.HbAddress) + 
a.False(lucky.Contains(hb.HbAddress)) // we should not see a heartbeat from an account that proposed + } + a.Empty(block.AbsentParticipationAccounts) // nobody suspended during grace + }) + a.Equal(eligible2, util.Union(beated, lucky)) + + blk, err = fixture.WaitForBlockWithTimeout(challengeRound + grace + 1) + a.NoError(err) + a.Equal(eligible1, util.MakeSet(blk.AbsentParticipationAccounts...)) + + // node 1 challenged (eligible) accounts are suspended because node 1 is off + for address := range match1 { + data, err := c2.AccountData(address.String()) + a.NoError(err) + if eligible1.Contains(address) { + a.Equal(basics.Offline, data.Status, address) + } else { + a.Equal(basics.Online, data.Status, address) // not eligible, so not suspended + } + a.NotZero(data.VoteID, address) + a.False(data.IncentiveEligible, address) // suspension turns off flag + } + + // node 2 challenged accounts are not suspended (saved by heartbeat or weren't eligible) + for address := range match2 { + data, err := c2.AccountData(address.String()) + a.NoError(err) + a.Equal(basics.Online, data.Status, address) + a.NotZero(data.VoteID, address) + a.Equal(data.IncentiveEligible, eligible(address.String())) + } + +} diff --git a/test/e2e-go/features/incentives/payouts_test.go b/test/e2e-go/features/incentives/payouts_test.go index 1b9f4d0ec3..bf8b2e20e2 100644 --- a/test/e2e-go/features/incentives/payouts_test.go +++ b/test/e2e-go/features/incentives/payouts_test.go @@ -48,7 +48,7 @@ func TestBasicPayouts(t *testing.T) { var fixture fixtures.RestClientFixture // Make the seed lookback shorter, otherwise we need to wait 320 rounds to become IncentiveEligible. 
const lookback = 32 - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, 32) + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) fmt.Printf("lookback is %d\n", lookback) fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) defer fixture.Shutdown() @@ -71,8 +71,8 @@ func TestBasicPayouts(t *testing.T) { c01, account01 := clientAndAccount("Node01") relay, _ := clientAndAccount("Relay") - data01 := rekeyreg(&fixture, a, c01, account01.Address) - data15 := rekeyreg(&fixture, a, c15, account15.Address) + data01 := rekeyreg(&fixture, a, c01, account01.Address, true) + data15 := rekeyreg(&fixture, a, c15, account15.Address, true) // have account01 burn some money to get below the eligibility cap // Starts with 100M, so burn 60M and get under 70M cap. @@ -317,14 +317,19 @@ func getblock(client libgoal.Client, round uint64) (bookkeeping.Block, error) { return client.BookkeepingBlock(round) } -func rekeyreg(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string) basics.AccountData { +func rekeyreg(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string, becomeEligible bool) basics.AccountData { // we start by making an _offline_ tx here, because we want to populate the // key material ourself with a copy of the account's existing material. That // makes it an _online_ keyreg. That allows the running node to chug along // without new part keys. We overpay the fee, which makes us // IncentiveEligible, and to get some funds into FeeSink because we will // watch it drain toward bottom of test. 
- reReg, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, 12_000_000, [32]byte{}) + + fee := uint64(1000) + if becomeEligible { + fee = 12_000_000 + } + reReg, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, fee, [32]byte{}) a.NoError(err) data, err := client.AccountData(address) @@ -354,7 +359,7 @@ func rekeyreg(f *fixtures.RestClientFixture, a *require.Assertions, client libgo a.NoError(err) a.Equal(basics.Online, data.Status) a.True(data.LastHeartbeat > 0) - a.True(data.IncentiveEligible) + a.Equal(becomeEligible, data.IncentiveEligible) fmt.Printf(" %v has %v in round %d\n", address, data.MicroAlgos.Raw, *txn.ConfirmedRound) return data } diff --git a/test/e2e-go/features/incentives/suspension_test.go b/test/e2e-go/features/incentives/suspension_test.go index 6768f7926e..4a3709d96e 100644 --- a/test/e2e-go/features/incentives/suspension_test.go +++ b/test/e2e-go/features/incentives/suspension_test.go @@ -33,7 +33,6 @@ import ( ) // TestBasicSuspension confirms that accounts that don't propose get suspended -// (when a tx naming them occurs) func TestBasicSuspension(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) @@ -45,15 +44,17 @@ func TestBasicSuspension(t *testing.T) { // Start a three-node network (70,20,10), all online // Wait for 10 and 20% nodes to propose (we never suspend accounts with lastProposed=lastHeartbeat=0) // Stop them both - // Run for 55 rounds, which is enough for 20% node to be suspended, but not 10% + // Run for 105 rounds, which is enough for 20% node to be suspended, but not 10% // check neither suspended, send a tx from 20% to 10%, only 20% gets suspended - // TODO once we have heartbeats: bring them back up, make sure 20% gets back online - const suspend20 = 55 + // bring n20 back up, make sure it gets back online by proposing during the lookback + const suspend20 = 105 // 1.00/0.20 * absentFactor var fixture fixtures.RestClientFixture - // Speed up rounds, but keep long lookback, so 
20% node has a chance to get - // back online after being suspended. - fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, 320) + // Speed up rounds. Long enough lookback, so 20% node has a chance to + // get back online after being suspended. (0.8^32 is very small) + + const lookback = 32 + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) fixture.Setup(t, filepath.Join("nettemplates", "Suspension.json")) defer fixture.Shutdown() @@ -69,74 +70,43 @@ func TestBasicSuspension(t *testing.T) { c10, account10 := clientAndAccount("Node10") c20, account20 := clientAndAccount("Node20") - rekeyreg(&fixture, a, c10, account10.Address) - rekeyreg(&fixture, a, c20, account20.Address) - - // Wait until each have proposed, so they are suspendable - proposed10 := false - proposed20 := false - for !proposed10 || !proposed20 { - status, err := c10.Status() - a.NoError(err) - block, err := c10.BookkeepingBlock(status.LastRound) - a.NoError(err) - - fmt.Printf(" block %d proposed by %v\n", status.LastRound, block.Proposer()) - - fixture.WaitForRoundWithTimeout(status.LastRound + 1) - - switch block.Proposer().String() { - case account10.Address: - proposed10 = true - case account20.Address: - proposed20 = true - } - } + rekeyreg(&fixture, a, c10, account10.Address, true) + rekeyreg(&fixture, a, c20, account20.Address, true) + // Accounts are now suspendable whether they have proposed yet or not + // because keyreg sets LastHeartbeat. Stop c20 which means account20 will be + // absent about 50 rounds after keyreg goes into effect (lookback) a.NoError(c20.FullStop()) afterStop, err := c10.Status() a.NoError(err) - // Advance 55 rounds - err = fixture.WaitForRoundWithTimeout(afterStop.LastRound + suspend20) - a.NoError(err) - - // n20 is still online after 55 rounds of absence (the node is off, but the - // account is marked online) because it has not been "noticed". 
- account, err := fixture.LibGoalClient.AccountData(account20.Address) + // Advance lookback+55 rounds + err = fixture.WaitForRoundWithTimeout(afterStop.LastRound + lookback + suspend20) a.NoError(err) - a.Equal(basics.Online, account.Status) - voteID := account.VoteID - a.NotZero(voteID) - - // pay n10 & n20, so both could be noticed - richAccount, err := fixture.GetRichestAccount() - a.NoError(err) - fixture.SendMoneyAndWait(afterStop.LastRound+suspend20, 5, 1000, richAccount.Address, account10.Address, "") - fixture.SendMoneyAndWait(afterStop.LastRound+suspend20, 5, 1000, richAccount.Address, account20.Address, "") // make sure c10 node is in-sync with the network status, err := fixture.LibGoalClient.Status() a.NoError(err) + fmt.Printf("status.LastRound %d\n", status.LastRound) _, err = c10.WaitForRound(status.LastRound) a.NoError(err) - // n20's account is now offline, but has voting key material (suspended) - account, err = c10.AccountData(account20.Address) + // n20's account has been suspended (offline, but has voting key material) + account, err := c10.AccountData(account20.Address) a.NoError(err) + fmt.Printf("account20 %d %d\n", account.LastProposed, account.LastHeartbeat) a.Equal(basics.Offline, account.Status) a.NotZero(account.VoteID) a.False(account.IncentiveEligible) // suspension turns off flag - // n10's account is still online, because it's got less stake, has not been absent 10 x interval. account, err = c10.AccountData(account10.Address) a.NoError(err) a.Equal(basics.Online, account.Status) a.NotZero(account.VoteID) a.True(account.IncentiveEligible) - // Use the fixture to start the node again. Since we're only a bit past the + // Use the fixture to start node20 again. Since we're only a bit past the // suspension round, it will still be voting. It should get a chance to // propose soon (20/100 of blocks) which will put it back online. 
lg, err := fixture.StartNode(c20.DataDir()) @@ -172,8 +142,6 @@ func TestBasicSuspension(t *testing.T) { a.NoError(err) r.Equal(basics.Online, account.Status, i) r.Greater(account.LastProposed, restartRound, i) - - r.Equal(voteID, account.VoteID, i) r.False(account.IncentiveEligible, i) } } diff --git a/test/e2e-go/features/incentives/whalejoin_test.go b/test/e2e-go/features/incentives/whalejoin_test.go new file mode 100644 index 0000000000..90a67450e2 --- /dev/null +++ b/test/e2e-go/features/incentives/whalejoin_test.go @@ -0,0 +1,324 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package suspension + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" + "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/libgoal" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/framework/fixtures" + "github.com/algorand/go-algorand/test/partitiontest" +) + +// TestWhaleJoin shows a "whale" with more stake than is currently online can go +// online without immediate suspension. This tests for a bug we had where we +// calcululated expected proposal interval using the _old_ totals, rather than +// the totals following the keyreg. So big joiner was being expected to propose +// in the same block it joined. +func TestWhaleJoin(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + var fixture fixtures.RestClientFixture + // Make rounds shorter and seed lookback smaller, otherwise we need to wait + // 320 slow rounds for particpation effects to matter. + const lookback = 32 + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback) + fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) + defer fixture.Shutdown() + + // Overview of this test: + // 1. Take wallet15 offline (but retain keys so can back online later) + // 2. Have wallet01 spend almost all their algos + // 3. Wait for balances to flow through "lookback" + // 4. Rejoin wallet15 which will have way more stake that what is online. 
+ + clientAndAccount := func(name string) (libgoal.Client, model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 1) + fmt.Printf("Client %s is %v\n", name, accounts[0].Address) + return c, accounts[0] + } + + c15, account15 := clientAndAccount("Node15") + c01, account01 := clientAndAccount("Node01") + + // 1. take wallet15 offline + keys := offline(&fixture, a, c15, account15.Address) + + // 2. c01 starts with 100M, so burn 99.9M to get total online stake down + burn, err := c01.SendPaymentFromUnencryptedWallet(account01.Address, basics.Address{}.String(), + 1000, 99_900_000_000_000, nil) + a.NoError(err) + receipt, err := fixture.WaitForConfirmedTxn(uint64(burn.LastValid), burn.ID().String()) + a.NoError(err) + + // 3. Wait lookback rounds + _, err = c01.WaitForRound(*receipt.ConfirmedRound + lookback) + a.NoError(err) + + // 4. rejoin, with 1.5B against the paltry 100k that's currently online + online(&fixture, a, c15, account15.Address, keys) + + // 5. wait for agreement balances to kick in (another lookback's worth, plus some slack) + _, err = c01.WaitForRound(*receipt.ConfirmedRound + 2*lookback + 5) + a.NoError(err) + + data, err := c15.AccountData(account15.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // even after being in the block to "get noticed" + txn, err := c15.SendPaymentFromUnencryptedWallet(account15.Address, basics.Address{}.String(), + 1000, 1, nil) + a.NoError(err) + _, err = fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String()) + a.NoError(err) + data, err = c15.AccountData(account15.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) +} + +// TestBigJoin shows that even though an account can't vote during the first 320 +// rounds after joining, it is not marked absent because of that gap. 
This would +// be a problem for "biggish" accounts, that might already be absent after 320 +// rounds of not voting. +func TestBigJoin(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + var fixture fixtures.RestClientFixture + // We need lookback to be fairly long, so that we can have a node join with + // 1/16 stake, and have lookback be long enough to risk absenteeism. + const lookback = 164 // > 160, which is 10x the 1/16th's interval + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second/2, lookback) + fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) + defer fixture.Shutdown() + + // Overview of this test: + // 1. Take wallet01 offline (but retain keys so can back online later) + // 2. Wait `lookback` rounds so it can't propose. + // 3. Rejoin wallet01 which will now have 1/16 of the stake + // 4. Wait 160 rounds and ensure node01 does not get knocked offline for being absent + // 5. Wait the rest of lookback to ensure it _still_ does not get knock off. + + clientAndAccount := func(name string) (libgoal.Client, model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 1) + fmt.Printf("Client %s is %v\n", name, accounts[0].Address) + return c, accounts[0] + } + + c01, account01 := clientAndAccount("Node01") + + // 1. take wallet01 offline + keys := offline(&fixture, a, c01, account01.Address) + + // 2. Wait lookback rounds + wait(&fixture, a, lookback) + + // 4. rejoin, with 1/16 of total stake + onRound := online(&fixture, a, c01, account01.Address, keys) + + // 5. wait for enough rounds to pass, during which c01 can't vote, that is + // could get knocked off. + wait(&fixture, a, 161) + data, err := c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // 5a. 
just to be sure, do a zero pay to get it "noticed" + zeroPay(&fixture, a, c01, account01.Address) + data, err = c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // 6. Now wait until lookback after onRound (which should just be a couple + // more rounds). Check again, to ensure that once c01 is _really_ + // online/voting, it is still safe for long enough to propose. + a.NoError(fixture.WaitForRoundWithTimeout(onRound + lookback)) + data, err = c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + zeroPay(&fixture, a, c01, account01.Address) + data, err = c01.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + + // The node _could_ have gotten lucky and propose in first couple rounds it + // is allowed to propose, so this test is expected to be "flaky" in a + // sense. It would pass about 1/8 of the time, even if we had the problem it + // is looking for. +} + +// TestBigIncrease shows when an incentive eligible account receives a lot of +// algos, they are not immediately suspended. We also check the details of the +// mechanism - that LastHeartbeat is incremented when such an account doubles +// its balance in a single pay. +func TestBigIncrease(t *testing.T) { + partitiontest.PartitionTest(t) + defer fixtures.ShutdownSynchronizedTest(t) + + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + var fixture fixtures.RestClientFixture + const lookback = 32 + fixture.FasterConsensus(protocol.ConsensusFuture, time.Second/2, lookback) + fixture.Setup(t, filepath.Join("nettemplates", "Payouts.json")) + defer fixture.Shutdown() + + // Overview of this test: + // 0. spend wallet01 down so it has a very small percent of stake + // 1. rereg wallet01 so it is suspendable + // 2. move almost all of wallet15's money to wallet01 + // 3. check that c1.LastHeart is set to 32 rounds later + // 4. 
wait 40 rounds ensure c1 stays online + + clientAndAccount := func(name string) (libgoal.Client, model.Account) { + c := fixture.GetLibGoalClientForNamedNode(name) + accounts, err := fixture.GetNodeWalletsSortedByBalance(c) + a.NoError(err) + a.Len(accounts, 1) + fmt.Printf("Client %s is %v\n", name, accounts[0].Address) + return c, accounts[0] + } + + c1, account01 := clientAndAccount("Node01") + c15, account15 := clientAndAccount("Node15") + + // We need to spend 01 down so that it has nearly no stake. That way, it + // certainly will not have proposed by pure luck just before the critical + // round. If we don't do that, 1/16 of stake is enough that it will probably + // have a fairly recent proposal, and not get knocked off. + pay(&fixture, a, c1, account01.Address, account15.Address, 99*account01.Amount/100) + + rekeyreg(&fixture, a, c1, account01.Address, true) + + // 2. Wait lookback rounds + wait(&fixture, a, lookback) + + tx := pay(&fixture, a, c15, account15.Address, account01.Address, 50*account15.Amount/100) + data, err := c15.AccountData(account01.Address) + a.NoError(err) + a.EqualValues(*tx.ConfirmedRound+lookback, data.LastHeartbeat) + + wait(&fixture, a, lookback+5) + data, err = c15.AccountData(account01.Address) + a.NoError(err) + a.Equal(basics.Online, data.Status) + a.True(data.IncentiveEligible) +} + +func wait(f *fixtures.RestClientFixture, a *require.Assertions, count uint64) { + res, err := f.AlgodClient.Status() + a.NoError(err) + round := res.LastRound + count + a.NoError(f.WaitForRoundWithTimeout(round)) +} + +func pay(f *fixtures.RestClientFixture, a *require.Assertions, + c libgoal.Client, from string, to string, amount uint64) v2.PreEncodedTxInfo { + pay, err := c.SendPaymentFromUnencryptedWallet(from, to, 1000, amount, nil) + a.NoError(err) + tx, err := f.WaitForConfirmedTxn(uint64(pay.LastValid), pay.ID().String()) + a.NoError(err) + return tx +} + +func zeroPay(f *fixtures.RestClientFixture, a *require.Assertions, + c 
libgoal.Client, address string) { + pay(f, a, c, address, address, 0) +} + +// Go offline, but return the key material so it's easy to go back online +func offline(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string) transactions.KeyregTxnFields { + offTx, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, 100_000, [32]byte{}) + a.NoError(err) + + data, err := client.AccountData(address) + a.NoError(err) + keys := transactions.KeyregTxnFields{ + VotePK: data.VoteID, + SelectionPK: data.SelectionID, + StateProofPK: data.StateProofID, + VoteFirst: data.VoteFirstValid, + VoteLast: data.VoteLastValid, + VoteKeyDilution: data.VoteKeyDilution, + } + + wh, err := client.GetUnencryptedWalletHandle() + a.NoError(err) + onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, offTx) + a.NoError(err) + txn, err := f.WaitForConfirmedTxn(uint64(offTx.LastValid), onlineTxID) + a.NoError(err) + // sync up with the network + _, err = client.WaitForRound(*txn.ConfirmedRound) + a.NoError(err) + data, err = client.AccountData(address) + a.NoError(err) + a.Equal(basics.Offline, data.Status) + return keys +} + +// Go online with the supplied key material +func online(f *fixtures.RestClientFixture, a *require.Assertions, client libgoal.Client, address string, keys transactions.KeyregTxnFields) uint64 { + // sanity check that we start offline + data, err := client.AccountData(address) + a.NoError(err) + a.Equal(basics.Offline, data.Status) + + // make an empty keyreg, we'll copy in the keys + onTx, err := client.MakeUnsignedGoOfflineTx(address, 0, 0, 100_000, [32]byte{}) + a.NoError(err) + + onTx.KeyregTxnFields = keys + wh, err := client.GetUnencryptedWalletHandle() + a.NoError(err) + onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, onTx) + a.NoError(err) + receipt, err := f.WaitForConfirmedTxn(uint64(onTx.LastValid), onlineTxID) + a.NoError(err) + data, err = client.AccountData(address) + a.NoError(err) + // Before bug fix, 
the account would be suspended in the same round of the + // keyreg, so it would not be online. + a.Equal(basics.Online, data.Status) + return *receipt.ConfirmedRound +} diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go index 0b38fe76ff..21a701139a 100644 --- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go +++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go @@ -216,7 +216,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { // Need to wait for funding to take effect on selection, then we can see if we're participating // Stop before the account should become eligible for selection so we can ensure it wasn't - err = fixture.ClientWaitForRound(fixture.AlgodClient, uint64(accountProposesStarting-1), + err = fixture.WaitForRound(uint64(accountProposesStarting-1), time.Duration(uint64(globals.MaxTimePerRound)*uint64(accountProposesStarting-1))) a.NoError(err) @@ -226,7 +226,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { a.False(blockWasProposed, "account should not be selected until BalLookback (round %d) passes", int(accountProposesStarting-1)) // Now wait until the round where the funded account will be used. 
- err = fixture.ClientWaitForRound(fixture.AlgodClient, uint64(accountProposesStarting), 10*globals.MaxTimePerRound) + err = fixture.WaitForRound(uint64(accountProposesStarting), 10*globals.MaxTimePerRound) a.NoError(err) blockWasProposedByNewAccountRecently := fixture.VerifyBlockProposedRange(newAccount, int(accountProposesStarting), 1) diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go index 21ce3bdf0d..e3429490c4 100644 --- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go +++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go @@ -57,7 +57,7 @@ func TestBasicPartitionRecovery(t *testing.T) { // Let the network make some progress waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) + err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Now stop 2nd node @@ -133,7 +133,7 @@ func runTestWithStaggeredStopStart(t *testing.T, fixture *fixtures.RestClientFix // Let the network make some progress waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound) + err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Stop Node1 @@ -196,7 +196,7 @@ func TestBasicPartitionRecoveryPartOffline(t *testing.T) { // Let the network make some progress waitForRound := uint64(3) - err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound) + err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound) a.NoError(err) // Stop Node1 @@ -264,8 +264,7 @@ func TestPartitionHalfOffline(t *testing.T) { // Let the network make some progress client := fixture.LibGoalClient - waitForRound := uint64(3) - err = 
fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound) + err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(3) a.NoError(err) // Stop nodes with 50% of stake diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go index 85d7d5e127..4735ca840f 100644 --- a/test/e2e-go/features/stateproofs/stateproofs_test.go +++ b/test/e2e-go/features/stateproofs/stateproofs_test.go @@ -810,6 +810,7 @@ func TestTotalWeightChanges(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensusParams := getDefaultStateProofConsensusParams() + consensusParams.StateProofWeightThreshold = (1 << 32) * 90 / 100 consensusParams.StateProofStrengthTarget = 4 consensusParams.StateProofTopVoters = 4 diff --git a/test/e2e-go/restAPI/other/misc_test.go b/test/e2e-go/restAPI/other/misc_test.go index eeaff9fcd1..23e805dc25 100644 --- a/test/e2e-go/restAPI/other/misc_test.go +++ b/test/e2e-go/restAPI/other/misc_test.go @@ -62,7 +62,7 @@ func TestDisabledAPIConfig(t *testing.T) { a.NoError(err) testClient := client.MakeRestClient(url, "") // empty token - _, err = testClient.WaitForBlock(1) + err = testClient.WaitForRoundWithTimeout(1) assert.NoError(t, err) _, err = testClient.Block(1) assert.NoError(t, err) diff --git a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go index 66601c1737..b058b510e4 100644 --- a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go +++ b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go @@ -53,7 +53,7 @@ func TestSimulateTxnTracerDevMode(t *testing.T) { testClient := localFixture.LibGoalClient - _, err := testClient.WaitForRound(1) + _, err := testClient.Status() a.NoError(err) wh, err := testClient.GetUnencryptedWalletHandle() @@ -288,11 +288,11 @@ int 1` // Let the primary node make some progress primaryClient := fixture.GetAlgodClientForController(nc) - err = 
fixture.ClientWaitForRoundWithTimeout(primaryClient, followerSyncRound+uint64(cfg.MaxAcctLookback)) + err = primaryClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback)) a.NoError(err) // Let follower node progress as far as it can - err = fixture.ClientWaitForRoundWithTimeout(followClient, followerSyncRound+uint64(cfg.MaxAcctLookback)-1) + err = followClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback) - 1) a.NoError(err) simulateRequest := v2.PreEncodedSimulateRequest{ diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go index 549a82c5ab..c41ad84166 100644 --- a/test/e2e-go/upgrades/application_support_test.go +++ b/test/e2e-go/upgrades/application_support_test.go @@ -180,7 +180,7 @@ int 1 curStatus, err = client.Status() a.NoError(err) - a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) } @@ -438,7 +438,7 @@ int 1 curStatus, err = client.Status() a.NoError(err) - a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go index 0dcec41545..cc3eca018c 100644 --- a/test/e2e-go/upgrades/rekey_support_test.go +++ b/test/e2e-go/upgrades/rekey_support_test.go @@ -150,7 +150,7 @@ func TestRekeyUpgrade(t *testing.T) { curStatus, err = client.Status() a.NoError(err) - a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } diff --git a/test/framework/fixtures/libgoalFixture.go 
b/test/framework/fixtures/libgoalFixture.go index bd4f615ae7..c05a59ff1f 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -42,7 +42,6 @@ import ( "github.com/algorand/go-algorand/netdeploy" "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/test/e2e-go/globals" "github.com/algorand/go-algorand/util/db" ) @@ -67,26 +66,32 @@ func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) { f.consensus = consensus } +// AlterConsensus allows the caller to modify the consensus settings for a given version. +func (f *RestClientFixture) AlterConsensus(ver protocol.ConsensusVersion, alter func(config.ConsensusParams) config.ConsensusParams) { + if f.consensus == nil { + f.consensus = make(config.ConsensusProtocols) + } + f.consensus[ver] = alter(f.ConsensusParamsFromVer(ver)) +} + // FasterConsensus speeds up the given consensus version in two ways. The seed // refresh lookback is set to 8 (instead of 80), so the 320 round balance // lookback becomes 32. And, if the architecture implies it can be handled, // round times are shortened by lowering vote timeouts. 
func (f *RestClientFixture) FasterConsensus(ver protocol.ConsensusVersion, timeout time.Duration, lookback basics.Round) { - if f.consensus == nil { - f.consensus = make(config.ConsensusProtocols) - } - fast := config.Consensus[ver] - // balanceRound is 4 * SeedRefreshInterval - if lookback%4 != 0 { - panic(fmt.Sprintf("lookback must be a multiple of 4, got %d", lookback)) - } - fast.SeedRefreshInterval = uint64(lookback) / 4 - // and speed up the rounds while we're at it - if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" { - fast.AgreementFilterTimeoutPeriod0 = timeout - fast.AgreementFilterTimeout = timeout - } - f.consensus[ver] = fast + f.AlterConsensus(ver, func(fast config.ConsensusParams) config.ConsensusParams { + // balanceRound is 4 * SeedRefreshInterval + if lookback%4 != 0 { + panic(fmt.Sprintf("lookback must be a multiple of 4, got %d", lookback)) + } + fast.SeedRefreshInterval = uint64(lookback) / 4 + // and speed up the rounds while we're at it + if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" { + fast.AgreementFilterTimeoutPeriod0 = timeout + fast.AgreementFilterTimeout = timeout + } + return fast + }) } // Setup is called to initialize the test fixture for the test(s) @@ -452,75 +457,6 @@ func (f *LibGoalFixture) GetParticipationOnlyAccounts(lg libgoal.Client) []accou return f.clientPartKeys[lg.DataDir()] } -// WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the -// globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach. -func (f *LibGoalFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error { - return f.ClientWaitForRoundWithTimeout(f.LibGoalClient, roundToWaitFor) -} - -// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. 
The implementation -// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're -// getting "hung" before waiting for all the expected rounds to reach. -func (f *LibGoalFixture) ClientWaitForRoundWithTimeout(client libgoal.Client, roundToWaitFor uint64) error { - status, err := client.Status() - require.NoError(f.t, err) - lastRound := status.LastRound - - // If node is already at or past target round, we're done - if lastRound >= roundToWaitFor { - return nil - } - - roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer - roundComplete := make(chan error, 2) - - for nextRound := lastRound + 1; lastRound < roundToWaitFor; { - roundStarted := time.Now() - - go func(done chan error) { - err := f.ClientWaitForRound(client, nextRound, roundTime) - done <- err - }(roundComplete) - - select { - case lastError := <-roundComplete: - if lastError != nil { - close(roundComplete) - return lastError - } - case <-time.After(roundTime): - // we've timed out. 
- time := time.Now().Sub(roundStarted) - return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", time.Seconds(), lastRound, nextRound) - } - - roundTime = singleRoundMaxTime - lastRound++ - nextRound++ - } - return nil -} - -// ClientWaitForRound waits up to the specified amount of time for -// the network to reach or pass the specified round, on the specific client/node -func (f *LibGoalFixture) ClientWaitForRound(client libgoal.Client, round uint64, waitTime time.Duration) error { - timeout := time.NewTimer(waitTime) - for { - status, err := client.Status() - if err != nil { - return err - } - if status.LastRound >= round { - return nil - } - select { - case <-timeout.C: - return fmt.Errorf("timeout waiting for round %v", round) - case <-time.After(200 * time.Millisecond): - } - } -} - // CurrentConsensusParams returns the consensus parameters for the currently active protocol func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusParams, err error) { status, err := f.LibGoalClient.Status() @@ -532,20 +468,20 @@ func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusPar } // ConsensusParams returns the consensus parameters for the protocol from the specified round -func (f *LibGoalFixture) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) { +func (f *LibGoalFixture) ConsensusParams(round uint64) (config.ConsensusParams, error) { block, err := f.LibGoalClient.BookkeepingBlock(round) if err != nil { - return + return config.ConsensusParams{}, err } - version := protocol.ConsensusVersion(block.CurrentProtocol) - if f.consensus != nil { - consensus, has := f.consensus[version] - if has { - return consensus, nil - } + return f.ConsensusParamsFromVer(block.CurrentProtocol), nil +} + +// ConsensusParamsFromVer looks up a consensus version, allowing for override +func (f *LibGoalFixture) ConsensusParamsFromVer(cv protocol.ConsensusVersion) config.ConsensusParams { + if 
consensus, has := f.consensus[cv]; has { + return consensus } - consensus = config.Consensus[version] - return + return config.Consensus[cv] } // CurrentMinFeeAndBalance returns the MinTxnFee and MinBalance for the currently active protocol diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go index 473df25d38..fb1a26d31b 100644 --- a/test/framework/fixtures/restClientFixture.go +++ b/test/framework/fixtures/restClientFixture.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/netdeploy" "github.com/algorand/go-algorand/protocol" @@ -34,7 +35,6 @@ import ( "github.com/algorand/go-algorand/libgoal" "github.com/algorand/go-algorand/nodecontrol" - "github.com/algorand/go-algorand/test/e2e-go/globals" "github.com/algorand/go-algorand/util/tokens" ) @@ -80,79 +80,37 @@ func (f *RestClientFixture) GetAlgodClientForController(nc nodecontrol.NodeContr // WaitForRound waits up to the specified amount of time for // the network to reach or pass the specified round func (f *RestClientFixture) WaitForRound(round uint64, waitTime time.Duration) error { - return f.ClientWaitForRound(f.AlgodClient, round, waitTime) + _, err := f.AlgodClient.WaitForRound(round, waitTime) + return err } -// ClientWaitForRound waits up to the specified amount of time for -// the network to reach or pass the specified round, on the specific client/node -func (f *RestClientFixture) ClientWaitForRound(client client.RestClient, round uint64, waitTime time.Duration) error { - timeout := time.NewTimer(waitTime) - for { - status, err := client.Status() - if err != nil { - return err - } - - if status.LastRound >= round { - return nil - } - select { - case <-timeout.C: - return fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound) - case 
<-time.After(200 * time.Millisecond): - } +// WithEveryBlock calls the provided function for every block from first to last. +func (f *RestClientFixture) WithEveryBlock(first, last uint64, visit func(bookkeeping.Block)) { + for round := first; round <= last; round++ { + err := f.WaitForRoundWithTimeout(round) + require.NoError(f.t, err) + block, err := f.AlgodClient.Block(round) + require.NoError(f.t, err) + visit(block.Block) } } // WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the // globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach. func (f *RestClientFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error { - return f.ClientWaitForRoundWithTimeout(f.AlgodClient, roundToWaitFor) + return f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor) } -const singleRoundMaxTime = globals.MaxTimePerRound * 40 - -// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. The implementation -// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're -// getting "hung" before waiting for all the expected rounds to reach. -func (f *RestClientFixture) ClientWaitForRoundWithTimeout(client client.RestClient, roundToWaitFor uint64) error { - status, err := client.Status() - require.NoError(f.t, err) - lastRound := status.LastRound - - // If node is already at or past target round, we're done - if lastRound >= roundToWaitFor { - return nil +// WaitForBlockWithTimeout waits for a given round and returns its block. 
+func (f *RestClientFixture) WaitForBlockWithTimeout(roundToWaitFor uint64) (bookkeeping.Block, error) { + if err := f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor); err != nil { + return bookkeeping.Block{}, err } - - roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer - roundComplete := make(chan error, 2) - - for nextRound := lastRound + 1; lastRound < roundToWaitFor; { - roundStarted := time.Now() - - go func(done chan error) { - err := f.ClientWaitForRound(client, nextRound, roundTime) - done <- err - }(roundComplete) - - select { - case lastError := <-roundComplete: - if lastError != nil { - close(roundComplete) - return lastError - } - case <-time.After(roundTime): - // we've timed out. - time := time.Now().Sub(roundStarted) - return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", time.Seconds(), lastRound, nextRound) - } - - roundTime = singleRoundMaxTime - lastRound++ - nextRound++ + both, err := f.AlgodClient.EncodedBlockCert(roundToWaitFor) + if err != nil { + return bookkeeping.Block{}, err } - return nil + return both.Block, nil } // GetFirstAccount returns the first account from listing local accounts @@ -367,17 +325,15 @@ func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassw // VerifyBlockProposedRange checks the rounds starting at fromRounds and moving backwards checking countDownNumRounds rounds if any // blocks were proposed by address -func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) (blockWasProposed bool) { - c := f.LibGoalClient +func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) bool { for i := 0; i < countDownNumRounds; i++ { - cert, err := c.EncodedBlockCert(uint64(fromRound - i)) + cert, err := f.AlgodClient.EncodedBlockCert(uint64(fromRound - i)) require.NoError(f.t, err, "client failed to get block %d", fromRound-i) if 
cert.Certificate.Proposal.OriginalProposer.GetUserAddress() == account { - blockWasProposed = true - break + return true } } - return + return false } // VerifyBlockProposed checks the last searchRange blocks to see if any blocks were proposed by address diff --git a/test/testdata/nettemplates/Challenges.json b/test/testdata/nettemplates/Challenges.json new file mode 100644 index 0000000000..1d9944937c --- /dev/null +++ b/test/testdata/nettemplates/Challenges.json @@ -0,0 +1,60 @@ +{ + "Genesis": { + "NetworkName": "tbd", + "ConsensusProtocol": "future", + "LastPartKeyRound": 500, + "Wallets": [ + { "Name": "Relay", "Stake": 84, "Online": true }, + { "Name": "Wallet0", "Stake": 1, "Online": true }, + { "Name": "Wallet1", "Stake": 1, "Online": true }, + { "Name": "Wallet2", "Stake": 1, "Online": true }, + { "Name": "Wallet3", "Stake": 1, "Online": true }, + { "Name": "Wallet4", "Stake": 1, "Online": true }, + { "Name": "Wallet5", "Stake": 1, "Online": true }, + { "Name": "Wallet6", "Stake": 1, "Online": true }, + { "Name": "Wallet7", "Stake": 1, "Online": true }, + { "Name": "Wallet8", "Stake": 1, "Online": true }, + { "Name": "Wallet9", "Stake": 1, "Online": true }, + { "Name": "WalletA", "Stake": 1, "Online": true }, + { "Name": "WalletB", "Stake": 1, "Online": true }, + { "Name": "WalletC", "Stake": 1, "Online": true }, + { "Name": "WalletD", "Stake": 1, "Online": true }, + { "Name": "WalletE", "Stake": 1, "Online": true }, + { "Name": "WalletF", "Stake": 1, "Online": true } + ], + "RewardsPoolBalance": 0 + }, + "Nodes": [ + { + "Name": "Relay", + "Wallets": [{ "Name": "Relay", "ParticipationOnly": false }], + "IsRelay": true + }, + { + "Name": "Node1", + "Wallets": [ + { "Name": "Wallet0", "ParticipationOnly": false }, + { "Name": "Wallet1", "ParticipationOnly": false }, + { "Name": "Wallet2", "ParticipationOnly": false }, + { "Name": "Wallet3", "ParticipationOnly": false }, + { "Name": "Wallet4", "ParticipationOnly": false }, + { "Name": "Wallet5", 
"ParticipationOnly": false }, + { "Name": "Wallet6", "ParticipationOnly": false }, + { "Name": "Wallet7", "ParticipationOnly": false } + ] + }, + { + "Name": "Node2", + "Wallets": [ + { "Name": "Wallet8", "ParticipationOnly": false }, + { "Name": "Wallet9", "ParticipationOnly": false }, + { "Name": "WalletA", "ParticipationOnly": false }, + { "Name": "WalletB", "ParticipationOnly": false }, + { "Name": "WalletC", "ParticipationOnly": false }, + { "Name": "WalletD", "ParticipationOnly": false }, + { "Name": "WalletE", "ParticipationOnly": false }, + { "Name": "WalletF", "ParticipationOnly": false } + ] + } + ] +} diff --git a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go index cde3894823..edc9d612dd 100644 --- a/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go +++ b/tools/x-repo-types/typeAnalyzer/typeAnalyzer.go @@ -237,12 +237,17 @@ func (t *TypeNode) buildStructChildren(path TypePath) TypePath { if typeField.Anonymous { // embedded struct case - actualKind := typeField.Type.Kind() + fieldType := typeField.Type + if fieldType.Kind() == reflect.Ptr { + // get underlying type for embedded pointer to struct + fieldType = fieldType.Elem() + } + actualKind := fieldType.Kind() if actualKind != reflect.Struct { panic(fmt.Sprintf("expected [%s] but got unexpected embedded type: %s", reflect.Struct, typeField.Type)) } - embedded := TypeNode{t.Depth, typeField.Type, reflect.Struct, nil, nil} + embedded := TypeNode{t.Depth, fieldType, reflect.Struct, nil, nil} embeddedCyclePath := embedded.build(path) if len(embeddedCyclePath) > 0 { cyclePath = embeddedCyclePath diff --git a/util/db/dbutil.go b/util/db/dbutil.go index 8b045ad70c..e1cd16e2b5 100644 --- a/util/db/dbutil.go +++ b/util/db/dbutil.go @@ -327,7 +327,7 @@ func (db *Accessor) AtomicContextWithRetryClearFn(ctx context.Context, fn idemFn } if time.Now().After(atomicDeadline) { - db.getDecoratedLogger(fn, extras).Warnf("dbatomic: tx surpassed expected deadline by %v", 
time.Now().Sub(atomicDeadline)) + db.getDecoratedLogger(fn, extras).Warnf("dbatomic: tx surpassed expected deadline by %v", time.Since(atomicDeadline)) } return } diff --git a/util/execpool/stream.go b/util/execpool/stream.go index 29ec4613f1..f6017a0af1 100644 --- a/util/execpool/stream.go +++ b/util/execpool/stream.go @@ -87,7 +87,7 @@ func (sv *StreamToBatch) Start(ctx context.Context) { go sv.batchingLoop() } -// WaitForStop waits until the batching loop terminates afer the ctx is canceled +// WaitForStop waits until the batching loop terminates after the ctx is canceled func (sv *StreamToBatch) WaitForStop() { sv.activeLoopWg.Wait() } diff --git a/util/set.go b/util/set.go index 6851299c46..3727a99f33 100644 --- a/util/set.go +++ b/util/set.go @@ -40,3 +40,39 @@ func (s Set[T]) Contains(elem T) (exists bool) { _, exists = s[elem] return } + +// Union constructs a new set, containing all elements from the given sets. nil +// is never returned +func Union[T comparable](sets ...Set[T]) Set[T] { + union := make(Set[T]) + for _, set := range sets { + for elem := range set { + union.Add(elem) + } + } + return union +} + +// Intersection constructs a new set, containing all elements that appear in all +// given sets. nil is never returned. Intersection of no sets is an empty set +// because that seems more useful, regardless of your very reasonable arguments +// otherwise. +func Intersection[T comparable](sets ...Set[T]) Set[T] { + var intersection = make(Set[T]) + if len(sets) == 0 { + return intersection + } + for elem := range sets[0] { + inAll := true + for _, set := range sets[1:] { + if _, exists := set[elem]; !exists { + inAll = false + break + } + } + if inAll { + intersection.Add(elem) + } + } + return intersection +} diff --git a/util/set_test.go b/util/set_test.go new file mode 100644 index 0000000000..86df9c5464 --- /dev/null +++ b/util/set_test.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package util + +import ( + "testing" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +func TestMakeSet(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + s := MakeSet(1, 2, 3) + require.True(t, s.Contains(1)) + require.True(t, s.Contains(2)) + require.True(t, s.Contains(3)) + require.False(t, s.Contains(4)) + + s = MakeSet[int]() + require.NotNil(t, s) + require.False(t, s.Contains(1)) + require.False(t, s.Contains(4)) +} + +func TestSetAdd(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + s := MakeSet[int]() + s.Add(6) + require.False(t, s.Contains(1)) + require.True(t, s.Contains(6)) + s.Add(6) + require.False(t, s.Contains(1)) + require.True(t, s.Contains(6)) +} + +func TestSetOps(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + empty := MakeSet[string]() + abc := MakeSet("a", "b", "c") + cde := MakeSet("c", "d", "e") + + require.Equal(t, abc, Union(abc)) + require.Equal(t, abc, Union(empty, abc)) + require.Equal(t, abc, Union(abc, empty, abc)) + require.NotNil(t, Union(empty, empty, empty)) + require.Equal(t, empty, Union(empty, empty, empty)) + + require.Equal(t, abc, Intersection(abc, abc)) + require.NotNil(t, Intersection(abc, empty)) + require.Equal(t, empty, 
Intersection(abc, empty)) + require.Equal(t, empty, Intersection(empty, abc)) + require.Equal(t, MakeSet("c"), Intersection(abc, cde)) + require.Equal(t, MakeSet("c"), Intersection(cde, abc, cde)) +} From a896dfd020888fb03bede2ba50d07eea140249ce Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 19 Dec 2024 16:39:25 -0500 Subject: [PATCH 10/15] Doc: voter balance version fix (#6205) --- data/transactions/logic/TEAL_opcodes_v11.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/data/transactions/logic/TEAL_opcodes_v11.md b/data/transactions/logic/TEAL_opcodes_v11.md index 345ebca43e..08a4b3ee5a 100644 --- a/data/transactions/logic/TEAL_opcodes_v11.md +++ b/data/transactions/logic/TEAL_opcodes_v11.md @@ -1105,10 +1105,10 @@ Fields Fields -| Index | Name | Type | In | Notes | -| - | ------ | -- | - | --------- | -| 0 | VoterBalance | uint64 | v6 | Online stake in microalgos | -| 1 | VoterIncentiveEligible | bool | | Had this account opted into block payouts | +| Index | Name | Type | Notes | +| - | ------ | -- | --------- | +| 0 | VoterBalance | uint64 | Online stake in microalgos | +| 1 | VoterIncentiveEligible | bool | Had this account opted into block payouts | ## online_stake From 38ce41c2255efac9a0008d6fbfa61ee0fa1522e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 17:12:26 -0500 Subject: [PATCH 11/15] build(deps): bump golang.org/x/crypto from 0.29.0 to 0.31.0 (#6203) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Gary Malouf <982483+gmalouf@users.noreply.github.com> --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- tools/block-generator/go.mod | 12 ++++++------ tools/block-generator/go.sum | 24 ++++++++++++------------ 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index 9ec11b823e..0f45d6b6a7 100644 --- a/go.mod +++ 
b/go.mod @@ -50,11 +50,11 @@ require ( github.com/stretchr/testify v1.9.0 go.opencensus.io v0.24.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.29.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 - golang.org/x/text v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/text v0.21.0 gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 pgregory.net/rapid v0.6.2 ) @@ -176,7 +176,7 @@ require ( github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.48.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect @@ -196,8 +196,8 @@ require ( go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/term v0.26.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/term v0.27.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.26.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect diff --git a/go.sum b/go.sum index fdee6063f8..42c6ef5005 100644 --- a/go.sum +++ b/go.sum @@ -570,8 +570,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.48.1 h1:y/8xmfWI9qmGTc+lBr4jKRUWLGSlSigv847ULJ4hYXA= -github.com/quic-go/quic-go v0.48.1/go.mod 
h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -733,8 +733,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= @@ -784,8 +784,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net 
v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -801,8 +801,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -843,8 +843,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= 
-golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -852,8 +852,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -864,8 +864,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 
h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod index e0a4101d05..343f91c7bc 100644 --- a/tools/block-generator/go.mod +++ b/tools/block-generator/go.mod @@ -151,7 +151,7 @@ require ( github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.48.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect @@ -169,13 +169,13 @@ require ( go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.29.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.31.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.27.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/protobuf v1.35.1 // indirect diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum index 19c67b5d26..4f27558584 100644 --- a/tools/block-generator/go.sum 
+++ b/tools/block-generator/go.sum @@ -525,8 +525,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.48.1 h1:y/8xmfWI9qmGTc+lBr4jKRUWLGSlSigv847ULJ4hYXA= -github.com/quic-go/quic-go v0.48.1/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -677,8 +677,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= 
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= @@ -727,8 +727,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -744,8 +744,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -782,8 +782,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -801,8 +801,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 
h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= From 2e043dfb24714f15fd64503f03c36ba5a558742d Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 19 Dec 2024 19:17:36 -0500 Subject: [PATCH 12/15] Specs: Commits spec changes made in specs repo (#6206) --- data/transactions/logic/README.md | 22 +++++++------ data/transactions/logic/README_in.md | 20 +++++++----- data/transactions/logic/TEAL_opcodes_v11.md | 16 --------- data/transactions/logic/assembler_test.go | 11 +++++-- data/transactions/logic/crypto_test.go | 8 ++--- data/transactions/logic/eval_test.go | 9 ++++++ data/transactions/logic/langspec_v11.json | 36 --------------------- data/transactions/logic/opcodes.go | 10 +++--- ledger/simulation/simulation_eval_test.go | 30 ++++++++++++++--- 9 files changed, 76 insertions(+), 86 deletions(-) diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md index ca5e04bef4..269832d38a 100644 --- a/data/transactions/logic/README.md +++ b/data/transactions/logic/README.md @@ -137,14 +137,18 @@ of a contract account. transaction against the contract account is for the program to approve it. -The bytecode plus the length of all Args must add up to no more than -1000 bytes (consensus parameter LogicSigMaxSize). Each opcode has an -associated cost, usually 1, but a few slow operations have higher -costs. Prior to v4, the program's cost was estimated as the static sum -of all the opcode costs in the program (whether they were actually -executed or not). Beginning with v4, the program's cost is tracked -dynamically, while being evaluated. If the program exceeds its budget, -it fails. +The size of a Smart Signature is defined as the length of its bytecode +plus the length of all its Args. The sum of the sizes of all Smart +Signatures in a group must not exceed 1000 bytes times the number of +transactions in the group (1000 bytes is defined in consensus parameter +`LogicSigMaxSize`). 
+ +Each opcode has an associated cost, usually 1, but a few slow operations +have higher costs. Prior to v4, the program's cost was estimated as the +static sum of all the opcode costs in the program (whether they were +actually executed or not). Beginning with v4, the program's cost is +tracked dynamically while being evaluated. If the program exceeds its +budget, it fails. The total program cost of all Smart Signatures in a group must not exceed 20,000 (consensus parameter LogicSigMaxCost) times the number @@ -463,8 +467,6 @@ these results may contain leading zero bytes. | `keccak256` | Keccak256 hash of value A, yields [32]byte | | `sha512_256` | SHA512_256 hash of value A, yields [32]byte | | `sha3_256` | SHA3_256 hash of value A, yields [32]byte | -| `sumhash512` | sumhash512 of value A, yields [64]byte | -| `falcon_verify` | for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey | | `ed25519verify` | for (data A, signature B, pubkey C) verify the signature of ("ProgData" \|\| program_hash \|\| data) against the pubkey => {0 or 1} | | `ed25519verify_bare` | for (data A, signature B, pubkey C) verify the signature of the data against the pubkey => {0 or 1} | | `ecdsa_verify v` | for (data A, signature B, C and pubkey D, E) verify the signature of the data against the pubkey => {0 or 1} | diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md index 31f8fb05be..848c345f97 100644 --- a/data/transactions/logic/README_in.md +++ b/data/transactions/logic/README_in.md @@ -123,14 +123,18 @@ of a contract account. transaction against the contract account is for the program to approve it. -The bytecode plus the length of all Args must add up to no more than -1000 bytes (consensus parameter LogicSigMaxSize). Each opcode has an -associated cost, usually 1, but a few slow operations have higher -costs. 
Prior to v4, the program's cost was estimated as the static sum -of all the opcode costs in the program (whether they were actually -executed or not). Beginning with v4, the program's cost is tracked -dynamically, while being evaluated. If the program exceeds its budget, -it fails. +The size of a Smart Signature is defined as the length of its bytecode +plus the length of all its Args. The sum of the sizes of all Smart +Signatures in a group must not exceed 1000 bytes times the number of +transactions in the group (1000 bytes is defined in consensus parameter +`LogicSigMaxSize`). + +Each opcode has an associated cost, usually 1, but a few slow operations +have higher costs. Prior to v4, the program's cost was estimated as the +static sum of all the opcode costs in the program (whether they were +actually executed or not). Beginning with v4, the program's cost is +tracked dynamically while being evaluated. If the program exceeds its +budget, it fails. The total program cost of all Smart Signatures in a group must not exceed 20,000 (consensus parameter LogicSigMaxCost) times the number diff --git a/data/transactions/logic/TEAL_opcodes_v11.md b/data/transactions/logic/TEAL_opcodes_v11.md index 08a4b3ee5a..8bbad2e206 100644 --- a/data/transactions/logic/TEAL_opcodes_v11.md +++ b/data/transactions/logic/TEAL_opcodes_v11.md @@ -1177,22 +1177,6 @@ pushints args are not added to the intcblock during assembly processes - **Cost**: 1900 - Availability: v7 -## falcon_verify - -- Bytecode: 0x85 -- Stack: ..., A: []byte, B: [1232]byte, C: [1793]byte → ..., bool -- for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey -- **Cost**: 1700 -- Availability: v11 - -## sumhash512 - -- Bytecode: 0x86 -- Stack: ..., A: []byte → ..., [64]byte -- sumhash512 of value A, yields [64]byte -- **Cost**: 150 + 7 per 4 bytes of A -- Availability: v11 - ## callsub - Syntax: `callsub TARGET` where TARGET: branch offset diff --git 
a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 34b2ed1995..093fe7de3d 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -455,7 +455,9 @@ const spliceNonsence = ` const v10Nonsense = v9Nonsense + pairingNonsense + spliceNonsence -const v11Nonsense = v10Nonsense + incentiveNonsense + stateProofNonsense + mimcNonsense +const v11Nonsense = v10Nonsense + incentiveNonsense + mimcNonsense + +const v12Nonsense = v11Nonsense + stateProofNonsense const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f2310231123122313231418191a1b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b400b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a" @@ -482,7 +484,8 @@ const incentiveCompiled = "757401" const stateProofCompiled = "80070123456789abcd86494985" const mimcCompiled = "802011223344556677889900aabbccddeeff11223344556677889900aabbccddeeffe601" -const V11Compiled = v10Compiled + incentiveCompiled + 
stateProofCompiled + mimcCompiled +const v11Compiled = v10Compiled + incentiveCompiled + mimcCompiled +const v12Compiled = v11Compiled + stateProofCompiled var nonsense = map[uint64]string{ 1: v1Nonsense, @@ -496,6 +499,7 @@ var nonsense = map[uint64]string{ 9: v9Nonsense, 10: v10Nonsense, 11: v11Nonsense, + 12: v12Nonsense, } var compiled = map[uint64]string{ @@ -509,7 +513,8 @@ var compiled = map[uint64]string{ 8: "08" + v8Compiled, 9: "09" + v9Compiled, 10: "0a" + v10Compiled, - 11: "0b" + V11Compiled, + 11: "0b" + v11Compiled, + 12: "0c" + v12Compiled, } func pseudoOp(opcode string) bool { diff --git a/data/transactions/logic/crypto_test.go b/data/transactions/logic/crypto_test.go index 5c14e23049..8713823747 100644 --- a/data/transactions/logic/crypto_test.go +++ b/data/transactions/logic/crypto_test.go @@ -76,7 +76,7 @@ func TestSumhash(t *testing.T) { } for _, v := range testVectors { - testAccepts(t, fmt.Sprintf(`byte "%s"; sumhash512; byte 0x%s; ==`, v.in, v.out), 11) + testAccepts(t, fmt.Sprintf(`byte "%s"; sumhash512; byte 0x%s; ==`, v.in, v.out), 12) } } @@ -390,13 +390,13 @@ func TestFalconVerify(t *testing.T) { require.NoError(t, err) yes := testProg(t, fmt.Sprintf(`arg 0; arg 1; byte 0x%s; falcon_verify`, - hex.EncodeToString(fs.PublicKey[:])), 11) + hex.EncodeToString(fs.PublicKey[:])), 12) require.NoError(t, err) no := testProg(t, fmt.Sprintf(`arg 0; arg 1; byte 0x%s; falcon_verify; !`, - hex.EncodeToString(fs.PublicKey[:])), 11) + hex.EncodeToString(fs.PublicKey[:])), 12) require.NoError(t, err) - for v := uint64(11); v <= AssemblerMaxVersion; v++ { + for v := uint64(12); v <= AssemblerMaxVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { yes.Program[0] = byte(v) sig, err := fs.SignBytes(data) diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 27a64f1b82..e6f0abe320 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -1266,6 +1266,8 @@ global 
PayoutsPercent; int 4; ==; assert global PayoutsMinBalance; int 5; ==; assert global PayoutsMaxBalance; int 6; ==; assert ` +const globalV12TestProgram = globalV11TestProgram + ` +` func TestAllGlobals(t *testing.T) { partitiontest.PartitionTest(t) @@ -1289,6 +1291,7 @@ func TestAllGlobals(t *testing.T) { 9: {CallerApplicationAddress, globalV9TestProgram}, 10: {GenesisHash, globalV10TestProgram}, 11: {PayoutsMaxBalance, globalV11TestProgram}, + 12: {PayoutsMaxBalance, globalV12TestProgram}, } // tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1) @@ -1800,6 +1803,11 @@ assert int 1 ` +const testTxnProgramTextV12 = testTxnProgramTextV11 + ` +assert +int 1 +` + func makeSampleTxn() transactions.SignedTxn { var txn transactions.SignedTxn copy(txn.Txn.Sender[:], []byte("aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00")) @@ -1914,6 +1922,7 @@ func TestTxn(t *testing.T) { 9: testTxnProgramTextV9, 10: testTxnProgramTextV10, 11: testTxnProgramTextV11, + 12: testTxnProgramTextV12, } for i, txnField := range TxnFieldNames { diff --git a/data/transactions/logic/langspec_v11.json b/data/transactions/logic/langspec_v11.json index cb054ebcd8..58d5657253 100644 --- a/data/transactions/logic/langspec_v11.json +++ b/data/transactions/logic/langspec_v11.json @@ -3072,42 +3072,6 @@ "Cryptography" ] }, - { - "Opcode": 133, - "Name": "falcon_verify", - "Args": [ - "[]byte", - "[1232]byte", - "[1793]byte" - ], - "Returns": [ - "bool" - ], - "Size": 1, - "DocCost": "1700", - "Doc": "for (data A, compressed-format signature B, pubkey C) verify the signature of data against the pubkey", - "IntroducedVersion": 11, - "Groups": [ - "Cryptography" - ] - }, - { - "Opcode": 134, - "Name": "sumhash512", - "Args": [ - "[]byte" - ], - "Returns": [ - "[64]byte" - ], - "Size": 1, - "DocCost": "150 + 7 per 4 bytes of A", - "Doc": "sumhash512 of value A, yields [64]byte", - "IntroducedVersion": 11, - "Groups": [ - 
"Cryptography" - ] - }, { "Opcode": 136, "Name": "callsub", diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index b283c49656..a8de92ce8d 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -28,7 +28,7 @@ import ( ) // LogicVersion defines default assembler and max eval versions -const LogicVersion = 11 +const LogicVersion = 12 // rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality // was enabled. This is important to remember so that old TEAL accounts cannot @@ -75,13 +75,13 @@ const sharedResourcesVersion = 9 // apps can access resources from other transac const pairingVersion = 10 // bn256 opcodes. will add bls12-381, and unify the available opcodes. const spliceVersion = 10 // box splicing/resizing +const incentiveVersion = 11 // block fields, heartbeat +const mimcVersion = 11 + // EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is // moved from vFuture to a new consensus version. If they remain unready, bump // their version, and fixup TestAssemble() in assembler_test.go. 
-const incentiveVersion = 11 // block fields, heartbeat - -const spOpcodesVersion = 11 // falcon_verify, sumhash512 -const mimcVersion = 11 +const spOpcodesVersion = 12 // falcon_verify, sumhash512 // Unlimited Global Storage opcodes const boxVersion = 8 // box_* diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go index ef81a8a10e..c36171eae6 100644 --- a/ledger/simulation/simulation_eval_test.go +++ b/ledger/simulation/simulation_eval_test.go @@ -6961,6 +6961,9 @@ func TestUnnamedResources(t *testing.T) { env.OptIntoApp(otherAppUser, otherAppID) proto := env.TxnInfo.CurrentProtocolParams() + if v > int(proto.LogicSigVersion) { + t.Skip("not testing in unsupported proto") + } expectedUnnamedResourceGroupAssignment := &simulation.ResourceTracker{ MaxAccounts: proto.MaxTxGroupSize * (proto.MaxAppTxnAccounts + proto.MaxAppTxnForeignApps), MaxAssets: proto.MaxTxGroupSize * proto.MaxAppTxnForeignAssets, @@ -7196,6 +7199,11 @@ func TestUnnamedResourcesAccountLocalWrite(t *testing.T) { sender := env.Accounts[0] testAppUser := env.Accounts[1].Addr + proto := env.TxnInfo.CurrentProtocolParams() + if v > int(proto.LogicSigVersion) { + t.Skip("not testing in unsupported proto") + } + program := fmt.Sprintf(`#pragma version %d txn ApplicationID ! 
@@ -7232,7 +7240,6 @@ int 1 }) stxn := txn.Txn().Sign(sender.Sk) - proto := env.TxnInfo.CurrentProtocolParams() expectedUnnamedResourceAssignment := &simulation.ResourceTracker{ MaxAccounts: proto.MaxTxGroupSize * (proto.MaxAppTxnAccounts + proto.MaxAppTxnForeignApps), MaxAssets: proto.MaxTxGroupSize * proto.MaxAppTxnForeignAssets, @@ -7338,6 +7345,11 @@ func TestUnnamedResourcesCreatedAppsAndAssets(t *testing.T) { t.Run(fmt.Sprintf("v%d", v), func(t *testing.T) { t.Parallel() simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { + proto := env.TxnInfo.CurrentProtocolParams() + if v > int(proto.LogicSigVersion) { + t.Skip("not testing in unsupported proto") + } + sender := env.Accounts[0] otherResourceCreator := env.Accounts[1] otherAccount := env.Accounts[2].Addr @@ -7435,7 +7447,6 @@ int 1 appCreateStxn := appCreateTxn.Txn().Sign(otherResourceCreator.Sk) appCallStxn := appCallTxn.Txn().Sign(sender.Sk) - proto := env.TxnInfo.CurrentProtocolParams() expectedUnnamedResourceAssignment := simulation.ResourceTracker{ MaxAccounts: (proto.MaxTxGroupSize - 1) * (proto.MaxAppTxnAccounts + proto.MaxAppTxnForeignApps), MaxAssets: (proto.MaxTxGroupSize - 1) * proto.MaxAppTxnForeignAssets, @@ -7702,6 +7713,11 @@ func TestUnnamedResourcesBoxIOBudget(t *testing.T) { env := simulationtesting.PrepareSimulatorTest(t) defer env.Close() + proto := env.TxnInfo.CurrentProtocolParams() + if v > int(proto.LogicSigVersion) { + t.Skip("not testing in unsupported proto") + } + sender := env.Accounts[0] appID := env.CreateApp(sender.Addr, simulationtesting.AppParams{ @@ -7709,8 +7725,6 @@ func TestUnnamedResourcesBoxIOBudget(t *testing.T) { ClearStateProgram: fmt.Sprintf("#pragma version %d\n int 1", v), }) - proto := env.TxnInfo.CurrentProtocolParams() - // MBR is needed for boxes. 
transferable := env.Accounts[1].AcctData.MicroAlgos.Raw - proto.MinBalance - proto.MinTxnFee env.TransferAlgos(env.Accounts[1].Addr, appID.Address(), transferable) @@ -8587,6 +8601,10 @@ func TestUnnamedResourcesLimits(t *testing.T) { defer env.Close() proto := env.TxnInfo.CurrentProtocolParams() + if v > int(proto.LogicSigVersion) { + t.Skip("not testing in unsupported proto") + return + } sender := env.Accounts[0] otherAccounts := make([]basics.Address, len(env.Accounts)-1) @@ -8753,6 +8771,10 @@ func TestUnnamedResourcesCrossProductLimits(t *testing.T) { defer env.Close() proto := env.TxnInfo.CurrentProtocolParams() + if v > int(proto.LogicSigVersion) { + t.Skip("not testing in unsupported proto") + return + } sender := env.Accounts[0] otherAccounts := make([]basics.Address, proto.MaxTxGroupSize) From a10b6b34154527c3e16329db76fd4ec3ff03be7b Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Thu, 19 Dec 2024 19:17:52 -0500 Subject: [PATCH 13/15] catchpoints: Add onlineaccounts and onlineroundparamstail tables to snapshot files (#6177) Co-authored-by: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> --- catchup/catchpointService_test.go | 4 +- cmd/catchpointdump/file.go | 7 +- .../mocks/mockCatchpointCatchupAccessor.go | 4 +- config/consensus.go | 6 + ledger/catchpointfileheader.go | 20 +- ledger/catchpointfilewriter.go | 197 ++++++--- ledger/catchpointfilewriter_test.go | 273 +++++++++++-- ledger/catchpointtracker.go | 84 +++- ledger/catchpointtracker_test.go | 18 +- ledger/catchupaccessor.go | 223 ++++++++--- ledger/catchupaccessor_test.go | 8 + ledger/encoded/msgp_gen.go | 377 ++++++++++++++++++ ledger/encoded/msgp_gen_test.go | 120 ++++++ ledger/encoded/recordsV6.go | 30 ++ ledger/ledgercore/catchpointlabel.go | 46 ++- ledger/ledgercore/catchpointlabel_test.go | 4 +- ledger/msgp_gen.go | 338 ++++++++++++---- ledger/store/trackerdb/catchpoint.go | 7 + .../dualdriver/accounts_reader_ext.go | 36 ++ 
.../store/trackerdb/dualdriver/dualdriver.go | 17 +- .../generickv/accounts_ext_reader.go | 10 + ledger/store/trackerdb/generickv/reader.go | 17 +- ledger/store/trackerdb/interface.go | 12 + ledger/store/trackerdb/msgp_gen.go | 108 ++++- .../trackerdb/sqlitedriver/accountsV2.go | 22 + .../trackerdb/sqlitedriver/catchpoint.go | 62 +++ .../sqlitedriver/encodedAccountsIter.go | 4 +- .../store/trackerdb/sqlitedriver/kvsIter.go | 114 ++++++ .../trackerdb/sqlitedriver/sqlitedriver.go | 17 +- ledger/store/trackerdb/store.go | 5 +- ledger/tracker_test.go | 5 +- logging/telemetryspec/event.go | 4 +- protocol/hash.go | 2 + .../catchup/catchpointCatchup_test.go | 12 +- .../catchup/stateproofsCatchup_test.go | 6 +- 35 files changed, 1928 insertions(+), 291 deletions(-) diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go index 39cef9f2b9..0487fb975f 100644 --- a/catchup/catchpointService_test.go +++ b/catchup/catchpointService_test.go @@ -81,8 +81,8 @@ func (m *catchpointCatchupAccessorMock) Ledger() (l ledger.CatchupAccessorClient } // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint -func (m *catchpointCatchupAccessorMock) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) { - return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil +func (m *catchpointCatchupAccessorMock) GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest, totals ledgercore.AccountTotals, err error) { + return crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil } // TestCatchpointServicePeerRank ensures CatchpointService does not crash when a block fetched diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go index f0f7c7bff5..eeda7f25ec 100644 --- a/cmd/catchpointdump/file.go +++ b/cmd/catchpointdump/file.go @@ 
-214,12 +214,13 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc if err != nil { return fileHeader, err } - var balanceHash, spverHash crypto.Digest - balanceHash, spverHash, _, err = catchupAccessor.GetVerifyData(ctx) + var balanceHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest + balanceHash, spverHash, onlineAccountsHash, onlineRoundParamsHash, _, err = catchupAccessor.GetVerifyData(ctx) if err != nil { return fileHeader, err } - fmt.Printf("accounts digest=%s, spver digest=%s\n\n", balanceHash, spverHash) + fmt.Printf("accounts digest=%s, spver digest=%s, onlineaccounts digest=%s onlineroundparams digest=%s\n\n", + balanceHash, spverHash, onlineAccountsHash, onlineRoundParamsHash) } return fileHeader, nil } diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go index edf7946743..1d7355b3b7 100644 --- a/components/mocks/mockCatchpointCatchupAccessor.go +++ b/components/mocks/mockCatchpointCatchupAccessor.go @@ -71,8 +71,8 @@ func (m *MockCatchpointCatchupAccessor) GetCatchupBlockRound(ctx context.Context } // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint -func (m *MockCatchpointCatchupAccessor) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) { - return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil +func (m *MockCatchpointCatchupAccessor) GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParams crypto.Digest, totals ledgercore.AccountTotals, err error) { + return crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil } // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. 
diff --git a/config/consensus.go b/config/consensus.go index b153848230..7e79e9b8e5 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -514,6 +514,10 @@ type ConsensusParams struct { // Version 7 includes state proof verification contexts EnableCatchpointsWithSPContexts bool + // EnableCatchpointsWithOnlineAccounts specifies when to enable version 8 catchpoints. + // Version 8 includes onlineaccounts and onlineroundparams amounts, for historical stake lookups. + EnableCatchpointsWithOnlineAccounts bool + // AppForbidLowResources enforces a rule that prevents apps from accessing // asas and apps below 256, in an effort to decrease the ambiguity of // opcodes that accept IDs or slot indexes. Simultaneously, the first ID @@ -1537,6 +1541,8 @@ func initConsensusProtocols() { vFuture.Heartbeat = true + vFuture.EnableCatchpointsWithOnlineAccounts = true + Consensus[protocol.ConsensusFuture] = vFuture // vAlphaX versions are an separate series of consensus parameters and versions for alphanet diff --git a/ledger/catchpointfileheader.go b/ledger/catchpointfileheader.go index f076f7267c..8fd75df135 100644 --- a/ledger/catchpointfileheader.go +++ b/ledger/catchpointfileheader.go @@ -27,13 +27,15 @@ import ( type CatchpointFileHeader struct { _struct struct{} `codec:",omitempty,omitemptyarray"` - Version uint64 `codec:"version"` - BalancesRound basics.Round `codec:"balancesRound"` - BlocksRound basics.Round `codec:"blocksRound"` - Totals ledgercore.AccountTotals `codec:"accountTotals"` - TotalAccounts uint64 `codec:"accountsCount"` - TotalChunks uint64 `codec:"chunksCount"` - TotalKVs uint64 `codec:"kvsCount"` - Catchpoint string `codec:"catchpoint"` - BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"` + Version uint64 `codec:"version"` + BalancesRound basics.Round `codec:"balancesRound"` + BlocksRound basics.Round `codec:"blocksRound"` + Totals ledgercore.AccountTotals `codec:"accountTotals"` + TotalAccounts uint64 `codec:"accountsCount"` + TotalChunks 
uint64 `codec:"chunksCount"` + TotalKVs uint64 `codec:"kvsCount"` + TotalOnlineAccounts uint64 `codec:"onlineAccountsCount"` + TotalOnlineRoundParams uint64 `codec:"onlineRoundParamsCount"` + Catchpoint string `codec:"catchpoint"` + BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"` } diff --git a/ledger/catchpointfilewriter.go b/ledger/catchpointfilewriter.go index cd58606a2f..01e78a59eb 100644 --- a/ledger/catchpointfilewriter.go +++ b/ledger/catchpointfilewriter.go @@ -51,33 +51,29 @@ const ( // the writing is complete. It might take multiple steps until the operation is over, and the caller // has the option of throttling the CPU utilization in between the calls. type catchpointFileWriter struct { - ctx context.Context - tx trackerdb.SnapshotScope - filePath string - totalAccounts uint64 - totalKVs uint64 - file *os.File - tar *tar.Writer - compressor io.WriteCloser - chunk catchpointFileChunkV6 - chunkNum uint64 - writtenBytes int64 - biggestChunkLen uint64 - accountsIterator accountsBatchIter - maxResourcesPerChunk int - accountsDone bool - kvRows kvIter -} - -type kvIter interface { - Next() bool - KeyValue() ([]byte, []byte, error) - Close() -} - -type accountsBatchIter interface { - Next(ctx context.Context, accountCount int, resourceCount int) ([]encoded.BalanceRecordV6, uint64, error) - Close() + ctx context.Context + tx trackerdb.SnapshotScope + filePath string + totalAccounts uint64 + totalKVs uint64 + totalOnlineAccounts uint64 + totalOnlineRoundParams uint64 + file *os.File + tar *tar.Writer + compressor io.WriteCloser + chunk catchpointFileChunkV6 + chunkNum uint64 + writtenBytes int64 + biggestChunkLen uint64 + accountsIterator trackerdb.EncodedAccountsBatchIter + maxResourcesPerChunk int + accountsDone bool + kvRows trackerdb.KVsIter + kvDone bool + onlineAccountRows trackerdb.TableIterator[*encoded.OnlineAccountRecordV6] + onlineAccountsDone bool + onlineRoundParamsRows trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6] + 
onlineRoundParamsDone bool } type catchpointFileBalancesChunkV5 struct { @@ -88,13 +84,15 @@ type catchpointFileBalancesChunkV5 struct { type catchpointFileChunkV6 struct { _struct struct{} `codec:",omitempty,omitemptyarray"` - Balances []encoded.BalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"` - numAccounts uint64 - KVs []encoded.KVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"` + Balances []encoded.BalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"` + numAccounts uint64 + KVs []encoded.KVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"` + OnlineAccounts []encoded.OnlineAccountRecordV6 `codec:"oa,allocbound=BalancesPerCatchpointFileChunk"` + OnlineRoundParams []encoded.OnlineRoundParamsRecordV6 `codec:"orp,allocbound=BalancesPerCatchpointFileChunk"` } func (chunk catchpointFileChunkV6) empty() bool { - return len(chunk.Balances) == 0 && len(chunk.KVs) == 0 + return len(chunk.Balances) == 0 && len(chunk.KVs) == 0 && len(chunk.OnlineAccounts) == 0 && len(chunk.OnlineRoundParams) == 0 } type catchpointStateProofVerificationContext struct { @@ -122,6 +120,16 @@ func makeCatchpointFileWriter(ctx context.Context, filePath string, tx trackerdb return nil, err } + totalOnlineAccounts, err := aw.TotalOnlineAccountRows(ctx) + if err != nil { + return nil, err + } + + totalOnlineRoundParams, err := aw.TotalOnlineRoundParams(ctx) + if err != nil { + return nil, err + } + err = os.MkdirAll(filepath.Dir(filePath), 0700) if err != nil { return nil, err @@ -137,16 +145,18 @@ func makeCatchpointFileWriter(ctx context.Context, filePath string, tx trackerdb tar := tar.NewWriter(compressor) res := &catchpointFileWriter{ - ctx: ctx, - tx: tx, - filePath: filePath, - totalAccounts: totalAccounts, - totalKVs: totalKVs, - file: file, - compressor: compressor, - tar: tar, - accountsIterator: tx.MakeEncodedAccoutsBatchIter(), - maxResourcesPerChunk: maxResourcesPerChunk, + ctx: ctx, + tx: tx, + filePath: filePath, + 
totalAccounts: totalAccounts, + totalKVs: totalKVs, + totalOnlineAccounts: totalOnlineAccounts, + totalOnlineRoundParams: totalOnlineRoundParams, + file: file, + compressor: compressor, + tar: tar, + accountsIterator: tx.MakeEncodedAccountsBatchIter(), + maxResourcesPerChunk: maxResourcesPerChunk, } return res, nil } @@ -233,6 +243,14 @@ func (cw *catchpointFileWriter) FileWriteStep(stepCtx context.Context) (more boo cw.kvRows.Close() cw.kvRows = nil } + if cw.onlineAccountRows != nil { + cw.onlineAccountRows.Close() + cw.onlineAccountRows = nil + } + if cw.onlineRoundParamsRows != nil { + cw.onlineRoundParamsRows.Close() + cw.onlineRoundParamsRows = nil + } } }() @@ -323,27 +341,94 @@ func (cw *catchpointFileWriter) readDatabaseStep(ctx context.Context) error { cw.accountsDone = true } - // Create the *Rows iterator JIT - if cw.kvRows == nil { - rows, err := cw.tx.MakeKVsIter(ctx) - if err != nil { - return err + // Create the kvRows iterator JIT + if !cw.kvDone { + if cw.kvRows == nil { + rows, err := cw.tx.MakeKVsIter(ctx) + if err != nil { + return err + } + cw.kvRows = rows + } + + kvrs := make([]encoded.KVRecordV6, 0, BalancesPerCatchpointFileChunk) + for cw.kvRows.Next() { + k, v, err := cw.kvRows.KeyValue() + if err != nil { + return err + } + kvrs = append(kvrs, encoded.KVRecordV6{Key: k, Value: v}) + if len(kvrs) == BalancesPerCatchpointFileChunk { + break + } + } + if len(kvrs) > 0 { + cw.chunk = catchpointFileChunkV6{KVs: kvrs} + return nil } - cw.kvRows = rows + // Do not close kvRows here, or it will start over on the next iteration + cw.kvDone = true } - kvrs := make([]encoded.KVRecordV6, 0, BalancesPerCatchpointFileChunk) - for cw.kvRows.Next() { - k, v, err := cw.kvRows.KeyValue() - if err != nil { - return err + if !cw.onlineAccountsDone { + // Create the OnlineAccounts iterator JIT + if cw.onlineAccountRows == nil { + rows, err := cw.tx.MakeOnlineAccountsIter(ctx) + if err != nil { + return err + } + cw.onlineAccountRows = rows } - kvrs = 
append(kvrs, encoded.KVRecordV6{Key: k, Value: v}) - if len(kvrs) == BalancesPerCatchpointFileChunk { - break + + onlineAccts := make([]encoded.OnlineAccountRecordV6, 0, BalancesPerCatchpointFileChunk) + for cw.onlineAccountRows.Next() { + oa, err := cw.onlineAccountRows.GetItem() + if err != nil { + return err + } + onlineAccts = append(onlineAccts, *oa) + if len(onlineAccts) == BalancesPerCatchpointFileChunk { + break + } + } + if len(onlineAccts) > 0 { + cw.chunk = catchpointFileChunkV6{OnlineAccounts: onlineAccts} + return nil + } + // Do not close onlineAccountRows here, or it will start over on the next iteration + cw.onlineAccountsDone = true + } + + if !cw.onlineRoundParamsDone { + // Create the OnlineRoundParams iterator JIT + if cw.onlineRoundParamsRows == nil { + rows, err := cw.tx.MakeOnlineRoundParamsIter(ctx) + if err != nil { + return err + } + cw.onlineRoundParamsRows = rows } + + onlineRndParams := make([]encoded.OnlineRoundParamsRecordV6, 0, BalancesPerCatchpointFileChunk) + for cw.onlineRoundParamsRows.Next() { + or, err := cw.onlineRoundParamsRows.GetItem() + if err != nil { + return err + } + onlineRndParams = append(onlineRndParams, *or) + if len(onlineRndParams) == BalancesPerCatchpointFileChunk { + break + } + } + if len(onlineRndParams) > 0 { + cw.chunk = catchpointFileChunkV6{OnlineRoundParams: onlineRndParams} + return nil + } + // Do not close onlineRndParamsRows here, or it will start over on the next iteration + cw.onlineRoundParamsDone = true } - cw.chunk = catchpointFileChunkV6{KVs: kvrs} + + // Finished the last chunk return nil } diff --git a/ledger/catchpointfilewriter_test.go b/ledger/catchpointfilewriter_test.go index 499adeedc8..553942ad0b 100644 --- a/ledger/catchpointfilewriter_test.go +++ b/ledger/catchpointfilewriter_test.go @@ -22,6 +22,7 @@ import ( "compress/gzip" "context" "database/sql" + "encoding/binary" "fmt" "io" "os" @@ -297,8 +298,7 @@ func TestBasicCatchpointWriter(t *testing.T) { } func testWriteCatchpoint(t 
*testing.T, rdb trackerdb.Store, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader { - var totalAccounts uint64 - var totalChunks uint64 + var totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks uint64 var biggestChunkLen uint64 var accountsRnd basics.Round var totals ledgercore.AccountTotals @@ -333,6 +333,9 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.Store, datapath string, fil } } totalAccounts = writer.totalAccounts + totalKVs = writer.totalKVs + totalOnlineAccounts = writer.totalOnlineAccounts + totalOnlineRoundParams = writer.totalOnlineRoundParams totalChunks = writer.chunkNum biggestChunkLen = writer.biggestChunkLen accountsRnd, err = ar.AccountsRound() @@ -347,14 +350,17 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.Store, datapath string, fil blockHeaderDigest := crypto.Hash([]byte{1, 2, 3}) catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test catchpointFileHeader := CatchpointFileHeader{ - Version: CatchpointFileVersionV7, - BalancesRound: accountsRnd, - BlocksRound: blocksRound, - Totals: totals, - TotalAccounts: totalAccounts, - TotalChunks: totalChunks, - Catchpoint: catchpointLabel, - BlockHeaderDigest: blockHeaderDigest, + Version: CatchpointFileVersionV8, + BalancesRound: accountsRnd, + BlocksRound: blocksRound, + Totals: totals, + TotalAccounts: totalAccounts, + TotalKVs: totalKVs, + TotalOnlineAccounts: totalOnlineAccounts, + TotalOnlineRoundParams: totalOnlineRoundParams, + TotalChunks: totalChunks, + Catchpoint: catchpointLabel, + BlockHeaderDigest: blockHeaderDigest, } err = repackCatchpoint( context.Background(), catchpointFileHeader, biggestChunkLen, @@ -611,8 +617,10 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.Equal(t, basics.Round(0), validThrough) } - err = l.reloadLedger() - require.NoError(t, err) + // TODO: uncomment if 
we want to test re-initializing the ledger fully + // currently this doesn't work, because reloadLedger runs migrations like txtail that require a working block DB + //err = l.reloadLedger() + //require.NoError(t, err) // now manually construct the MT and ensure the reading makeOrderedAccountsIter works as expected: // no errors on read, hashes match @@ -697,24 +705,41 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess tracke var catchupProgress CatchpointCatchupAccessorProgress catchpointContent := readCatchpointFile(t, filepath) + var balancesRound basics.Round for _, catchpointData := range catchpointContent { + // get BalancesRound from header and use it to set the DB round + if catchpointData.headerName == CatchpointContentFileName { + var fileheader CatchpointFileHeader + err = protocol.Decode(catchpointData.data, &fileheader) + require.NoError(t, err) + balancesRound = fileheader.BalancesRound + } err = accessor.ProcessStagingBalances(context.Background(), catchpointData.headerName, catchpointData.data, &catchupProgress) require.NoError(t, err) } + require.NotZero(t, balancesRound, "no balances round found in test catchpoint file") + + // TODO: uncomment if we want to test re-initializing the ledger fully, by setting the balances round (DB round) + // for use by the trackers and migrations. However the txtail migration requires a working block DB, which most + // of these catchpoint tests don't copy over when saving/restoring. + // + // // Manually set the balances round. In regular catchpoint restore, this is set by StoreBalancesRound + // // when the first block is downloaded. 
+ // err = l.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + // crw, err := tx.MakeCatchpointWriter() + // require.NoError(t, err) + + // err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound, uint64(balancesRound)) + // require.NoError(t, err) + // return nil + // }) + // require.NoError(t, err) err = accessor.BuildMerkleTrie(context.Background(), nil) require.NoError(t, err) - resetAccountDBToV6(t, l) - - err = l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { - cw, err := tx.MakeCatchpointWriter() - if err != nil { - return err - } - - return cw.ApplyCatchpointStagingBalances(ctx, 0, 0) - }) + // Initializes DB, runs migrations, runs ApplyCatchpointStagingBalances + err = accessor.(*catchpointCatchupAccessorImpl).finishBalances(context.Background()) require.NoError(t, err) balanceTrieStats := func(db trackerdb.Store) merkletrie.Stats { @@ -790,6 +815,20 @@ func TestFullCatchpointWriter(t *testing.T) { } } +// ensure both committed all pending changes before taking a catchpoint +// another approach is to modify the test and craft round numbers, +// and make the ledger to generate catchpoint itself when it is time +func testCatchpointFlushRound(l *Ledger) { + // Clear the timer to ensure a flush + l.trackers.mu.Lock() + l.trackers.lastFlushTime = time.Time{} + l.trackers.mu.Unlock() + + r, _ := l.LatestCommitted() + l.trackers.committedUpTo(r) + l.trackers.waitAccountsWriting() +} + func TestExactAccountChunk(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -821,21 +860,8 @@ func TestExactAccountChunk(t *testing.T) { dl.fullBlock(&selfpay) } - // ensure both committed all pending changes before taking a catchpoint - // another approach is to modify the test and craft round numbers, - // and make the ledger to generate catchpoint itself when it is time - flushRound := func(l *Ledger) { - // Clear the timer to ensure a flush - 
l.trackers.mu.Lock() - l.trackers.lastFlushTime = time.Time{} - l.trackers.mu.Unlock() - - r, _ := l.LatestCommitted() - l.trackers.committedUpTo(r) - l.trackers.waitAccountsWriting() - } - flushRound(dl.generator) - flushRound(dl.validator) + testCatchpointFlushRound(dl.generator) + testCatchpointFlushRound(dl.validator) require.Eventually(t, func() bool { dl.generator.accts.accountsMu.RLock() @@ -854,7 +880,7 @@ func TestExactAccountChunk(t *testing.T) { catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") cph := testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, cph.TotalChunks, 1) + require.EqualValues(t, cph.TotalChunks, 2) l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() @@ -906,7 +932,7 @@ func TestCatchpointAfterTxns(t *testing.T) { catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") cph := testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, 2, cph.TotalChunks) + require.EqualValues(t, 3, cph.TotalChunks) l := testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), catchpointFilePath) defer l.Close() @@ -922,7 +948,7 @@ func TestCatchpointAfterTxns(t *testing.T) { // Write and read back in, and ensure even the last effect exists. cph = testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, cph.TotalChunks, 2) // Still only 2 chunks, as last was in a recent block + require.EqualValues(t, cph.TotalChunks, 3) // Still only 3 chunks, as last was in a recent block // Drive home the point that `last` is _not_ included in the catchpoint by inspecting balance read from catchpoint. 
{ @@ -938,7 +964,7 @@ func TestCatchpointAfterTxns(t *testing.T) { } cph = testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, cph.TotalChunks, 3) + require.EqualValues(t, cph.TotalChunks, 4) l = testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), catchpointFilePath) defer l.Close() @@ -959,6 +985,169 @@ func TestCatchpointAfterTxns(t *testing.T) { } } +func TestCatchpointAfterStakeLookupTxns(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + genBalances, addrs, _ := ledgertesting.NewTestGenesis(func(cfg *ledgertesting.GenesisCfg) { + cfg.OnlineCount = 1 + ledgertesting.TurnOffRewards(cfg) + }) + cfg := config.GetDefaultLocal() + dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg, simpleLedgerOnDisk()) + defer dl.Close() + + initialStake := uint64(833333333333333) + expectedStake := initialStake + stakeAppSource := main(` +// ensure total online stake matches arg 0 +txn ApplicationArgs 0 +btoi +online_stake +== +assert +// ensure stake for accounts 1 (the only online account) matches arg 0 +txn Accounts 1 +voter_params_get VoterBalance +pop +txn ApplicationArgs 0 +btoi +== +assert +`) + // uses block 1 and 2 + stakeApp := dl.fundedApp(addrs[1], 1_000_000, stakeAppSource) + + // starting with block 3, make an app call and a pay in each block + callStakeApp := func(assertStake uint64) []*txntest.Txn { + stakebuf := make([]byte, 8) + binary.BigEndian.PutUint64(stakebuf, assertStake) + return []*txntest.Txn{ + // assert stake from 320 rounds ago + txntest.Txn{ + Type: "appl", + Sender: addrs[2], + ApplicationID: stakeApp, + Note: ledgertesting.RandomNote(), + Accounts: []basics.Address{addrs[0]}, + }.Args(string(stakebuf)), + // pay 1 microalgo to the only online account (takes effect in 320 rounds) + { + Type: "pay", + Sender: addrs[1], + Receiver: addrs[0], + Amount: 1, + }} + } + + // adds block 3 + vb := dl.fullBlock(callStakeApp(expectedStake)...) 
+ require.Equal(t, vb.Block().Round(), basics.Round(3)) + require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + + // add blocks until round 322, after which stake will go up by 1 each round + for ; vb.Block().Round() < 322; vb = dl.fullBlock(callStakeApp(expectedStake)...) { + require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + + nextRnd := vb.Block().Round() + 1 + stake, err := dl.generator.OnlineCirculation(nextRnd.SubSaturate(320), nextRnd) + require.NoError(t, err) + require.Equal(t, expectedStake, stake.Raw) + } + require.Equal(t, vb.Block().Round(), basics.Round(322)) + + for vb.Block().Round() <= 1500 { + expectedStake++ // add 1 microalgo to the expected stake for the next block + + // the online_stake opcode in block 323 will look up OnlineCirculation(3, 323). + nextRnd := vb.Block().Round() + 1 + stake, err := dl.generator.OnlineCirculation(nextRnd.SubSaturate(320), nextRnd) + require.NoError(t, err) + require.Equal(t, expectedStake, stake.Raw) + + // build a new block for nextRnd, asserting online stake for nextRnd-320 + vb = dl.fullBlock(callStakeApp(expectedStake)...) 
+ require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + } + + // wait for tracker to flush + testCatchpointFlushRound(dl.generator) + testCatchpointFlushRound(dl.validator) + + // ensure flush and latest round all were OK + genDBRound := dl.generator.LatestTrackerCommitted() + valDBRound := dl.validator.LatestTrackerCommitted() + require.NotZero(t, genDBRound) + require.NotZero(t, valDBRound) + require.Equal(t, genDBRound, valDBRound) + require.Equal(t, 1497, int(genDBRound)) + genLatestRound := dl.generator.Latest() + valLatestRound := dl.validator.Latest() + require.NotZero(t, genLatestRound) + require.NotZero(t, valLatestRound) + require.Equal(t, genLatestRound, valLatestRound) + // latest should be 4 rounds ahead of DB round + require.Equal(t, genDBRound+basics.Round(cfg.MaxAcctLookback), genLatestRound) + + t.Log("DB round generator", genDBRound, "validator", valDBRound) + t.Log("Latest round generator", genLatestRound, "validator", valLatestRound) + + genOAHash, genOARows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineAccountsIter) + require.NoError(t, err) + valOAHash, valOARows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineAccountsIter) + require.NoError(t, err) + require.Equal(t, genOAHash, valOAHash) + require.NotZero(t, genOAHash) + require.Equal(t, genOARows, valOARows) + require.NotZero(t, genOARows) + + genORPHash, genORPRows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineRoundParamsIter) + require.NoError(t, err) + valORPHash, valORPRows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineRoundParamsIter) + require.NoError(t, err) + require.Equal(t, genORPHash, valORPHash) + require.NotZero(t, genORPHash) + require.Equal(t, genORPRows, valORPRows) + require.NotZero(t, genORPRows) + + tempDir := t.TempDir() + 
catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data") + catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") + + cph := testWriteCatchpoint(t, dl.generator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) + require.EqualValues(t, 7, cph.TotalChunks) + + l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) + defer l.Close() + + catchpointOAHash, catchpointOARows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineAccountsIter) + require.NoError(t, err) + require.Equal(t, genOAHash, catchpointOAHash) + t.Log("catchpoint onlineaccounts hash", catchpointOAHash, "matches") + require.Equal(t, genOARows, catchpointOARows) + + catchpointORPHash, catchpointORPRows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineRoundParamsIter) + require.NoError(t, err) + require.Equal(t, genORPHash, catchpointORPHash) + t.Log("catchpoint onlineroundparams hash", catchpointORPHash, "matches") + require.Equal(t, genORPRows, catchpointORPRows) + + oar, err := l.trackerDBs.MakeOnlineAccountsOptimizedReader() + require.NoError(t, err) + + for i := genDBRound; i >= (genDBRound - 1000); i-- { + oad, err := oar.LookupOnline(addrs[0], basics.Round(i)) + require.NoError(t, err) + // block 3 started paying 1 microalgo to addrs[0] per round + expected := initialStake + uint64(i) - 2 + require.Equal(t, expected, oad.AccountData.MicroAlgos.Raw) + } + +} + // Exercises a sequence of box modifications that caused a bug in // catchpoint writes. 
// @@ -1028,7 +1217,7 @@ func TestCatchpointAfterBoxTxns(t *testing.T) { catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") cph := testWriteCatchpoint(t, dl.generator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, 2, cph.TotalChunks) + require.EqualValues(t, 3, cph.TotalChunks) l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index df7772de53..8adbdc8dfb 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -64,6 +64,10 @@ const ( // CatchpointFileVersionV7 is the catchpoint file version that is matching database schema V10. // This version introduced state proof verification data and versioning for CatchpointLabel. CatchpointFileVersionV7 = uint64(0202) + // CatchpointFileVersionV8 is the catchpoint file version that includes V6 and V7 data, as well + // as historical onlineaccounts and onlineroundparamstail table data (added in DB version V7, + // but until this version initialized with current round data, not 320 rounds of historical info). 
+ CatchpointFileVersionV8 = uint64(0203) // CatchpointContentFileName is a name of a file with catchpoint header info inside tar archive CatchpointContentFileName = "content.msgpack" @@ -212,13 +216,13 @@ func (ct *catchpointTracker) getSPVerificationData() (encodedData []byte, spVeri func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, blockProto protocol.ConsensusVersion, updatingBalancesDuration time.Duration) error { ct.log.Infof("finishing catchpoint's first stage dbRound: %d", dbRound) - var totalKVs uint64 - var totalAccounts uint64 + var totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams uint64 var totalChunks uint64 var biggestChunkLen uint64 var spVerificationHash crypto.Digest var spVerificationEncodedData []byte var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails + var onlineAccountsHash, onlineRoundParamsHash crypto.Digest params := config.Consensus[blockProto] if params.EnableCatchpointsWithSPContexts { @@ -230,6 +234,26 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic return err } } + if params.EnableCatchpointsWithOnlineAccounts { + // Generate hashes of the onlineaccounts and onlineroundparams tables. + err := ct.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error { + var dbErr error + onlineAccountsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter) + if dbErr != nil { + return dbErr + + } + + onlineRoundParamsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter) + if dbErr != nil { + return dbErr + } + return nil + }) + if err != nil { + return err + } + } if ct.enableGeneratingCatchpointFiles { // Generate the catchpoint file. 
This is done inline so that it will @@ -239,7 +263,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic var err error catchpointGenerationStats.BalancesWriteTime = uint64(updatingBalancesDuration.Nanoseconds()) - totalKVs, totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData( + totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen, err = ct.generateCatchpointData( ctx, dbRound, &catchpointGenerationStats, spVerificationEncodedData) ct.catchpointDataWriting.Store(0) if err != nil { @@ -253,7 +277,9 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic return err } - err = ct.recordFirstStageInfo(ctx, tx, &catchpointGenerationStats, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen, spVerificationHash) + err = ct.recordFirstStageInfo(ctx, tx, &catchpointGenerationStats, dbRound, + totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen, + spVerificationHash, onlineAccountsHash, onlineRoundParamsHash) if err != nil { return err } @@ -764,8 +790,14 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound var labelMaker ledgercore.CatchpointLabelMaker var version uint64 params := config.Consensus[blockProto] - if params.EnableCatchpointsWithSPContexts { - labelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash) + if params.EnableCatchpointsWithOnlineAccounts { + if !params.EnableCatchpointsWithSPContexts { + return fmt.Errorf("invalid params for catchpoint file version v8: SP contexts not enabled") + } + labelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash, &dataInfo.OnlineAccountsHash, &dataInfo.OnlineRoundParamsHash) + version = CatchpointFileVersionV8 + } 
else if params.EnableCatchpointsWithSPContexts { + labelMaker = ledgercore.MakeCatchpointLabelMakerV7(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash) version = CatchpointFileVersionV7 } else { labelMaker = ledgercore.MakeCatchpointLabelMakerV6(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals) @@ -806,15 +838,17 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound // Make a catchpoint file. header := CatchpointFileHeader{ - Version: version, - BalancesRound: accountsRound, - BlocksRound: round, - Totals: dataInfo.Totals, - TotalAccounts: dataInfo.TotalAccounts, - TotalKVs: dataInfo.TotalKVs, - TotalChunks: dataInfo.TotalChunks, - Catchpoint: label, - BlockHeaderDigest: blockHash, + Version: version, + BalancesRound: accountsRound, + BlocksRound: round, + Totals: dataInfo.Totals, + TotalAccounts: dataInfo.TotalAccounts, + TotalKVs: dataInfo.TotalKVs, + TotalOnlineAccounts: dataInfo.TotalOnlineAccounts, + TotalOnlineRoundParams: dataInfo.TotalOnlineRoundParams, + TotalChunks: dataInfo.TotalChunks, + Catchpoint: label, + BlockHeaderDigest: blockHash, } relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) @@ -855,6 +889,8 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound With("writingDuration", uint64(time.Since(startTime).Nanoseconds())). With("accountsCount", dataInfo.TotalAccounts). With("kvsCount", dataInfo.TotalKVs). + With("onlineAccountsCount", dataInfo.TotalOnlineAccounts). + With("onlineRoundParamsCount", dataInfo.TotalOnlineRoundParams). With("fileSize", fileInfo.Size()). With("filepath", relCatchpointFilePath). With("catchpointLabel", label). @@ -1173,7 +1209,7 @@ func (ct *catchpointTracker) isWritingCatchpointDataFile() bool { // - Balance and KV chunk (named balances.x.msgpack). // ... // - Balance and KV chunk (named balances.x.msgpack). 
-func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, encodedSPData []byte) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, err error) { +func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, encodedSPData []byte) (totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen uint64, err error) { ct.log.Debugf("catchpointTracker.generateCatchpointData() writing catchpoint accounts for round %d", accountsRound) startTime := time.Now() @@ -1261,19 +1297,25 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil { ct.log.Warnf("catchpointTracker.generateCatchpointData() %v", err) - return 0, 0, 0, 0, err + return 0, 0, 0, 0, 0, 0, err } catchpointGenerationStats.FileSize = uint64(catchpointWriter.writtenBytes) catchpointGenerationStats.WritingDuration = uint64(time.Since(startTime).Nanoseconds()) catchpointGenerationStats.AccountsCount = catchpointWriter.totalAccounts catchpointGenerationStats.KVsCount = catchpointWriter.totalKVs + catchpointGenerationStats.OnlineAccountsCount = catchpointWriter.totalOnlineAccounts + catchpointGenerationStats.OnlineRoundParamsCount = catchpointWriter.totalOnlineRoundParams catchpointGenerationStats.AccountsRound = uint64(accountsRound) - return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil + return catchpointWriter.totalAccounts, catchpointWriter.totalKVs, catchpointWriter.totalOnlineAccounts, catchpointWriter.totalOnlineRoundParams, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil } -func (ct *catchpointTracker) recordFirstStageInfo(ctx 
context.Context, tx trackerdb.TransactionScope, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64, stateProofVerificationHash crypto.Digest) error { +func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, + catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, + accountsRound basics.Round, + totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen uint64, + stateProofVerificationHash, onlineAccountsVerificationHash, onlineRoundParamsVerificationHash crypto.Digest) error { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -1316,10 +1358,14 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx tracke Totals: accountTotals, TotalAccounts: totalAccounts, TotalKVs: totalKVs, + TotalOnlineAccounts: totalOnlineAccounts, + TotalOnlineRoundParams: totalOnlineRoundParams, TotalChunks: totalChunks, BiggestChunkLen: biggestChunkLen, TrieBalancesHash: trieBalancesHash, StateProofVerificationHash: stateProofVerificationHash, + OnlineAccountsHash: onlineAccountsVerificationHash, + OnlineRoundParamsHash: onlineRoundParamsVerificationHash, } err = cw.InsertOrReplaceCatchpointFirstStageInfo(ctx, accountsRound, &info) diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index da2408946b..ff84cabcb3 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -363,7 +363,7 @@ func createCatchpoint(t *testing.T, ct *catchpointTracker, accountsRound basics. 
require.NoError(t, err) var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails - _, _, _, biggestChunkLen, err := ct.generateCatchpointData( + _, _, _, _, _, biggestChunkLen, err := ct.generateCatchpointData( context.Background(), accountsRound, &catchpointGenerationStats, spVerificationEncodedData) require.NoError(t, err) @@ -1882,10 +1882,6 @@ func TestHashContract(t *testing.T) { func TestCatchpointFastUpdates(t *testing.T) { partitiontest.PartitionTest(t) - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes CI builds to time out") - } - proto := config.Consensus[protocol.ConsensusFuture] accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)} @@ -1925,6 +1921,7 @@ func TestCatchpointFastUpdates(t *testing.T) { wg := sync.WaitGroup{} + lastRound := basics.Round(0) for i := basics.Round(initialBlocksCount); i < basics.Round(proto.CatchpointLookback+15); i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta @@ -1959,10 +1956,21 @@ func TestCatchpointFastUpdates(t *testing.T) { defer wg.Done() ml.trackers.committedUpTo(round) }(i) + lastRound = i } wg.Wait() ml.trackers.waitAccountsWriting() + for ml.trackers.getDbRound() <= basics.Round(proto.CatchpointLookback) { + // db round stuck <= 320? 
likely committedUpTo dropped some commit tasks, due to deferredCommits channel full + // so give it another try + ml.trackers.committedUpTo(lastRound) + require.Eventually(t, func() bool { + //ml.trackers.waitAccountsWriting() + return ml.trackers.getDbRound() > basics.Round(proto.CatchpointLookback) + }, 5*time.Second, 100*time.Millisecond) + } + require.NotEmpty(t, ct.GetLastCatchpointLabel()) } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 315fa6b003..de10baa0e1 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -69,7 +69,7 @@ type CatchpointCatchupAccessor interface { GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint - GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) + GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest, totals ledgercore.AccountTotals, err error) // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. 
VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) @@ -103,6 +103,8 @@ type stagingWriter interface { writeCreatables(context.Context, []trackerdb.NormalizedAccountBalance) error writeHashes(context.Context, []trackerdb.NormalizedAccountBalance) error writeKVs(context.Context, []encoded.KVRecordV6) error + writeOnlineAccounts(context.Context, []encoded.OnlineAccountRecordV6) error + writeOnlineRoundParams(context.Context, []encoded.OnlineRoundParamsRecordV6) error isShared() bool } @@ -165,6 +167,26 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor }) } +func (w *stagingWriterImpl) writeOnlineAccounts(ctx context.Context, accts []encoded.OnlineAccountRecordV6) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + return crw.WriteCatchpointStagingOnlineAccounts(ctx, accts) + }) +} + +func (w *stagingWriterImpl) writeOnlineRoundParams(ctx context.Context, params []encoded.OnlineRoundParamsRecordV6) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + return crw.WriteCatchpointStagingOnlineRoundParams(ctx, params) + }) +} + func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { crw, err := tx.MakeCatchpointReaderWriter() @@ -346,24 +368,30 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context // CatchpointCatchupAccessorProgress is used by the caller of ProcessStagingBalances to obtain progress information type CatchpointCatchupAccessorProgress struct { - TotalAccounts uint64 - ProcessedAccounts uint64 - ProcessedBytes uint64 - TotalKVs uint64 - ProcessedKVs uint64 
- TotalChunks uint64 - SeenHeader bool - Version uint64 - TotalAccountHashes uint64 + TotalAccounts uint64 + ProcessedAccounts uint64 + ProcessedBytes uint64 + TotalKVs uint64 + ProcessedKVs uint64 + TotalOnlineAccounts uint64 + ProcessedOnlineAccounts uint64 + TotalOnlineRoundParams uint64 + ProcessedOnlineRoundParams uint64 + TotalChunks uint64 + SeenHeader bool + Version uint64 + TotalAccountHashes uint64 // Having the cachedTrie here would help to accelerate the catchup process since the trie maintain an internal cache of nodes. // While rebuilding the trie, we don't want to force and reload (some) of these nodes into the cache for each catchpoint file chunk. cachedTrie *merkletrie.Trie - BalancesWriteDuration time.Duration - CreatablesWriteDuration time.Duration - HashesWriteDuration time.Duration - KVWriteDuration time.Duration + BalancesWriteDuration time.Duration + CreatablesWriteDuration time.Duration + HashesWriteDuration time.Duration + KVWriteDuration time.Duration + OnlineAccountsWriteDuration time.Duration + OnlineRoundParamsWriteDuration time.Duration } // ProcessStagingBalances deserialize the given bytes as a temporary staging balances @@ -418,6 +446,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex case CatchpointFileVersionV5: case CatchpointFileVersionV6: case CatchpointFileVersionV7: + case CatchpointFileVersionV8: + default: return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to process catchpoint - version %d is not supported", fileHeader.Version) } @@ -459,6 +489,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex progress.SeenHeader = true progress.TotalAccounts = fileHeader.TotalAccounts progress.TotalKVs = fileHeader.TotalKVs + progress.TotalOnlineAccounts = fileHeader.TotalOnlineAccounts + progress.TotalOnlineRoundParams = fileHeader.TotalOnlineRoundParams progress.TotalChunks = fileHeader.TotalChunks progress.Version = fileHeader.Version @@ 
-480,6 +512,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte var normalizedAccountBalances []trackerdb.NormalizedAccountBalance var expectingMoreEntries []bool var chunkKVs []encoded.KVRecordV6 + var chunkOnlineAccounts []encoded.OnlineAccountRecordV6 + var chunkOnlineRoundParams []encoded.OnlineRoundParamsRecordV6 switch progress.Version { default: @@ -501,16 +535,21 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte expectingMoreEntries = make([]bool, len(balances.Balances)) case CatchpointFileVersionV6: + // V6 split accounts from resources; later, KVs were added to the v6 chunk format fallthrough case CatchpointFileVersionV7: + // V7 added state proof verification data + hash, but left v6 chunk format unchanged + fallthrough + case CatchpointFileVersionV8: + // V8 added online accounts and online round params data + hashes, and added them to the v6 chunk format var chunk catchpointFileChunkV6 err = protocol.Decode(bytes, &chunk) if err != nil { return err } - if len(chunk.Balances) == 0 && len(chunk.KVs) == 0 { - return fmt.Errorf("processStagingBalances received a chunk with no accounts or KVs") + if chunk.empty() { + return fmt.Errorf("processStagingBalances received an empty chunk") } normalizedAccountBalances, err = prepareNormalizedBalancesV6(chunk.Balances, c.ledger.GenesisProto()) @@ -519,6 +558,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte expectingMoreEntries[i] = balance.ExpectingMoreEntries } chunkKVs = chunk.KVs + chunkOnlineAccounts = chunk.OnlineAccounts + chunkOnlineRoundParams = chunk.OnlineRoundParams } if err != nil { @@ -594,14 +635,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte wg := sync.WaitGroup{} - var errBalances error - var errCreatables error - var errHashes error - var errKVs error - var durBalances time.Duration - var durCreatables time.Duration - var durHashes time.Duration - var 
durKVs time.Duration + var errBalances, errCreatables, errHashes, errKVs, errOnlineAccounts, errOnlineRoundParams error + var durBalances, durCreatables, durHashes, durKVs, durOnlineAccounts, durOnlineRoundParams time.Duration // start the balances writer wg.Add(1) @@ -666,6 +701,26 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte durKVs = time.Since(writeKVsStart) }() + // start the online accounts writer + wg.Add(1) + go func() { + defer wg.Done() + + writeOnlineAccountsStart := time.Now() + errOnlineAccounts = c.stagingWriter.writeOnlineAccounts(ctx, chunkOnlineAccounts) + durOnlineAccounts = time.Since(writeOnlineAccountsStart) + }() + + // start the rounds params writer + wg.Add(1) + go func() { + defer wg.Done() + + writeOnlineRoundParamsStart := time.Now() + errOnlineRoundParams = c.stagingWriter.writeOnlineRoundParams(ctx, chunkOnlineRoundParams) + durOnlineRoundParams = time.Since(writeOnlineRoundParamsStart) + }() + wg.Wait() if errBalances != nil { @@ -680,15 +735,25 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte if errKVs != nil { return errKVs } + if errOnlineAccounts != nil { + return errOnlineAccounts + } + if errOnlineRoundParams != nil { + return errOnlineRoundParams + } progress.BalancesWriteDuration += durBalances progress.CreatablesWriteDuration += durCreatables progress.HashesWriteDuration += durHashes progress.KVWriteDuration += durKVs + progress.OnlineAccountsWriteDuration += durOnlineAccounts + progress.OnlineRoundParamsWriteDuration += durOnlineRoundParams ledgerProcessstagingbalancesMicros.AddMicrosecondsSince(start, nil) progress.ProcessedBytes += uint64(len(bytes)) progress.ProcessedKVs += uint64(len(chunkKVs)) + progress.ProcessedOnlineAccounts += uint64(len(chunkOnlineAccounts)) + progress.ProcessedOnlineRoundParams += uint64(len(chunkOnlineRoundParams)) for _, acctBal := range normalizedAccountBalances { progress.TotalAccountHashes += 
uint64(len(acctBal.AccountHashes)) if !acctBal.PartialBalance { @@ -721,7 +786,7 @@ func countHashes(hashes [][]byte) (accountCount, kvCount uint64) { accountCount++ } } - return accountCount, kvCount + return } // BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie @@ -931,7 +996,7 @@ func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context return basics.Round(iRound), nil } -func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) { +func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest, totals ledgercore.AccountTotals, err error) { var rawStateProofVerificationContext []ledgercore.StateProofVerificationContext err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { @@ -966,16 +1031,61 @@ func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (bala return fmt.Errorf("unable to get state proof verification data: %v", err) } + onlineAccountsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter) + if err != nil { + return fmt.Errorf("unable to get online accounts verification data: %v", err) + } + + onlineRoundParamsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter) + if err != nil { + return fmt.Errorf("unable to get online round params verification data: %v", err) + } + return }) if err != nil { - return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, err + return crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, err } wrappedContext := catchpointStateProofVerificationContext{Data: rawStateProofVerificationContext} spverHash = crypto.HashObj(wrappedContext) - return balancesHash, spverHash, 
totals, err + return balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash, totals, nil +} + +// calculateVerificationHash iterates over a TableIterator, hashes each item, and returns a hash of +// all the concatenated item hashes. It is used to verify onlineaccounts and onlineroundparams tables, +// both at restore time (in catchpointCatchupAccessorImpl) and snapshot time (in catchpointTracker). +func calculateVerificationHash[T crypto.Hashable]( + ctx context.Context, + iterFactory func(context.Context) (trackerdb.TableIterator[T], error), +) (crypto.Digest, uint64, error) { + + rows, err := iterFactory(ctx) + if err != nil { + return crypto.Digest{}, 0, err + } + defer rows.Close() + hasher := crypto.HashFactory{HashType: crypto.Sha512_256}.NewHash() + cnt := uint64(0) + for rows.Next() { + item, err := rows.GetItem() + if err != nil { + return crypto.Digest{}, 0, err + } + + h := crypto.HashObj(item) + _, err = hasher.Write(h[:]) + if err != nil { + return crypto.Digest{}, 0, err + } + cnt++ + } + ret := hasher.Sum(nil) + if len(ret) != crypto.DigestSize { + return crypto.Digest{}, 0, fmt.Errorf("unexpected hash size: %d", len(ret)) + } + return crypto.Digest(ret), cnt, nil } // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. 
@@ -1003,7 +1113,7 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl start := time.Now() ledgerVerifycatchpointCount.Inc(nil) - balancesHash, spVerificationHash, totals, err := c.GetVerifyData(ctx) + balancesHash, spVerificationHash, onlineAccountsHash, onlineRoundParamsHash, totals, err := c.GetVerifyData(ctx) ledgerVerifycatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil { return err @@ -1016,8 +1126,12 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl blockDigest := blk.Digest() if version <= CatchpointFileVersionV6 { catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerV6(blockRound, &blockDigest, &balancesHash, totals) + } else if version == CatchpointFileVersionV7 { + catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerV7(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash) + } else if version == CatchpointFileVersionV8 { + catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash, &onlineAccountsHash, &onlineRoundParamsHash) } else { - catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash) + return fmt.Errorf("unable to verify catchpoint - version %d not supported", version) } generatedLabel := ledgercore.MakeLabel(catchpointLabelMaker) @@ -1155,7 +1269,7 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } - var balancesRound, hashRound uint64 + var balancesRound, hashRound, catchpointFileVersion uint64 var totals ledgercore.AccountTotals balancesRound, err = crw.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound) @@ -1168,6 +1282,11 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } + catchpointFileVersion, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, 
trackerdb.CatchpointStateCatchupVersion) + if err != nil { + return fmt.Errorf("unable to retrieve catchpoint version: %v", err) + } + totals, err = ar.AccountsTotals(ctx, true) if err != nil { return err @@ -1190,20 +1309,20 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err if err != nil { return err } - { - tp := trackerdb.Params{ - InitAccounts: c.ledger.GenesisAccounts(), - InitProto: c.ledger.GenesisProtoVersion(), - GenesisHash: c.ledger.GenesisHash(), - FromCatchpoint: true, - CatchpointEnabled: c.ledger.catchpoint.catchpointEnabled(), - DbPathPrefix: c.ledger.catchpoint.dbDirectory, - BlockDb: c.ledger.blockDBs, - } - _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 6 /*target database version*/) - if err != nil { - return err - } + + tp := trackerdb.Params{ + InitAccounts: c.ledger.GenesisAccounts(), + InitProto: c.ledger.GenesisProtoVersion(), + GenesisHash: c.ledger.GenesisHash(), + FromCatchpoint: true, + CatchpointEnabled: c.ledger.catchpoint.catchpointEnabled(), + DbPathPrefix: c.ledger.catchpoint.dbDirectory, + BlockDb: c.ledger.blockDBs, + } + // Upgrade to v6 + _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 6 /*target database version*/) + if err != nil { + return err } err = crw.ApplyCatchpointStagingBalances(ctx, basics.Round(balancesRound), basics.Round(hashRound)) @@ -1211,6 +1330,20 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } + if catchpointFileVersion == CatchpointFileVersionV8 { // This catchpoint contains onlineaccounts and onlineroundparamstail tables. + // Upgrade to v7 (which adds the onlineaccounts & onlineroundparamstail tables, among others) + _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 7) + if err != nil { + return err + } + + // Now that we have upgraded to v7, replace the onlineaccounts and onlineroundparamstail with the staged catchpoint tables. 
+ err = crw.ApplyCatchpointStagingTablesV7(ctx) + if err != nil { + return err + } + } + err = aw.AccountsPutTotals(totals, false) if err != nil { return err diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go index 37f27c6794..50ae986faa 100644 --- a/ledger/catchupaccessor_test.go +++ b/ledger/catchupaccessor_test.go @@ -541,6 +541,14 @@ func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encoded.KVRecor return nil } +func (w *testStagingWriter) writeOnlineAccounts(ctx context.Context, accounts []encoded.OnlineAccountRecordV6) error { + return nil +} + +func (w *testStagingWriter) writeOnlineRoundParams(ctx context.Context, params []encoded.OnlineRoundParamsRecordV6) error { + return nil +} + func (w *testStagingWriter) writeHashes(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { for _, bal := range balances { for _, hash := range bal.AccountHashes { diff --git a/ledger/encoded/msgp_gen.go b/ledger/encoded/msgp_gen.go index cc2422ded0..58de0bc8a1 100644 --- a/ledger/encoded/msgp_gen.go +++ b/ledger/encoded/msgp_gen.go @@ -41,6 +41,26 @@ import ( // |-----> (*) MsgIsZero // |-----> KVRecordV6MaxSize() // +// OnlineAccountRecordV6 +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> OnlineAccountRecordV6MaxSize() +// +// OnlineRoundParamsRecordV6 +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> OnlineRoundParamsRecordV6MaxSize() +// // MarshalMsg implements msgp.Marshaler func (z *BalanceRecordV5) MarshalMsg(b []byte) (o []byte) { @@ -645,3 +665,360 @@ func KVRecordV6MaxSize() (s int) { s = 1 + 2 + msgp.BytesPrefixSize + KVRecordV6MaxKeyLength + 2 + msgp.BytesPrefixSize + 
KVRecordV6MaxValueLength return } + +// MarshalMsg implements msgp.Marshaler +func (z *OnlineAccountRecordV6) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 6 bits */ + if (*z).Address.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).Data.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).NormalizedOnlineBalance == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + if (*z).UpdateRound.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x10 + } + if (*z).VoteLastValid.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "addr" + o = append(o, 0xa4, 0x61, 0x64, 0x64, 0x72) + o = (*z).Address.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "data" + o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61) + o = (*z).Data.MarshalMsg(o) + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "nob" + o = append(o, 0xa3, 0x6e, 0x6f, 0x62) + o = msgp.AppendUint64(o, (*z).NormalizedOnlineBalance) + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "upd" + o = append(o, 0xa3, 0x75, 0x70, 0x64) + o = (*z).UpdateRound.MarshalMsg(o) + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "vlv" + o = append(o, 0xa3, 0x76, 0x6c, 0x76) + o = (*z).VoteLastValid.MarshalMsg(o) + } + } + return +} + +func (_ *OnlineAccountRecordV6) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineAccountRecordV6) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *OnlineAccountRecordV6) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, 
err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Address.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Address") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).UpdateRound.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "UpdateRound") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).NormalizedOnlineBalance, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "NormalizedOnlineBalance") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).VoteLastValid.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "VoteLastValid") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Data") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = OnlineAccountRecordV6{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "addr": + bts, err = (*z).Address.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Address") + return + } + case "upd": + bts, err = (*z).UpdateRound.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "UpdateRound") + return + } + case "nob": + (*z).NormalizedOnlineBalance, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, 
"NormalizedOnlineBalance") + return + } + case "vlv": + bts, err = (*z).VoteLastValid.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "VoteLastValid") + return + } + case "data": + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *OnlineAccountRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *OnlineAccountRecordV6) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineAccountRecordV6) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *OnlineAccountRecordV6) Msgsize() (s int) { + s = 1 + 5 + (*z).Address.Msgsize() + 4 + (*z).UpdateRound.Msgsize() + 4 + msgp.Uint64Size + 4 + (*z).VoteLastValid.Msgsize() + 5 + (*z).Data.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *OnlineAccountRecordV6) MsgIsZero() bool { + return ((*z).Address.MsgIsZero()) && ((*z).UpdateRound.MsgIsZero()) && ((*z).NormalizedOnlineBalance == 0) && ((*z).VoteLastValid.MsgIsZero()) && ((*z).Data.MsgIsZero()) +} + +// MaxSize returns a maximum valid message size for this message type +func OnlineAccountRecordV6MaxSize() (s int) { + s = 1 + 5 + basics.AddressMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 4 + basics.RoundMaxSize() + 5 + panic("Unable to determine max size: MaxSize() not implemented for Raw type") + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *OnlineRoundParamsRecordV6) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var zb0001Mask uint8 /* 3 bits */ + if (*z).Data.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + 
} + if (*z).Round.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "data" + o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61) + o = (*z).Data.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "rnd" + o = append(o, 0xa3, 0x72, 0x6e, 0x64) + o = (*z).Round.MarshalMsg(o) + } + } + return +} + +func (_ *OnlineRoundParamsRecordV6) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineRoundParamsRecordV6) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *OnlineRoundParamsRecordV6) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Round.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Round") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Data") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = OnlineRoundParamsRecordV6{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "rnd": + bts, err = 
(*z).Round.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Round") + return + } + case "data": + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *OnlineRoundParamsRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *OnlineRoundParamsRecordV6) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineRoundParamsRecordV6) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *OnlineRoundParamsRecordV6) Msgsize() (s int) { + s = 1 + 4 + (*z).Round.Msgsize() + 5 + (*z).Data.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *OnlineRoundParamsRecordV6) MsgIsZero() bool { + return ((*z).Round.MsgIsZero()) && ((*z).Data.MsgIsZero()) +} + +// MaxSize returns a maximum valid message size for this message type +func OnlineRoundParamsRecordV6MaxSize() (s int) { + s = 1 + 4 + basics.RoundMaxSize() + 5 + panic("Unable to determine max size: MaxSize() not implemented for Raw type") + return +} diff --git a/ledger/encoded/msgp_gen_test.go b/ledger/encoded/msgp_gen_test.go index 415339c728..b905d9616e 100644 --- a/ledger/encoded/msgp_gen_test.go +++ b/ledger/encoded/msgp_gen_test.go @@ -193,3 +193,123 @@ func BenchmarkUnmarshalKVRecordV6(b *testing.B) { } } } + +func TestMarshalUnmarshalOnlineAccountRecordV6(t *testing.T) { + partitiontest.PartitionTest(t) + v := OnlineAccountRecordV6{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + 
t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingOnlineAccountRecordV6(t *testing.T) { + protocol.RunEncodingTest(t, &OnlineAccountRecordV6{}) +} + +func BenchmarkMarshalMsgOnlineAccountRecordV6(b *testing.B) { + v := OnlineAccountRecordV6{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgOnlineAccountRecordV6(b *testing.B) { + v := OnlineAccountRecordV6{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalOnlineAccountRecordV6(b *testing.B) { + v := OnlineAccountRecordV6{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalOnlineRoundParamsRecordV6(t *testing.T) { + partitiontest.PartitionTest(t) + v := OnlineRoundParamsRecordV6{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingOnlineRoundParamsRecordV6(t *testing.T) { + protocol.RunEncodingTest(t, &OnlineRoundParamsRecordV6{}) +} + +func BenchmarkMarshalMsgOnlineRoundParamsRecordV6(b *testing.B) { + v := OnlineRoundParamsRecordV6{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgOnlineRoundParamsRecordV6(b *testing.B) { + v := OnlineRoundParamsRecordV6{} + bts := make([]byte, 0, v.Msgsize()) + bts = 
v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalOnlineRoundParamsRecordV6(b *testing.B) { + v := OnlineRoundParamsRecordV6{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/ledger/encoded/recordsV6.go b/ledger/encoded/recordsV6.go index 520f6f2b8e..2ed4161bf9 100644 --- a/ledger/encoded/recordsV6.go +++ b/ledger/encoded/recordsV6.go @@ -18,6 +18,7 @@ package encoded import ( "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/msgp/msgp" ) @@ -62,3 +63,32 @@ type KVRecordV6 struct { Key []byte `codec:"k,allocbound=KVRecordV6MaxKeyLength"` Value []byte `codec:"v,allocbound=KVRecordV6MaxValueLength"` } + +// OnlineAccountRecordV6 is an encoded row from the onlineaccounts table, used for catchpoint files. +type OnlineAccountRecordV6 struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + Address basics.Address `codec:"addr,allocbound=crypto.DigestSize"` + UpdateRound basics.Round `codec:"upd"` + NormalizedOnlineBalance uint64 `codec:"nob"` + VoteLastValid basics.Round `codec:"vlv"` + Data msgp.Raw `codec:"data"` // encoding of BaseOnlineAccountData +} + +// ToBeHashed implements crypto.Hashable. +func (r OnlineAccountRecordV6) ToBeHashed() (protocol.HashID, []byte) { + return protocol.OnlineAccount, protocol.Encode(&r) +} + +// OnlineRoundParamsRecordV6 is an encoded row from the onlineroundparams table, used for catchpoint files. +type OnlineRoundParamsRecordV6 struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + Round basics.Round `codec:"rnd"` + Data msgp.Raw `codec:"data"` // encoding of OnlineRoundParamsData +} + +// ToBeHashed implements crypto.Hashable. 
+func (r OnlineRoundParamsRecordV6) ToBeHashed() (protocol.HashID, []byte) { + return protocol.OnlineRoundParams, protocol.Encode(&r) +} diff --git a/ledger/ledgercore/catchpointlabel.go b/ledger/ledgercore/catchpointlabel.go index b80a0bc1e2..5a7bf3b0c2 100644 --- a/ledger/ledgercore/catchpointlabel.go +++ b/ledger/ledgercore/catchpointlabel.go @@ -82,32 +82,64 @@ func (l *CatchpointLabelMakerV6) message() string { return fmt.Sprintf("round=%d, block digest=%s, accounts digest=%s", l.ledgerRound, l.ledgerRoundBlockHash, l.balancesMerkleRoot) } -// CatchpointLabelMakerCurrent represent a single catchpoint maker, matching catchpoints of version V7 and above. +// CatchpointLabelMakerCurrent represents a single catchpoint maker, matching catchpoints of version V7 and above. type CatchpointLabelMakerCurrent struct { - v6Label CatchpointLabelMakerV6 - spVerificationHash crypto.Digest + v7Label CatchpointLabelMakerV7 + onlineAccountsHash crypto.Digest + onlineRoundParamsHash crypto.Digest } // MakeCatchpointLabelMakerCurrent creates a catchpoint label given the catchpoint label parameters. func MakeCatchpointLabelMakerCurrent(ledgerRound basics.Round, ledgerRoundBlockHash *crypto.Digest, - balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash *crypto.Digest) *CatchpointLabelMakerCurrent { + balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash, onlineAccountsHash, onlineRoundParamsHash *crypto.Digest) *CatchpointLabelMakerCurrent { return &CatchpointLabelMakerCurrent{ + v7Label: *MakeCatchpointLabelMakerV7(ledgerRound, ledgerRoundBlockHash, balancesMerkleRoot, totals, spVerificationContextHash), + onlineAccountsHash: *onlineAccountsHash, + onlineRoundParamsHash: *onlineRoundParamsHash, + } +} + +func (l *CatchpointLabelMakerCurrent) buffer() []byte { + v6Buffer := l.v7Label.buffer() + v6Buffer = append(v6Buffer, l.onlineAccountsHash[:]...) + v6Buffer = append(v6Buffer, l.onlineRoundParamsHash[:]...) 
+ return v6Buffer +} + +func (l *CatchpointLabelMakerCurrent) round() basics.Round { + return l.v7Label.round() +} + +func (l *CatchpointLabelMakerCurrent) message() string { + return fmt.Sprintf("%s onlineaccts digest=%s onlineroundparams digest=%s", l.v7Label.message(), l.onlineAccountsHash, l.onlineRoundParamsHash) +} + +// CatchpointLabelMakerV7 represents a single catchpoint maker, matching catchpoints of version V7 and above. +type CatchpointLabelMakerV7 struct { + v6Label CatchpointLabelMakerV6 + spVerificationHash crypto.Digest +} + +// MakeCatchpointLabelMakerV7 creates a catchpoint label given the catchpoint label parameters. +func MakeCatchpointLabelMakerV7(ledgerRound basics.Round, ledgerRoundBlockHash *crypto.Digest, + balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash *crypto.Digest) *CatchpointLabelMakerV7 { + return &CatchpointLabelMakerV7{ v6Label: *MakeCatchpointLabelMakerV6(ledgerRound, ledgerRoundBlockHash, balancesMerkleRoot, totals), spVerificationHash: *spVerificationContextHash, } } -func (l *CatchpointLabelMakerCurrent) buffer() []byte { +func (l *CatchpointLabelMakerV7) buffer() []byte { v6Buffer := l.v6Label.buffer() return append(v6Buffer, l.spVerificationHash[:]...) 
} -func (l *CatchpointLabelMakerCurrent) round() basics.Round { +func (l *CatchpointLabelMakerV7) round() basics.Round { return l.v6Label.round() } -func (l *CatchpointLabelMakerCurrent) message() string { +func (l *CatchpointLabelMakerV7) message() string { return fmt.Sprintf("%s spver digest=%s", l.v6Label.message(), l.spVerificationHash) } diff --git a/ledger/ledgercore/catchpointlabel_test.go b/ledger/ledgercore/catchpointlabel_test.go index d5c8a9b0e7..f76e559d30 100644 --- a/ledger/ledgercore/catchpointlabel_test.go +++ b/ledger/ledgercore/catchpointlabel_test.go @@ -51,7 +51,7 @@ func TestUniqueCatchpointLabel(t *testing.T) { for _, balancesMerkleRoot := range balancesMerkleRoots { for _, stateProofVerificationContextHash := range stateProofVerificationContextHashes { for _, total := range totals { - labelMaker := MakeCatchpointLabelMakerCurrent(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) + labelMaker := MakeCatchpointLabelMakerV7(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) labelString := MakeLabel(labelMaker) require.False(t, uniqueSet[labelString]) uniqueSet[labelString] = true @@ -85,7 +85,7 @@ func TestCatchpointLabelParsing(t *testing.T) { for _, balancesMerkleRoot := range balancesMerkleRoots { for _, stateProofVerificationContextHash := range stateProofVerificationContextHashes { for _, total := range totals { - labelMaker := MakeCatchpointLabelMakerCurrent(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) + labelMaker := MakeCatchpointLabelMakerV7(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) labelString := MakeLabel(labelMaker) parsedRound, parsedHash, err := ParseCatchpointLabel(labelString) require.Equal(t, r, parsedRound) diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go index 603c83fdcc..7aee9fd9e6 100644 --- a/ledger/msgp_gen.go +++ b/ledger/msgp_gen.go @@ -127,8 +127,8 @@ func 
CatchpointCatchupStateMaxSize() (s int) { func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(9) - var zb0001Mask uint16 /* 10 bits */ + zb0001Len := uint32(11) + var zb0001Mask uint16 /* 12 bits */ if (*z).Totals.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2 @@ -161,10 +161,18 @@ func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x100 } - if (*z).Version == 0 { + if (*z).TotalOnlineAccounts == 0 { zb0001Len-- zb0001Mask |= 0x200 } + if (*z).TotalOnlineRoundParams == 0 { + zb0001Len-- + zb0001Mask |= 0x400 + } + if (*z).Version == 0 { + zb0001Len-- + zb0001Mask |= 0x800 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -209,6 +217,16 @@ func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) { o = msgp.AppendUint64(o, (*z).TotalKVs) } if (zb0001Mask & 0x200) == 0 { // if not empty + // string "onlineAccountsCount" + o = append(o, 0xb3, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineAccounts) + } + if (zb0001Mask & 0x400) == 0 { // if not empty + // string "onlineRoundParamsCount" + o = append(o, 0xb6, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineRoundParams) + } + if (zb0001Mask & 0x800) == 0 { // if not empty // string "version" o = append(o, 0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) o = msgp.AppendUint64(o, (*z).Version) @@ -296,6 +314,22 @@ func (z *CatchpointFileHeader) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh return } } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineAccounts") + return 
+ } + } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineRoundParams") + return + } + } if zb0001 > 0 { zb0001-- (*z).Catchpoint, bts, err = msgp.ReadStringBytes(bts) @@ -377,6 +411,18 @@ func (z *CatchpointFileHeader) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh err = msgp.WrapError(err, "TotalKVs") return } + case "onlineAccountsCount": + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineAccounts") + return + } + case "onlineRoundParamsCount": + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineRoundParams") + return + } case "catchpoint": (*z).Catchpoint, bts, err = msgp.ReadStringBytes(bts) if err != nil { @@ -412,18 +458,18 @@ func (_ *CatchpointFileHeader) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CatchpointFileHeader) Msgsize() (s int) { - s = 1 + 8 + msgp.Uint64Size + 14 + (*z).BalancesRound.Msgsize() + 12 + (*z).BlocksRound.Msgsize() + 14 + (*z).Totals.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len((*z).Catchpoint) + 18 + (*z).BlockHeaderDigest.Msgsize() + s = 1 + 8 + msgp.Uint64Size + 14 + (*z).BalancesRound.Msgsize() + 12 + (*z).BlocksRound.Msgsize() + 14 + (*z).Totals.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len((*z).Catchpoint) + 18 + (*z).BlockHeaderDigest.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *CatchpointFileHeader) MsgIsZero() bool { - return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) 
&& ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).TotalKVs == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero()) + return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).TotalKVs == 0) && ((*z).TotalOnlineAccounts == 0) && ((*z).TotalOnlineRoundParams == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func CatchpointFileHeaderMaxSize() (s int) { - s = 1 + 8 + msgp.Uint64Size + 14 + basics.RoundMaxSize() + 12 + basics.RoundMaxSize() + 14 + ledgercore.AccountTotalsMaxSize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11 + s = 1 + 8 + msgp.Uint64Size + 14 + basics.RoundMaxSize() + 12 + basics.RoundMaxSize() + 14 + ledgercore.AccountTotalsMaxSize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 11 panic("Unable to determine max size: String type z.Catchpoint is unbounded") s += 18 + crypto.DigestMaxSize() return @@ -607,20 +653,28 @@ func CatchpointFileBalancesChunkV5MaxSize() (s int) { func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0003Len := uint32(2) - var zb0003Mask uint8 /* 4 bits */ + zb0005Len := uint32(4) + var zb0005Mask uint8 /* 6 bits */ if len((*z).Balances) == 0 { - zb0003Len-- - zb0003Mask |= 0x2 + zb0005Len-- + zb0005Mask |= 0x2 } if len((*z).KVs) == 0 { - zb0003Len-- - zb0003Mask |= 0x4 + zb0005Len-- + zb0005Mask |= 0x4 + } + if len((*z).OnlineAccounts) == 0 { + zb0005Len-- + zb0005Mask |= 0x10 } - // variable map header, size zb0003Len - o = append(o, 0x80|uint8(zb0003Len)) - if zb0003Len != 0 { - if (zb0003Mask & 0x2) == 0 { // if not empty + if len((*z).OnlineRoundParams) == 0 { + zb0005Len-- + zb0005Mask 
|= 0x20 + } + // variable map header, size zb0005Len + o = append(o, 0x80|uint8(zb0005Len)) + if zb0005Len != 0 { + if (zb0005Mask & 0x2) == 0 { // if not empty // string "bl" o = append(o, 0xa2, 0x62, 0x6c) if (*z).Balances == nil { @@ -632,7 +686,7 @@ func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) { o = (*z).Balances[zb0001].MarshalMsg(o) } } - if (zb0003Mask & 0x4) == 0 { // if not empty + if (zb0005Mask & 0x4) == 0 { // if not empty // string "kv" o = append(o, 0xa2, 0x6b, 0x76) if (*z).KVs == nil { @@ -644,6 +698,30 @@ func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) { o = (*z).KVs[zb0002].MarshalMsg(o) } } + if (zb0005Mask & 0x10) == 0 { // if not empty + // string "oa" + o = append(o, 0xa2, 0x6f, 0x61) + if (*z).OnlineAccounts == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len((*z).OnlineAccounts))) + } + for zb0003 := range (*z).OnlineAccounts { + o = (*z).OnlineAccounts[zb0003].MarshalMsg(o) + } + } + if (zb0005Mask & 0x20) == 0 { // if not empty + // string "orp" + o = append(o, 0xa3, 0x6f, 0x72, 0x70) + if (*z).OnlineRoundParams == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len((*z).OnlineRoundParams))) + } + for zb0004 := range (*z).OnlineRoundParams { + o = (*z).OnlineRoundParams[zb0004].MarshalMsg(o) + } + } } return } @@ -662,35 +740,35 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars st.AllowableDepth-- var field []byte _ = field - var zb0003 int - var zb0004 bool - zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0005 int + var zb0006 bool + zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) if _, ok := err.(msgp.TypeError); ok { - zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) return } - if zb0003 > 0 { - zb0003-- - var zb0005 int - var zb0006 bool - zb0005, zb0006, bts, err = 
msgp.ReadArrayHeaderBytes(bts) + if zb0005 > 0 { + zb0005-- + var zb0007 int + var zb0008 bool + zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "struct-from-array", "Balances") return } - if zb0005 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0005), uint64(BalancesPerCatchpointFileChunk)) + if zb0007 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0007), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "struct-from-array", "Balances") return } - if zb0006 { + if zb0008 { (*z).Balances = nil - } else if (*z).Balances != nil && cap((*z).Balances) >= zb0005 { - (*z).Balances = ((*z).Balances)[:zb0005] + } else if (*z).Balances != nil && cap((*z).Balances) >= zb0007 { + (*z).Balances = ((*z).Balances)[:zb0007] } else { - (*z).Balances = make([]encoded.BalanceRecordV6, zb0005) + (*z).Balances = make([]encoded.BalanceRecordV6, zb0007) } for zb0001 := range (*z).Balances { bts, err = (*z).Balances[zb0001].UnmarshalMsgWithState(bts, st) @@ -700,26 +778,26 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } } } - if zb0003 > 0 { - zb0003-- - var zb0007 int - var zb0008 bool - zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if zb0005 > 0 { + zb0005-- + var zb0009 int + var zb0010 bool + zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "struct-from-array", "KVs") return } - if zb0007 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0007), uint64(BalancesPerCatchpointFileChunk)) + if zb0009 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0009), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "struct-from-array", "KVs") return } - if zb0008 { + if zb0010 { (*z).KVs = nil - } else if (*z).KVs != nil && cap((*z).KVs) >= zb0007 { - (*z).KVs = ((*z).KVs)[:zb0007] + } else if (*z).KVs != nil && cap((*z).KVs) >= 
zb0009 { + (*z).KVs = ((*z).KVs)[:zb0009] } else { - (*z).KVs = make([]encoded.KVRecordV6, zb0007) + (*z).KVs = make([]encoded.KVRecordV6, zb0009) } for zb0002 := range (*z).KVs { bts, err = (*z).KVs[zb0002].UnmarshalMsgWithState(bts, st) @@ -729,8 +807,66 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } } } - if zb0003 > 0 { - err = msgp.ErrTooManyArrayFields(zb0003) + if zb0005 > 0 { + zb0005-- + var zb0011 int + var zb0012 bool + zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineAccounts") + return + } + if zb0011 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0011), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "struct-from-array", "OnlineAccounts") + return + } + if zb0012 { + (*z).OnlineAccounts = nil + } else if (*z).OnlineAccounts != nil && cap((*z).OnlineAccounts) >= zb0011 { + (*z).OnlineAccounts = ((*z).OnlineAccounts)[:zb0011] + } else { + (*z).OnlineAccounts = make([]encoded.OnlineAccountRecordV6, zb0011) + } + for zb0003 := range (*z).OnlineAccounts { + bts, err = (*z).OnlineAccounts[zb0003].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineAccounts", zb0003) + return + } + } + } + if zb0005 > 0 { + zb0005-- + var zb0013 int + var zb0014 bool + zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParams") + return + } + if zb0013 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0013), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParams") + return + } + if zb0014 { + (*z).OnlineRoundParams = nil + } else if (*z).OnlineRoundParams != nil && cap((*z).OnlineRoundParams) >= zb0013 { + (*z).OnlineRoundParams = ((*z).OnlineRoundParams)[:zb0013] + } else { + (*z).OnlineRoundParams = 
make([]encoded.OnlineRoundParamsRecordV6, zb0013) + } + for zb0004 := range (*z).OnlineRoundParams { + bts, err = (*z).OnlineRoundParams[zb0004].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParams", zb0004) + return + } + } + } + if zb0005 > 0 { + err = msgp.ErrTooManyArrayFields(zb0005) if err != nil { err = msgp.WrapError(err, "struct-from-array") return @@ -741,11 +877,11 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err) return } - if zb0004 { + if zb0006 { (*z) = catchpointFileChunkV6{} } - for zb0003 > 0 { - zb0003-- + for zb0005 > 0 { + zb0005-- field, bts, err = msgp.ReadMapKeyZC(bts) if err != nil { err = msgp.WrapError(err) @@ -753,24 +889,24 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } switch string(field) { case "bl": - var zb0009 int - var zb0010 bool - zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) + var zb0015 int + var zb0016 bool + zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Balances") return } - if zb0009 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0009), uint64(BalancesPerCatchpointFileChunk)) + if zb0015 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0015), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "Balances") return } - if zb0010 { + if zb0016 { (*z).Balances = nil - } else if (*z).Balances != nil && cap((*z).Balances) >= zb0009 { - (*z).Balances = ((*z).Balances)[:zb0009] + } else if (*z).Balances != nil && cap((*z).Balances) >= zb0015 { + (*z).Balances = ((*z).Balances)[:zb0015] } else { - (*z).Balances = make([]encoded.BalanceRecordV6, zb0009) + (*z).Balances = make([]encoded.BalanceRecordV6, zb0015) } for zb0001 := range (*z).Balances { bts, err = (*z).Balances[zb0001].UnmarshalMsgWithState(bts, st) @@ -780,24 +916,24 @@ func (z 
*catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } } case "kv": - var zb0011 int - var zb0012 bool - zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts) + var zb0017 int + var zb0018 bool + zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "KVs") return } - if zb0011 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0011), uint64(BalancesPerCatchpointFileChunk)) + if zb0017 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0017), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "KVs") return } - if zb0012 { + if zb0018 { (*z).KVs = nil - } else if (*z).KVs != nil && cap((*z).KVs) >= zb0011 { - (*z).KVs = ((*z).KVs)[:zb0011] + } else if (*z).KVs != nil && cap((*z).KVs) >= zb0017 { + (*z).KVs = ((*z).KVs)[:zb0017] } else { - (*z).KVs = make([]encoded.KVRecordV6, zb0011) + (*z).KVs = make([]encoded.KVRecordV6, zb0017) } for zb0002 := range (*z).KVs { bts, err = (*z).KVs[zb0002].UnmarshalMsgWithState(bts, st) @@ -806,6 +942,60 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars return } } + case "oa": + var zb0019 int + var zb0020 bool + zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OnlineAccounts") + return + } + if zb0019 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0019), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "OnlineAccounts") + return + } + if zb0020 { + (*z).OnlineAccounts = nil + } else if (*z).OnlineAccounts != nil && cap((*z).OnlineAccounts) >= zb0019 { + (*z).OnlineAccounts = ((*z).OnlineAccounts)[:zb0019] + } else { + (*z).OnlineAccounts = make([]encoded.OnlineAccountRecordV6, zb0019) + } + for zb0003 := range (*z).OnlineAccounts { + bts, err = (*z).OnlineAccounts[zb0003].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineAccounts", zb0003) + return + 
} + } + case "orp": + var zb0021 int + var zb0022 bool + zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OnlineRoundParams") + return + } + if zb0021 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0021), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "OnlineRoundParams") + return + } + if zb0022 { + (*z).OnlineRoundParams = nil + } else if (*z).OnlineRoundParams != nil && cap((*z).OnlineRoundParams) >= zb0021 { + (*z).OnlineRoundParams = ((*z).OnlineRoundParams)[:zb0021] + } else { + (*z).OnlineRoundParams = make([]encoded.OnlineRoundParamsRecordV6, zb0021) + } + for zb0004 := range (*z).OnlineRoundParams { + bts, err = (*z).OnlineRoundParams[zb0004].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineRoundParams", zb0004) + return + } + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -837,12 +1027,20 @@ func (z *catchpointFileChunkV6) Msgsize() (s int) { for zb0002 := range (*z).KVs { s += (*z).KVs[zb0002].Msgsize() } + s += 3 + msgp.ArrayHeaderSize + for zb0003 := range (*z).OnlineAccounts { + s += (*z).OnlineAccounts[zb0003].Msgsize() + } + s += 4 + msgp.ArrayHeaderSize + for zb0004 := range (*z).OnlineRoundParams { + s += (*z).OnlineRoundParams[zb0004].Msgsize() + } return } // MsgIsZero returns whether this is a zero value func (z *catchpointFileChunkV6) MsgIsZero() bool { - return (len((*z).Balances) == 0) && (len((*z).KVs) == 0) + return (len((*z).Balances) == 0) && (len((*z).KVs) == 0) && (len((*z).OnlineAccounts) == 0) && (len((*z).OnlineRoundParams) == 0) } // MaxSize returns a maximum valid message size for this message type @@ -853,6 +1051,12 @@ func CatchpointFileChunkV6MaxSize() (s int) { s += 3 // Calculating size of slice: z.KVs s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.KVRecordV6MaxSize())) + s += 3 + // Calculating size of slice: z.OnlineAccounts + s += 
msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.OnlineAccountRecordV6MaxSize())) + s += 4 + // Calculating size of slice: z.OnlineRoundParams + s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.OnlineRoundParamsRecordV6MaxSize())) return } diff --git a/ledger/store/trackerdb/catchpoint.go b/ledger/store/trackerdb/catchpoint.go index ad6c9a236d..f2d48b0347 100644 --- a/ledger/store/trackerdb/catchpoint.go +++ b/ledger/store/trackerdb/catchpoint.go @@ -125,6 +125,9 @@ type CatchpointFirstStageInfo struct { // data files are generated. TotalKVs uint64 `codec:"kvsCount"` + TotalOnlineAccounts uint64 `codec:"onlineAccountsCount"` + TotalOnlineRoundParams uint64 `codec:"onlineRoundParamsCount"` + // Total number of chunks in the catchpoint data file. Only set when catchpoint // data files are generated. TotalChunks uint64 `codec:"chunksCount"` @@ -133,6 +136,10 @@ type CatchpointFirstStageInfo struct { // StateProofVerificationHash is the hash of the state proof verification data contained in the catchpoint data file. StateProofVerificationHash crypto.Digest `codec:"spVerificationHash"` + + // OnlineAccountsHash and OnlineRoundParamsHash provide verification for these tables in the catchpoint data file. + OnlineAccountsHash crypto.Digest `codec:"onlineAccountsHash"` + OnlineRoundParamsHash crypto.Digest `codec:"onlineRoundParamsHash"` } // MakeCatchpointFilePath builds the path of a catchpoint file. 
diff --git a/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go b/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go index 6d6b527f48..3113ac86dd 100644 --- a/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go +++ b/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go @@ -360,3 +360,39 @@ func (ar *accountsReaderExt) TotalResources(ctx context.Context) (total uint64, // return primary results return totalP, nil } + +// TotalOnlineAccountRows implements trackerdb.AccountsReaderExt +func (ar *accountsReaderExt) TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) { + totalP, errP := ar.primary.TotalOnlineAccountRows(ctx) + totalS, errS := ar.secondary.TotalOnlineAccountRows(ctx) + // coalesce errors + err = coalesceErrors(errP, errS) + if err != nil { + return + } + // check results match + if totalP != totalS { + err = ErrInconsistentResult + return + } + // return primary results + return totalP, nil +} + +// TotalOnlineRoundParams implements trackerdb.AccountsReaderExt +func (ar *accountsReaderExt) TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) { + totalP, errP := ar.primary.TotalOnlineRoundParams(ctx) + totalS, errS := ar.secondary.TotalOnlineRoundParams(ctx) + // coalesce errors + err = coalesceErrors(errP, errS) + if err != nil { + return + } + // check results match + if totalP != totalS { + err = ErrInconsistentResult + return + } + // return primary results + return totalP, nil +} diff --git a/ledger/store/trackerdb/dualdriver/dualdriver.go b/ledger/store/trackerdb/dualdriver/dualdriver.go index e51b05929f..ed23528675 100644 --- a/ledger/store/trackerdb/dualdriver/dualdriver.go +++ b/ledger/store/trackerdb/dualdriver/dualdriver.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/db" @@ -264,8 +265,8 @@ 
func (*reader) MakeCatchpointReader() (trackerdb.CatchpointReader, error) { return nil, nil } -// MakeEncodedAccoutsBatchIter implements trackerdb.Reader -func (*reader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { +// MakeEncodedAccountsBatchIter implements trackerdb.Reader +func (*reader) MakeEncodedAccountsBatchIter() trackerdb.EncodedAccountsBatchIter { // TODO: catchpoint return nil } @@ -276,6 +277,18 @@ func (*reader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { return nil, nil } +// MakeOnlineAccountsIter implements trackerdb.Reader +func (*reader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + // TODO: catchpoint + return nil, nil +} + +// MakeOnlineRoundParamsIter implements trackerdb.Reader +func (*reader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + // TODO: catchpoint + return nil, nil +} + type writer struct { primary trackerdb.Writer secondary trackerdb.Writer diff --git a/ledger/store/trackerdb/generickv/accounts_ext_reader.go b/ledger/store/trackerdb/generickv/accounts_ext_reader.go index 79460b51e7..6e5c72daaf 100644 --- a/ledger/store/trackerdb/generickv/accounts_ext_reader.go +++ b/ledger/store/trackerdb/generickv/accounts_ext_reader.go @@ -125,6 +125,16 @@ func (r *accountsReader) TotalKVs(ctx context.Context) (total uint64, err error) return } +func (r *accountsReader) TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) { + // TODO: catchpoint + return +} + +func (r *accountsReader) TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) { + // TODO: catchpoint + return +} + // TODO: this replicates some functionality from LookupOnlineHistory, implemented for onlineAccountsReader func (r *accountsReader) LookupOnlineAccountDataByAddress(addr basics.Address) (ref trackerdb.OnlineAccountRef, data []byte, err error) { low, high := 
onlineAccountAddressRangePrefix(addr) diff --git a/ledger/store/trackerdb/generickv/reader.go b/ledger/store/trackerdb/generickv/reader.go index 454a4eb9dc..f8422792e5 100644 --- a/ledger/store/trackerdb/generickv/reader.go +++ b/ledger/store/trackerdb/generickv/reader.go @@ -20,6 +20,7 @@ import ( "context" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" ) @@ -65,8 +66,8 @@ func (r *reader) MakeCatchpointReader() (trackerdb.CatchpointReader, error) { panic("unimplemented") } -// MakeEncodedAccoutsBatchIter implements trackerdb.Reader -func (r *reader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { +// MakeEncodedAccountsBatchIter implements trackerdb.Reader +func (r *reader) MakeEncodedAccountsBatchIter() trackerdb.EncodedAccountsBatchIter { // TODO: catchpoint panic("unimplemented") } @@ -76,3 +77,15 @@ func (r *reader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { // TODO: catchpoint panic("unimplemented") } + +// MakeOnlineAccountsIter implements trackerdb.Reader +func (r *reader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + // TODO: catchpoint + panic("unimplemented") +} + +// MakeOnlineRoundParamsIter implements trackerdb.Reader +func (r *reader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + // TODO: catchpoint + panic("unimplemented") +} diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go index 25d7b79e3c..946056eaf8 100644 --- a/ledger/store/trackerdb/interface.go +++ b/ledger/store/trackerdb/interface.go @@ -126,6 +126,8 @@ type AccountsReaderExt interface { TotalResources(ctx context.Context) (total uint64, err error) TotalAccounts(ctx context.Context) (total uint64, err error) TotalKVs(ctx context.Context) (total uint64, err error) + 
TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) + TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) AccountsRound() (rnd basics.Round, err error) LookupOnlineAccountDataByAddress(addr basics.Address) (ref OnlineAccountRef, data []byte, err error) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) @@ -176,10 +178,13 @@ type CatchpointWriter interface { WriteCatchpointStagingBalances(ctx context.Context, bals []NormalizedAccountBalance) error WriteCatchpointStagingKVs(ctx context.Context, keys [][]byte, values [][]byte, hashes [][]byte) error + WriteCatchpointStagingOnlineAccounts(context.Context, []encoded.OnlineAccountRecordV6) error + WriteCatchpointStagingOnlineRoundParams(context.Context, []encoded.OnlineRoundParamsRecordV6) error WriteCatchpointStagingCreatable(ctx context.Context, bals []NormalizedAccountBalance) error WriteCatchpointStagingHashes(ctx context.Context, bals []NormalizedAccountBalance) error ApplyCatchpointStagingBalances(ctx context.Context, balancesRound basics.Round, merkleRootRound basics.Round) (err error) + ApplyCatchpointStagingTablesV7(ctx context.Context) error ResetCatchpointStagingBalances(ctx context.Context, newCatchup bool) (err error) InsertUnfinishedCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest) error @@ -235,6 +240,13 @@ type KVsIter interface { Close() } +// TableIterator is used to add online accounts and online round params to catchpoint files. +type TableIterator[T any] interface { + Next() bool + GetItem() (T, error) + Close() +} + // EncodedAccountsBatchIter is an iterator for a accounts. 
type EncodedAccountsBatchIter interface { Next(ctx context.Context, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go index 98f35bf519..3c3206e71b 100644 --- a/ledger/store/trackerdb/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -1334,8 +1334,8 @@ func BaseVotingDataMaxSize() (s int) { func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(7) - var zb0001Mask uint8 /* 8 bits */ + zb0001Len := uint32(11) + var zb0001Mask uint16 /* 12 bits */ if (*z).Totals.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2 @@ -1356,14 +1356,30 @@ func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x20 } - if (*z).StateProofVerificationHash.MsgIsZero() { + if (*z).TotalOnlineAccounts == 0 { zb0001Len-- zb0001Mask |= 0x40 } - if (*z).TrieBalancesHash.MsgIsZero() { + if (*z).OnlineAccountsHash.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80 } + if (*z).TotalOnlineRoundParams == 0 { + zb0001Len-- + zb0001Mask |= 0x100 + } + if (*z).OnlineRoundParamsHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x200 + } + if (*z).StateProofVerificationHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x400 + } + if (*z).TrieBalancesHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x800 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -1393,11 +1409,31 @@ func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) { o = msgp.AppendUint64(o, (*z).TotalKVs) } if (zb0001Mask & 0x40) == 0 { // if not empty + // string "onlineAccountsCount" + o = append(o, 0xb3, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineAccounts) + } + if (zb0001Mask & 0x80) == 
0 { // if not empty + // string "onlineAccountsHash" + o = append(o, 0xb2, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x48, 0x61, 0x73, 0x68) + o = (*z).OnlineAccountsHash.MarshalMsg(o) + } + if (zb0001Mask & 0x100) == 0 { // if not empty + // string "onlineRoundParamsCount" + o = append(o, 0xb6, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineRoundParams) + } + if (zb0001Mask & 0x200) == 0 { // if not empty + // string "onlineRoundParamsHash" + o = append(o, 0xb5, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x61, 0x73, 0x68) + o = (*z).OnlineRoundParamsHash.MarshalMsg(o) + } + if (zb0001Mask & 0x400) == 0 { // if not empty // string "spVerificationHash" o = append(o, 0xb2, 0x73, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68) o = (*z).StateProofVerificationHash.MarshalMsg(o) } - if (zb0001Mask & 0x80) == 0 { // if not empty + if (zb0001Mask & 0x800) == 0 { // if not empty // string "trieBalancesHash" o = append(o, 0xb0, 0x74, 0x72, 0x69, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x48, 0x61, 0x73, 0x68) o = (*z).TrieBalancesHash.MarshalMsg(o) @@ -1461,6 +1497,22 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm return } } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineAccounts") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineRoundParams") + return + } + } if zb0001 > 0 { zb0001-- (*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts) @@ -1485,6 +1537,22 @@ func (z 
*CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm return } } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OnlineAccountsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineAccountsHash") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OnlineRoundParamsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParamsHash") + return + } + } if zb0001 > 0 { err = msgp.ErrTooManyArrayFields(zb0001) if err != nil { @@ -1532,6 +1600,18 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "TotalKVs") return } + case "onlineAccountsCount": + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineAccounts") + return + } + case "onlineRoundParamsCount": + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineRoundParams") + return + } case "chunksCount": (*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts) if err != nil { @@ -1550,6 +1630,18 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "StateProofVerificationHash") return } + case "onlineAccountsHash": + bts, err = (*z).OnlineAccountsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineAccountsHash") + return + } + case "onlineRoundParamsHash": + bts, err = (*z).OnlineRoundParamsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineRoundParamsHash") + return + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -1573,18 +1665,18 @@ func (_ *CatchpointFirstStageInfo) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z 
*CatchpointFirstStageInfo) Msgsize() (s int) { - s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + (*z).StateProofVerificationHash.Msgsize() + s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + (*z).StateProofVerificationHash.Msgsize() + 19 + (*z).OnlineAccountsHash.Msgsize() + 22 + (*z).OnlineRoundParamsHash.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *CatchpointFirstStageInfo) MsgIsZero() bool { - return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0) && ((*z).StateProofVerificationHash.MsgIsZero()) + return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalOnlineAccounts == 0) && ((*z).TotalOnlineRoundParams == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0) && ((*z).StateProofVerificationHash.MsgIsZero()) && ((*z).OnlineAccountsHash.MsgIsZero()) && ((*z).OnlineRoundParamsHash.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func CatchpointFirstStageInfoMaxSize() (s int) { - s = 1 + 14 + ledgercore.AccountTotalsMaxSize() + 17 + crypto.DigestMaxSize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + crypto.DigestMaxSize() + s = 1 + 14 + ledgercore.AccountTotalsMaxSize() + 17 + crypto.DigestMaxSize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + crypto.DigestMaxSize() + 19 + crypto.DigestMaxSize() + 22 + crypto.DigestMaxSize() return } diff --git 
a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go index 0ba84c84bd..b5443f0cda 100644 --- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go @@ -379,6 +379,28 @@ func (r *accountsV2Reader) TotalKVs(ctx context.Context) (total uint64, err erro return } +// TotalOnlineAccountRows returns the total number of rows in the onlineaccounts table. +func (r *accountsV2Reader) TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) { + err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM onlineaccounts").Scan(&total) + if err == sql.ErrNoRows { + total = 0 + err = nil + return + } + return +} + +// TotalOnlineRoundParams returns the total number of rows in the onlineroundparamstail table. +func (r *accountsV2Reader) TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) { + err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM onlineroundparamstail").Scan(&total) + if err == sql.ErrNoRows { + total = 0 + err = nil + return + } + return +} + // LoadTxTail returns the tx tails func (r *accountsV2Reader) LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*trackerdb.TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) { rows, err := r.q.QueryContext(ctx, "SELECT rnd, data FROM txtail ORDER BY rnd DESC") diff --git a/ledger/store/trackerdb/sqlitedriver/catchpoint.go b/ledger/store/trackerdb/sqlitedriver/catchpoint.go index cdda10978e..94a0a15b7c 100644 --- a/ledger/store/trackerdb/sqlitedriver/catchpoint.go +++ b/ledger/store/trackerdb/sqlitedriver/catchpoint.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -464,6 +465,42 @@ 
func (cw *catchpointWriter) WriteCatchpointStagingKVs(ctx context.Context, keys return nil } +// WriteCatchpointStagingOnlineAccounts inserts all the onlineaccounts in the provided array +// into the catchpoint staging table catchpointonlineaccounts, and their hashes to the pending +func (cw *catchpointWriter) WriteCatchpointStagingOnlineAccounts(ctx context.Context, oas []encoded.OnlineAccountRecordV6) error { + insertStmt, err := cw.e.PrepareContext(ctx, "INSERT INTO catchpointonlineaccounts(address, updround, normalizedonlinebalance, votelastvalid, data) VALUES(?, ?, ?, ?, ?)") + if err != nil { + return err + } + defer insertStmt.Close() + + for i := 0; i < len(oas); i++ { + _, err := insertStmt.ExecContext(ctx, oas[i].Address[:], oas[i].UpdateRound, oas[i].NormalizedOnlineBalance, oas[i].VoteLastValid, oas[i].Data) + if err != nil { + return err + } + } + return nil +} + +// WriteCatchpointStagingOnlineRoundParams inserts all the online round params in the provided array +// into the catchpoint staging table catchpointonlineroundparamstail, and their hashes to the pending +func (cw *catchpointWriter) WriteCatchpointStagingOnlineRoundParams(ctx context.Context, orps []encoded.OnlineRoundParamsRecordV6) error { + insertStmt, err := cw.e.PrepareContext(ctx, "INSERT INTO catchpointonlineroundparamstail(rnd, data) VALUES(?, ?)") + if err != nil { + return err + } + defer insertStmt.Close() + + for i := 0; i < len(orps); i++ { + _, err := insertStmt.ExecContext(ctx, orps[i].Round, orps[i].Data) + if err != nil { + return err + } + } + return nil +} + func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, newCatchup bool) (err error) { s := []string{ "DROP TABLE IF EXISTS catchpointbalances", @@ -472,6 +509,8 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, "DROP TABLE IF EXISTS catchpointpendinghashes", "DROP TABLE IF EXISTS catchpointresources", "DROP TABLE IF EXISTS catchpointkvstore", + "DROP TABLE IF 
EXISTS catchpointonlineaccounts", + "DROP TABLE IF EXISTS catchpointonlineroundparamstail", "DROP TABLE IF EXISTS catchpointstateproofverification", "DELETE FROM accounttotals where id='catchpointStaging'", } @@ -486,6 +525,7 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, now := time.Now().UnixNano() idxnameBalances := fmt.Sprintf("onlineaccountbals_idx_%d", now) idxnameAddress := fmt.Sprintf("accountbase_address_idx_%d", now) + idxnameOnlineAccounts := fmt.Sprintf("onlineaccountnorm_idx_%d", now) s = append(s, "CREATE TABLE IF NOT EXISTS catchpointassetcreators (asset integer primary key, creator blob, ctype integer)", @@ -494,10 +534,13 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, "CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)", "CREATE TABLE IF NOT EXISTS catchpointresources (addrid INTEGER NOT NULL, aidx INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID", "CREATE TABLE IF NOT EXISTS catchpointkvstore (key blob primary key, value blob)", + "CREATE TABLE IF NOT EXISTS catchpointonlineaccounts (address BLOB NOT NULL, updround INTEGER NOT NULL, normalizedonlinebalance INTEGER NOT NULL, votelastvalid INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (address, updround) )", + "CREATE TABLE IF NOT EXISTS catchpointonlineroundparamstail(rnd INTEGER NOT NULL PRIMARY KEY, data BLOB NOT NULL)", "CREATE TABLE IF NOT EXISTS catchpointstateproofverification (lastattestedround INTEGER PRIMARY KEY NOT NULL, verificationContext BLOB NOT NULL)", createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"), // should this be removed ? 
createUniqueAddressBalanceIndex(idxnameAddress, "catchpointbalances"), + createNormalizedOnlineBalanceIndexOnline(idxnameOnlineAccounts, "catchpointonlineaccounts"), ) } @@ -550,6 +593,25 @@ func (cw *catchpointWriter) ApplyCatchpointStagingBalances(ctx context.Context, return } +// ApplyCatchpointStagingTablesV7 drops the existing onlineaccounts and onlineroundparamstail tables, +// replacing them with data from the catchpoint staging tables. It should only be used for CatchpointFileVersionV8, +// after the ApplyCatchpointStagingBalances function has been run on DB v6, then upgraded to DB v7. +func (cw *catchpointWriter) ApplyCatchpointStagingTablesV7(ctx context.Context) (err error) { + stmts := []string{ + "DROP TABLE IF EXISTS onlineaccounts", + "DROP TABLE IF EXISTS onlineroundparamstail", + "ALTER TABLE catchpointonlineaccounts RENAME TO onlineaccounts", + "ALTER TABLE catchpointonlineroundparamstail RENAME TO onlineroundparamstail", + } + for _, stmt := range stmts { + _, err = cw.e.Exec(stmt) + if err != nil { + return err + } + } + return +} + // CreateCatchpointStagingHashesIndex creates an index on catchpointpendinghashes to allow faster scanning according to the hash order func (cw *catchpointWriter) CreateCatchpointStagingHashesIndex(ctx context.Context) (err error) { _, err = cw.e.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS catchpointpendinghashesidx ON catchpointpendinghashes(data)") diff --git a/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go index 7d2829d3e2..ea2dd5e2d8 100644 --- a/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go @@ -45,8 +45,8 @@ type catchpointAccountResourceCounter struct { totalAssets uint64 } -// MakeEncodedAccoutsBatchIter creates an empty accounts batch iterator. 
-func MakeEncodedAccoutsBatchIter(q db.Queryable) *encodedAccountsBatchIter { +// MakeEncodedAccountsBatchIter creates an empty accounts batch iterator. +func MakeEncodedAccountsBatchIter(q db.Queryable) *encodedAccountsBatchIter { return &encodedAccountsBatchIter{q: q} } diff --git a/ledger/store/trackerdb/sqlitedriver/kvsIter.go b/ledger/store/trackerdb/sqlitedriver/kvsIter.go index 4ae08962a0..b85d6c48cb 100644 --- a/ledger/store/trackerdb/sqlitedriver/kvsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/kvsIter.go @@ -19,7 +19,13 @@ package sqlitedriver import ( "context" "database/sql" + "fmt" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" + "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) @@ -53,3 +59,111 @@ func (iter *kvsIter) KeyValue() (k []byte, v []byte, err error) { func (iter *kvsIter) Close() { iter.rows.Close() } + +// tableIterator is used to dump onlineaccounts and onlineroundparams tables for catchpoints. +type tableIterator[T any] struct { + rows *sql.Rows + scan func(*sql.Rows) (T, error) +} + +func (iter *tableIterator[T]) Next() bool { return iter.rows.Next() } +func (iter *tableIterator[T]) Close() { iter.rows.Close() } +func (iter *tableIterator[T]) GetItem() (T, error) { + return iter.scan(iter.rows) +} + +// MakeOnlineAccountsIter creates an onlineAccounts iterator. 
+func MakeOnlineAccountsIter(ctx context.Context, q db.Queryable) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + rows, err := q.QueryContext(ctx, "SELECT address, updround, normalizedonlinebalance, votelastvalid, data FROM onlineaccounts ORDER BY address, updround") + if err != nil { + return nil, err + } + + return &tableIterator[*encoded.OnlineAccountRecordV6]{rows: rows, scan: scanOnlineAccount}, nil +} + +func scanOnlineAccount(rows *sql.Rows) (*encoded.OnlineAccountRecordV6, error) { + var ret encoded.OnlineAccountRecordV6 + var updRound, normBal, lastValid sql.NullInt64 + var addr, data []byte + + err := rows.Scan(&addr, &updRound, &normBal, &lastValid, &data) + if err != nil { + return nil, err + } + if len(addr) != len(ret.Address) { + err = fmt.Errorf("onlineaccounts DB address length mismatch: %d != %d", len(addr), len(ret.Address)) + return nil, err + } + copy(ret.Address[:], addr) + + if !updRound.Valid || updRound.Int64 < 0 { + return nil, fmt.Errorf("invalid updateRound (%v) for online account %s", updRound, ret.Address.String()) + } + ret.UpdateRound = basics.Round(updRound.Int64) + + if !normBal.Valid || normBal.Int64 < 0 { + return nil, fmt.Errorf("invalid norm balance (%v) for online account %s", normBal, ret.Address.String()) + } + ret.NormalizedOnlineBalance = uint64(normBal.Int64) + + if !lastValid.Valid || lastValid.Int64 < 0 { + return nil, fmt.Errorf("invalid lastValid (%v) for online account %s", lastValid, ret.Address) + } + ret.VoteLastValid = basics.Round(lastValid.Int64) + + var oaData trackerdb.BaseOnlineAccountData + err = protocol.Decode(data, &oaData) + if err != nil { + return nil, fmt.Errorf("encoding error for online account %s: %v", ret.Address, err) + } + + // check consistency of the decoded data against row data + // skip checking NormalizedOnlineBalance, requires proto + if ret.VoteLastValid != oaData.VoteLastValid { + return nil, fmt.Errorf("decoded voteLastValid %d does not match row voteLastValid 
%d", oaData.VoteLastValid, ret.VoteLastValid) + } + + // return original encoded column value + ret.Data = data + + return &ret, nil +} + +// MakeOnlineRoundParamsIter creates an onlineRoundParams iterator. +func MakeOnlineRoundParamsIter(ctx context.Context, q db.Queryable) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + rows, err := q.QueryContext(ctx, "SELECT rnd, data FROM onlineroundparamstail ORDER BY rnd") + if err != nil { + return nil, err + } + + return &tableIterator[*encoded.OnlineRoundParamsRecordV6]{rows: rows, scan: scanOnlineRoundParams}, nil +} + +func scanOnlineRoundParams(rows *sql.Rows) (*encoded.OnlineRoundParamsRecordV6, error) { + var ret encoded.OnlineRoundParamsRecordV6 + var rnd sql.NullInt64 + var data []byte + + err := rows.Scan(&rnd, &data) + if err != nil { + return nil, err + } + + if !rnd.Valid || rnd.Int64 < 0 { + return nil, fmt.Errorf("invalid round (%v) for online round params", rnd) + } + ret.Round = basics.Round(rnd.Int64) + + // test decode + var orpData ledgercore.OnlineRoundParamsData + err = protocol.Decode(data, &orpData) + if err != nil { + return nil, fmt.Errorf("encoding error for online round params round %v: %v", ret.Round, err) + } + + // return original encoded column value + ret.Data = data + + return &ret, nil +} diff --git a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go index 54a080fe94..d29fdb9abc 100644 --- a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go +++ b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -200,9 +201,9 @@ func (r *sqlReader) MakeCatchpointReader() 
(trackerdb.CatchpointReader, error) { return makeCatchpointReader(r.q), nil } -// MakeEncodedAccoutsBatchIter implements trackerdb.Reader -func (r *sqlReader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { - return MakeEncodedAccoutsBatchIter(r.q) +// MakeEncodedAccountsBatchIter implements trackerdb.Reader +func (r *sqlReader) MakeEncodedAccountsBatchIter() trackerdb.EncodedAccountsBatchIter { + return MakeEncodedAccountsBatchIter(r.q) } // MakeKVsIter implements trackerdb.Reader @@ -210,6 +211,16 @@ func (r *sqlReader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) return MakeKVsIter(ctx, r.q) } +// MakeOnlineAccountsIter implements trackerdb.Reader +func (r *sqlReader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + return MakeOnlineAccountsIter(ctx, r.q) +} + +// MakeOnlineRoundParamsIter implements trackerdb.Reader +func (r *sqlReader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + return MakeOnlineRoundParamsIter(ctx, r.q) +} + type sqlWriter struct { e db.Executable } diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go index 257ff63842..968c313a81 100644 --- a/ledger/store/trackerdb/store.go +++ b/ledger/store/trackerdb/store.go @@ -20,6 +20,7 @@ import ( "context" "time" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/db" ) @@ -63,8 +64,10 @@ type Reader interface { MakeCatchpointPendingHashesIterator(hashCount int) CatchpointPendingHashesIter // Note: Catchpoint tracker needs this on the reader handle in sqlite to not get locked by write txns MakeCatchpointReader() (CatchpointReader, error) - MakeEncodedAccoutsBatchIter() EncodedAccountsBatchIter + MakeEncodedAccountsBatchIter() EncodedAccountsBatchIter MakeKVsIter(ctx context.Context) (KVsIter, error) + 
MakeOnlineAccountsIter(ctx context.Context) (TableIterator[*encoded.OnlineAccountRecordV6], error) + MakeOnlineRoundParamsIter(ctx context.Context) (TableIterator[*encoded.OnlineRoundParamsRecordV6], error) } // Writer is the interface for the trackerdb write operations. diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 3c7bf51faa..324c658591 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -188,9 +188,6 @@ func (t *emptyTracker) postCommit(ctx context.Context, dcc *deferredCommitContex func (t *emptyTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { } -func (t *emptyTracker) clearCommitRoundRetry(ctx context.Context, dcc *deferredCommitContext) { -} - // control functions are not used by the emptyTracker func (t *emptyTracker) handleUnorderedCommit(dcc *deferredCommitContext) { } @@ -198,6 +195,8 @@ func (t *emptyTracker) handlePrepareCommitError(dcc *deferredCommitContext) { } func (t *emptyTracker) handleCommitError(dcc *deferredCommitContext) { } +func (t *emptyTracker) clearCommitRoundRetry(ctx context.Context, dcc *deferredCommitContext) { +} // close is not used by the emptyTracker func (t *emptyTracker) close() { diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go index 3f17c1bf85..1363646043 100644 --- a/logging/telemetryspec/event.go +++ b/logging/telemetryspec/event.go @@ -322,8 +322,8 @@ type CatchpointGenerationEventDetails struct { BalancesWriteTime uint64 // AccountsCount is the number of accounts that were written into the generated catchpoint file AccountsCount uint64 - // KVsCount is the number of accounts that were written into the generated catchpoint file - KVsCount uint64 + // KVsCount, OnlineAccountsCount, OnlineRoundParamsCount are sizes written into the generated catchpoint file + KVsCount, OnlineAccountsCount, OnlineRoundParamsCount uint64 // FileSize is the size of the catchpoint file, in bytes. 
FileSize uint64 // MerkleTrieRootHash is the merkle trie root hash represents all accounts and kvs diff --git a/protocol/hash.go b/protocol/hash.go index 906afb2c3d..9390c582c2 100644 --- a/protocol/hash.go +++ b/protocol/hash.go @@ -52,6 +52,8 @@ const ( NetIdentityChallengeResponse HashID = "NIR" NetIdentityVerificationMessage HashID = "NIV" NetPrioResponse HashID = "NPR" + OnlineAccount HashID = "OA" + OnlineRoundParams HashID = "ORP" OneTimeSigKey1 HashID = "OT1" OneTimeSigKey2 HashID = "OT2" PaysetFlat HashID = "PF" diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 0a1d522cac..e7408ed55a 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -294,7 +294,7 @@ func TestCatchpointCatchupFailure(t *testing.T) { t.Skip() } - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) a := require.New(fixtures.SynchronizedTest(t)) @@ -339,7 +339,7 @@ func TestBasicCatchpointCatchup(t *testing.T) { t.Skip() } - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) a := require.New(fixtures.SynchronizedTest(t)) @@ -397,7 +397,7 @@ func TestCatchpointLabelGeneration(t *testing.T) { consensus := make(config.ConsensusProtocols) const consensusCatchpointCatchupTestProtocol = protocol.ConsensusVersion("catchpointtestingprotocol") - catchpointCatchupProtocol := config.Consensus[protocol.ConsensusCurrentVersion] + catchpointCatchupProtocol := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) consensus[consensusCatchpointCatchupTestProtocol] = catchpointCatchupProtocol @@ -474,7 +474,7 @@ func TestNodeTxHandlerRestart(t 
*testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusCurrentVersion + protoVersion := protocol.ConsensusFuture catchpointCatchupProtocol := config.Consensus[protoVersion] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) catchpointCatchupProtocol.StateProofInterval = 0 @@ -581,7 +581,7 @@ func TestReadyEndpoint(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusCurrentVersion + protoVersion := protocol.ConsensusFuture catchpointCatchupProtocol := config.Consensus[protoVersion] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) catchpointCatchupProtocol.StateProofInterval = 0 @@ -720,7 +720,7 @@ func TestNodeTxSyncRestart(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusCurrentVersion + protoVersion := protocol.ConsensusFuture catchpointCatchupProtocol := config.Consensus[protoVersion] prevMaxTxnLife := catchpointCatchupProtocol.MaxTxnLife applyCatchpointConsensusChanges(&catchpointCatchupProtocol) diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index f9639abeb1..69c50f5ccd 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -70,7 +70,7 @@ func TestStateProofInReplayCatchpoint(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) applyCatchpointStateProofConsensusChanges(&consensusParams) @@ -146,7 +146,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) { } a := require.New(fixtures.SynchronizedTest(t)) - consensusParams := 
config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) applyCatchpointStateProofConsensusChanges(&consensusParams) consensusParams.StateProofInterval = 16 @@ -211,7 +211,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { configurableConsensus := make(config.ConsensusProtocols) consensusVersion := protocol.ConsensusVersion("catchpointtestingprotocol") - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointStateProofConsensusChanges(&consensusParams) applyCatchpointConsensusChanges(&consensusParams) // Weight threshold allows creation of state proofs using the primary node and at least one other node. From a1137a29a1008624c0424be619cd1d1d63c072ef Mon Sep 17 00:00:00 2001 From: Gary Malouf <982483+gmalouf@users.noreply.github.com> Date: Fri, 20 Dec 2024 10:23:02 -0500 Subject: [PATCH 14/15] Consensus: Consensus version v40, set major release to 4 and reset minor. 
(#6207) Co-authored-by: cce <51567+cce@users.noreply.github.com> --- config/consensus.go | 50 ++++++++++++------- config/version.go | 4 +- data/bookkeeping/block_test.go | 12 ++++- data/pools/transactionPool_test.go | 1 + data/transactions/logic/langspec_v1.json | 2 +- data/transactions/logic/langspec_v10.json | 2 +- data/transactions/logic/langspec_v11.json | 2 +- data/transactions/logic/langspec_v2.json | 2 +- data/transactions/logic/langspec_v3.json | 2 +- data/transactions/logic/langspec_v4.json | 2 +- data/transactions/logic/langspec_v5.json | 2 +- data/transactions/logic/langspec_v6.json | 2 +- data/transactions/logic/langspec_v7.json | 2 +- data/transactions/logic/langspec_v8.json | 2 +- data/transactions/logic/langspec_v9.json | 2 +- ledger/applications_test.go | 3 ++ ledger/catchpointfilewriter.go | 4 +- ledger/catchpointfilewriter_test.go | 12 ++--- ledger/catchpointtracker.go | 4 +- ledger/catchupaccessor.go | 9 ++-- ledger/eval_simple_test.go | 5 +- ledger/ledger_test.go | 6 +++ .../store/trackerdb/dualdriver/dualdriver.go | 4 +- ledger/store/trackerdb/generickv/reader.go | 4 +- .../store/trackerdb/sqlitedriver/kvsIter.go | 17 +++++-- .../trackerdb/sqlitedriver/sqlitedriver.go | 8 +-- ledger/store/trackerdb/store.go | 4 +- ledger/testing/consensusRange.go | 1 + ledger/testing/consensusRange_test.go | 2 +- node/follower_node_test.go | 26 ++++------ protocol/consensus.go | 7 ++- .../catchup/catchpointCatchup_test.go | 6 +-- .../catchup/stateproofsCatchup_test.go | 2 +- 33 files changed, 128 insertions(+), 85 deletions(-) diff --git a/config/consensus.go b/config/consensus.go index 7e79e9b8e5..32343ed154 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -1516,32 +1516,44 @@ func initConsensusProtocols() { // but our current max is 150000 so using that : v38.ApprovedUpgrades[protocol.ConsensusV39] = 150000 - // ConsensusFuture is used to test features that are implemented - // but not yet released in a production protocol version. 
- vFuture := v39 - vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + v40 := v39 + v40.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} - vFuture.LogicSigVersion = 11 // When moving this to a release, put a new higher LogicSigVersion here + v40.LogicSigVersion = 11 - vFuture.EnableLogicSigSizePooling = true + v40.EnableLogicSigSizePooling = true - vFuture.Payouts.Enabled = true - vFuture.Payouts.Percent = 50 - vFuture.Payouts.GoOnlineFee = 2_000_000 // 2 algos - vFuture.Payouts.MinBalance = 30_000_000_000 // 30,000 algos - vFuture.Payouts.MaxBalance = 70_000_000_000_000 // 70M algos - vFuture.Payouts.MaxMarkAbsent = 32 - vFuture.Payouts.ChallengeInterval = 1000 - vFuture.Payouts.ChallengeGracePeriod = 200 - vFuture.Payouts.ChallengeBits = 5 + v40.Payouts.Enabled = true + v40.Payouts.Percent = 50 + v40.Payouts.GoOnlineFee = 2_000_000 // 2 algos + v40.Payouts.MinBalance = 30_000_000_000 // 30,000 algos + v40.Payouts.MaxBalance = 70_000_000_000_000 // 70M algos + v40.Payouts.MaxMarkAbsent = 32 + v40.Payouts.ChallengeInterval = 1000 + v40.Payouts.ChallengeGracePeriod = 200 + v40.Payouts.ChallengeBits = 5 - vFuture.Bonus.BaseAmount = 10_000_000 // 10 Algos + v40.Bonus.BaseAmount = 10_000_000 // 10 Algos // 2.9 sec rounds gives about 10.8M rounds per year. - vFuture.Bonus.DecayInterval = 1_000_000 // .99^(10.8M/1M) ~ .897. So ~10% decay per year + v40.Bonus.DecayInterval = 1_000_000 // .99^(10.8M/1M) ~ .897. So ~10% decay per year + + v40.Heartbeat = true + + v40.EnableCatchpointsWithOnlineAccounts = true - vFuture.Heartbeat = true + Consensus[protocol.ConsensusV40] = v40 + + // v39 can be upgraded to v40, with an update delay of 7d: + // 208000 = (7 * 24 * 60 * 60 / 2.9 ballpark round times) + // our current max is 250000 + v39.ApprovedUpgrades[protocol.ConsensusV40] = 208000 + + // ConsensusFuture is used to test features that are implemented + // but not yet released in a production protocol version. 
+ vFuture := v40 + vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} - vFuture.EnableCatchpointsWithOnlineAccounts = true + vFuture.LogicSigVersion = 12 // When moving this to a release, put a new higher LogicSigVersion here Consensus[protocol.ConsensusFuture] = vFuture diff --git a/config/version.go b/config/version.go index 94a82a6ca8..08f6cae898 100644 --- a/config/version.go +++ b/config/version.go @@ -29,11 +29,11 @@ import ( // VersionMajor is the Major semantic version number (#.y.z) - changed when first public release (0.y.z -> 1.y.z) // and when backwards compatibility is broken. -const VersionMajor = 3 +const VersionMajor = 4 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 28 +const VersionMinor = 0 // Version is the type holding our full version information. type Version struct { diff --git a/data/bookkeeping/block_test.go b/data/bookkeeping/block_test.go index bc8aec6a7a..9b2608cd1d 100644 --- a/data/bookkeeping/block_test.go +++ b/data/bookkeeping/block_test.go @@ -39,12 +39,22 @@ import ( var delegatesMoney = basics.MicroAlgos{Raw: 1000 * 1000 * 1000} var proto1 = protocol.ConsensusVersion("Test1") +var proto1NoBonus = protocol.ConsensusVersion("Test1NoBonus") var proto2 = protocol.ConsensusVersion("Test2") var proto3 = protocol.ConsensusVersion("Test3") var protoUnsupported = protocol.ConsensusVersion("TestUnsupported") var protoDelay = protocol.ConsensusVersion("TestDelay") func init() { + verBeforeBonus := protocol.ConsensusV39 + params1NB := config.Consensus[verBeforeBonus] + params1NB.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{ + proto2: 0, + } + params1NB.MinUpgradeWaitRounds = 0 + params1NB.MaxUpgradeWaitRounds = 0 + config.Consensus[proto1NoBonus] = params1NB + params1 := config.Consensus[protocol.ConsensusCurrentVersion] params1.ApprovedUpgrades = 
map[protocol.ConsensusVersion]uint64{ proto2: 0, @@ -263,7 +273,7 @@ func TestBonus(t *testing.T) { t.Parallel() var prev Block - prev.CurrentProtocol = proto1 + prev.CurrentProtocol = proto1NoBonus prev.BlockHeader.GenesisID = t.Name() crypto.RandBytes(prev.BlockHeader.GenesisHash[:]) diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go index 01b851f86d..518358d809 100644 --- a/data/pools/transactionPool_test.go +++ b/data/pools/transactionPool_test.go @@ -1453,6 +1453,7 @@ func TestStateProofLogging(t *testing.T) { b.BlockHeader.GenesisHash = mockLedger.GenesisHash() b.CurrentProtocol = protocol.ConsensusCurrentVersion b.BlockHeader.Round = 1 + b.BlockHeader.Bonus = basics.MicroAlgos{Raw: 10000000} phdr, err := mockLedger.BlockHdr(0) require.NoError(t, err) diff --git a/data/transactions/logic/langspec_v1.json b/data/transactions/logic/langspec_v1.json index 5fd347507d..10ff7909f7 100644 --- a/data/transactions/logic/langspec_v1.json +++ b/data/transactions/logic/langspec_v1.json @@ -1,6 +1,6 @@ { "Version": 1, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v10.json b/data/transactions/logic/langspec_v10.json index 1a8986436a..dffcd9e328 100644 --- a/data/transactions/logic/langspec_v10.json +++ b/data/transactions/logic/langspec_v10.json @@ -1,6 +1,6 @@ { "Version": 10, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v11.json b/data/transactions/logic/langspec_v11.json index 58d5657253..2ff7187f3c 100644 --- a/data/transactions/logic/langspec_v11.json +++ b/data/transactions/logic/langspec_v11.json @@ -1,6 +1,6 @@ { "Version": 11, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v2.json b/data/transactions/logic/langspec_v2.json index 24f9ad97f7..584339b88a 100644 --- 
a/data/transactions/logic/langspec_v2.json +++ b/data/transactions/logic/langspec_v2.json @@ -1,6 +1,6 @@ { "Version": 2, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v3.json b/data/transactions/logic/langspec_v3.json index 085084359a..8ed4c5f45c 100644 --- a/data/transactions/logic/langspec_v3.json +++ b/data/transactions/logic/langspec_v3.json @@ -1,6 +1,6 @@ { "Version": 3, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v4.json b/data/transactions/logic/langspec_v4.json index ddbf651876..988f628246 100644 --- a/data/transactions/logic/langspec_v4.json +++ b/data/transactions/logic/langspec_v4.json @@ -1,6 +1,6 @@ { "Version": 4, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v5.json b/data/transactions/logic/langspec_v5.json index 2cca3941da..5a7cbb6532 100644 --- a/data/transactions/logic/langspec_v5.json +++ b/data/transactions/logic/langspec_v5.json @@ -1,6 +1,6 @@ { "Version": 5, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v6.json b/data/transactions/logic/langspec_v6.json index 6795706db7..5cd1de7fc3 100644 --- a/data/transactions/logic/langspec_v6.json +++ b/data/transactions/logic/langspec_v6.json @@ -1,6 +1,6 @@ { "Version": 6, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v7.json b/data/transactions/logic/langspec_v7.json index 12d2594194..4152b01675 100644 --- a/data/transactions/logic/langspec_v7.json +++ b/data/transactions/logic/langspec_v7.json @@ -1,6 +1,6 @@ { "Version": 7, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v8.json 
b/data/transactions/logic/langspec_v8.json index c5fcdbf58d..d667cbb0d1 100644 --- a/data/transactions/logic/langspec_v8.json +++ b/data/transactions/logic/langspec_v8.json @@ -1,6 +1,6 @@ { "Version": 8, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/data/transactions/logic/langspec_v9.json b/data/transactions/logic/langspec_v9.json index 01e951cc3c..0d5dedf63c 100644 --- a/data/transactions/logic/langspec_v9.json +++ b/data/transactions/logic/langspec_v9.json @@ -1,6 +1,6 @@ { "Version": 9, - "LogicSigVersion": 10, + "LogicSigVersion": 11, "NamedTypes": [ { "Name": "[]byte", diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 315cfd2c47..f140d6c7dd 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -587,6 +587,7 @@ return` blk.TxnCounter = blk.TxnCounter + 2 blk.Payset = append(blk.Payset, txib1, txib2) blk.TxnCommitments, err = blk.PaysetCommit() + blk.FeesCollected = basics.MicroAlgos{Raw: txib1.Txn.Fee.Raw + txib2.Txn.Fee.Raw} a.NoError(err) err = l.appendUnvalidated(blk) a.NoError(err) @@ -731,6 +732,7 @@ return` blk.TxnCounter = blk.TxnCounter + 2 blk.Payset = append(blk.Payset, txib1, txib2) blk.TxnCommitments, err = blk.PaysetCommit() + blk.FeesCollected = basics.MicroAlgos{Raw: txib1.Txn.Fee.Raw + txib2.Txn.Fee.Raw} a.NoError(err) err = l.appendUnvalidated(blk) a.NoError(err) @@ -867,6 +869,7 @@ return` blk.TxnCounter = blk.TxnCounter + 2 blk.Payset = append(blk.Payset, txib1, txib2) blk.TxnCommitments, err = blk.PaysetCommit() + blk.FeesCollected = basics.MicroAlgos{Raw: txib1.Txn.Fee.Raw + txib2.Txn.Fee.Raw} a.NoError(err) err = l.appendUnvalidated(blk) a.NoError(err) diff --git a/ledger/catchpointfilewriter.go b/ledger/catchpointfilewriter.go index 01e78a59eb..606da98aff 100644 --- a/ledger/catchpointfilewriter.go +++ b/ledger/catchpointfilewriter.go @@ -373,7 +373,7 @@ func (cw *catchpointFileWriter) readDatabaseStep(ctx context.Context) error { if 
!cw.onlineAccountsDone { // Create the OnlineAccounts iterator JIT if cw.onlineAccountRows == nil { - rows, err := cw.tx.MakeOnlineAccountsIter(ctx) + rows, err := cw.tx.MakeOnlineAccountsIter(ctx, false) if err != nil { return err } @@ -402,7 +402,7 @@ func (cw *catchpointFileWriter) readDatabaseStep(ctx context.Context) error { if !cw.onlineRoundParamsDone { // Create the OnlineRoundParams iterator JIT if cw.onlineRoundParamsRows == nil { - rows, err := cw.tx.MakeOnlineRoundParamsIter(ctx) + rows, err := cw.tx.MakeOnlineRoundParamsIter(ctx, false) if err != nil { return err } diff --git a/ledger/catchpointfilewriter_test.go b/ledger/catchpointfilewriter_test.go index 553942ad0b..d091cd5331 100644 --- a/ledger/catchpointfilewriter_test.go +++ b/ledger/catchpointfilewriter_test.go @@ -1095,18 +1095,18 @@ assert t.Log("DB round generator", genDBRound, "validator", valDBRound) t.Log("Latest round generator", genLatestRound, "validator", valLatestRound) - genOAHash, genOARows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineAccountsIter) + genOAHash, genOARows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineAccountsIter, false) require.NoError(t, err) - valOAHash, valOARows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineAccountsIter) + valOAHash, valOARows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineAccountsIter, false) require.NoError(t, err) require.Equal(t, genOAHash, valOAHash) require.NotZero(t, genOAHash) require.Equal(t, genOARows, valOARows) require.NotZero(t, genOARows) - genORPHash, genORPRows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineRoundParamsIter) + genORPHash, genORPRows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineRoundParamsIter, false) require.NoError(t, err) - valORPHash, valORPRows, 
err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineRoundParamsIter) + valORPHash, valORPRows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineRoundParamsIter, false) require.NoError(t, err) require.Equal(t, genORPHash, valORPHash) require.NotZero(t, genORPHash) @@ -1123,13 +1123,13 @@ assert l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() - catchpointOAHash, catchpointOARows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineAccountsIter) + catchpointOAHash, catchpointOARows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineAccountsIter, false) require.NoError(t, err) require.Equal(t, genOAHash, catchpointOAHash) t.Log("catchpoint onlineaccounts hash", catchpointOAHash, "matches") require.Equal(t, genOARows, catchpointOARows) - catchpointORPHash, catchpointORPRows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineRoundParamsIter) + catchpointORPHash, catchpointORPRows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineRoundParamsIter, false) require.NoError(t, err) require.Equal(t, genORPHash, catchpointORPHash) t.Log("catchpoint onlineroundparams hash", catchpointORPHash, "matches") diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index 8adbdc8dfb..603672f8e6 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -238,13 +238,13 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic // Generate hashes of the onlineaccounts and onlineroundparams tables. 
err := ct.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error { var dbErr error - onlineAccountsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter) + onlineAccountsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter, false) if dbErr != nil { return dbErr } - onlineRoundParamsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter) + onlineRoundParamsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter, false) if dbErr != nil { return dbErr } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index de10baa0e1..7418ebc98c 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -1031,12 +1031,12 @@ func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (bala return fmt.Errorf("unable to get state proof verification data: %v", err) } - onlineAccountsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter) + onlineAccountsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter, true) if err != nil { return fmt.Errorf("unable to get online accounts verification data: %v", err) } - onlineRoundParamsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter) + onlineRoundParamsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter, true) if err != nil { return fmt.Errorf("unable to get online round params verification data: %v", err) } @@ -1058,10 +1058,11 @@ func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (bala // both at restore time (in catchpointCatchupAccessorImpl) and snapshot time (in catchpointTracker). 
func calculateVerificationHash[T crypto.Hashable]( ctx context.Context, - iterFactory func(context.Context) (trackerdb.TableIterator[T], error), + iterFactory func(context.Context, bool) (trackerdb.TableIterator[T], error), + useStaging bool, ) (crypto.Digest, uint64, error) { - rows, err := iterFactory(ctx) + rows, err := iterFactory(ctx, useStaging) if err != nil { return crypto.Digest{}, 0, err } diff --git a/ledger/eval_simple_test.go b/ledger/eval_simple_test.go index 6ec44b99f3..f518f074f0 100644 --- a/ledger/eval_simple_test.go +++ b/ledger/eval_simple_test.go @@ -33,6 +33,7 @@ import ( "github.com/algorand/go-algorand/crypto/merklesignature" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/txntest" "github.com/algorand/go-algorand/ledger/ledgercore" @@ -1238,11 +1239,11 @@ func TestRekeying(t *testing.T) { if err != nil { return err } - validatedBlock := ledgercore.MakeValidatedBlock(unfinishedBlock.UnfinishedBlock(), unfinishedBlock.UnfinishedDeltas()) + fb := unfinishedBlock.FinishBlock(committee.Seed{0x01}, basics.Address{0x01}, false) backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil) defer backlogPool.Shutdown() - _, err = l.Validate(context.Background(), validatedBlock.Block(), backlogPool) + _, err = l.Validate(context.Background(), fb, backlogPool) return err } diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 8941452d4c..219b12bc3e 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -155,6 +155,10 @@ func makeNewEmptyBlock(t *testing.T, l *Ledger, GenesisID string, initAccounts m // UpgradeVote: empty, } + if proto.Payouts.Enabled { + blk.BlockHeader.Proposer = basics.Address{0x01} // Must be set to _something_. 
+ } + blk.TxnCommitments, err = blk.PaysetCommit() require.NoError(t, err) @@ -262,6 +266,8 @@ func TestLedgerBlockHeaders(t *testing.T) { Round: l.Latest() + 1, Branch: lastBlock.Hash(), // Seed: does not matter, + Bonus: bookkeeping.NextBonus(lastBlock.BlockHeader, &proto), + Proposer: basics.Address{0x01}, // Must be set to _something_. TimeStamp: 0, GenesisID: t.Name(), RewardsState: lastBlock.NextRewardsState(l.Latest()+1, proto, poolBal.MicroAlgos, totalRewardUnits, logging.Base()), diff --git a/ledger/store/trackerdb/dualdriver/dualdriver.go b/ledger/store/trackerdb/dualdriver/dualdriver.go index ed23528675..382db683a7 100644 --- a/ledger/store/trackerdb/dualdriver/dualdriver.go +++ b/ledger/store/trackerdb/dualdriver/dualdriver.go @@ -278,13 +278,13 @@ func (*reader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { } // MakeOnlineAccountsIter implements trackerdb.Reader -func (*reader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { +func (*reader) MakeOnlineAccountsIter(context.Context, bool) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { // TODO: catchpoint return nil, nil } // MakeOnlineRoundParamsIter implements trackerdb.Reader -func (*reader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { +func (*reader) MakeOnlineRoundParamsIter(context.Context, bool) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { // TODO: catchpoint return nil, nil } diff --git a/ledger/store/trackerdb/generickv/reader.go b/ledger/store/trackerdb/generickv/reader.go index f8422792e5..bbbfadc9c9 100644 --- a/ledger/store/trackerdb/generickv/reader.go +++ b/ledger/store/trackerdb/generickv/reader.go @@ -79,13 +79,13 @@ func (r *reader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { } // MakeOnlineAccountsIter implements trackerdb.Reader -func (r *reader) MakeOnlineAccountsIter(ctx 
context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { +func (r *reader) MakeOnlineAccountsIter(context.Context, bool) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { // TODO: catchpoint panic("unimplemented") } // MakeOnlineRoundParamsIter implements trackerdb.Reader -func (r *reader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { +func (r *reader) MakeOnlineRoundParamsIter(context.Context, bool) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { // TODO: catchpoint panic("unimplemented") } diff --git a/ledger/store/trackerdb/sqlitedriver/kvsIter.go b/ledger/store/trackerdb/sqlitedriver/kvsIter.go index b85d6c48cb..05fc769d6b 100644 --- a/ledger/store/trackerdb/sqlitedriver/kvsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/kvsIter.go @@ -73,8 +73,13 @@ func (iter *tableIterator[T]) GetItem() (T, error) { } // MakeOnlineAccountsIter creates an onlineAccounts iterator. -func MakeOnlineAccountsIter(ctx context.Context, q db.Queryable) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { - rows, err := q.QueryContext(ctx, "SELECT address, updround, normalizedonlinebalance, votelastvalid, data FROM onlineaccounts ORDER BY address, updround") +func MakeOnlineAccountsIter(ctx context.Context, q db.Queryable, useStaging bool) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + table := "onlineaccounts" + if useStaging { + table = "catchpointonlineaccounts" + } + + rows, err := q.QueryContext(ctx, fmt.Sprintf("SELECT address, updround, normalizedonlinebalance, votelastvalid, data FROM %s ORDER BY address, updround", table)) if err != nil { return nil, err } @@ -131,8 +136,12 @@ func scanOnlineAccount(rows *sql.Rows) (*encoded.OnlineAccountRecordV6, error) { } // MakeOnlineRoundParamsIter creates an onlineRoundParams iterator. 
-func MakeOnlineRoundParamsIter(ctx context.Context, q db.Queryable) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { - rows, err := q.QueryContext(ctx, "SELECT rnd, data FROM onlineroundparamstail ORDER BY rnd") +func MakeOnlineRoundParamsIter(ctx context.Context, q db.Queryable, useStaging bool) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + table := "onlineroundparamstail" + if useStaging { + table = "catchpointonlineroundparamstail" + } + rows, err := q.QueryContext(ctx, fmt.Sprintf("SELECT rnd, data FROM %s ORDER BY rnd", table)) if err != nil { return nil, err } diff --git a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go index d29fdb9abc..247744e01f 100644 --- a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go +++ b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go @@ -212,13 +212,13 @@ func (r *sqlReader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) } // MakeOnlineAccountsIter implements trackerdb.Reader -func (r *sqlReader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { - return MakeOnlineAccountsIter(ctx, r.q) +func (r *sqlReader) MakeOnlineAccountsIter(ctx context.Context, useStaging bool) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + return MakeOnlineAccountsIter(ctx, r.q, useStaging) } // MakeOnlineRoundParamsIter implements trackerdb.Reader -func (r *sqlReader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { - return MakeOnlineRoundParamsIter(ctx, r.q) +func (r *sqlReader) MakeOnlineRoundParamsIter(ctx context.Context, useStaging bool) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + return MakeOnlineRoundParamsIter(ctx, r.q, useStaging) } type sqlWriter struct { diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go index 
968c313a81..66f7fd0f19 100644 --- a/ledger/store/trackerdb/store.go +++ b/ledger/store/trackerdb/store.go @@ -66,8 +66,8 @@ type Reader interface { MakeCatchpointReader() (CatchpointReader, error) MakeEncodedAccountsBatchIter() EncodedAccountsBatchIter MakeKVsIter(ctx context.Context) (KVsIter, error) - MakeOnlineAccountsIter(ctx context.Context) (TableIterator[*encoded.OnlineAccountRecordV6], error) - MakeOnlineRoundParamsIter(ctx context.Context) (TableIterator[*encoded.OnlineRoundParamsRecordV6], error) + MakeOnlineAccountsIter(ctx context.Context, useStaging bool) (TableIterator[*encoded.OnlineAccountRecordV6], error) + MakeOnlineRoundParamsIter(ctx context.Context, useStaging bool) (TableIterator[*encoded.OnlineRoundParamsRecordV6], error) } // Writer is the interface for the trackerdb write operations. diff --git a/ledger/testing/consensusRange.go b/ledger/testing/consensusRange.go index 199a462afc..882fb0f05d 100644 --- a/ledger/testing/consensusRange.go +++ b/ledger/testing/consensusRange.go @@ -61,6 +61,7 @@ var consensusByNumber = []protocol.ConsensusVersion{ protocol.ConsensusV37, protocol.ConsensusV38, // AVM v9, ECDSA pre-check, stateproofs recoverability protocol.ConsensusV39, // AVM v10, logicsig opcode budget pooling, elliptic curve ops, dynamic round times + protocol.ConsensusV40, // Consensus incentives, AVM v11, mimc protocol.ConsensusFuture, } diff --git a/ledger/testing/consensusRange_test.go b/ledger/testing/consensusRange_test.go index 9b270e3bf7..1ee2fee8f1 100644 --- a/ledger/testing/consensusRange_test.go +++ b/ledger/testing/consensusRange_test.go @@ -56,6 +56,6 @@ func TestReleasedVersion(t *testing.T) { } require.Equal(t, versionStringFromIndex(len(consensusByNumber)-1), "vFuture") - require.Equal(t, versionStringFromIndex(39), "v39") + require.Equal(t, versionStringFromIndex(40), "v40") } diff --git a/node/follower_node_test.go b/node/follower_node_test.go index cd82ea4e34..4983b68c8c 100644 --- a/node/follower_node_test.go +++ 
b/node/follower_node_test.go @@ -39,6 +39,8 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) +var testAddr = basics.Address{0x6, 0xda, 0xcc, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x21, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} + func followNodeDefaultGenesis() bookkeeping.Genesis { return bookkeeping.Genesis{ SchemaID: "go-test-follower-node-genesis", @@ -56,7 +58,13 @@ func followNodeDefaultGenesis() bookkeeping.Genesis { { Address: sinkAddr.String(), State: bookkeeping.GenesisAccountData{ - MicroAlgos: basics.MicroAlgos{Raw: 1000000}, + MicroAlgos: basics.MicroAlgos{Raw: 500000}, + }, + }, + { + Address: testAddr.String(), + State: bookkeeping.GenesisAccountData{ + MicroAlgos: basics.MicroAlgos{Raw: 500000}, }, }, }, @@ -73,20 +81,6 @@ func setupFollowNode(t *testing.T) *AlgorandFollowerNode { return node } -func remakeableFollowNode(t *testing.T, tempDir string, maxAcctLookback uint64) (*AlgorandFollowerNode, string) { - cfg := config.GetDefaultLocal() - cfg.EnableFollowMode = true - cfg.DisableNetworking = true - cfg.MaxAcctLookback = maxAcctLookback - genesis := followNodeDefaultGenesis() - if tempDir == "" { - tempDir = t.TempDir() - } - followNode, err := MakeFollower(logging.Base(), tempDir, cfg, []string{}, genesis) - require.NoError(t, err) - return followNode, tempDir -} - func TestSyncRound(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -317,7 +311,7 @@ func TestSimulate(t *testing.T) { stxn := txntest.Txn{ Type: protocol.PaymentTx, - Sender: sinkAddr, + Sender: testAddr, Receiver: poolAddr, Amount: 1, Fee: 1000, diff --git a/protocol/consensus.go b/protocol/consensus.go index 51c023ec2a..1549ae76bd 100644 --- a/protocol/consensus.go +++ b/protocol/consensus.go @@ -223,6 +223,11 @@ const ConsensusV39 = ConsensusVersion( "https://github.com/algorandfoundation/specs/tree/925a46433742afb0b51bb939354bd907fa88bf95", ) +// 
ConsensusV40 enables consensus incentives and TEAL v11 featuring the mimc opcode +const ConsensusV40 = ConsensusVersion( + "https://github.com/algorandfoundation/specs/tree/236dcc18c9c507d794813ab768e467ea42d1b4d9", +) + // ConsensusFuture is a protocol that should not appear in any production // network, but is used to test features before they are released. const ConsensusFuture = ConsensusVersion( @@ -252,7 +257,7 @@ const ConsensusVAlpha5 = ConsensusVersion("alpha5") // ConsensusCurrentVersion is the latest version and should be used // when a specific version is not provided. -const ConsensusCurrentVersion = ConsensusV39 +const ConsensusCurrentVersion = ConsensusV40 // Error is used to indicate that an unsupported protocol has been detected. type Error ConsensusVersion diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index e7408ed55a..ec6171cbfd 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -474,7 +474,7 @@ func TestNodeTxHandlerRestart(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusFuture + protoVersion := protocol.ConsensusCurrentVersion catchpointCatchupProtocol := config.Consensus[protoVersion] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) catchpointCatchupProtocol.StateProofInterval = 0 @@ -581,7 +581,7 @@ func TestReadyEndpoint(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusFuture + protoVersion := protocol.ConsensusCurrentVersion catchpointCatchupProtocol := config.Consensus[protoVersion] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) catchpointCatchupProtocol.StateProofInterval = 0 @@ -720,7 +720,7 @@ func TestNodeTxSyncRestart(t *testing.T) { a := 
require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusFuture + protoVersion := protocol.ConsensusCurrentVersion catchpointCatchupProtocol := config.Consensus[protoVersion] prevMaxTxnLife := catchpointCatchupProtocol.MaxTxnLife applyCatchpointConsensusChanges(&catchpointCatchupProtocol) diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index 69c50f5ccd..5e5e34a067 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -146,7 +146,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) { } a := require.New(fixtures.SynchronizedTest(t)) - consensusParams := config.Consensus[protocol.ConsensusFuture] + consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] applyCatchpointConsensusChanges(&consensusParams) applyCatchpointStateProofConsensusChanges(&consensusParams) consensusParams.StateProofInterval = 16 From 89109d8aaef8d61a19d2386acd0a3448dbc7ffc9 Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Fri, 20 Dec 2024 15:50:29 +0000 Subject: [PATCH 15/15] Update the Version, BuildNumber, genesistimestamp.data --- buildnumber.dat | 1 + genesistimestamp.dat | 1 + 2 files changed, 2 insertions(+) create mode 100644 buildnumber.dat create mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/buildnumber.dat @@ -0,0 +1 @@ +0 diff --git a/genesistimestamp.dat b/genesistimestamp.dat new file mode 100644 index 0000000000..c72c6a7795 --- /dev/null +++ b/genesistimestamp.dat @@ -0,0 +1 @@ +1558657885