From 88b0ca5ecd6122db90cc4d73207dd1d39df31dc8 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 11 Jun 2024 12:56:48 -0400 Subject: [PATCH 01/82] tests: fix TestVotersReloadFromDiskAfterOneStateProofCommitted (#6024) --- ledger/ledger_test.go | 32 +++++++++++++++++++++++++------- ledger/tracker.go | 3 +++ ledger/tracker_test.go | 2 +- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 127ecf85cb..2a9666688b 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -2931,14 +2931,31 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi require.NoError(t, err) } - triggerDeleteVoters(t, l, genesisInitState) - l.acctsOnline.voters.votersMu.Lock() - vtSnapshot := l.acctsOnline.voters.votersForRoundCache + // wait all pending commits to finish + l.trackers.accountsWriting.Wait() - // verifying that the tree for round 512 is still in the cache, but the tree for round 256 is evicted. - require.Contains(t, vtSnapshot, basics.Round(496)) - require.NotContains(t, vtSnapshot, basics.Round(240)) - l.acctsOnline.voters.votersMu.Unlock() + // quit the commitSyncer goroutine + l.trackers.ctxCancel() + l.trackers.ctxCancel = nil + <-l.trackers.commitSyncerClosed + l.trackers.commitSyncerClosed = nil + + // flush one final time + triggerTrackerFlush(t, l) + + var vtSnapshot map[basics.Round]*ledgercore.VotersForRound + func() { + // grab internal lock in order to access the voters tracker + // since the assert below might fail, use a nested scope to ensure the lock is released + l.acctsOnline.voters.votersMu.Lock() + defer l.acctsOnline.voters.votersMu.Unlock() + + vtSnapshot = l.acctsOnline.voters.votersForRoundCache + + // verifying that the tree for round 512 is still in the cache, but the tree for round 256 is evicted. + require.Contains(t, vtSnapshot, basics.Round(496)) + require.NotContains(t, vtSnapshot, basics.Round(240)) + }() err = l.reloadLedger() require.NoError(t, err) @@ -2953,6 +2970,7 @@ func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) { cfg := config.GetDefaultLocal() cfg.Archival = false cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10 + cfg.CatchpointInterval = 0 // no need catchpoint for this test ledgertesting.WithAndWithoutLRUCache(t, cfg, testVotersReloadFromDiskAfterOneStateProofCommitted) } diff --git a/ledger/tracker.go b/ledger/tracker.go index d99f997ebe..97098a572f 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -547,6 +547,8 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { offset := dcc.offset dbRound := dcc.oldBase + tr.log.Debugf("commitRound called for (%d-%d)", dbRound, dbRound+basics.Round(offset)) + // we can exit right away, as this is the result of mis-ordered call to committedUpTo. 
if tr.dbRound < dbRound || offset < uint64(tr.dbRound-dbRound) { tr.log.Warnf("out of order deferred commit: offset %d, dbRound %d but current tracker DB round is %d", offset, dbRound, tr.dbRound) @@ -574,6 +576,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error { dcc.offset = offset dcc.oldBase = dbRound dcc.flushTime = time.Now() + tr.log.Debugf("commitRound advancing tracker db snapshot (%d-%d)", dbRound, dbRound+basics.Round(offset)) var err error for _, lt := range tr.trackers { diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 1c8a99ff41..9c26223c39 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -259,7 +259,7 @@ func (st *commitRoundStallingTracker) commitRound(context.Context, trackerdb.Tra // 3. Set a block in prepareCommit, and initiate the commit // 4. Set a block in produceCommittingTask, add a new block and resume the commit // 5. Resume produceCommittingTask -// 6. The data race and panic happens in block queue syncher thread +// 6. The data race and panic happens in block queue syncer thread func TestTrackers_DbRoundDataRace(t *testing.T) { partitiontest.PartitionTest(t) From 985512b7138fb0223d593cd4dd5565fb3bcf6144 Mon Sep 17 00:00:00 2001 From: Henrik Soerensen Date: Tue, 11 Jun 2024 13:49:59 -0400 Subject: [PATCH 02/82] network: Allow short-lived connections to query /status endpoint when at full capacity (#6009) Co-authored-by: ohill <145173879+ohill@users.noreply.github.com> --- config/localTemplate.go | 2 +- daemon/algod/server.go | 3 ++- network/requestTracker.go | 17 +++++++++++++++-- network/wsNetwork.go | 10 +++++++++- rpcs/healthService.go | 8 +++----- rpcs/healthService_test.go | 9 +++++---- 6 files changed, 35 insertions(+), 14 deletions(-) diff --git a/config/localTemplate.go b/config/localTemplate.go index 67218f8ade..afd97d2b73 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -130,7 +130,7 @@ type Local struct { LogArchiveDir string `version[31]:""` // IncomingConnectionsLimit specifies the max number of incoming connections - // for the port configured in NetAddress. 0 means no connections allowed. Must be non-negative. + // for the gossip protocol configured in NetAddress. 0 means no connections allowed. Must be non-negative. 
// Estimating 1.5MB per incoming connection, 1.5MB*2400 = 3.6GB IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800" version[27]:"2400"` diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 57e1b443f5..bf33def658 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -40,6 +40,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" + "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/network/limitlistener" "github.com/algorand/go-algorand/node" "github.com/algorand/go-algorand/util" @@ -146,7 +147,7 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes if cfg.IsGossipServer() { var ot basics.OverflowTracker - fdRequired = ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit)) + fdRequired = ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit)+network.ReservedHealthServiceConnections) if ot.Overflowed { return errors.New("Initialize() overflowed when adding up IncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease RestConnectionsHardLimit or IncomingConnectionsLimit") } diff --git a/network/requestTracker.go b/network/requestTracker.go index c88d4e5cf0..c38cc9d2ed 100644 --- a/network/requestTracker.go +++ b/network/requestTracker.go @@ -415,7 +415,7 @@ func (rt *RequestTracker) sendBlockedConnectionResponse(conn net.Conn, requestTi } } -// pruneAcceptedConnections clean stale items form the acceptedConnections map; it's syncornized via the acceptedConnectionsMu mutex which is expected to be taken by the caller. +// pruneAcceptedConnections clean stale items form the acceptedConnections map; it's syncornized via the hostRequestsMu mutex which is expected to be taken by the caller. // in case the created is 0, the pruning is disabled for this connection. The HTTP handlers would call Close to have this entry cleared out. func (rt *RequestTracker) pruneAcceptedConnections(pruneStartDate time.Time) { localAddrToRemove := []net.Addr{} @@ -494,6 +494,20 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http. localAddr := request.Context().Value(http.LocalAddrContextKey).(net.Addr) rt.hostRequestsMu.Lock() + // Check if the number of connections exceeds the limit + acceptedConnections := len(rt.acceptedConnections) + + if acceptedConnections > rt.config.IncomingConnectionsLimit && request.URL.Path != HealthServiceStatusPath { + rt.hostRequestsMu.Unlock() + // If the limit is exceeded, reject the connection + networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "rt_incoming_connection_limit"}) + rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent, + telemetryspec.ConnectPeerFailEventDetails{ + Address: localAddr.String(), Incoming: true, Reason: "RequestTracker Connection Limit"}) + response.WriteHeader(http.StatusServiceUnavailable) + return + } + trackedRequest := rt.acceptedConnections[localAddr] if trackedRequest != nil { // update the original tracker request so that it won't get pruned. @@ -550,7 +564,6 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http. // send the request downstream; in our case, it would go to the router. 
rt.downstreamHandler.ServeHTTP(response, request) - } // remoteHostProxyFix updates the origin IP address in the trackedRequest diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 9c203d8839..a35375f517 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -107,6 +107,11 @@ const testingPublicAddress = "testing" // Maximum number of bytes to read from a header when trying to establish a websocket connection. const wsMaxHeaderBytes = 4096 +// ReservedHealthServiceConnections reserves additional connections for the health check endpoint. This reserves +// capacity to query the health check service when a node is serving maximum peers. The file descriptors will be +// used from the ReservedFDs pool, as this pool is meant for short-lived usage (dns queries, disk i/o, etc.) +const ReservedHealthServiceConnections = 10 + var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections) var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections) @@ -151,6 +156,9 @@ const peerShutdownDisconnectionAckDuration = 50 * time.Millisecond // Contains {genesisID} param to be handled by gorilla/mux const GossipNetworkPath = "/v1/{genesisID}/gossip" +// HealthServiceStatusPath is the path to register HealthService as a handler for when using gorilla/mux +const HealthServiceStatusPath = "/status" + // NodeInfo helps the network get information about the node it is running on type NodeInfo interface { // IsParticipating returns true if this node has stake and may vote on blocks or propose blocks. @@ -684,7 +692,7 @@ func (wn *WebsocketNetwork) Start() { } // wrap the original listener with a limited connection listener listener = limitlistener.RejectingLimitListener( - listener, uint64(wn.config.IncomingConnectionsLimit), wn.log) + listener, uint64(wn.config.IncomingConnectionsLimit)+ReservedHealthServiceConnections, wn.log) // wrap the limited connection listener with a requests tracker listener wn.listener = wn.requestsTracker.Listener(listener) wn.log.Debugf("listening on %s", wn.listener.Addr().String()) diff --git a/rpcs/healthService.go b/rpcs/healthService.go index d3121d8bd2..1c852c4e8e 100644 --- a/rpcs/healthService.go +++ b/rpcs/healthService.go @@ -17,12 +17,10 @@ package rpcs import ( - "github.com/algorand/go-algorand/network" "net/http" -) -// HealthServiceStatusPath is the path to register HealthService as a handler for when using gorilla/mux -const HealthServiceStatusPath = "/status" + "github.com/algorand/go-algorand/network" +) // HealthService is a service that provides health information endpoints for the node type HealthService struct{} @@ -31,7 +29,7 @@ type HealthService struct{} func MakeHealthService(net network.GossipNode) HealthService { service := HealthService{} - net.RegisterHTTPHandler(HealthServiceStatusPath, service) + net.RegisterHTTPHandler(network.HealthServiceStatusPath, service) return service } diff --git a/rpcs/healthService_test.go b/rpcs/healthService_test.go index 9d0bb215c2..f2846c322d 100644 --- a/rpcs/healthService_test.go +++ b/rpcs/healthService_test.go @@ -17,13 +17,14 @@ package rpcs import ( - "github.com/algorand/go-algorand/network" - "github.com/algorand/go-algorand/test/partitiontest" - "github.com/stretchr/testify/require" "io" "net/http" "path" "testing" + + "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" ) func TestHealthService_ServeHTTP(t *testing.T) { @@ -40,7 
+41,7 @@ func TestHealthService_ServeHTTP(t *testing.T) { client := http.Client{} - parsedURL.Path = path.Join(parsedURL.Path, HealthServiceStatusPath) + parsedURL.Path = path.Join(parsedURL.Path, network.HealthServiceStatusPath) response, err := client.Get(parsedURL.String()) require.NoError(t, err) From a5aac4228c13e43c118c99e94062bf23d1fb9846 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 11 Jun 2024 15:22:26 -0400 Subject: [PATCH 03/82] tests: debug output on LibGoalFixture failure (#6026) --- test/framework/fixtures/libgoalFixture.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index f1a13111a0..d5140199da 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -144,20 +144,23 @@ func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err e return } - defer func() { + debugLog := func() { f.t.Logf("Node at %s has terminated with an error: %v. Dumping logs...", nc.GetDataDir(), err) f.dumpLogs(filepath.Join(nc.GetDataDir(), "node.log")) - }() + } exitError, ok := err.(*exec.ExitError) if !ok { - require.NoError(f.t, err, "Node at %s has terminated with an error", nc.GetDataDir()) + debugLog() + require.NoError(f.t, err) return } ws := exitError.Sys().(syscall.WaitStatus) exitCode := ws.ExitStatus() - require.NoError(f.t, err, "Node at %s has terminated with error code %d", nc.GetDataDir(), exitCode) + f.t.Logf("Node at %s has terminated with error code %d (%v)", nc.GetDataDir(), exitCode, *exitError) + debugLog() + require.NoError(f.t, err) } func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) { From 4caf2e4ccf987ebc1441ca72772efd50ada9d950 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Tue, 11 Jun 2024 17:56:04 -0400 Subject: [PATCH 04/82] AVM: Expose relevant incentive constants (#6025) --- data/transactions/logic/assembler_test.go | 30 ++++++++--------------- data/transactions/logic/eval.go | 10 ++++++++ data/transactions/logic/eval_test.go | 16 ++++++++++-- data/transactions/logic/fields.go | 26 ++++++++++++++++++++ data/transactions/logic/fields_string.go | 11 ++++++--- 5 files changed, 68 insertions(+), 25 deletions(-) diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 79b37186df..32101dbcbc 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -633,28 +633,13 @@ func assembleWithTrace(text string, ver uint64) (*OpStream, error) { return &ops, err } -func lines(s string, num int) (bool, string) { - if num < 1 { - return true, "" - } - found := 0 - for i := 0; i < len(s); i++ { - if s[i] == '\n' { - found++ - if found == num { - return true, s[0 : i+1] - } - } - } - return false, s -} - func summarize(trace *strings.Builder) string { - truncated, msg := lines(trace.String(), 50) - if !truncated { - return msg + all := trace.String() + if strings.Count(all, "\n") < 50 { + return all } - return msg + "(trace truncated)\n" + lines := strings.Split(all, "\n") + return strings.Join(lines[:20], "\n") + "\n(some trace elided)\n" + strings.Join(lines[len(lines)-20:], "\n") } func testProg(t testing.TB, source string, ver uint64, expected ...expect) *OpStream { @@ -1720,6 +1705,11 @@ pushint 1 block BlkFeesCollected pushint 1 block BlkBonus +global PayoutsEnabled +global PayoutsGoOnlineFee +global PayoutsPercent 
+global PayoutsMinBalance +global PayoutsMaxBalance `, AssemblerMaxVersion) for _, names := range [][]string{GlobalFieldNames[:], TxnFieldNames[:], blockFieldNames[:]} { for _, f := range names { diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 36ba3bb788..3201877ed9 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -3737,6 +3737,16 @@ func (cx *EvalContext) globalFieldToValue(fs globalFieldSpec) (sv stackValue, er case GenesisHash: gh := cx.SigLedger.GenesisHash() sv.Bytes = gh[:] + case PayoutsEnabled: + sv.Uint = boolToUint(cx.Proto.Payouts.Enabled) + case PayoutsGoOnlineFee: + sv.Uint = cx.Proto.Payouts.GoOnlineFee + case PayoutsPercent: + sv.Uint = cx.Proto.Payouts.Percent + case PayoutsMinBalance: + sv.Uint = cx.Proto.Payouts.MinBalance + case PayoutsMaxBalance: + sv.Uint = cx.Proto.Payouts.MaxBalance default: return sv, fmt.Errorf("invalid global field %s", fs.field) } diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 3a81a9cbef..c8f7a8bc5f 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -125,6 +125,14 @@ func makeTestProto(opts ...protoOpt) *config.ConsensusParams { MaxBoxSize: 1000, BytesPerBoxReference: 100, + + Payouts: config.ProposerPayoutRules{ + Enabled: true, + GoOnlineFee: 3, + Percent: 4, + MinBalance: 5, + MaxBalance: 6, + }, } for _, opt := range opts { if opt != nil { // so some callsites can take one arg and pass it in @@ -1234,7 +1242,11 @@ global GenesisHash; len; int 32; ==; && ` const globalV11TestProgram = globalV10TestProgram + ` -// No new globals in v11 +global PayoutsEnabled; assert +global PayoutsGoOnlineFee; int 3; ==; assert +global PayoutsPercent; int 4; ==; assert +global PayoutsMinBalance; int 5; ==; assert +global PayoutsMaxBalance; int 6; ==; assert ` func TestAllGlobals(t *testing.T) { @@ -1258,7 +1270,7 @@ func TestAllGlobals(t *testing.T) { 8: {CallerApplicationAddress, globalV8TestProgram}, 9: {CallerApplicationAddress, globalV9TestProgram}, 10: {GenesisHash, globalV10TestProgram}, - 11: {GenesisHash, globalV11TestProgram}, + 11: {PayoutsMaxBalance, globalV11TestProgram}, } // tests keys are versions so they must be in a range 1..AssemblerMaxVersion plus zero version require.LessOrEqual(t, len(tests), AssemblerMaxVersion+1) diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index 367967089b..99cc08bad2 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -538,6 +538,21 @@ const ( // GenesisHash is the genesis hash for the network GenesisHash + // PayoutsEnabled is whether block proposal payouts are enabled + PayoutsEnabled + + // PayoutsGoOnlineFee is the fee required in a keyreg transaction to make an account incentive eligible + PayoutsGoOnlineFee + + // PayoutsPercent is the percentage of transaction fees in a block that can be paid to the block proposer. + PayoutsPercent + + // PayoutsMinBalance is the minimum algo balance an account must have to receive block payouts (in the agreement round). + PayoutsMinBalance + + // PayoutsMaxBalance is the maximum algo balance an account can have to receive block payouts (in the agreement round). 
+ PayoutsMaxBalance + invalidGlobalField // compile-time constant for number of fields ) @@ -603,6 +618,17 @@ var globalFieldSpecs = [...]globalFieldSpec{ {AssetOptInMinBalance, StackUint64, modeAny, 10, "The additional minimum balance required to opt-in to an asset."}, {GenesisHash, StackBytes32, modeAny, 10, "The Genesis Hash for the network."}, + + {PayoutsEnabled, StackBoolean, modeAny, incentiveVersion, + "Whether block proposal payouts are enabled."}, + {PayoutsGoOnlineFee, StackUint64, modeAny, incentiveVersion, + "The fee required in a keyreg transaction to make an account incentive eligible."}, + {PayoutsPercent, StackUint64, modeAny, incentiveVersion, + "The percentage of transaction fees in a block that can be paid to the block proposer."}, + {PayoutsMinBalance, StackUint64, modeAny, incentiveVersion, + "The minimum algo balance an account must have in the agreement round to receive block payouts in the proposal round."}, + {PayoutsMaxBalance, StackUint64, modeAny, incentiveVersion, + "The maximum algo balance an account can have in the agreement round to receive block payouts in the proposal round."}, } func globalFieldSpecByField(f GlobalField) (globalFieldSpec, bool) { diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go index d925e69ee1..3463da269c 100644 --- a/data/transactions/logic/fields_string.go +++ b/data/transactions/logic/fields_string.go @@ -111,12 +111,17 @@ func _() { _ = x[AssetCreateMinBalance-15] _ = x[AssetOptInMinBalance-16] _ = x[GenesisHash-17] - _ = x[invalidGlobalField-18] + _ = x[PayoutsEnabled-18] + _ = x[PayoutsGoOnlineFee-19] + _ = x[PayoutsPercent-20] + _ = x[PayoutsMinBalance-21] + _ = x[PayoutsMaxBalance-22] + _ = x[invalidGlobalField-23] } -const _GlobalField_name = "MinTxnFeeMinBalanceMaxTxnLifeZeroAddressGroupSizeLogicSigVersionRoundLatestTimestampCurrentApplicationIDCreatorAddressCurrentApplicationAddressGroupIDOpcodeBudgetCallerApplicationIDCallerApplicationAddressAssetCreateMinBalanceAssetOptInMinBalanceGenesisHashinvalidGlobalField" +const _GlobalField_name = "MinTxnFeeMinBalanceMaxTxnLifeZeroAddressGroupSizeLogicSigVersionRoundLatestTimestampCurrentApplicationIDCreatorAddressCurrentApplicationAddressGroupIDOpcodeBudgetCallerApplicationIDCallerApplicationAddressAssetCreateMinBalanceAssetOptInMinBalanceGenesisHashPayoutsEnabledPayoutsGoOnlineFeePayoutsPercentPayoutsMinBalancePayoutsMaxBalanceinvalidGlobalField" -var _GlobalField_index = [...]uint16{0, 9, 19, 29, 40, 49, 64, 69, 84, 104, 118, 143, 150, 162, 181, 205, 226, 246, 257, 275} +var _GlobalField_index = [...]uint16{0, 9, 19, 29, 40, 49, 64, 69, 84, 104, 118, 143, 150, 162, 181, 205, 226, 246, 257, 271, 289, 303, 320, 337, 355} func (i GlobalField) String() string { if i >= GlobalField(len(_GlobalField_index)-1) { From 7f8939941ae5882b80e21b49be471a2f89398dca Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 12 Jun 2024 12:46:14 -0400 Subject: [PATCH 05/82] libgoal: output debug info on failure to stderr (#6027) --- test/framework/fixtures/libgoalFixture.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index d5140199da..40269aa111 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -145,7 +145,7 @@ func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err e } debugLog := func() { - 
f.t.Logf("Node at %s has terminated with an error: %v. Dumping logs...", nc.GetDataDir(), err) + fmt.Fprintf(os.Stderr, "Node at %s has terminated with an error: %v. Dumping logs...", nc.GetDataDir(), err) f.dumpLogs(filepath.Join(nc.GetDataDir(), "node.log")) } @@ -158,7 +158,7 @@ func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err e ws := exitError.Sys().(syscall.WaitStatus) exitCode := ws.ExitStatus() - f.t.Logf("Node at %s has terminated with error code %d (%v)", nc.GetDataDir(), exitCode, *exitError) + fmt.Fprintf(os.Stderr, "Node at %s has terminated with error code %d (%v)", nc.GetDataDir(), exitCode, *exitError) debugLog() require.NoError(f.t, err) } @@ -369,18 +369,19 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) { func (f *LibGoalFixture) dumpLogs(filePath string) { file, err := os.Open(filePath) if err != nil { - f.t.Logf("could not open %s", filePath) + fmt.Fprintf(os.Stderr, "could not open %s", filePath) return } defer file.Close() - f.t.Log("=================================\n") + fmt.Fprintf(os.Stderr, "=================================\n") parts := strings.Split(filePath, "/") - f.t.Logf("%s/%s:", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log + fmt.Fprintf(os.Stderr, "%s/%s:", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log scanner := bufio.NewScanner(file) for scanner.Scan() { - f.t.Logf(scanner.Text()) + fmt.Fprint(os.Stderr, scanner.Text()) } + fmt.Fprintln(os.Stderr) } // intercept baseFixture.failOnError so we can clean up any algods that are still alive From 9191b1befc7cc9a30402d128f562369baa60d066 Mon Sep 17 00:00:00 2001 From: ohill <145173879+ohill@users.noreply.github.com> Date: Wed, 12 Jun 2024 13:08:09 -0400 Subject: [PATCH 06/82] config: Add GoMemLimit config option and use with 10-node test (#5975) --- cmd/algod/main.go | 6 + config/localTemplate.go | 6 +- config/local_defaults.go | 3 +- installer/config.json.example | 3 +- .../catchup/stateproofsCatchup_test.go | 6 + .../partitionRecovery_test.go | 1 + test/testdata/configs/config-v34.json | 139 ++++++++++++++++++ 7 files changed, 161 insertions(+), 3 deletions(-) create mode 100644 test/testdata/configs/config-v34.json diff --git a/cmd/algod/main.go b/cmd/algod/main.go index 603f543b89..0f93ed447f 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -22,6 +22,7 @@ import ( "math/rand" "os" "path/filepath" + "runtime/debug" "strconv" "strings" "time" @@ -173,6 +174,11 @@ func run() int { log.Fatalf("Cannot load config: %v", err) } + // set soft memory limit, if configured + if cfg.GoMemLimit > 0 { + debug.SetMemoryLimit(int64(cfg.GoMemLimit)) + } + _, err = cfg.ValidateDNSBootstrapArray(genesis.Network) if err != nil { // log is not setup yet, this will log to stderr diff --git a/config/localTemplate.go b/config/localTemplate.go index afd97d2b73..309ffcb798 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -43,7 +43,7 @@ type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. This field tag must be updated any time we add a new version. 
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33"` + Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33" version[34]:"34"` // Archival nodes retain a full copy of the block history. Non-Archival nodes will delete old blocks and only retain what's need to properly validate blockchain messages (the precise number of recent blocks depends on the consensus parameters. Currently the last 1321 blocks are required). This means that non-Archival nodes require significantly less storage than Archival nodes. If setting this to true for the first time, the existing ledger may need to be deleted to get the historical values stored as the setting only affects current blocks forward. To do this, shutdown the node and delete all .sqlite files within the data/testnet-version directory, except the crash.sqlite file. Restart the node and wait for the node to sync. Archival bool `version[0]:"false"` @@ -610,6 +610,10 @@ type Local struct { // DisableAPIAuth turns off authentication for public (non-admin) API endpoints. DisableAPIAuth bool `version[30]:"false"` + + // GoMemLimit provides the Go runtime with a soft memory limit. The default behavior is no limit, + // unless the GOMEMLIMIT environment variable is set. 
+ GoMemLimit uint64 `version[34]:"0"` } // DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers diff --git a/config/local_defaults.go b/config/local_defaults.go index d2a73d4c6f..f5f02082aa 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -20,7 +20,7 @@ package config var defaultLocal = Local{ - Version: 33, + Version: 34, AccountUpdatesStatsInterval: 5000000000, AccountsRebuildSynchronousMode: 1, AgreementIncomingBundlesQueueLength: 15, @@ -89,6 +89,7 @@ var defaultLocal = Local{ FallbackDNSResolverAddress: "", ForceFetchTransactions: false, ForceRelayMessages: false, + GoMemLimit: 0, GossipFanout: 4, HeartbeatUpdateInterval: 600, HotDataDir: "", diff --git a/installer/config.json.example b/installer/config.json.example index d9188ef748..4a9714115f 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -1,5 +1,5 @@ { - "Version": 33, + "Version": 34, "AccountUpdatesStatsInterval": 5000000000, "AccountsRebuildSynchronousMode": 1, "AgreementIncomingBundlesQueueLength": 15, @@ -68,6 +68,7 @@ "FallbackDNSResolverAddress": "", "ForceFetchTransactions": false, "ForceRelayMessages": false, + "GoMemLimit": 0, "GossipFanout": 4, "HeartbeatUpdateInterval": 600, "HotDataDir": "", diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index 9de6bf385c..5dcbc11452 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -221,6 +221,12 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { var fixture fixtures.RestClientFixture fixture.SetConsensus(configurableConsensus) fixture.SetupNoStart(t, filepath.Join("nettemplates", "ThreeNodesWithRichAcct.json")) + for _, nodeDir := range fixture.NodeDataDirs() { + cfg, err := config.LoadConfigFromDisk(nodeDir) + a.NoError(err) + cfg.GoMemLimit = 4 * 1024 * 1024 * 1024 // 4GB + cfg.SaveToDisk(nodeDir) + } primaryNode, primaryNodeRestClient, primaryEC := startCatchpointGeneratingNode(a, &fixture, "Primary") defer primaryEC.Print() diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go index 284146864d..21ce3bdf0d 100644 --- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go +++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go @@ -251,6 +251,7 @@ func TestPartitionHalfOffline(t *testing.T) { a.NoError(err) // adjust the refresh interval for one hour, so that we won't be reloading the participation key during this test. 
cfg.ParticipationKeysRefreshInterval = time.Hour + cfg.GoMemLimit = 1 * 1024 * 1024 * 1024 // 1GB cfg.SaveToDisk(nodeDir) } fixture.Start() diff --git a/test/testdata/configs/config-v34.json b/test/testdata/configs/config-v34.json new file mode 100644 index 0000000000..4a9714115f --- /dev/null +++ b/test/testdata/configs/config-v34.json @@ -0,0 +1,139 @@ +{ + "Version": 34, + "AccountUpdatesStatsInterval": 5000000000, + "AccountsRebuildSynchronousMode": 1, + "AgreementIncomingBundlesQueueLength": 15, + "AgreementIncomingProposalsQueueLength": 50, + "AgreementIncomingVotesQueueLength": 20000, + "AnnounceParticipationKey": true, + "Archival": false, + "BaseLoggerDebugLevel": 4, + "BlockDBDir": "", + "BlockServiceCustomFallbackEndpoints": "", + "BlockServiceMemCap": 500000000, + "BroadcastConnectionsLimit": -1, + "CadaverDirectory": "", + "CadaverSizeTarget": 0, + "CatchpointDir": "", + "CatchpointFileHistoryLength": 365, + "CatchpointInterval": 10000, + "CatchpointTracking": 0, + "CatchupBlockDownloadRetryAttempts": 1000, + "CatchupBlockValidateMode": 0, + "CatchupFailurePeerRefreshRate": 10, + "CatchupGossipBlockFetchTimeoutSec": 4, + "CatchupHTTPBlockFetchTimeoutSec": 4, + "CatchupLedgerDownloadRetryAttempts": 50, + "CatchupParallelBlocks": 16, + "ColdDataDir": "", + "ConnectionsRateLimitingCount": 60, + "ConnectionsRateLimitingWindowSeconds": 1, + "CrashDBDir": "", + "DNSBootstrapID": ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", + "DNSSecurityFlags": 1, + "DeadlockDetection": 0, + "DeadlockDetectionThreshold": 30, + "DisableAPIAuth": false, + "DisableLedgerLRUCache": false, + "DisableLocalhostConnectionRateLimit": true, + "DisableNetworking": false, + "DisableOutgoingConnectionThrottling": false, + "EnableAccountUpdatesStats": false, + "EnableAgreementReporting": false, + "EnableAgreementTimeMetrics": false, + "EnableAssembleStats": false, + "EnableBlockService": false, + "EnableDeveloperAPI": false, + "EnableExperimentalAPI": false, + "EnableFollowMode": false, + "EnableGossipBlockService": true, + "EnableGossipService": true, + "EnableIncomingMessageFilter": false, + "EnableLedgerService": false, + "EnableMetricReporting": false, + "EnableOutgoingNetworkMessageFiltering": true, + "EnableP2P": false, + "EnablePingHandler": true, + "EnableProcessBlockStats": false, + "EnableProfiler": false, + "EnableRequestLogger": false, + "EnableRuntimeMetrics": false, + "EnableTopAccountsReporting": false, + "EnableTxBacklogAppRateLimiting": true, + "EnableTxBacklogRateLimiting": true, + "EnableTxnEvalTracer": false, + "EnableUsageLog": false, + "EnableVerbosedTransactionSyncLogging": false, + "EndpointAddress": "127.0.0.1:0", + "FallbackDNSResolverAddress": "", + "ForceFetchTransactions": false, + "ForceRelayMessages": false, + "GoMemLimit": 0, + "GossipFanout": 4, + "HeartbeatUpdateInterval": 600, + "HotDataDir": "", + "IncomingConnectionsLimit": 2400, + "IncomingMessageFilterBucketCount": 5, + "IncomingMessageFilterBucketSize": 512, + "LedgerSynchronousMode": 2, + "LogArchiveDir": "", + "LogArchiveMaxAge": "", + "LogArchiveName": "node.archive.log", + "LogFileDir": "", + "LogSizeLimit": 1073741824, + "MaxAPIBoxPerApplication": 100000, + "MaxAPIResourcesPerAccount": 100000, + "MaxAcctLookback": 4, + "MaxBlockHistoryLookback": 0, + "MaxCatchpointDownloadDuration": 43200000000000, + "MaxConnectionsPerIP": 15, + "MinCatchpointFileDownloadBytesPerSecond": 20480, + "NetAddress": "", + "NetworkMessageTraceServer": "", + "NetworkProtocolVersion": "", + 
"NodeExporterListenAddress": ":9100", + "NodeExporterPath": "./node_exporter", + "OptimizeAccountsDatabaseOnStartup": false, + "OutgoingMessageFilterBucketCount": 3, + "OutgoingMessageFilterBucketSize": 128, + "P2PPersistPeerID": false, + "P2PPrivateKeyLocation": "", + "ParticipationKeysRefreshInterval": 60000000000, + "PeerConnectionsUpdateInterval": 3600, + "PeerPingPeriodSeconds": 0, + "PriorityPeers": {}, + "ProposalAssemblyTime": 500000000, + "PublicAddress": "", + "ReconnectTime": 60000000000, + "ReservedFDs": 256, + "RestConnectionsHardLimit": 2048, + "RestConnectionsSoftLimit": 1024, + "RestReadTimeoutSeconds": 15, + "RestWriteTimeoutSeconds": 120, + "RunHosted": false, + "StateproofDir": "", + "StorageEngine": "sqlite", + "SuggestedFeeBlockHistory": 3, + "SuggestedFeeSlidingWindowSize": 50, + "TLSCertFile": "", + "TLSKeyFile": "", + "TelemetryToLog": true, + "TrackerDBDir": "", + "TransactionSyncDataExchangeRate": 0, + "TransactionSyncSignificantMessageThreshold": 0, + "TxBacklogAppTxPerSecondRate": 100, + "TxBacklogAppTxRateLimiterMaxSize": 1048576, + "TxBacklogRateLimitingCongestionPct": 50, + "TxBacklogReservedCapacityPerPeer": 20, + "TxBacklogServiceRateWindowSeconds": 10, + "TxBacklogSize": 26000, + "TxIncomingFilterMaxSize": 500000, + "TxIncomingFilteringFlags": 1, + "TxPoolExponentialIncreaseFactor": 2, + "TxPoolSize": 75000, + "TxSyncIntervalSeconds": 60, + "TxSyncServeResponseSize": 1000000, + "TxSyncTimeoutSeconds": 30, + "UseXForwardedForAddressField": "", + "VerifiedTranscationsCacheSize": 150000 +} From dbe60eeec8cab9a2e4cb29af3ab1290e8ad151be Mon Sep 17 00:00:00 2001 From: John Lee Date: Wed, 12 Jun 2024 13:08:40 -0400 Subject: [PATCH 07/82] Legacy Docker: pin ubuntu version (#6028) --- docker/releases/build_releases.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/releases/build_releases.sh b/docker/releases/build_releases.sh index 1862c7ae99..11376eba48 100755 --- a/docker/releases/build_releases.sh +++ b/docker/releases/build_releases.sh @@ -76,7 +76,7 @@ case $NETWORK in esac IFS='' read -r -d '' DOCKERFILE < Date: Wed, 12 Jun 2024 13:22:45 -0400 Subject: [PATCH 08/82] config: Add warning if EndpointAddress and NetAddress ports are equal (#6006) Co-authored-by: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> --- daemon/algod/server.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/daemon/algod/server.go b/daemon/algod/server.go index bf33def658..12cbf3e968 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -24,6 +24,7 @@ import ( "net" "net/http" _ "net/http/pprof" // net/http/pprof is for registering the pprof URLs with the web server, so http://localhost:8080/debug/pprof/ works. 
+ "net/url" "os" "os/signal" "path/filepath" @@ -269,6 +270,19 @@ func makeListener(addr string) (net.Listener, error) { return net.Listen("tcp", addr) } +// helper to get port from an address +func getPortFromAddress(addr string) (string, error) { + u, err := url.Parse(addr) + if err == nil && u.Scheme != "" { + addr = u.Host + } + _, port, err := net.SplitHostPort(addr) + if err != nil { + return "", fmt.Errorf("Error parsing address: %v", err) + } + return port, nil +} + // Start starts a Node instance and its network services func (s *Server) Start() { s.log.Info("Trying to start an Algorand node") @@ -359,6 +373,20 @@ func (s *Server) Start() { fmt.Printf("netlistenfile error: %v\n", err) os.Exit(1) } + + addrPort, err := getPortFromAddress(addr) + if err != nil { + s.log.Warnf("Error getting port from EndpointAddress: %v", err) + } + + listenAddrPort, err := getPortFromAddress(listenAddr) + if err != nil { + s.log.Warnf("Error getting port from NetAddress: %v", err) + } + + if addrPort == listenAddrPort { + s.log.Warnf("EndpointAddress port %v matches NetAddress port %v. This may lead to unexpected results when accessing endpoints.", addrPort, listenAddrPort) + } } errChan := make(chan error, 1) From 95c9c1837cf466c2c9f423864c9901dc26e0e7c1 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 13 Jun 2024 15:52:13 -0400 Subject: [PATCH 09/82] tests: preserve logs on LibGoalFixture failure (#6030) --- agreement/service.go | 3 ++ catchup/catchpointService.go | 3 ++ catchup/service.go | 3 ++ data/txHandler.go | 3 ++ netdeploy/network.go | 15 +++++--- network/wsNetwork.go | 3 ++ node/impls.go | 2 + node/node.go | 7 ++++ nodecontrol/algodControl.go | 9 +++++ rpcs/blockService.go | 3 ++ rpcs/ledgerService.go | 3 ++ rpcs/txSyncer.go | 3 ++ stateproof/worker.go | 3 ++ .../features/catchup/basicCatchup_test.go | 17 +++++++++ test/framework/fixtures/libgoalFixture.go | 37 ++++++++++++------- 15 files changed, 96 insertions(+), 18 deletions(-) diff --git a/agreement/service.go b/agreement/service.go index d8ec84b92f..8e38797492 100644 --- a/agreement/service.go +++ b/agreement/service.go @@ -173,6 +173,9 @@ func (s *Service) Start() { // // This method returns after all resources have been cleaned up. func (s *Service) Shutdown() { + s.log.Debug("agreement service is stopping") + defer s.log.Debug("agreement service has stopped") + close(s.quit) s.quitFn() <-s.done diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index 974f5964f1..efa34436a3 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -185,6 +185,9 @@ func (cs *CatchpointCatchupService) Abort() { // Stop stops the catchpoint catchup service - unlike Abort, this is not intended to abort the process but rather to allow // cleanup of in-memory resources for the purpose of clean shutdown. func (cs *CatchpointCatchupService) Stop() { + cs.log.Debug("catchpoint service is stopping") + defer cs.log.Debug("catchpoint service has stopped") + // signal the running goroutine that we want to stop cs.cancelCtxFunc() // wait for the running goroutine to terminate. 
diff --git a/catchup/service.go b/catchup/service.go index 5c6609b236..b1720c4fce 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -161,6 +161,9 @@ func (s *Service) Start() { // Stop informs the catchup service that it should stop, and waits for it to stop (when periodicSync() exits) func (s *Service) Stop() { + s.log.Debug("catchup service is stopping") + defer s.log.Debug("catchup service has stopped") + s.cancel() s.workers.Wait() if s.initialSyncNotified.CompareAndSwap(0, 1) { diff --git a/data/txHandler.go b/data/txHandler.go index 7851889378..871e71cbc3 100644 --- a/data/txHandler.go +++ b/data/txHandler.go @@ -254,6 +254,9 @@ func (handler *TxHandler) Start() { // Stop suspends the processing of incoming messages at the transaction handler func (handler *TxHandler) Stop() { + logging.Base().Debug("transaction handler is stopping") + defer logging.Base().Debug("transaction handler is stopping") + handler.ctxCancel() if handler.erl != nil { handler.erl.Stop() diff --git a/netdeploy/network.go b/netdeploy/network.go index b26c8ef5bc..6f31673a54 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -415,13 +415,14 @@ func (n Network) StartNode(binDir, nodeDir string, redirectOutput bool) (err err // Stop the network, ensuring primary relay stops first // No return code - we try to kill them if we can (if we read valid PID file) -func (n Network) Stop(binDir string) { - c := make(chan struct{}, len(n.cfg.RelayDirs)+len(n.nodeDirs)) +func (n Network) Stop(binDir string) (err error) { + c := make(chan error, len(n.cfg.RelayDirs)+len(n.nodeDirs)) stopNodeContoller := func(nc *nodecontrol.NodeController) { + var stopErr error defer func() { - c <- struct{}{} + c <- stopErr }() - nc.FullStop() + stopErr = nc.FullStop() } for _, relayDir := range n.cfg.RelayDirs { relayDataDir := n.getNodeFullPath(relayDir) @@ -439,9 +440,13 @@ func (n Network) Stop(binDir string) { } // wait until we finish stopping all the node controllers. for i := cap(c); i > 0; i-- { - <-c + stopErr := <-c + if stopErr != nil { + err = stopErr + } } close(c) + return err } // NetworkNodeStatus represents the result from checking the status of a particular node instance diff --git a/network/wsNetwork.go b/network/wsNetwork.go index a35375f517..6fc97def5e 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -793,6 +793,9 @@ func (wn *WebsocketNetwork) innerStop() { // Stop closes network connections and stops threads. // Stop blocks until all activity on this node is done. func (wn *WebsocketNetwork) Stop() { + wn.log.Debug("network is stopping") + defer wn.log.Debug("network has stopped") + wn.handler.ClearHandlers([]Tag{}) // if we have a working ticker, just stop it and clear it out. The access to this variable is safe since the Start()/Stop() are synced by the diff --git a/node/impls.go b/node/impls.go index 826f0399c4..3063e09659 100644 --- a/node/impls.go +++ b/node/impls.go @@ -44,6 +44,8 @@ func (i blockAuthenticatorImpl) Authenticate(block *bookkeeping.Block, cert *agr } func (i blockAuthenticatorImpl) Quit() { + logging.Base().Debug("block authenticator is stopping") + defer logging.Base().Debug("block authenticator has stopped") i.AsyncVoteVerifier.Quit() } diff --git a/node/node.go b/node/node.go index acf204facf..d1c6cc4b82 100644 --- a/node/node.go +++ b/node/node.go @@ -396,6 +396,8 @@ func (node *AlgorandFullNode) startMonitoringRoutines() { // waitMonitoringRoutines waits for all the monitoring routines to exit. 
Note that // the node.mu must not be taken, and that the node's context should have been canceled. func (node *AlgorandFullNode) waitMonitoringRoutines() { + node.log.Debug("waiting on node monitoring routines to exit") + defer node.log.Debug("done waiting on node monitoring routines to exit") node.monitoringRoutinesWaitGroup.Wait() } @@ -409,6 +411,9 @@ func (node *AlgorandFullNode) ListeningAddress() (string, bool) { // Stop stops running the node. Once a node is closed, it can never start again. func (node *AlgorandFullNode) Stop() { + node.log.Debug("algorand node is stopping") + defer node.log.Debug("algorand node has stopped") + node.mu.Lock() defer func() { node.mu.Unlock() @@ -431,9 +436,11 @@ func (node *AlgorandFullNode) Stop() { node.ledgerService.Stop() } node.catchupBlockAuth.Quit() + node.log.Debug("crypto worker pools are stopping") node.highPriorityCryptoVerificationPool.Shutdown() node.lowPriorityCryptoVerificationPool.Shutdown() node.cryptoPool.Shutdown() + node.log.Debug("crypto worker pools have stopped") node.cancelCtx() } diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go index 74137a72eb..a6bcb5fb82 100644 --- a/nodecontrol/algodControl.go +++ b/nodecontrol/algodControl.go @@ -49,6 +49,14 @@ func (e *NodeNotRunningError) Error() string { return fmt.Sprintf("no running node in directory '%s'", e.algodDataDir) } +// NodeKilledError thrown when StopAlgod is called but the node was killed by SIGKILL instead of a clean shutdown with SIGTERM +type NodeKilledError struct { +} + +func (e *NodeKilledError) Error() string { + return "node was killed" +} + // MissingDataDirError thrown when StopAlgod is called but requested directory does not exist type MissingDataDirError struct { algodDataDir string @@ -176,6 +184,7 @@ func (nc *NodeController) StopAlgod() (err error) { if killed { // delete the pid file. os.Remove(nc.algodPidFile) + return &NodeKilledError{} } } else { return &NodeNotRunningError{algodDataDir: nc.algodDataDir} diff --git a/rpcs/blockService.go b/rpcs/blockService.go index 8231b5a98b..d1ef82dfd4 100644 --- a/rpcs/blockService.go +++ b/rpcs/blockService.go @@ -169,6 +169,9 @@ func (bs *BlockService) Start() { // Stop servicing catchup requests over ws func (bs *BlockService) Stop() { + bs.log.Debug("block service is stopping") + defer bs.log.Debug("block service has stopped") + bs.mu.Lock() close(bs.stop) bs.mu.Unlock() diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go index b3742bb985..5f75a36b0c 100644 --- a/rpcs/ledgerService.go +++ b/rpcs/ledgerService.go @@ -96,6 +96,9 @@ func (ls *LedgerService) Start() { // Stop servicing catchup requests func (ls *LedgerService) Stop() { if ls.enableService { + logging.Base().Debug("ledger service is stopping") + defer logging.Base().Debug("ledger service has stopped") + ls.running.Store(0) ls.stopping.Wait() } diff --git a/rpcs/txSyncer.go b/rpcs/txSyncer.go index 1d7fc2c027..2d2a993d9a 100644 --- a/rpcs/txSyncer.go +++ b/rpcs/txSyncer.go @@ -97,6 +97,9 @@ func (syncer *TxSyncer) Start(canStart chan struct{}) { // Stop stops periodic syncing func (syncer *TxSyncer) Stop() { + syncer.log.Debug("transaction syncer is stopping") + defer syncer.log.Debug("transaction syncer has stopped") + syncer.cancel() syncer.wg.Wait() } diff --git a/stateproof/worker.go b/stateproof/worker.go index e73a06d137..f74e118f58 100644 --- a/stateproof/worker.go +++ b/stateproof/worker.go @@ -142,6 +142,9 @@ func (spw *Worker) initDb(inMemory bool) error { // Stop stops any goroutines associated with this worker. 
It is the caller responsibility to remove the register // network handlers func (spw *Worker) Stop() { + spw.log.Debug("stateproof worker is stopping") + defer spw.log.Debug("stateproof worker has stopped") + spw.shutdown() spw.wg.Wait() diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index 2e3ac87943..938313206d 100644 --- a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -128,6 +128,15 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) a.Empty(cfg.NetworkProtocolVersion) cfg.NetworkProtocolVersion = ledgerNodeDowngradeTo + cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test + cfg.SaveToDisk(dir) + } else { + // TODO: remove when TestCatchupOverGossip is fixed + dir, err := fixture.GetNodeDir("Node") + a.NoError(err) + cfg, err := config.LoadConfigFromDisk(dir) + a.NoError(err) + cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test cfg.SaveToDisk(dir) } @@ -138,6 +147,14 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) a.Empty(cfg.NetworkProtocolVersion) cfg.NetworkProtocolVersion = fetcherNodeDowngradeTo + cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test + cfg.SaveToDisk(dir) + } else { + // TODO: remove when TestCatchupOverGossip is fixed + dir := fixture.PrimaryDataDir() + cfg, err := config.LoadConfigFromDisk(dir) + a.NoError(err) + cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test cfg.SaveToDisk(dir) } diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index 40269aa111..de1a06623d 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -145,7 +145,7 @@ func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err e } debugLog := func() { - fmt.Fprintf(os.Stderr, "Node at %s has terminated with an error: %v. Dumping logs...", nc.GetDataDir(), err) + fmt.Fprintf(os.Stderr, "Node at %s has terminated with an error: %v. Dumping logs...\n", nc.GetDataDir(), err) f.dumpLogs(filepath.Join(nc.GetDataDir(), "node.log")) } @@ -158,7 +158,7 @@ func (f *LibGoalFixture) nodeExitWithError(nc *nodecontrol.NodeController, err e ws := exitError.Sys().(syscall.WaitStatus) exitCode := ws.ExitStatus() - fmt.Fprintf(os.Stderr, "Node at %s has terminated with error code %d (%v)", nc.GetDataDir(), exitCode, *exitError) + fmt.Fprintf(os.Stderr, "Node at %s has terminated with error code %d (%v)\n", nc.GetDataDir(), exitCode, *exitError) debugLog() require.NoError(f.t, err) } @@ -345,7 +345,10 @@ func (f *LibGoalFixture) Shutdown() { func (f *LibGoalFixture) ShutdownImpl(preserveData bool) { f.NC.StopKMD() if preserveData { - f.network.Stop(f.binDir) + err := f.network.Stop(f.binDir) + if err != nil { + f.t.Logf("Fixture %s shutdown caught a network stop error: %v", f.Name, err) + } for _, relayDir := range f.RelayDataDirs() { f.dumpLogs(filepath.Join(relayDir, "node.log")) } @@ -353,14 +356,22 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) { f.dumpLogs(filepath.Join(nodeDir, "node.log")) } } else { - f.network.Delete(f.binDir) - - // Remove the test dir, if it was created by us as a temporary - // directory and it is empty. If there's anything still in the - // test dir, os.Remove()'s rmdir will fail and have no effect; - // we ignore this error. 
- if f.testDirTmp { - os.Remove(f.testDir) + err := f.network.Stop(f.binDir) + if err == nil { + // no error, proceed with cleanup + delErr := f.network.Delete(f.binDir) + if delErr != nil { + f.t.Logf("Fixture %s shutdown caught a network delete error: %v", f.Name, delErr) + } + // Remove the test dir, if it was created by us as a temporary + // directory and it is empty. If there's anything still in the + // test dir, os.Remove()'s rmdir will fail and have no effect; + // we ignore this error. + if f.testDirTmp { + os.Remove(f.testDir) + } + } else { + f.t.Logf("Fixture %s shutdown caught a network stop error: %v", f.Name, err) } } } @@ -369,14 +380,14 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) { func (f *LibGoalFixture) dumpLogs(filePath string) { file, err := os.Open(filePath) if err != nil { - fmt.Fprintf(os.Stderr, "could not open %s", filePath) + fmt.Fprintf(os.Stderr, "could not open %s\n", filePath) return } defer file.Close() fmt.Fprintf(os.Stderr, "=================================\n") parts := strings.Split(filePath, "/") - fmt.Fprintf(os.Stderr, "%s/%s:", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log + fmt.Fprintf(os.Stderr, "%s/%s:\n", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log scanner := bufio.NewScanner(file) for scanner.Scan() { fmt.Fprint(os.Stderr, scanner.Text()) From 97ab5593f86d26e09add68d55baefb4eeee4964a Mon Sep 17 00:00:00 2001 From: Joe Polny <50534337+joe-p@users.noreply.github.com> Date: Fri, 14 Jun 2024 10:18:10 -0400 Subject: [PATCH 10/82] simulate: fix signers (#5942) Co-authored-by: Jason Paulos --- daemon/algod/api/algod.oas2.json | 13 + daemon/algod/api/algod.oas3.yml | 13 + .../api/server/v2/generated/data/routes.go | 428 ++++++------- .../v2/generated/experimental/routes.go | 449 ++++++------- .../api/server/v2/generated/model/types.go | 9 + .../nonparticipating/private/routes.go | 119 ++-- .../nonparticipating/public/routes.go | 602 +++++++++--------- .../generated/participating/private/routes.go | 126 ++-- .../generated/participating/public/routes.go | 415 ++++++------ daemon/algod/api/server/v2/handlers.go | 2 + daemon/algod/api/server/v2/utils.go | 11 +- ledger/simulation/simulation_eval_test.go | 448 +++++++++++++ ledger/simulation/simulator.go | 98 ++- ledger/simulation/simulator_test.go | 2 +- ledger/simulation/trace.go | 6 + ledger/simulation/tracer.go | 46 +- .../restAPI/simulate/simulateRestAPI_test.go | 83 +++ 17 files changed, 1788 insertions(+), 1082 deletions(-) diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index 36e4f34861..e0827feb16 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -3944,6 +3944,10 @@ }, "exec-trace-config": { "$ref": "#/definitions/SimulateTraceConfig" + }, + "fix-signers": { + "description": "If true, signers for transactions that are missing signatures will be fixed during evaluation.", + "type": "boolean" } } }, @@ -4287,6 +4291,11 @@ }, "unnamed-resources-accessed": { "$ref": "#/definitions/SimulateUnnamedResourcesAccessed" + }, + "fixed-signer":{ + "description": "The account that needed to sign this transaction when no signature was provided and the provided signer was incorrect.", + "type": "string", + "x-algorand-format": "Address" } } }, @@ -4393,6 +4402,10 @@ "extra-opcode-budget": { "description": "The extra opcode budget added to each transaction group during simulation", "type": "integer" + }, + "fix-signers": { + "description": "If true, signers for transactions that are missing 
signatures will be fixed during evaluation.", + "type": "boolean" } } }, diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index 6737a95072..3fb04f5add 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -2302,6 +2302,10 @@ "description": "Applies extra opcode budget during simulation for each transaction group.", "type": "integer" }, + "fix-signers": { + "description": "If true, signers for transactions that are missing signatures will be fixed during evaluation.", + "type": "boolean" + }, "round": { "description": "If provided, specifies the round preceding the simulation. State changes through this round will be used to run this simulation. Usually only the 4 most recent rounds will be available (controlled by the node config value MaxAcctLookback). If not specified, defaults to the latest available round.", "type": "integer" @@ -2408,6 +2412,11 @@ "exec-trace": { "$ref": "#/components/schemas/SimulationTransactionExecTrace" }, + "fixed-signer": { + "description": "The account that needed to sign this transaction when no signature was provided and the provided signer was incorrect.", + "type": "string", + "x-algorand-format": "Address" + }, "logic-sig-budget-consumed": { "description": "Budget used during execution of a logic sig transaction.", "type": "integer" @@ -2494,6 +2503,10 @@ "description": "The extra opcode budget added to each transaction group during simulation", "type": "integer" }, + "fix-signers": { + "description": "If true, signers for transactions that are missing signatures will be fixed during evaluation.", + "type": "boolean" + }, "max-log-calls": { "description": "The maximum log calls one can make during simulation", "type": "integer" diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go index 504ee50c69..dc0159d20d 100644 --- a/daemon/algod/api/server/v2/generated/data/routes.go +++ b/daemon/algod/api/server/v2/generated/data/routes.go @@ -114,225 +114,227 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9+5PbtpIw+q+gtFvl2Fec8SvZE986tXdiJzlz4yQuj5O9u7ZvApEtCWcogAcAZ6T4", - "8//+FboBEiRBiZqZODlfnZ/sEfFoNBqNfqH7wyxXm0pJkNbMnn2YVVzzDVjQ+BfPc1VLm4nC/VWAybWo", - "rFBy9ix8Y8ZqIVez+Uy4Xytu17P5TPINtG1c//lMwz9qoaGYPbO6hvnM5GvYcDew3VWudTPSNlupzA9x", - "RkOcv5h93POBF4UGY4ZQ/ijLHRMyL+sCmNVcGp67T4ZdC7tmdi0M852ZkExJYGrJ7LrTmC0FlIU5CYv8", - "Rw16F63STz6+pI8tiJlWJQzhfK42CyEhQAUNUM2GMKtYAUtstOaWuRkcrKGhVcwA1/maLZU+ACoBEcML", - "st7Mnr2dGZAFaNytHMQV/nepAX6DzHK9Ajt7P08tbmlBZ1ZsEks799jXYOrSGoZtcY0rcQWSuV4n7Pva", - "WLYAxiV7/c1z9uTJky/dQjbcWig8kY2uqp09XhN1nz2bFdxC+DykNV6ulOayyJr2r795jvNf+AVObcWN", - "gfRhOXNf2PmLsQWEjgkSEtLCCvehQ/2uR+JQtD8vYKk0TNwTanynmxLP/4fuSs5tvq6UkDaxLwy/Mvqc", - "5GFR9308rAGg075ymNJu0LcPsy/ff3g0f/Tw47+9Pcv+x//5+ZOPE5f/vBn3AAaSDfNaa5D5Lltp4Hha", - "1lwO8fHa04NZq7os2Jpf4ebzDbJ635e5vsQ6r3hZOzoRuVZn5UoZxj0ZFbDkdWlZmJjVsnRsyo3mqZ0J", - "wyqtrkQBxdxx3+u1yNcs54aGwHbsWpSlo8HaQDFGa+nV7TlMH2OUOLhuhA9c0J8XGe26DmACtsgNsrxU", - "BjKrDlxP4cbhsmDxhdLeVea4y4q9WQPDyd0HumwRd9LRdFnumMV9LRg3jLNwNc2ZWLKdqtk1bk4pLrG/", - "X43D2oY5pOHmdO5Rd3jH0DdARgJ5C6VK4BKRF87dEGVyKVa1BsOu12DX/s7TYColDTC1+Dvk1m37/3vx", - "4w9MafY9GMNX8IrnlwxkrgooTtj5kkllI9LwtIQ4dD3H1uHhSl3yfzfK0cTGrCqeX6Zv9FJsRGJV3/Ot", - "2NQbJuvNArTb0nCFWMU02FrLMYBoxAOkuOHb4aRvdC1z3P922o4s56hNmKrkO0TYhm//+nDuwTGMlyWr", - 
"QBZCrpjdylE5zs19GLxMq1oWE8Qc6/Y0ulhNBblYCihYM8oeSPw0h+AR8jh4WuErAicMMgpOM8sBcCRs", - "EzTjTrf7wiq+gohkTthPnrnhV6suQTaEzhY7/FRpuBKqNk2nERhx6v0SuFQWskrDUiRo7MKjwzEYauM5", - "8MbLQLmSlgsJhWPOCLSyQMxqFKZowv36zvAWX3ADXzwdu+PbrxN3f6n6u753xyftNjbK6Egmrk731R/Y", - "tGTV6T9BP4znNmKV0c+DjRSrN+62WYoSb6K/u/0LaKgNMoEOIsLdZMRKcltrePZOPnB/sYxdWC4Lrgv3", - "y4Z++r4urbgQK/dTST+9VCuRX4jVCDIbWJMKF3bb0D9uvDQ7ttukXvFSqcu6iheUdxTXxY6dvxjbZBrz", - "WMI8a7TdWPF4sw3KyLE97LbZyBEgR3FXcdfwEnYaHLQ8X+I/2yXSE1/q39w/VVW63rZaplDr6NhfyWg+", - "8GaFs6oqRc4dEl/7z+6rYwJAigRvW5zihfrsQwRipVUF2goalFdVVqqcl5mx3OJI/65hOXs2+7fT1v5y", - "St3NaTT5S9frAjs5kZXEoIxX1RFjvHKij9nDLByDxk/IJojtodAkJG2iIyXhWHAJV1zak1Zl6fCD5gC/", - "9TO1+CZph/DdU8FGEc6o4QIMScDU8J5hEeoZopUhWlEgXZVq0fzw2VlVtRjE72dVRfhA6REECmawFcaa", - "+7h83p6keJ7zFyfs23hsFMWVLHfuciBRw90NS39r+VussS35NbQj3jMMt1PpE7c1AQ1OzL8LikO1Yq1K", - "J/UcpBXX+G++bUxm7vdJnf85SCzG7ThxoaLlMUc6Dv4SKTef9ShnSDje3HPCzvp9b0Y2bpQ9BGPOWyze", - "NfHgL8LCxhykhAiiiJr89nCt+W7mhcQMhb0hmfxkgCik4ishEdq5U58k2/BL2g+FeHeEAKbRi4iWSIJs", - "TKhe5vSoPxnYWf4JqDW1sUESdZJqKYxFvRobszWUKDhzGQg6JpUbUcaEDd+ziAbma80romX/hcQuIVGf", - "p0YE6y0v3ol3YhLmiN1HG41Q3ZgtH2SdSUiQa/Rg+KpU+eXfuFnfwQlfhLGGtI/TsDXwAjRbc7NOHJwe", - "bbejTaFv1xBpli2iqU6aJb5UK3MHSyzVMayrqp7zsnRTD1lWb7U48KSDXJbMNWawEWgw94ojWdhJ/2Jf", - "83ztxAKW87Kct6YiVWUlXEHplHYhJeg5s2tu28OPIwe9Bs+RAcfsLLBoNd7MhCY23dgiNLANxxto47SZ", - "quz2aTio4RvoSUF4I6oarQiRonH+IqwOrkAiT2qGRvCbNaK1Jh78xM3tP+HMUtHiyAJog/uuwV/DLzpA", - "u9btfSrbKZQuyGZt3W9Cs1xpGoJueD+5+w9w3XYm6vys0pD5ITS/Am146VbXW9T9hnzv6nQeOJkFtzw6", - "mZ4K0woYcQ7sh+Id6ISV5kf8Dy+Z++ykGEdJLfUIFEZU5E4t6GJ2qKKZXAO0tyq2IVMmq3h+eRSUz9vJ", - "02xm0sn7mqynfgv9IpoderMVhbmrbcLBxvaqe0LIdhXY0UAW2ct0ormmIOCNqhixjx4IxClwNEKI2t75", - "tfaV2qZg+kptB1ea2sKd7IQbZzKz/0ptX3jIlD6MeRx7CtLdAiXfgMHbTcaM083S+uXOFkrfTJroXTCS", - "td5Gxt2okTA17yEJm9ZV5s9mwmNBDXoDtQEe+4WA/vApjHWwcGH574AF40a9Cyx0B7prLKhNJUq4A9Jf", - "J4W4BTfw5DG7+NvZ548e//L48y8cSVZarTTfsMXOgmGfebMcM3ZXwv2kdoTSRXr0L54GH1V33NQ4RtU6", - "hw2vhkOR74u0X2rGXLsh1rpoxlU3AE7iiOCuNkI7I7euA+0FLOrVBVjrNN1XWi3vnBsOZkhBh41eVdoJ", - "FqbrJ/TS0mnhmpzC1mp+WmFLkAXFGbh1CON0wM3iTohqbOOLdpaCeYwWcPBQHLtN7TS7eKv0Ttd3Yd4A", - "rZVOXsGVVlblqsycnCdUwkDxyrdgvkXYrqr/O0HLrrlhbm70XtayGLFD2K2cfn/R0G+2ssXN3huM1ptY", - "nZ93yr50kd9qIRXozG4lQ+rsmEeWWm0YZwV2RFnjW7Akf4kNXFi+qX5cLu/G2qlwoIQdR2zAuJkYtXDS", - "j4FcSQrmO2Cy8aNOQU8fMcHLZMcB8Bi52MkcXWV3cWzHrVkbIdFvb3Yyj0xbDsYSilWHLG9vwhpDB011", - "zyTAceh4iZ/RVv8CSsu/UfpNK75+q1Vd3Tl77s85dTncL8Z7AwrXN5iBhVyV3QDSlYP9JLXGP2RBzxsj", - "Aq0BoUeKfClWaxvpi6+0+h3uxOQsKUDxAxmLStdnaDL6QRWOmdja3IEo2Q7WcjhHtzFf4wtVW8aZVAXg", - "5tcmLWSOhBxirBOGaNlYbkX7hDBsAY66cl671dYVwwCkwX3Rdsx4Tic0Q9SYkfCLJm6GWtF0FM5WauDF", - "ji0AJFMLH+Pgoy9wkRyjp2wQ07yIm+AXHbgqrXIwBorMm6IPghba0dVh9+AJAUeAm1mYUWzJ9a2Bvbw6", - "COcl7DKM9TPss+9+Nvf/AHitsrw8gFhsk0Jv3542hHra9PsIrj95THZkqSOqdeKtYxAlWBhD4VE4Gd2/", - "PkSDXbw9Wq5AY0jJ70rxYZLbEVAD6u9M77eFtq5GIti9mu4kPLdhkksVBKvUYCU3NjvEll2jji3BrSDi", - "hClOjAOPCF4vubEUBiVkgTZNuk5wHhLC3BTjAI+qIW7kn4MGMhw7d/egNLVp1BFTV5XSForUGtAjOzrX", - "D7Bt5lLLaOxG57GK1QYOjTyGpWh8jyyvAeMf3Db+V+/RHS4Oferunt8lUdkBokXEPkAuQqsIu3EU7wgg", - "wrSIJsIRpkc5TejwfGasqirHLWxWy6bfGJouqPWZ/altOyQucnLQvV0oMOhA8e095NeEWYrfXnPDPBzB", - "xY7mHIrXGsLsDmNmhMwh20f5qOK5VvEROHhI62qleQFZASXfJYID6DOjz/sGwB1v1V1lIaNA3PSmt5Qc", - "4h73DK1wPJMSHhl+Ybk7gk4VaAnE9z4wcgE4doo5eTq61wyFcyW3KIyHy6atToyIt+GVsm7HPT0gyJ6j", - "TwF4BA/N0DdHBXbOWt2zP8V/g/ETNHLE8ZPswIwtoR3/qAWM2IL9G6fovPTYe48DJ9nmKBs7wEfGjuyI", - "YfoV11bkokJd5zvY3bnq158g6ThnBVguSihY9IHUwCruzyiEtD/mzVTBSba3IfgD41tiOSFMpwv8JexQ", - 
"535FbxMiU8dd6LKJUd39xCVDQEPEsxPB4yaw5bktd05Qs2vYsWvQwEy9oBCGoT/FqiqLB0j6Z/bM6L2z", - "Sd/oXnfxBQ4VLS8Va0Y6wX743vQUgw46vC5QKVVOsJANkJGEYFLsCKuU23Xhnz+FBzCBkjpAeqaNrvnm", - "+r9nOmjGFbD/VjXLuUSVq7bQyDRKo6CAAqSbwYlgzZw+OLHFEJSwAdIk8cuDB/2FP3jg91wYtoTr8GbQ", - "Neyj48EDtOO8UsZ2Dtcd2EPdcTtPXB/ouHIXn9dC+jzlcMSTH3nKTr7qDd54u9yZMsYTrlv+rRlA72Ru", - "p6w9ppFp0V447iRfTjc+aLBu3PcLsalLbu/CawVXvMzUFWgtCjjIyf3EQsmvr3j5Y9MN30NC7mg0hyzH", - "V3wTx4I3rg89/HPjCCncAaag/6kAwTn1uqBOB1TMNlJVbDZQCG6h3LFKQw703s1JjqZZ6gmjSPh8zeUK", - "FQat6pUPbqVxkOHXhkwzupaDIZJCld3KDI3cqQvAh6mFJ49OnALuVLq+hZwUmGvezOdfuU65maM96HsM", - "kk6y+WxU43VIvWo1XkJO993mhMugI+9F+GknnuhKQdQ52WeIr3hb3GFym/v7mOzboVNQDieOIn7bj2NB", - "v07dLnd3IPTQQExDpcHgFRWbqQx9Vcv4jXYIFdwZC5uhJZ+6/jJy/F6P6otKlkJCtlESdsm0JELC9/gx", - "eZzwmhzpjALLWN++DtKBvwdWd54p1Hhb/OJu909o32NlvlH6rlyiNOBk8X6CB/Kgu91PeVM/KS/LhGvR", - "v+DsMwAzb4J1hWbcGJULlNnOCzP3UcHkjfTPPbvof9W8S7mDs9cft+dDi5MDoI0YyopxlpcCLchKGqvr", - "3L6THG1U0VITQVxBGR+3Wj4PTdJm0oQV0w/1TnIM4GssV8mAjSUkzDTfAATjpalXKzC2p+ssAd5J30pI", - "Vkthca6NOy4ZnZcKNEZSnVDLDd+xpaMJq9hvoBVb1LYr/eMDZWNFWXqHnpuGqeU7yS0rgRvLvhfyzRaH", - "C07/cGQl2GulLxsspG/3FUgwwmTpYLNv6SvG9fvlr32MP4a70+cQdNpmTJi5ZXaSpPz/n/3ns7dn2f/w", - "7LeH2Zf/1+n7D08/3n8w+PHxx7/+9X91f3ry8a/3//PfUzsVYE89n/WQn7/wmvH5C1R/olD9PuyfzP6/", - "ETJLElkczdGjLfYZporwBHS/axyza3gn7VY6QrripSgcb7kJOfRvmMFZpNPRo5rORvSMYWGtRyoVt+Ay", - "LMFkeqzxxlLUMD4z/VAdnZL+7Tmel2UtaSuD9E3vMEN8mVrOm2QElKfsGcOX6msegjz9n48//2I2b1+Y", - "N99n85n/+j5ByaLYpvIIFLBN6YrxI4l7hlV8Z8CmuQfCngylo9iOeNgNbBagzVpUn55TGCsWaQ4Xnix5", - "m9NWnksK8HfnB12cO+85UctPD7fVAAVUdp3KX9QR1LBVu5sAvbCTSqsrkHMmTuCkb/MpnL7og/pK4MsQ", - "mKqVmqINNeeACC1QRYT1eCGTDCsp+uk9b/CXv7lzdcgPnIKrP2cqovfet1+/YaeeYZp7lNKCho6SECRU", - "af94shOQ5LhZ/KbsnXwnX8ASrQ9KPnsnC2756YIbkZvT2oD+ipdc5nCyUuxZeI/5glv+Tg4krdHEitGj", - "aVbVi1Lk7DJWSFrypGRZwxHevXvLy5V69+79IDZjqD74qZL8hSbInCCsapv5VD+ZhmuuU74v06R6wZEp", - "l9e+WUnIVjUZSEMqIT9+mufxqjL9lA/D5VdV6ZYfkaHxCQ3cljFjVfMezQko/kmv298flL8YNL8OdpXa", - "gGG/bnj1Vkj7nmXv6ocPn+DLvjYHwq/+ync0uatgsnVlNCVF36iCCye1EmPVs4qvUi62d+/eWuAV7j7K", - "yxu0cZQlw26dV4fhgQEO1S6geeI8ugEEx9GPg3FxF9QrpHVMLwE/4RZ2H2Dfar+i9/M33q4Db/B5bdeZ", - "O9vJVRlH4mFnmmxvKydkhWgMI1aorfrEeAtg+RryS5+xDDaV3c073UPAjxc0A+sQhnLZ0QtDzKaEDooF", - "sLoquBfFudz109oYelGBg76GS9i9UW0ypmPy2HTTqpixg4qUGkmXjljjY+vH6G++jyoLD019dhJ8vBnI", - "4llDF6HP+EEmkfcODnGKKDppP8YQwXUCEUT8Iyi4wULdeLci/dTyhMxBWnEFGZRiJRapNLz/NfSHBVgd", - "VfrMgz4KuRnQMLFkTpVf0MXq1XvN5Qrc9eyuVGV4SVlVk0EbqA+tgWu7AG732vllnJAiQIcq5TW+vEYL", - "39wtAbZuv4VFi52Ea6dVoKGI2vjo5ZPx+DMCHIobwhO6t5rCyaiu61GXyDgYbuUGu41a60PzYjpDuOj7", - "BjBlqbp2++KgUD7bJiV1ie6X2vAVjOgusfduYj6MjscPBzkkkSRlELXsixoDSSAJMjXO3JqTZxjcF3eI", - "Uc3sBWSGmchB7H1GmETbI2xRogDbRK7S3nPd8aJSVuAx0NKsBbRsRcEARhcj8XFccxOOI+ZLDVx2knT2", - "O6Z92Zea7jyKJYySojaJ58Jt2OegA73fJ6gLWelCKrpY6Z+QVs7pXvh8IbUdSqJoWkAJK1o4NQ6E0iZM", - "ajfIwfHjcom8JUuFJUYG6kgA8HOA01weMEa+ETZ5hBQZR2Bj4AMOzH5Q8dmUq2OAlD7hEw9j4xUR/Q3p", - "h30UqO+EUVW5y1WM+BvzwAF8KopWsuhFVOMwTMg5c2zuipeOzXldvB1kkCENFYpePjQfenN/TNHY45qi", - "K/+oNZGQcJPVxNJsADotau+BeKG2Gb1QTuoii+3C0Xvy7QK+l04dTMpFd8+whdpiOBdeLRQrfwCWcTgC", - "GJHtZSsM0iv2G5OzCJh90+6Xc1NUaJBkvKG1IZcxQW/K1COy5Ri5fBall7sRAD0zVFurwZslDpoPuuLJ", - "8DJvb7V5mzY1PAtLHf+xI5TcpRH8De1j3YRwf2sT/40nFwsn6pNkwhtalm6ToZA6V5R18JgEhX1y6ACx", - "B6uv+nJgEq3dWK8uXiOspViJY75Dp+QQbQZKQCU464im2WUqUsDp8oD3+EXoFhnrcPe43N2PAgg1rISx", - "0DqNQlzQH2GO55g+Wanl+OpspZdufa+Vai5/cptjx84yP/kKMAJ/KbSxGXrckktwjb4xaET6xjVNS6Dd", - "EEUqNiCKNMfFaS9hlxWirNP06uf97oWb9ofmojH1Am8xISlAa4HFMZKBy3umptj2vQt+SQt+ye9svdNO", - 
"g2vqJtaOXLpz/JOcix4D28cOEgSYIo7hro2idA+DjB6cD7ljJI1GMS0n+7wNg8NUhLEPRqmFZ+9jNz+N", - "lFxLlAYw/UJQrVZQhPRmwR8moyRypZKrqIpTVe3LmXfCKHUdZp7bk7TOh+HDWBB+JO5nQhawTUMfawUI", - "efuyDhPu4SQrkJSuJG0WSqImDvHHFpGt7hP7QvsPAJJB0G96zuw2Opl2qdlO3IASeOF1EgNhffuP5XBD", - "POrmY+HTncyn+48QDog0JWxU2GSYhmCEAfOqEsW253iiUUeNYPwo6/KItIWsxQ92AAPdIOgkwXVSaftQ", - "a29gP0Wd99RpZRR77QOLHX3z3D/AL2qNHoxOZPMwb3ujq01c+3c/X1il+Qq8FyojkG41BC7nGDREWdEN", - "s4LCSQqxXELsfTE38Rx0gBvY2IsJpJsgsrSLphbSfvE0RUYHqKeF8TDK0hSToIUxn/yboZcryPSRKam5", - "EqKtuYGrKvlc/zvYZT/zsnZKhtCmDc/1bqfu5XvErl9tvoMdjnww6tUBdmBX0PL0GpAGU5b+5pOJEljf", - "M50U/6hedrbwiJ06S+/SHW2NL8owTvztLdMpWtBdym0ORhsk4WCZshsX6dgEd3qgi/g+KR/aBFEclkEi", - "eT+eSphQwnJ4FTW5KA7R7hvgZSBeXM7s43x2u0iA1G3mRzyA61fNBZrEM0aakme4E9hzJMp5VWl1xcvM", - "x0uMXf5aXfnLH5uH8IpPrMmkKfvN12cvX3nwP85neQlcZ40lYHRV2K76p1kVlXHYf5VQtm9v6CRLUbT5", - "TUbmOMbiGjN794xNg6IobfxMdBR9zMUyHfB+kPf5UB9a4p6QH6iaiJ/W50kBP90gH37FRRmcjQHakeB0", - "XNy0yjpJrhAPcOtgoSjmK7tTdjM43enT0VLXAZ6Ec/2IqSnTGof0iSuRFfngH37n0tM3SneYv3+ZmAwe", - "+v3EKidkEx5HYrVD/cq+MHXCSPD6dfWrO40PHsRH7cGDOfu19B8iAPH3hf8d9YsHD5Lew6QZyzEJtFJJ", - "voH7zSuL0Y34tAq4hOtpF/TZ1aaRLNU4GTYUSlFAAd3XHnvXWnh8Fv6XAkpwP51MUdLjTSd0x8BMOUEX", - "Yy8RmyDTDZXMNEzJfkw1PoJ1pIXM3pdkIGfs8AjJeoMOzMyUIk+HdsiFcexVUjCla8yw8Yi11o1Yi5HY", - "XFmLaCzXbErO1B6Q0RxJZJpk2tYWdwvlj3ctxT9qYKJwWs1SgMZ7rXfVBeUARx0IpGm7mB+Y/FTt8Lex", - "g+zxNwVb0D4jyF7/3YvGpxQWmir6c2QEeDzjgHHvid729OGpmV6zrbshmNP0mCml0wOj8866kTmSpdCF", - "yZZa/QZpRwj6jxKJMILjU6CZ9zeQqci9PktpnMptRfd29kPbPV03Htv4W+vCYdFN1bGbXKbpU33cRt5E", - "6TXpdM0eyWNKWBxh0H0aMMJa8HhFwbBYBiVEH3FJ54myQHRemKVPZfyW85TGb0+lh3nw/rXk1wueqhHj", - "dCEHU7S9nTgpq1joHDbANDkOaHYWRXA3bQVlkqtAtz6IYVbaG+o1NO1kjaZVYJCiYtVlTmEKpVGJYWp5", - "zSVVEXf9iF/53gbIBe96XSuNeSBNOqSrgFxskubYd+/eFvkwfKcQK0EFsmsDUQVmPxCjZJNIRb6KdZO5", - "w6PmfMkezqMy8H43CnEljFiUgC0eUYsFN3hdNu7wpotbHki7Ntj88YTm61oWGgq7NoRYo1ije6KQ1wQm", - "LsBeA0j2ENs9+pJ9hiGZRlzBfYdFLwTNnj36EgNq6I+HqVvWFzjfx7IL5NkhWDtNxxiTSmM4JulHTUdf", - "LzXAbzB+O+w5TdR1ylnClv5COXyWNlzyFaTfZ2wOwER9cTfRnd/DiyRvABir1Y4Jm54fLHf8aeTNt2N/", - "BAbL1WYj7MYH7hm1cfTUllemScNwVOvf14sKcIWPGP9ahfC/nq3rE6sxfDPyZgujlH9AH22M1jnjlPyz", - "FG1keqjXyc5DbmEsoNXUzSLcuLnc0lGWxED1Jau0kBbtH7VdZn9xarHmuWN/J2PgZosvniYKUXVrtcjj", - "AP/keNdgQF+lUa9HyD7ILL4v+0wqmW0cRynutzkWolM5GqibDskciwvdP/RUydeNko2SW90hNx5x6lsR", - "ntwz4C1JsVnPUfR49Mo+OWXWOk0evHY79NPrl17K2CidKhjQHncvcWiwWsAVvphLb5Ib85Z7octJu3Ab", - "6P/Y+KcgckZiWTjLSUUg8mjueyzvpPifv28zn6NjlV4i9myASiesnd5u94mjDY+zuvX9txQwht9GMDcZ", - "bTjKECsj0fcUXt/0+SPihfog0Z53DI6PfmXa6eAoxz94gEA/eDD3YvCvj7ufib0/eJBOQJw0ublfWyzc", - "RiPGvqk9/EolDGChamETUOTzIyQMkGOXlPvgmODCDzVn3Qpxn16KuJv3Xelo0/QpePfuLX4JeMA/+oj4", - "g5klbmD7SmH8sHcrZCZJpmi+R3HunH2ltlMJp3cHBeL5E6BoBCUTzXO4kkEF0KS7/mC8SESjbtQFlMop", - "mXFRoNie/8+DZ7f4+R5s16Isfm5zu/UuEs1lvk5GCS9cx19IRu9cwcQqk3VG1lxKKJPDkW77S9CBE1r6", - "39XUeTZCTmzbr0BLy+0trgW8C2YAKkzo0Cts6SaIsdpNm9WkZShXqmA4T1vUomWOw1LOqRKaiffNOOym", + "H4sIAAAAAAAC/+y9/5PbNrIg/q+g9F6VY3/EGdtx8jb+1Na7iZ1k5+IkLo+TvfdsXwKRLQk7FMAFwBkp", + "Pv/vV+gGSJAEJWpm4iRX+5M9Ir40Go1Gf0P3+1muNpWSIK2ZPX0/q7jmG7Cg8S+e56qWNhOF+6sAk2tR", + "WaHk7Gn4xozVQq5m85lwv1bcrmfzmeQbaNu4/vOZhn/WQkMxe2p1DfOZydew4W5gu6tc62akbbZSmR/i", + "jIY4fz77sOcDLwoNxgyh/EGWOyZkXtYFMKu5NDx3nwy7FnbN7FoY5jszIZmSwNSS2XWnMVsKKAtzEhb5", + "zxr0Llqln3x8SR9aEDOtShjC+UxtFkJCgAoaoJoNYVaxApbYaM0tczM4WENDq5gBrvM1Wyp9AFQCIoYX", + "ZL2ZPX0zMyAL0LhbOYgr/O9SA/wKmeV6BXb2bp5a3NKCzqzYJJZ27rGvwdSlNQzb4hpX4gokc71O2He1", + "sWwBjEv26utn7NNPP/3CLWTDrYXCE9noqtrZ4zVR99nTWcEthM9DWuPlSmkui6xp/+rrZzj/hV/g1Fbc", + 
"GEgfljP3hZ0/H1tA6JggISEtrHAfOtTveiQORfvzApZKw8Q9ocZ3uinx/L/rruTc5utKCWkT+8LwK6PP", + "SR4Wdd/HwxoAOu0rhyntBn3zMPvi3ftH80cPP/zbm7Psv/2fn336YeLynzXjHsBAsmFeaw0y32UrDRxP", + "y5rLIT5eeXowa1WXBVvzK9x8vkFW7/sy15dY5xUva0cnItfqrFwpw7gnowKWvC4tCxOzWpaOTbnRPLUz", + "YVil1ZUooJg77nu9Fvma5dzQENiOXYuydDRYGyjGaC29uj2H6UOMEgfXjfCBC/rjIqNd1wFMwBa5QZaX", + "ykBm1YHrKdw4XBYsvlDau8ocd1mx12tgOLn7QJct4k46mi7LHbO4rwXjhnEWrqY5E0u2UzW7xs0pxSX2", + "96txWNswhzTcnM496g7vGPoGyEggb6FUCVwi8sK5G6JMLsWq1mDY9Rrs2t95GkylpAGmFv+A3Lpt/58X", + "P3zPlGbfgTF8BS95fslA5qqA4oSdL5lUNiINT0uIQ9dzbB0ertQl/w+jHE1szKri+WX6Ri/FRiRW9R3f", + "ik29YbLeLEC7LQ1XiFVMg621HAOIRjxAihu+HU76Wtcyx/1vp+3Ico7ahKlKvkOEbfj2rw/nHhzDeFmy", + "CmQh5IrZrRyV49zch8HLtKplMUHMsW5Po4vVVJCLpYCCNaPsgcRPcwgeIY+DpxW+InDCIKPgNLMcAEfC", + "NkEz7nS7L6ziK4hI5oT96JkbfrXqEmRD6Gyxw0+VhiuhatN0GoERp94vgUtlIas0LEWCxi48OhyDoTae", + "A2+8DJQrabmQUDjmjEArC8SsRmGKJtyv7wxv8QU38PmTsTu+/Tpx95eqv+t7d3zSbmOjjI5k4up0X/2B", + "TUtWnf4T9MN4biNWGf082Eixeu1um6Uo8Sb6h9u/gIbaIBPoICLcTUasJLe1hqdv5QP3F8vYheWy4Lpw", + "v2zop+/q0ooLsXI/lfTTC7US+YVYjSCzgTWpcGG3Df3jxkuzY7tN6hUvlLqsq3hBeUdxXezY+fOxTaYx", + "jyXMs0bbjRWP19ugjBzbw26bjRwBchR3FXcNL2GnwUHL8yX+s10iPfGl/tX9U1Wl622rZQq1jo79lYzm", + "A29WOKuqUuTcIfGV/+y+OiYApEjwtsUpXqhP30cgVlpVoK2gQXlVZaXKeZkZyy2O9O8alrOns387be0v", + "p9TdnEaTv3C9LrCTE1lJDMp4VR0xxksn+pg9zMIxaPyEbILYHgpNQtImOlISjgWXcMWlPWlVlg4/aA7w", + "Gz9Ti2+SdgjfPRVsFOGMGi7AkARMDe8ZFqGeIVoZohUF0lWpFs0Pn5xVVYtB/H5WVYQPlB5BoGAGW2Gs", + "uY/L5+1Jiuc5f37CvonHRlFcyXLnLgcSNdzdsPS3lr/FGtuSX0M74j3DcDuVPnFbE9DgxPy7oDhUK9aq", + "dFLPQVpxjf/m28Zk5n6f1PnPQWIxbseJCxUtjznScfCXSLn5pEc5Q8Lx5p4TdtbvezOycaPsIRhz3mLx", + "rokHfxEWNuYgJUQQRdTkt4drzXczLyRmKOwNyeRHA0QhFV8JidDOnfok2YZf0n4oxLsjBDCNXkS0RBJk", + "Y0L1MqdH/cnAzvInoNbUxgZJ1EmqpTAW9WpszNZQouDMZSDomFRuRBkTNnzPIhqYrzWviJb9FxK7hER9", + "nhoRrLe8eCfeiUmYI3YfbTRCdWO2fJB1JiFBrtGD4ctS5Zd/42Z9Byd8EcYa0j5Ow9bAC9Bszc06cXB6", + "tN2ONoW+XUOkWbaIpjpplvhCrcwdLLFUx7CuqnrGy9JNPWRZvdXiwJMOclky15jBRqDB3CuOZGEn/Yt9", + "xfO1EwtYzsty3pqKVJWVcAWlU9qFlKDnzK65bQ8/jhz0GjxHBhyzs8Ci1XgzE5rYdGOL0MA2HG+gjdNm", + "qrLbp+Gghm+gJwXhjahqtCJEisb587A6uAKJPKkZGsFv1ojWmnjwEze3/4QzS0WLIwugDe67Bn8Nv+gA", + "7Vq396lsp1C6IJu1db8JzXKlaQi64f3k7j/AdduZqPOTSkPmh9D8CrThpVtdb1H3G/K9q9N54GQW3PLo", + "ZHoqTCtgxDmwH4p3oBNWmh/wP7xk7rOTYhwltdQjUBhRkTu1oIvZoYpmcg3Q3qrYhkyZrOL55VFQPmsn", + "T7OZSSfvK7Ke+i30i2h26PVWFOautgkHG9ur7gkh21VgRwNZZC/TieaagoDXqmLEPnogEKfA0Qghanvn", + "19qXapuC6Uu1HVxpagt3shNunMnM/ku1fe4hU/ow5nHsKUh3C5R8AwZvNxkzTjdL65c7Wyh9M2mid8FI", + "1nobGXejRsLUvIckbFpXmT+bCY8FNegN1AZ47BcC+sOnMNbBwoXlvwEWjBv1LrDQHeiusaA2lSjhDkh/", + "nRTiFtzAp4/Zxd/OPnv0+OfHn33uSLLSaqX5hi12Fgz7xJvlmLG7Eu4ntSOULtKjf/4k+Ki646bGMarW", + "OWx4NRyKfF+k/VIz5toNsdZFM666AXASRwR3tRHaGbl1HWjPYVGvLsBap+m+1Gp559xwMEMKOmz0stJO", + "sDBdP6GXlk4L1+QUtlbz0wpbgiwozsCtQxinA24Wd0JUYxtftLMUzGO0gIOH4thtaqfZxVuld7q+C/MG", + "aK108gqutLIqV2Xm5DyhEgaKl74F8y3CdlX93wlads0Nc3Oj97KWxYgdwm7l9PuLhn69lS1u9t5gtN7E", + "6vy8U/ali/xWC6lAZ3YrGVJnxzyy1GrDOCuwI8oa34Al+Uts4MLyTfXDcnk31k6FAyXsOGIDxs3EqIWT", + "fgzkSlIw3wGTjR91Cnr6iAleJjsOgMfIxU7m6Cq7i2M7bs3aCIl+e7OTeWTacjCWUKw6ZHl7E9YYOmiq", + "eyYBjkPHC/yMtvrnUFr+tdKvW/H1G63q6s7Zc3/OqcvhfjHeG1C4vsEMLOSq7AaQrhzsJ6k1/i4LetYY", + "EWgNCD1S5AuxWttIX3yp1W9wJyZnSQGKH8hYVLo+Q5PR96pwzMTW5g5EyXawlsM5uo35Gl+o2jLOpCoA", + "N782aSFzJOQQY50wRMvGcivaJ4RhC3DUlfParbauGAYgDe6LtmPGczqhGaLGjIRfNHEz1Iqmo3C2UgMv", + "dmwBIJla+BgHH32Bi+QYPWWDmOZF3AS/6MBVaZWDMVBk3hR9ELTQjq4OuwdPCDgC3MzCjGJLrm8N7OXV", + "QTgvYZdhrJ9hn3z7k7n/O8BrleXlAcRimxR6+/a0IdTTpt9HcP3JY7IjSx1RrRNvHYMowcIYCo/Cyej+", + 
"9SEa7OLt0XIFGkNKflOKD5PcjoAaUH9jer8ttHU1EsHu1XQn4bkNk1yqIFilBiu5sdkhtuwadWwJbgUR", + "J0xxYhx4RPB6wY2lMCghC7Rp0nWC85AQ5qYYB3hUDXEj/xQ0kOHYubsHpalNo46YuqqUtlCk1oAe2dG5", + "vodtM5daRmM3Oo9VrDZwaOQxLEXje2R5DRj/4Lbxv3qP7nBx6FN39/wuicoOEC0i9gFyEVpF2I2jeEcA", + "EaZFNBGOMD3KaUKH5zNjVVU5bmGzWjb9xtB0Qa3P7I9t2yFxkZOD7u1CgUEHim/vIb8mzFL89pob5uEI", + "LnY051C81hBmdxgzI2QO2T7KRxXPtYqPwMFDWlcrzQvICij5LhEcQJ8Zfd43AO54q+4qCxkF4qY3vaXk", + "EPe4Z2iF45mU8MjwC8vdEXSqQEsgvveBkQvAsVPMydPRvWYonCu5RWE8XDZtdWJEvA2vlHU77ukBQfYc", + "fQrAI3hohr45KrBz1uqe/Sn+C4yfoJEjjp9kB2ZsCe34Ry1gxBbs3zhF56XH3nscOMk2R9nYAT4ydmRH", + "DNMvubYiFxXqOt/C7s5Vv/4EScc5K8ByUULBog+kBlZxf0YhpP0xb6YKTrK9DcEfGN8SywlhOl3gL2GH", + "OvdLepsQmTruQpdNjOruJy4ZAhoinp0IHjeBLc9tuXOCml3Djl2DBmbqBYUwDP0pVlVZPEDSP7NnRu+d", + "TfpG97qLL3CoaHmpWDPSCfbD97qnGHTQ4XWBSqlygoVsgIwkBJNiR1il3K4L//wpPIAJlNQB0jNtdM03", + "1/8900EzroD9l6pZziWqXLWFRqZRGgUFFCDdDE4Ea+b0wYkthqCEDZAmiV8ePOgv/MEDv+fCsCVchzeD", + "rmEfHQ8eoB3npTK2c7juwB7qjtt54vpAx5W7+LwW0ucphyOe/MhTdvJlb/DG2+XOlDGecN3yb80Aeidz", + "O2XtMY1Mi/bCcSf5crrxQYN1475fiE1dcnsXXiu44mWmrkBrUcBBTu4nFkp+dcXLH5pu+B4SckejOWQ5", + "vuKbOBa8dn3o4Z8bR0jhDjAF/U8FCM6p1wV1OqBitpGqYrOBQnAL5Y5VGnKg925OcjTNUk8YRcLnay5X", + "qDBoVa98cCuNgwy/NmSa0bUcDJEUquxWZmjkTl0APkwtPHl04hRwp9L1LeSkwFzzZj7/ynXKzRztQd9j", + "kHSSzWejGq9D6lWr8RJyuu82J1wGHXkvwk878URXCqLOyT5DfMXb4g6T29zfxmTfDp2CcjhxFPHbfhwL", + "+nXqdrm7A6GHBmIaKg0Gr6jYTGXoq1rGb7RDqODOWNgMLfnU9eeR4/dqVF9UshQSso2SsEumJRESvsOP", + "yeOE1+RIZxRYxvr2dZAO/D2wuvNMocbb4hd3u39C+x4r87XSd+USpQEni/cTPJAH3e1+ypv6SXlZJlyL", + "/gVnnwGYeROsKzTjxqhcoMx2Xpi5jwomb6R/7tlF/8vmXcodnL3+uD0fWpwcAG3EUFaMs7wUaEFW0lhd", + "5/at5GijipaaCOIKyvi41fJZaJI2kyasmH6ot5JjAF9juUoGbCwhYab5GiAYL029WoGxPV1nCfBW+lZC", + "sloKi3Nt3HHJ6LxUoDGS6oRabviOLR1NWMV+Ba3YorZd6R8fKBsrytI79Nw0TC3fSm5ZCdxY9p2Qr7c4", + "XHD6hyMrwV4rfdlgIX27r0CCESZLB5t9Q18xrt8vf+1j/DHcnT6HoNM2Y8LMLbOTJOV/f/KfT9+cZf/N", + "s18fZl/8f6fv3j/5cP/B4MfHH/761//T/enTD3+9/5//ntqpAHvq+ayH/Py514zPn6P6E4Xq92H/aPb/", + "jZBZksjiaI4ebbFPMFWEJ6D7XeOYXcNbabfSEdIVL0XheMtNyKF/wwzOIp2OHtV0NqJnDAtrPVKpuAWX", + "YQkm02ONN5aihvGZ6Yfq6JT0b8/xvCxrSVsZpG96hxniy9Ry3iQjoDxlTxm+VF/zEOTp/3z82eezefvC", + "vPk+m8/813cJShbFNpVHoIBtSleMH0ncM6ziOwM2zT0Q9mQoHcV2xMNuYLMAbdai+vicwlixSHO48GTJ", + "25y28lxSgL87P+ji3HnPiVp+fLitBiigsutU/qKOoIat2t0E6IWdVFpdgZwzcQInfZtP4fRFH9RXAl+G", + "wFSt1BRtqDkHRGiBKiKsxwuZZFhJ0U/veYO//M2dq0N+4BRc/TlTEb33vvnqNTv1DNPco5QWNHSUhCCh", + "SvvHk52AJMfN4jdlb+Vb+RyWaH1Q8ulbWXDLTxfciNyc1gb0l7zkMoeTlWJPw3vM59zyt3IgaY0mVowe", + "TbOqXpQiZ5exQtKSJyXLGo7w9u0bXq7U27fvBrEZQ/XBT5XkLzRB5gRhVdvMp/rJNFxznfJ9mSbVC45M", + "ubz2zUpCtqrJQBpSCfnx0zyPV5Xpp3wYLr+qSrf8iAyNT2jgtowZq5r3aE5A8U963f5+r/zFoPl1sKvU", + "Bgz7ZcOrN0Ladyx7Wz98+Cm+7GtzIPzir3xHk7sKJltXRlNS9I0quHBSKzFWPav4KuVie/v2jQVe4e6j", + "vLxBG0dZMuzWeXUYHhjgUO0CmifOoxtAcBz9OBgXd0G9QlrH9BLwE25h9wH2rfYrej9/4+068Aaf13ad", + "ubOdXJVxJB52psn2tnJCVojGMGKF2qpPjLcAlq8hv/QZy2BT2d280z0E/HhBM7AOYSiXHb0wxGxK6KBY", + "AKurgntRnMtdP62NoRcVOOgruITda9UmYzomj003rYoZO6hIqZF06Yg1PrZ+jP7m+6iy8NDUZyfBx5uB", + "LJ42dBH6jB9kEnnv4BCniKKT9mMMEVwnEEHEP4KCGyzUjXcr0k8tT8gcpBVXkEEpVmKRSsP796E/LMDq", + "qNJnHvRRyM2Ahoklc6r8gi5Wr95rLlfgrmd3pSrDS8qqmgzaQH1oDVzbBXC7184v44QUATpUKa/x5TVa", + "+OZuCbB1+y0sWuwkXDutAg1F1MZHL5+Mx58R4FDcEJ7QvdUUTkZ1XY+6RMbBcCs32G3UWh+aF9MZwkXf", + "N4ApS9W12xcHhfLZNimpS3S/1IavYER3ib13E/NhdDx+OMghiSQpg6hlX9QYSAJJkKlx5tacPMPgvrhD", + "jGpmLyAzzEQOYu8zwiTaHmGLEgXYJnKV9p7rjheVsgKPgZZmLaBlKwoGMLoYiY/jmptwHDFfauCyk6Sz", + "3zDty77UdOdRLGGUFLVJPBduwz4HHej9PkFdyEoXUtHFSv+EtHJO98LnC6ntUBJF0wJKWNHCqXEglDZh", + 
"UrtBDo4flkvkLVkqLDEyUEcCgJ8DnObygDHyjbDJI6TIOAIbAx9wYPa9is+mXB0DpPQJn3gYG6+I6G9I", + "P+yjQH0njKrKXa5ixN+YBw7gU1G0kkUvohqHYULOmWNzV7x0bM7r4u0ggwxpqFD08qH50Jv7Y4rGHtcU", + "XflHrYmEhJusJpZmA9BpUXsPxAu1zeiFclIXWWwXjt6TbxfwvXTqYFIuunuGLdQWw7nwaqFY+QOwjMMR", + "wIhsL1thkF6x35icRcDsm3a/nJuiQoMk4w2tDbmMCXpTph6RLcfI5ZMovdyNAOiZodpaDd4scdB80BVP", + "hpd5e6vN27Sp4VlY6viPHaHkLo3gb2gf6yaE+1ub+G88uVg4UR8lE97QsnSbDIXUuaKsg8ckKOyTQweI", + "PVh92ZcDk2jtxnp18RphLcVKHPMdOiWHaDNQAirBWUc0zS5TkQJOlwe8xy9Ct8hYh7vH5e5+FECoYSWM", + "hdZpFOKCfg9zPMf0yUotx1dnK71063ulVHP5k9scO3aW+dFXgBH4S6GNzdDjllyCa/S1QSPS165pWgLt", + "hihSsQFRpDkuTnsJu6wQZZ2mVz/vt8/dtN83F42pF3iLCUkBWgssjpEMXN4zNcW2713wC1rwC35n6512", + "GlxTN7F25NKd409yLnoMbB87SBBgijiGuzaK0j0MMnpwPuSOkTQaxbSc7PM2DA5TEcY+GKUWnr2P3fw0", + "UnItURrA9AtBtVpBEdKbBX+YjJLIlUquoipOVbUvZ94Jo9R1mHluT9I6H4YPY0H4kbifCVnANg19rBUg", + "5O3LOky4h5OsQFK6krRZKImaOMQfW0S2uo/sC+0/AEgGQb/uObPb6GTapWY7cQNK4IXXSQyE9e0/lsMN", + "8aibj4VPdzKf7j9COCDSlLBRYZNhGoIRBsyrShTbnuOJRh01gvGjrMsj0hayFj/YAQx0g6CTBNdJpe1D", + "rb2B/RR13lOnlVHstQ8sdvTNc/8Av6g1ejA6kc3DvO2NrjZx7d/+dGGV5ivwXqiMQLrVELicY9AQZUU3", + "zAoKJynEcgmx98XcxHPQAW5gYy8mkG6CyNIumlpI+/mTFBkdoJ4WxsMoS1NMghbGfPKvh16uINNHpqTm", + "Soi25gauquRz/W9hl/3Ey9opGUKbNjzXu526l+8Ru361+RZ2OPLBqFcH2IFdQcvTK0AaTFn6m08mSmB9", + "z3RS/KN62dnCI3bqLL1Ld7Q1vijDOPG3t0ynaEF3Kbc5GG2QhINlym5cpGMT3OmBLuL7pHxoE0RxWAaJ", + "5P14KmFCCcvhVdTkojhEu6+Bl4F4cTmzD/PZ7SIBUreZH/EArl82F2gSzxhpSp7hTmDPkSjnVaXVFS8z", + "Hy8xdvlrdeUvf2wewis+siaTpuzXX529eOnB/zCf5SVwnTWWgNFVYbvqT7MqKuOw/yqhbN/e0EmWomjz", + "m4zMcYzFNWb27hmbBkVR2viZ6Cj6mItlOuD9IO/zoT60xD0hP1A1ET+tz5MCfrpBPvyKizI4GwO0I8Hp", + "uLhplXWSXCEe4NbBQlHMV3an7GZwutOno6WuAzwJ5/oBU1OmNQ7pE1ciK/LBP/zOpaevle4wf/8yMRk8", + "9NuJVU7IJjyOxGqH+pV9YeqEkeD1y+oXdxofPIiP2oMHc/ZL6T9EAOLvC/876hcPHiS9h0kzlmMSaKWS", + "fAP3m1cWoxvxcRVwCdfTLuizq00jWapxMmwolKKAArqvPfautfD4LPwvBZTgfjqZoqTHm07ojoGZcoIu", + "xl4iNkGmGyqZaZiS/ZhqfATrSAuZvS/JQM7Y4RGS9QYdmJkpRZ4O7ZAL49irpGBK15hh4xFrrRuxFiOx", + "ubIW0Viu2ZScqT0gozmSyDTJtK0t7hbKH+9ain/WwEThtJqlAI33Wu+qC8oBjjoQSNN2MT8w+ana4W9j", + "B9njbwq2oH1GkL3+u+eNTyksNFX058gI8HjGAePeE73t6cNTM71mW3dDMKfpMVNKpwdG5511I3MkS6EL", + "ky21+hXSjhD0HyUSYQTHp0Az768gU5F7fZbSOJXbiu7t7Ie2e7puPLbxt9aFw6KbqmM3uUzTp/q4jbyJ", + "0mvS6Zo9kseUsDjCoPs0YIS14PGKgmGxDEqIPuKSzhNlgei8MEufyvgt5ymN355KD/Pg/WvJrxc8VSPG", + "6UIOpmh7O3FSVrHQOWyAaXIc0OwsiuBu2grKJFeBbn0Qw6y0N9RraNrJGk2rwCBFxarLnMIUSqMSw9Ty", + "mkuqIu76Eb/yvQ2QC971ulYa80CadEhXAbnYJM2xb9++KfJh+E4hVoIKZNcGogrMfiBGySaRinwV6yZz", + "h0fN+ZI9nEdl4P1uFOJKGLEoAVs8ohYLbvC6bNzhTRe3PJB2bbD54wnN17UsNBR2bQixRrFG90QhrwlM", + "XIC9BpDsIbZ79AX7BEMyjbiC+w6LXgiaPX30BQbU0B8PU7esL3C+j2UXyLNDsHaajjEmlcZwTNKPmo6+", + "XmqAX2H8dthzmqjrlLOELf2FcvgsbbjkK0i/z9gcgIn64m6iO7+HF0neADBWqx0TNj0/WO7408ibb8f+", + "CAyWq81G2I0P3DNq4+ipLa9Mk4bhqNa/rxcV4AofMf61CuF/PVvXR1Zj+GbkzRZGKX+PPtoYrXPGKfln", + "KdrI9FCvk52H3MJYQKupm0W4cXO5paMsiYHqS1ZpIS3aP2q7zP7i1GLNc8f+TsbAzRafP0kUourWapHH", + "Af7R8a7BgL5Ko16PkH2QWXxf9olUMts4jlLcb3MsRKdyNFA3HZI5Fhe6f+ipkq8bJRslt7pDbjzi1Lci", + "PLlnwFuSYrOeo+jx6JV9dMqsdZo8eO126MdXL7yUsVE6VTCgPe5e4tBgtYArfDGX3iQ35i33QpeTduE2", + "0P++8U9B5IzEsnCWk4pA5NHc91jeSfE/fddmPkfHKr1E7NkAlU5YO73d7iNHGx5ndev7bylgDL+NYG4y", + "2nCUIVZGou8pvL7p83vEC/VBoj3vGBwf/cK008FRjn/wAIF+8GDuxeBfHnc/E3t/8CCdgDhpcnO/tli4", + "jUaMfVN7+KVKGMBC1cImoMjnR0gYIMcuKffBMcGFH2rOuhXiPr4UcTfvu9LRpulT8PbtG/wS8IB/9BHx", + "OzNL3MD2lcL4Ye9WyEySTNF8j+LcOftSbacSTu8OCsTzB0DRCEommudwJYMKoEl3/cF4kYhG3agLKJVT", + "MuOiQLE9/8+DZ7f4+R5s16Isfmpzu/UuEs1lvk5GCS9cx59JRu9cwcQqk3VG1lxKKJPDkW77c9CBE1r6", + 
"P9TUeTZCTmzbr0BLy+0trgW8C2YAKkzo0Cts6SaIsdpNm9WkZShXqmA4T1vUomWOw1LOqRKaiffNOOym", "tj5uFd+C+4RDS1FiGGbab4wtM83tSAItrHce6gu5cbD8uCEzA40OmnGxwYvZ8E1VAp7MK9B8hV2VhF53", - "TKGGI0cVK5ip3CdsiQkrFLO1lkwtl9EyQFqhodzNWcWNoUEeumXBFueePXv08GHS7IXYmbBSwmJY5o/t", - "Uh6dYhP64ossUSmAo4A9DOvHlqKO2dgh4fiakv+owdgUT8UP9HIVvaTu1qZ6kk3t0xP2LWY+ckTcSXWP", - "5sqQRLibULOuSsWLOSY3fvP12UtGs1IfKiFP9SxXaK3rkn/SvTI9wWjI7DSSOWf6OPtTebhVG5s15SdT", - "uQldi7ZApujF3KAdL8bOCXtBJtSmgD9NwjBFtt5AEVW7JCUeicP9x1qer9E22ZGAxnnl9EKsgZ21npvo", + "TKGGI0cVK5ip3CdsiQkrFLO1lkwtl9EyQFqhodzNWcWNoUEeumXBFueePX308GHS7IXYmbBSwmJY5g/t", + "Uh6dYhP64ossUSmAo4A9DOuHlqKO2dgh4fiakv+swdgUT8UP9HIVvaTu1qZ6kk3t0xP2DWY+ckTcSXWP", + "5sqQRLibULOuSsWLOSY3fv3V2QtGs1IfKiFP9SxXaK3rkn/SvTI9wWjI7DSSOWf6OPtTebhVG5s15SdT", + "uQldi7ZApujF3KAdL8bOCXtOJtSmgD9NwjBFtt5AEVW7JCUeicP9x1qer9E22ZGAxnnl9EKsgZ21npvo", "9WFT/QgZtoPb12KlUqxzpuwa9LUwgC/y4Qq66RCb3KDeNh7SI3aXp2spiVJOjhBGm1pHx6I9AEeSbAgq", - "SELWQ/yRlimqx3xsXdoL7JV+i9Erctvz+ofkeiHFNvveOxdyLpUUOZZCSEnSmLptmptyQtWItH/RzPwJ", + "SELWQ/yRlimqx3xsXdoL7JV+i9Erctvz+ofkeiHFNvvOOxdyLpUUOZZCSEnSmLptmptyQtWItH/RzPwJ", "TRyuZGnd5i2wx+Josd3ACD3ihi7/6KvbVKIO+tPC1pdcW4E1nrNBMQ+Vrr1DTEgDvpqVI6KYTyqdCGpK", - "PoRoAiiOJCPMyjRi4fzGffvB278xKcalkGjp8mjz+hm5rEoj0DMtmbBspcD49XRf85i3rs8JZmksYPv+", - "5KVaifxCrHAMCqNzy6aY0eFQZyGC1EdsurbPXVufO7/5uRMORpOeVZWfdLwOelKQtFs5iuBU3FIIJImQ", - "24wfj7aH3PaGfuN96ggNrjBqDSq8hweE0dTS7o7ytdMtiaKwBaMXlckEukImwHgpZHChpi+IPHkl4Mbg", - "eR3pZ3LNLekOk3jaG+DlyAMIfKFMPvjbDtWvHOBQgmsMc4xvY1sGfIRxNA1aiZ/LHQuHwlF3JEw852UT", - "Op0o6o1SlReiCnxc1CvznWIcjnFn4clkB10Hn+813bEax7E30ViOwkVdrMBmvChSqa2+wq8Mv4ZHYrCF", + "PoRoAiiOJCPMyjRi4fzaffve278xKcalkGjp8mjz+hm5rEoj0DMtmbBspcD49XRf85g3rs8JZmksYPvu", + "5IVaifxCrHAMCqNzy6aY0eFQZyGC1EdsurbPXFufO7/5uRMORpOeVZWfdLwOelKQtFs5iuBU3FIIJImQ", + "24wfj7aH3PaGfuN96ggNrjBqDSq8hweE0dTS7o7yldMtiaKwBaMXlckEukImwHghZHChpi+IPHkl4Mbg", + "eR3pZ3LNLekOk3jaa+DlyAMIfKFMPvjbDtWvHOBQgmsMc4xvY1sGfIRxNA1aiZ/LHQuHwlF3JEw842UT", + "Op0o6o1SlReiCnxc1CvznWIcjnFn4clkB10Hn+813bEax7E30ViOwkVdrMBmvChSqa2+xK8Mv4ZHYrCF", "vG6KUDWvA7s5yofU5ifKlTT1Zs9cocEtp4vq5ieoIa7dH3YYM+0sdvhvqgLT+M74oOmjX+WGCOniuMT8", - "w1fGKanX0XRmxCqbjgm8U26PjnbqmxF62/9OKT081/1TvMbtcbl4j1L87Wt3ccSJewfx6XS1NHl1MRZc", + "w1fGKanX0XRmxCqbjgm8U26PjnbqmxF62/9OKT081/1DvMbtcbl4j1L87St3ccSJewfx6XS1NHl1MRZc", "4feQ8KjJCNnlSniVDeqMYdQDbl5iy3rAh4ZJwK94OfISPvaV0P1K/oOx9/D5aPoGbn16LsvZXhY0mvKI", - "YoV73pehC3EsPpjCg+/Oa+HXuheh47677zqeOooRa5nFqIfuZk60doOP9aJ9dzWWIiHU6cDvcT0QH8Uz", - "92ng4UqoOkRfhRjooBLSrz4FT6fux8j6ky8L/mivxaiP5Y2vX0vL9Dr5dz+TF5aBtHr3J/C4DDa9X1Qm", - "Ie2SeaptwprSh5NKIXZuxSk1bFLlUrxsGGxlxFo6tDQoPzMgqxdTxIEBPj7OZ+fFURdmquTOjEZJHbuX", - "YrW2mLH/b8AL0K8OVCRoqxDgEauUEW0F0tIN5lPArnG4k6mPDRwBi7iiwnCsEIR6BbnFsrNtcJ0GOKa+", + "YoV73pehC3EsPpjCg+/Oa+HXuheh4767bzueOooRa5nFqIfuZk60doOP9aJ9ezWWIiHU6cDvcT0QH8Uz", + "92ng4UqoOkRfhRjooBLSrz4FT6fux8j6ky8Lfm+vxaiP5bWvX0vL9Dr5tz+RF5aBtHr3B/C4DDa9X1Qm", + "Ie2SeaptwprSh5NKIXZuxSk1bFLlUrxsGGxlxFo6tDQoPzMgq+dTxIEBPj7MZ+fFURdmquTOjEZJHbsX", + "YrW2mLH/b8AL0C8PVCRoqxDgEauUEW0F0tIN5lPArnG4k6mPDRwBi7iiwnCsEIR6BbnFsrNtcJ0GOKa+", "gpssOH3+VZlgXJ1u3mT4ggT7qhAMa80euOMHiZOi5F9Up/Nkes79syaEml6AXXPTpmvpvZme/HJzuYQc", - "syLvTVT1X2uQURKkebDLICzLKG+VaN4xYV7v462OLUD78kjthSeqr3NrcMbesV/C7p5hHWpIFg5tHvHd", - "JHEwYoBcYCGH9Jgh2UeNCdNQBmIhhAT7VMxtcYzRnM9R2rUbzhVI0l0cbSq2PVOmi55Pmst1PSrtIz7J", - "GctlNayZPK5/vMAS1cYHyPEm8XCspbPzYeGca5+4GNOKNb6TkMIYTPgt5BCkWUpx6esHIFbIU3XNdRFa", - "3ElSKLqbRBroZTOzaB9wDIMcEqUY8C1UXionRmRjD8q6byaagMN7hiJD2wQ+CNcStIaicYmUykBmVXjw", - 
"sQ+Ofaig8NcbIcGMlj8i4EZTX79uc3tjGTiOqa65j3qNF8g0bLiDTkcZuMfn3Ifs5/Q9PMIPZcAOWpga", - "ej1cjzY83RFmgMSY6pfM35aHH/ffxNgkpASdBc9TPx237GZkw7ybRZ3TBR0fjMYgNzl3zh5WkrTT5MNV", - "9nSE6JH8JexOSQkKhXzDDsZAk+REoEcJR3ubfKfmN5OCe3Un4P2xeeQqpcpsxNlxPswh3qf4S5FfAuYA", - "bELcR2q0s8/Qxt54s6/Xu5Azu6pAQnH/hLEzSY+KgmO7W16wN7m8Z/fNv8VZi5rS+nuj2sk7mX6dgQn3", - "9S25WRhmPw8z4FjdLaeiQQ5kqN7KsZCba0zO363ieTJVKx+6mvtV5FuiIihSMskFeaye40FPGY4wBUKU", - "qwMdmZx5TxczpUrF8t4kTYMbKo2peDIEyIKcki2ggcIPnkRAsi564hRS6juf9E4tmYbWiXzT7H/DEu4p", - "jb4/czNLl98tlYZOMXbXmzJ9Ng9fMI0m/mchrOZ6d5McfYMS8gPrySiWD4ZjNZFY7ULaaKwhDstSXWfI", - "rLKmzkVKtXXtTPcyDkXX2n7uVC8giuvixgtqO7bmBcuV1pDHPdLvPQmqjdKQlQrDvFIe6KV1cvcGH3lJ", - "VqoVU1WuCqB6MWkKGpurlpKj2ARRVE0SBUQ7+FqY+kR0PHFKd6eSHylDUWt1RO38HOjlepvViRadkS9z", - "JGIZjM/i5DFEjYfw7qn9f1SllnMMY7wSGOvSfbRP0mfl7pgmk0F85i7iNEPMrrWqV+sooTO7FmUZDAZu", - "G3TtFdB4lJ9MjeFI+GLLTfGUbZSxXrOjkUwzVBvi9VmupNWqLLtGIBKJV96y/T3fnuW5fanU5YLnl/dR", - "j5TKNist5uE9cz8Yr51J91J5dS+8jMqHH06NS+0wNM0TyWSG1GMpRxdSj8B8f5hjHbZxnw0X1l9Xl3ml", - "1YYzybhVG5GnafifK7ptNCYtxRKSOcKoliFldcBmyKjjy6EJZkCWNEQzSJ4sxnbGPE/zTl1kHu6/KPH2", - "x2VL8JfEyMU05JNeasnyUdmqBwBCSk+Nba2pAGIs+TRcRa0oNQG6pPuATuTiGPlzO9jcCHcOlIVbATWI", - "NmwA/IyU/TnlcqPIxYXahu/322RvNwL+434q7zCPsZCqi5a0NAVVhcQwIxwhnVJ6b/zRG3xmvpgahdQU", - "q514o0YAjMcldWCYFJ10LBhLLkooslStw/PGJjSPNFv/FKpfglwYz8lzXodSg27sWoNPVEIite76myru", - "SEk1zYeWW1nAFugdxW+gFdUQnEf+DiipxGBP+VZVVsIVdMK1fPaUGkU7cQWhr2k6swKgQu9f3yaVikOK", - "7/KeocKvPYsiWaZgN2m5IMTSTrEDZomkEWUrMzomZupRchBdiaLmHfyZY0WOrtnNHeUEqgYyeRb0tqnT", - "/EQjvA4DnIX+KVEmYOL9ND50NAtKo24fAzoYl1ibsVMv02GJcWqgxqGBsxWN45NIvOUbpuLXctwAOCT5", - "Vr2ZuE9CyQixX28hR6mmG3d3e5wwHIyZXtqvURFcNzt8c0PyH0LDe0l4dLyUqmHAP1TbY6kJdOEFdmyA", - "RaelE3ud1IzlBD3/9/xvzhZ1GMjp1VTdMNbgXkDw2GEm8sZZ4QVa0VxoIb5w7hNR9pVyEUVWb/iOKY3/", - "OH3tHzUvxXKHJ5TAD92YWXNHQt5FSL5rH6/oJt4vmMwDYMEuoMJUtG4xdcxouJ0bJQLaXYGhDI1iG34J", - "8TagW544T24dyzH1YiOMwcuut51DLPjFh2QiG17EOjKmNOwW/A5Jbl3v/7t9tRVPFTKRVSXPQy1LX0yn", - "YxCnerWBuOwaNvuf9Q3V40ACTQ3clmh1eAde3MC4d2TkRipWfqxQSAfsQW3QQY2UWy1joo2yVw1iz4PI", - "SUu5612YGh8yADquKHgI/LjA4qfBfzLb6NgypoD/Z8H7SEnVGF6qnvoJsNzJFZGAleyqC7XNNCzNoVAI", - "Mqw6RVi3WSaCcVLIXAM3FBty/qNX2dpkmkI6FZKiFxvvWzNKAUshW2YpZFXbhAaAOTXlLkJYbJ5GtI44", - "e8akBCeGXfHyxyvQWhRjG+dOBxUfjIsZBJO875tQ/ps7dTiAMK32gy8JoX2pFjVzFziVS6LAQmO5LLgu", - "4uZCshy0u/fZNd+Zm/s+HLS6dvLFAe8Hj6SZ7vv2yA+CpE2AlDvvvrylZ6IBkN+hi2KCawEjWBNuBTKK", - "WDXiSRjCkM7HwbdZqVb4vmyEAH3WUvT9kLKiJBpsSR46bh4jfoP902DCdn/wrcJZp0yx/5z9iKhDhecn", - "Kezek0bWtP6DP4rIpIMQ6F+u2rBw2pwh/afeaPq0HPE7zSDchUcMYa8pPITmgxFPRteCO7KL6CD3D3xj", - "c+30QlhdH3zqJSjpsBnqtmZP4DeYNsiZ5z5wZ2j0GSjFhJS5f0d7pE2ILMnhHhgBj6qW+7PVnbYJpnDj", - "HFM9bP/L2axSVZZPiQakmg6FN2h7SLswjtBHZK4eWXcTOGGaKiedjDidcifHFlAbLbdyyC9T5fuU7DGD", - "xggH7RrL1RJ5GdX0RjsMvvFojBfz/uujrsGmYRKMMw15rdGgec13hwtSjeQSvvjb2eePHv/y+PMvmGvA", - "CrEC0+aj7hV0aiPGhOzbWT5tjNhgeTa9CeFdOiEueMrCc5tmU/xZI25r2mSTg3JWx1hCExdA4jgmCgnd", - "aK9wnDbo+8+1XalF3vmOpVDw+++ZVmWZrgfQiG4JU39qtyJjv5P4K9BGGOsYYddXJ2wbK2vWaI7DrLBX", - "lGdEydyn7W+oQNiRYJzUQsZCLZGf4atf799gsK1Kz6vIJ7FvXV4vIosYBmdg/MYCWKUqL0qLJUtBhG9L", - "dPTm0hsaMbwzip5smC3FUaYI0cckp0kvLqW8n9t3y3zaNKd3m5gQL8KhvAFpjlnSx1+034STtKb0Pw3/", - "SDzRvzOu0Sz39+AVSf3gZuXaJ4E2fK6dIA8EYOQdZucFXfSEKEpRq8kqj/b74Orsix/fty7Qgw8GEJLQ", - "4QB48cPKtl0T4+7B+YNzvX7fICVayvsxSugs/9BbzcB6m4sk2iJvpLAWDLElNRQLo4e45nnzvnVEKxk8", - "g9VKWeY007JMPJ8luwmeqZhwnEqgr3j56bnGN0Ibe4b4gOL1+KOZ+A1ljGRCpblZBreXfNLc0XvJu5ta", - "vsInu/8Fbo+S95wfyruLB7cZWr2wlvkq3Ar0Cphd45gUDvToC7bwZRgqDbkwfTf0dRBOmieDoMXSh17C", - 
"1h54o3honT8rewsyXoaYEfZD5E5SaLZrIWyP6B/MVEZObpLKU9Q3IIsE/lI8Ki7beuC6uGXK/pslBIlS", - "ex2ZEGRYkHbq8ijphbt0agPDdU6+rTu4TVzU7dqmZrOZnPn/3bu3djElCU06S7/rjllw7iRd/1HJ+n+H", - "/DeEIz+GnzdFMT+PZUSlrJ8jWZt7+1GL8mCASCcH98f5bAUSjDCYZfoXX1Xk096lAQJ6kz88qgTrbRKJ", - "EGISa+1MHk0VZdeekFjbd0tkQ8b3bnmthd1hRdlgQBO/JDP1fNtkffBZQxrflb/7rLqEpqp3myOiNuF2", - "/VbxEu8jcqlJdwup8oR9Tbmf/UH5673Ff8CTvzwtHj559B+Lvzz8/GEOTz//8uFD/uVT/ujLJ4/g8V8+", - "f/oQHi2/+HLxuHj89PHi6eOnX3z+Zf7k6aPF0y++/I97jg85kAnQkPT92ez/y87KlcrOXp1nbxywLU54", - "Jb4DtzeoKy8VVjx0SM3xJMKGi3L2LPz0/4QTdpKrTTt8+HXmK/fM1tZW5tnp6fX19Unc5XSFj8Izq+p8", - "fRrmwTp0HXnl1XkTTU5xL7ijrfUYN9WTwhl+e/31xRt29ur8pCWY2bPZw5OHJ4980WPJKzF7NnuCP+Hp", - "WeO+n2LmxVPjk6qfVhWlVf84n516OvR/rYGXmF7F/bEBq0UePmngxc7/31zz1Qr0Cb4loJ+uHp8GieP0", - "g383/3Hft9M42uL0Qye9QHGgZ4gmONTk9EMonLp/wE7RTB/HFXWYCOi+ZqcLLJYytSnEqxtfCqoq5vQD", - "Ctujv596i0n6Iyo9dJpOQ5qOkZb0IDv9sYPCD3brFrJ/ONcmGi/nNl/X1ekH/A8ejGhFlN/x1G7lKTpl", - "Tz90EOE/DxDR/b3tHre42qgCAnBquaRqs/s+n36gf6OJYFuBFk7ixJwq/lfKfXWKRcd2w5930rs0S0hl", - "LPlJGiCNOOSb38m8fYjV8IrzIjS+2Mk8iMYhzhA5wOOHD2n6p/ifmS/K08vrcerP88w0Vcj3GmY6GRWR", - "v/Zscg289NwM7MkMYXj06WA4lxRb6BguXQwf57PPPyUWzqWTYXjJsCVN/+QTbgLoK5EDewObSmmuRblj", - "P8kmPDIqkZqiwEuprmWA3EkV9WbD9Q6l9Y26AsN89dWIOJkGJx9RCAW6+VsaxmuNOz7ydlbVi1Lksznl", - "z3yPEplNCSfBUDScKRjJ2sG7p+Lbg2di+i50Zd49CUsmwXngKTsNPxTYh/sb9r7vZqWp7qU2aPYvRvAv", - "RnCHjMDWWo4e0ej+wqxbUPkHlznP17CPHwxvy+iCn1UqlVbgYg+z8LUtxnjFRZdXtOF7s2dvp5V+854N", - "MloXYNxhPgkKi5PGW31CNxwpnHn0q0Z7va+q9cf3f4r7/TmX4Tx3dpxcl1yXAnRDBVwOy438iwv8H8MF", - "qG4Sp32dMwtlaeKzbxWeffLy+GSKkrxvE/lAJ/dlK0x3fj4NtomUDtpt+aHzZ1evMuvaFuo6mgWt+uSS", - "GmoZ7mNt+n+fXnNhs6XSPuUiVuofdrbAy1NfX6X3a5vSfPAF87RHP8aPG5O/nnKvbqS+Ia8b6zjQh1Nf", - "vco30ihEFofPrWUttlQhn21sVG/fOy6HJbg9C24NL89OT/GpyVoZezr7OP/QM8rEH983hBUqR84qLa4w", - "w/37+WybKS1WQvIy81aNtkjU7PHJw9nH/x0AAP//lHEJdNQGAQA=", + "syLvTVT19zXIKAnSPNhlEJZllLdKNO+YMK/38VbHFqB9eaT2whPV17k1OGPv2C9hd8+wDjUkC4c2j/hu", + "kjgYMUAusJBDesyQ7KPGhGkoA7EQQoJ9Kua2OMZozuco7doN5wok6S6ONhXbninTRc8nzeW6HpX2EZ/k", + "jOWyGtZMHtc/nmOJauMD5HiTeDjW0tn5sHDOtU9cjGnFGt9JSGEMJvwWcgjSLKW49PUDECvkqbrmuggt", + "7iQpFN1NIg30splZtA84hkEOiVIM+BYqL5UTI7KxB2XdNxNNwOE9Q5GhbQIfhGsJWkPRuERKZSCzKjz4", + "2AfHPlRQ+OuNkGBGyx8RcKOpr1+1ub2xDBzHVNfcR73GC2QaNtxBp6MM3ONz7kP2M/oeHuGHMmAHLUwN", + "vR6uRxue7ggzQGJM9Uvmb8vDj/tvYmwSUoLOguepn45bdjOyYd7Nos7pgo4PRmOQm5w7Zw8rSdpp8uEq", + "ezpC9Ej+EnanpASFQr5hB2OgSXIi0KOEo71NvlPzm0nBvboT8H7fPHKVUmU24uw4H+YQ71P8pcgvAXMA", + "NiHuIzXa2SdoY2+82dfrXciZXVUgobh/wtiZpEdFwbHdLS/Ym1zes/vm3+KsRU1p/b1R7eStTL/OwIT7", + "+pbcLAyzn4cZcKzullPRIAcyVG/lWMjNNSbn71bxPJmqlQ9dzf0q8i1RERQpmeSCPFbP8KCnDEeYAiHK", + "1YGOTM68p4uZUqVieW+SpsENlcZUPBkCZEFOyRbQQOEHTyIgWRc9cQop9Z1PeqeWTEPrRL5p9r9hCfeU", + "Rt+fuZmly++WSkOnGLvrTZk+m4cvmEYT/7MQVnO9u0mOvkEJ+YH1ZBTLB8OxmkisdiFtNNYQh2WprjNk", + "VllT5yKl2rp2pnsZh6JrbT93qhcQxXVx4wW1HVvzguVKa8jjHun3ngTVRmnISoVhXikP9NI6uXuDj7wk", + "K9WKqSpXBVC9mDQFjc1VS8lRbIIoqiaJAqIdfC1MfSI6njilu1PJj5ShqLU6onZ+DvRyvc3qRIvOyJc5", + "ErEMxmdx8hiixkN499T+T/Pmpdgi3YBOHfkls7qGOfMt+jWy/cHnGthGGEOgNLR0LcoSH46LbeR5bQIX", + "0qgdEXvPMazySmDsTTeJAEnDlbvzmswKMQ+4iNMeMbvWql6towTTDZxB5dW1V4jjUX40NYZH4QsyN8UT", + "tlHGek2TRmqX3IacfZIrabUqy65RikT0lbe0f8e3Z3luXyh1ueD55X3Ua6WyzUqLeXhf3Q8ObGfSvdRi", + "3Qs4o3Lmh1P1UjsMlfNEO5lB9ljc0YXdIzDfHeagh23uZ8OF9dfVZaZpNeZMMm7VRuTpM/XnirYbjZFL", + "sahkzjKqrUhZJrAZHvb4smqCK5BFDtEMkieLw50xzwi8kxnZjfsvSuD9cdkSPKMZuSiHzMVLUVk+Kuv1", + "AEBI6emzrTUVZIwlsYarqBWlSkAXeR/QibcKRiLdDjY3wp0DZeFWQA2iHxsAPyHjw5xyy1Ek5UJtw/f7", + 
"bfK5GwH/YT+Vd5jHWIjXRUtamoK8QqKaEY6QTnG9Nx7qNT57X0yNimqK50684SMAxuOkOjBMipY6Fowl", + "FyUUWar24nljo5pHmrZ/mtUviS6M5+Q5r0PpQzd2rcEnTiERX3f9XxV3pKSa5kNLsixgC/Su41fQimoa", + "ziP/C5RU8rBnDFBVVsIVdMLHfDaXGkVNcQWhr2k6swKgQm9k30aWiouK7/Ke4cSvPYsia6ZgN2lJIcTS", + "TrEDZpKkUWcrMzomZupRchBdiaLmHfyZY0WOrhnQHeUEqgY6Qhb0yKnT/EgjvAoDnIX+KVEmYOLdND50", + "NAtKo24fAzoYJ1mbsVMv02GScaqixsGCsxWNI5ZIvOUbpuLXctwgOST5Vt2auE9CyQixX20hR6nG6ztQ", + "eI1nxEnhs54gtUuAgrQC1yVhbV+DZFJFJSavuWlUlTaHYviBJsZGQnpt+gZO5Taa8fY7y3AwZnrJ1EYV", + "Cd3Q6c3N87/LSdx7EEfHS9GIAf/8b4/9K1C3VzuwAZbylm4/neyPRRr9Lea5+Jwt6jBQWaprqhkZ66HP", + "IfhBifqCC8iL5aK5lkPU5tyn9+ybOkQUr77hO6Y0/uO0zn/WvBTLHfIZAj90Y2bNHQl5xytFBPgoUDfx", + "fvFqHgAL1hYVpqJ1i6ljRsPt3CgR0O4iD8V9FNvwS4i3AYMdiH/m1jFOUy/QcuGu7N52DrHgFx9StGx4", + "EWv6mCiyW0Y9pA52vf//9i1cPFXI71aVPA8VQn2Joi6fwSrAgbjsGjb7H0sO+VoggaaycEu0OryuL25g", + "Mj2SdaVeIIyVX+mAPai4Oqg8c6tlTLT89mps7HlmOmkpd70LU6NuBkDHdRoPgR+Xrfw4+E/mcB1bxhTw", + "/yh4HylUG8NLNWk/ApY7GTgSsJK1eqG2mYalORRgQuZqp87rNndHMLEKmWvghiJuzn/wimebolRIpwhT", + "TGjj02xGKWApZMsshaxqm9BjMFOp3EUIi43+iNYRF9qYlOCEySte/nAFWotibOPc6aCSjnGJiODo8H0T", + "JozmTh0OIEyrw+H7zNaMHjdzFzgVoaJwTWO5LLgu4uZCshy0u/fZNd+Zm3uUGufAIZ8Sj6SZbtaAyLuE", + "pE2AlDvvFL6lv6cBkN+h42eCwwbjghPOGjLtWDXinxnC8Kdw2Gz4NivVCl8RjhwIn5sWPXykAiqJZnCS", + "z6atO8xjxK+wfxpMy+8ZkVU465Qp9p/7H3ArUY38UQq79+STjbL/rJPibulgBqTKVRv8T8QyPI+pl7g+", + "+Ur8GjcIm+GpSqA9iDYRRvxDXbv4yC5iGIR/xh0bwaeXO+tGWqTe+5JlIEOLgdkT3g+mDWXnuQ/PGprS", + "BqYGQsrcv5Y+0tJG9vlwL42AR7Xp/VnvTtuEzLhxjqkRt/99dFapKsunxHxS5Y7Cuwk8pF0YR+gjcgKM", + "rLsJjzFNLZtO3qNOUZtjy+SNFtU55O2q8n1K/5iZaISjd10Qaom8jCq3o3ULX/I0xpR5/41Z1wzWMAnG", + "mYa81mgmvua7w2XHRjJGX/zt7LNHj39+/NnnzDVghViBabOO98p2tXGBQvbtPh83EnCwPJvehJB9gBAX", + "/I/hUVWzKf6sEbc1bUrRQdGyY+zLiQsgcRwT5aJutFc4Thva/8fartQi73zHUij47fdMq7JMV31o5KqE", + "AyW1W5ELxWkgFWgjjHWMsOsBFbaNiDZrNA9i7t8ryiajZA7BfuypQNiRkKvUQsYCapGf4dtu7zVisK1K", + "z6vI07NvXV5PIwsdCo0YFbMAVqnKi/ZiyVIQ4QsiHb2s9YZPtIhHMbINs6Vo2RQh+sjzNOnFBbP3c/tu", + "MVeb5vRuExPiRTiUNyDNMf/EeN6Cm3CS1rT/h+EfiUQMd8Y1muX+FrwiqR/crCj/JNCGj/IT5IEAjLy2", + "7byTjB6KRYmINXkJ0J8QHMh98eO71rF88FkIQhI6HAAvfj7btmteMnhwfueMvt81SImW8m6MEjrLP/Qi", + "N7De5iKJtsgbTawFQ2xJDcXC6Lm1eda8Yh7RSgaPnbVSljnNtCwTj6TJjoNnKiYcpxLoK15+fK7xtdDG", + "niE+oHg1/jQqfikbI5lQaW6Wp+8FnzR39Cr27qaWL/Fh9t/B7VHynvNDeSf84DZD4w5WrF+FW4HeerNr", + "HJOCrB59zha+2EalIRem79y/DsJJ8zAUtFj6gFbY2gMvUQ+t8ydlb0HGyxCJw76P3FuNz95D2B7R35mp", + "jJzcJJWnqG9AFgn8pXhUXJz3wHVxy8IMN0v7EiVwOzLty7Ds8NTlUWoTd+nUBobrnHxbd3CbuKjbtU3N", + "WTS5vsPbt2/sYkqqoXQtBtcdcx3dSVGGo0oy/AZZjghHfgw/b4pifhrLe0u5XUdyc/f2oxblwYCVTqb1", + "D/PZCiQYYTCX+M++dszHvUsDBJR5YXhUCdbbpIshxCTW2pk8mirKoT4hfbrvlsh5ja8a81oLu8O6wcGA", + "Jn5O5mP6psnt4XPDNL40f/dZdQlN7fY2E0htwu36jeIl3kfk4pPuFlLlCfuKMnz7g/LXe4v/gE//8qR4", + "+Omj/1j85eFnD3N48tkXDx/yL57wR198+gge/+WzJw/h0fLzLxaPi8dPHi+ePH7y+Wdf5J8+ebR48vkX", + "/3HP8SEHMgEaUvs/nf2v7Kxcqezs5Xn22gHb4oRX4ltwe4O68lJhXUuH1BxPImy4KGdPw0//I5ywk1xt", + "2uHDrzNfn2m2trYyT09Pr6+vT+Iupyt8+p9ZVefr0zAPVhvsyCsvz5sYfYrDwR1trce4qZ4UzvDbq68u", + "XrOzl+cnLcHMns4enjw8eeRLW0teidnT2af4E56eNe77KebXPDU+df5pVVHy/A/z2amnQ//XGniJSXTc", + "HxuwWuThkwZe7Pz/zTVfrUCf4AsN+unq8WmQOE7f++wIH/Z9O42jP07fd5JIFAd6huiGQ01O34fyuPsH", + "7JRG9XFlUYeJgO5rdrrAkjhTm0K8uvGloKpiTt+jsD36+6m3mKQ/otJDp+k0JGMZaUnP7tMfOyh8b7du", + "IfuHc22i8XJu83Vdnb7H/+DBiFZEWTxP7VaeopP49H0HEf7zABHd39vucYurjSogAKeWS6opvO/z6Xv6", + "N5oIthVo4SROzJzjf6UMZ6dYWm43/HknvUuzhFRemh+lAdKIQ1WBnczb520NrzgvQuOLncyDaBziHpED", + "PH74kKZ/gv+Z+dJLvewtp/48z0xTa36vYaaTNxP5a88m18BLj/jAnswQhkcfD4ZzSbGOjuHSxfBhPvvs", + 
"Y2LhXDoZhpcMW9L0n37ETQB9JXJgr2FTKc21KHfsR9mEa0aFcFMUeCnVtQyQO6mi3my43qG0vlFXYJiv", + "sRsRJ9Pg5CMK6UA3f0vDeK1xx0fezKp6UYp8Nqcsqe9QIrMp4SQYioYzBSNZO3j3VHxz8ExM34WuzLsn", + "Lc0kOA8kLKDhhwL7cH/D3vfdrDTVvdQGzf7FCP7FCO6QEdhay9EjGt1fmFsNKv+MNef5Gvbxg+FtGV3w", + "s0qlkkdc7GEWvoLJGK+46PKKNpxw9vTNtAJ/3rNBRusCjDvMJ0FhcdJ4q0/ohiOFM49+1Wiv99Uu//Du", + "D3G/P+MynOfOjpPrkutSgG6ogMthUZl/cYH/Z7gAVcfitK9zZqEsTXz2rcKzT14enzJTkvdtIh/oZDht", + "henOz6fBNpHSQbst33f+7OpVZl3bQl1Hs6BVn1xSQy3DfaxN/+/Tay5stlTaJ9bkSwt62NkCL099FZ3e", + "r23i+sEXzMYf/Rg/GU3+esq9upH6hrxurONAH0599SrfSKMQ6Rw+t5a12FKFfLaxUb1557gcFlr3LLg1", + "vDw9PcWnL2tl7Onsw/x9zygTf3zXEFaoDzqrtLjCOgbv5rNtprRYCcnLzFs12lJgs8cnD2cf/m8AAAD/", + "/xb9Ejq6CAEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index f78cbd49a6..5d9044ed56 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -130,230 +130,231 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuLnbg8TvZ2bV8CkS0JbyiADwBnpPj8", - "3a/QDZAgCUrUzMTJq9q/7BHxo9FoNBr988MsV5tKSZDWzJ5+mFVc8w1Y0PgXz3NVS5uJwv1VgMm1qKxQ", - "cvY0fGPGaiFXs/lMuF8rbtez+UzyDbRtXP/5TMM/aqGhmD21uob5zORr2HA3sN1VrnUz0jZbqcwPcUZD", - "nD+ffdzzgReFBmOGUP4oyx0TMi/rApjVXBqeu0+GXQu7ZnYtDPOdmZBMSWBqyey605gtBZSFOQmL/EcN", - "ehet0k8+vqSPLYiZViUM4XymNgshIUAFDVDNhjCrWAFLbLTmlrkZHKyhoVXMANf5mi2VPgAqARHDC7Le", - "zJ6+nRmQBWjcrRzEFf53qQF+g8xyvQI7ez9PLW5pQWdWbBJLO/fY12Dq0hqGbXGNK3EFkrleJ+xlbSxb", - "AOOSvf72Gfv888+/cgvZcGuh8EQ2uqp29nhN1H32dFZwC+HzkNZ4uVKayyJr2r/+9hnOf+EXOLUVNwbS", - "h+XMfWHnz8cWEDomSEhICyvchw71ux6JQ9H+vICl0jBxT6jxnW5KPP8fuis5t/m6UkLaxL4w/Mroc5KH", - "Rd338bAGgE77ymFKu0HfPsy+ev/h0fzRw4//8vYs+y//5xeff5y4/GfNuAcwkGyY11qDzHfZSgPH07Lm", - "coiP154ezFrVZcHW/Ao3n2+Q1fu+zPUl1nnFy9rRici1OitXyjDuyaiAJa9Ly8LErJalY1NuNE/tTBhW", - "aXUlCijmjvter0W+Zjk3NAS2Y9eiLB0N1gaKMVpLr27PYfoYo8TBdSN84IL+vMho13UAE7BFbpDlpTKQ", - "WXXgego3DpcFiy+U9q4yx11W7M0aGE7uPtBli7iTjqbLcscs7mvBuGGchatpzsSS7VTNrnFzSnGJ/f1q", - "HNY2zCENN6dzj7rDO4a+ATISyFsoVQKXiLxw7oYok0uxqjUYdr0Gu/Z3ngZTKWmAqcXfIbdu2//3xY8/", - "MKXZSzCGr+AVzy8ZyFwVUJyw8yWTykak4WkJceh6jq3Dw5W65P9ulKOJjVlVPL9M3+il2IjEql7yrdjU", - "GybrzQK029JwhVjFNNhayzGAaMQDpLjh2+Gkb3Qtc9z/dtqOLOeoTZiq5DtE2IZv//pw7sExjJclq0AW", - "Qq6Y3cpROc7NfRi8TKtaFhPEHOv2NLpYTQW5WAooWDPKHkj8NIfgEfI4eFrhKwInDDIKTjPLAXAkbBM0", - "4063+8IqvoKIZE7YT5654VerLkE2hM4WO/xUabgSqjZNpxEYcer9ErhUFrJKw1IkaOzCo8MxGGrjOfDG", - "y0C5kpYLCYVjzgi0skDMahSmaML9753hLb7gBr58MnbHt18n7v5S9Xd9745P2m1slNGRTFyd7qs/sGnJ", - "qtN/wvswntuIVUY/DzZSrN6422YpSryJ/u72L6ChNsgEOogId5MRK8ltreHpO/nA/cUydmG5LLgu3C8b", - "+ullXVpxIVbup5J+eqFWIr8QqxFkNrAmH1zYbUP/uPHS7Nhuk++KF0pd1lW8oLzzcF3s2PnzsU2mMY8l", - "zLPmtRs/PN5sw2Pk2B5222zkCJCjuKu4a3gJOw0OWp4v8Z/tEumJL/Vv7p+qKl1vWy1TqHV07K9kVB94", - "tcJZVZUi5w6Jr/1n99UxAaCHBG9bnOKF+vRDBGKlVQXaChqUV1VWqpyXmbHc4kj/qmE5ezr7l9NW/3JK", - "3c1pNPkL1+sCOzmRlcSgjFfVEWO8cqKP2cMsHIPGT8gmiO2h0CQkbaIjJeFYcAlXXNqT9snS4QfNAX7r", - "Z2rxTdIO4bv3BBtFOKOGCzAkAVPDe4ZFqGeIVoZoRYF0VapF88NnZ1XVYhC/n1UV4QOlRxAomMFWGGvu", - "4/J5e5Liec6fn7Dv4rFRFFey3LnLgUQNdzcs/a3lb7FGt+TX0I54zzDcTqVP3NYENDgx/y4oDp8Va1U6", - "qecgrbjGf/NtYzJzv0/q/M9BYjFux4kLH1oec/TGwV+ix81nPcoZEo5X95yws37fm5GNG2UPwZjzFot3", - 
"TTz4i7CwMQcpIYIooia/PVxrvpt5ITFDYW9IJj8ZIAqp+EpIhHbunk+Sbfgl7YdCvDtCANO8i4iWSIJs", - "VKhe5vSoPxnoWf4JqDW1sUESdZJqKYzFdzU2ZmsoUXDmMhB0TCo3oowJG75nEQ3M15pXRMv+C4ldQuJ7", - "nhoRrLe8eCfeiUmYI3YfbTRCdWO2fJB1JiFBrtGD4etS5Zd/42Z9Byd8EcYa0j5Ow9bAC9Bszc06cXB6", - "tN2ONoW+XUOkWbaIpjpplvhCrcwdLLFUx7CuqnrGy9JNPWRZvdXiwJMOclky15jBRqDC3D8cScNO7y/2", - "Dc/XTixgOS/LeasqUlVWwhWU7tEupAQ9Z3bNbXv4ceTwrsFzZMAxOwssWo1XM6GKTTe6CA1sw/EG2rjX", - "TFV2+zQc1PAN9KQgvBFVjVqE6KFx/jysDq5AIk9qhkbwmzWitiYe/MTN7T/hzFLR4kgDaIP5rsFfwy86", - "QLvW7X0q2ymULkhnbd1vQrNcaRqCbng/ufsPcN12Jur8rNKQ+SE0vwJteOlW11vU/YZ87+p0HjiZBbc8", - "OpmeCtMPMOIc2A/FO9AJLc2P+B9eMvfZSTGOklrqESiMqMicWtDF7FBFM7kGqG9VbEOqTFbx/PIoKJ+1", - "k6fZzKST9w1pT/0W+kU0O/RmKwpzV9uEg43tVfeEkO4qsKOBLLKX6URzTUHAG1UxYh89EIhT4GiEELW9", - "82vta7VNwfS12g6uNLWFO9kJN85kZv+12j73kCl9GPM49hSkuwVKvgGDt5uMGaebpbXLnS2Uvpk00btg", - "JGutjYy7USNhat5DEjatq8yfzYTFghr0BmodPPYLAf3hUxjrYOHC8t8BC8aNehdY6A5011hQm0qUcAek", - "v04KcQtu4PPH7OJvZ188evzL4y++dCRZabXSfMMWOwuGfebVcszYXQn3k68jlC7So3/5JNiouuOmxjGq", - "1jlseDUcimxf9PqlZsy1G2Kti2ZcdQPgJI4I7mojtDMy6zrQnsOiXl2Ate6l+0qr5Z1zw8EMKeiw0atK", - "O8HCdO2EXlo6LVyTU9hazU8rbAmyID8Dtw5h3Btws7gTohrb+KKdpWAeowUcPBTHblM7zS7eKr3T9V2o", - "N0BrpZNXcKWVVbkqMyfnCZVQULzyLZhvEbar6v9O0LJrbpibG62XtSxG9BB2K6ffXzT0m61scbP3BqP1", - "Jlbn552yL13kt6+QCnRmt5IhdXbUI0utNoyzAjuirPEdWJK/xAYuLN9UPy6Xd6PtVDhQQo8jNmDcTIxa", - "OOnHQK4kOfMdUNn4Uaegp4+YYGWy4wB4jFzsZI6msrs4tuParI2QaLc3O5lHqi0HYwnFqkOWt1dhjaGD", - "prpnEuA4dLzAz6irfw6l5d8q/aYVX7/Tqq7unD3355y6HO4X460Bhesb1MBCrsquA+nKwX6SWuMfsqBn", - "jRKB1oDQI0W+EKu1jd6Lr7T6He7E5CwpQPEDKYtK12eoMvpBFY6Z2NrcgSjZDtZyOEe3MV/jC1VbxplU", - "BeDm1yYtZI64HKKvE7po2VhuRf2EMGwBjrpyXrvV1hVDB6TBfdF2zHhOJzRD1JgR94vGb4Za0XTkzlZq", - "4MWOLQAkUwvv4+C9L3CRHL2nbBDTvIib4BcduCqtcjAGisyrog+CFtrR1WH34AkBR4CbWZhRbMn1rYG9", - "vDoI5yXsMvT1M+yz73829/8AeK2yvDyAWGyTQm9fnzaEetr0+wiuP3lMdqSpI6p14q1jECVYGEPhUTgZ", - "3b8+RINdvD1arkCjS8nvSvFhktsRUAPq70zvt4W2rkY82P0z3Ul4bsMklyoIVqnBSm5sdogtu0YdXYJb", - "QcQJU5wYBx4RvF5wY8kNSsgCdZp0neA8JIS5KcYBHn2GuJF/Di+Q4di5uwelqU3zHDF1VSltoUitAS2y", - "o3P9ANtmLrWMxm7ePFax2sChkcewFI3vkeVfwPgHt4391Vt0h4tDm7q753dJVHaAaBGxD5CL0CrCbuzF", - "OwKIMC2iiXCE6VFO4zo8nxmrqspxC5vVsuk3hqYLan1mf2rbDomLjBx0bxcKDBpQfHsP+TVhlvy319ww", - "D0cwsaM6h/y1hjC7w5gZIXPI9lE+PvFcq/gIHDykdbXSvICsgJLvEs4B9JnR530D4I63z11lISNH3PSm", - "t5Qc/B73DK1wPJMSHhl+Ybk7gu4p0BKI731g5AJw7BRz8nR0rxkK50puURgPl01bnRgRb8MrZd2Oe3pA", - "kD1HnwLwCB6aoW+OCuyctW/P/hT/CcZP0MgRx0+yAzO2hHb8oxYwogv2MU7Reemx9x4HTrLNUTZ2gI+M", - "HdkRxfQrrq3IRYVvne9hd+dPv/4EScM5K8ByUULBog/0DKzi/oxcSPtj3uwpOEn3NgR/oHxLLCe46XSB", - "v4QdvrlfUWxCpOq4i7dsYlR3P3HJENDg8exE8LgJbHluy50T1OwaduwaNDBTL8iFYWhPsarK4gGS9pk9", - "M3rrbNI2utdcfIFDRctL+ZrRm2A/fG96D4MOOvxboFKqnKAhGyAjCcEk3xFWKbfrwoc/hQCYQEkdID3T", - "RtN8c/3fMx004wrYf6qa5Vzik6u20Mg0SqOggAKkm8GJYM2c3jmxxRCUsAF6SeKXBw/6C3/wwO+5MGwJ", - "1yFm0DXso+PBA9TjvFLGdg7XHehD3XE7T1wfaLhyF59/hfR5ymGPJz/ylJ181Ru8sXa5M2WMJ1y3/Fsz", - "gN7J3E5Ze0wj07y9cNxJtpyuf9Bg3bjvF2JTl9zehdUKrniZqSvQWhRwkJP7iYWS31zx8semG8ZDQu5o", - "NIcsxyi+iWPBG9eHAv/cOEIKd4DJ6X8qQHBOvS6o04EnZuupKjYbKAS3UO5YpSEHindzkqNplnrCyBM+", - "X3O5wgeDVvXKO7fSOMjwa0OqGV3LwRBJocpuZYZK7tQF4N3UQsijE6eAuyddX0NOD5hr3szno1yn3MzR", - "HvQtBkkj2Xw2+uJ1SL1qX7yEnG7c5oTLoCPvRfhpJ55oSkHUOdlniK94W9xhcpv7+6js26FTUA4njjx+", - "249jTr/uuV3u7kDooYGYhkqDwSsqVlMZ+qqWcYx2cBXcGQuboSafuv4ycvxej74XlSyFhGyjJOySaUmE", - "hJf4MXmc8Joc6YwCy1jf/hukA38PrO48U6jxtvjF3e6f0L7Fynyr9F2ZRGnAyeL9BAvkQXO7n/KmdlJe", - "lgnToo/g7DMAM2+cdYVm3BiVC5TZzgsz917BZI304Z5d9L9q4lLu4Oz1x+3Z0OLkAKgjhrJinOWlQA2y", - 
"ksbqOrfvJEcdVbTUhBNXeIyPay2fhSZpNWlCi+mHeic5OvA1mqukw8YSEmqabwGC8tLUqxUY23vrLAHe", - "Sd9KSFZLYXGujTsuGZ2XCjR6Up1Qyw3fsaWjCavYb6AVW9S2K/1jgLKxoiy9Qc9Nw9TyneSWlcCNZS+F", - "fLPF4YLRPxxZCfZa6csGC+nbfQUSjDBZ2tnsO/qKfv1++Wvv44/u7vQ5OJ22GRNmbpmdJCn/97N/f/r2", - "LPsvnv32MPvqf5y+//Dk4/0Hgx8ff/zrX/9f96fPP/71/r//a2qnAuyp8FkP+flz/zI+f47Pn8hVvw/7", - "J9P/b4TMkkQWe3P0aIt9hqkiPAHd7yrH7BreSbuVjpCueCkKx1tuQg79G2ZwFul09KimsxE9ZVhY65GP", - "iltwGZZgMj3WeGMpauifmQ5UR6Okjz3H87KsJW1lkL4pDjP4l6nlvElGQHnKnjKMVF/z4OTp/3z8xZez", - "eRth3nyfzWf+6/sEJYtim8ojUMA29VaMgyTuGVbxnQGb5h4Ie9KVjnw74mE3sFmANmtRfXpOYaxYpDlc", - "CFnyOqetPJfk4O/OD5o4d95yopafHm6rAQqo7DqVv6gjqGGrdjcBem4nlVZXIOdMnMBJX+dTuPeid+or", - "gS+DY6pWasprqDkHRGiBKiKsxwuZpFhJ0U8vvMFf/ubOn0N+4BRc/TlTHr33vvvmDTv1DNPco5QWNHSU", - "hCDxlPbBkx2HJMfN4piyd/KdfA5L1D4o+fSdLLjlpwtuRG5OawP6a15ymcPJSrGnIR7zObf8nRxIWqOJ", - "FaOgaVbVi1Lk7DJ+kLTkScmyhiO8e/eWlyv17t37gW/G8Pngp0ryF5ogc4Kwqm3mU/1kGq65Ttm+TJPq", - "BUemXF77ZiUhW9WkIA2phPz4aZ7Hq8r0Uz4Ml19VpVt+RIbGJzRwW8aMVU08mhNQfEiv298flL8YNL8O", - "epXagGG/bnj1Vkj7nmXv6ocPP8fIvjYHwq/+ync0uatgsnZlNCVFX6mCC6dnJfqqZxVfpUxs7969tcAr", - "3H2Ulzeo4yhLht06UYchwACHahfQhDiPbgDBcXRwMC7ugnqFtI7pJeAn3MJuAPat9iuKn7/xdh2Iwee1", - "XWfubCdXZRyJh51psr2tnJAVvDGMWOFr1SfGWwDL15Bf+oxlsKnsbt7pHhx+vKAZWIcwlMuOIgwxmxIa", - "KBbA6qrgXhTnctdPa2MoogIHfQ2XsHuj2mRMx+Sx6aZVMWMHFSk1ki4dscbH1o/R33zvVRYCTX12Egze", - "DGTxtKGL0Gf8IJPIeweHOEUUnbQfY4jgOoEIIv4RFNxgoW68W5F+anlC5iCtuIIMSrESi1Qa3v8Y2sMC", - "rI4qfeZB74XcDGiYWDL3lF/Qxeqf95rLFbjr2V2pyvCSsqomnTbwPbQGru0CuN2r55dxQooAHT4przHy", - "GjV8c7cE2Lr9FhY1dhKu3asCFUXUxnsvn4z7nxHgUNwQntC9fSmcjL51PeoSGQfDrdxgt3nWete8mM4Q", - "Lvq+AUxZqq7dvjgolM+2SUldovulNnwFI2+X2Ho3MR9Gx+KHgxySSJIyiFr2RY2BJJAEmRpnbs3JMwzu", - "izvE+MzsOWSGmchA7G1GmETbI2xRogDbeK7S3nPdsaJSVuAx0NKsBbRsRcEARhcj8XFccxOOI+ZLDVx2", - "knT2O6Z92Zea7jzyJYySojaJ58Jt2Oegg3e/T1AXstKFVHTxo39CWjn39sLwhdR2KImiaQElrGjh1DgQ", - "Spswqd0gB8ePyyXylizllhgpqCMBwM8B7uXygDGyjbDJI6TIOAIbHR9wYPaDis+mXB0DpPQJn3gYG6+I", - "6G9IB/aRo74TRlXlLlcxYm/MAwfwqShayaLnUY3DMCHnzLG5K146Nuff4u0ggwxp+KDo5UPzrjf3xx4a", - "e0xTdOUftSYSEm6ymliaDUCnRe09EC/UNqMI5eRbZLFdOHpPxi5gvHTqYFIuunuGLdQW3bnwaiFf+QOw", - "jMMRwIh0L1thkF6x35icRcDsm3a/nJuiQoMk4xWtDbmMCXpTph6RLcfI5bMovdyNAOipodpaDV4tcVB9", - "0BVPhpd5e6vN27SpISwsdfzHjlByl0bwN9SPdRPC/a1N/DeeXCycqE+SCW+oWbpNhkLqXFHWwWMSFPbJ", - "oQPEHqy+6suBSbR2fb26eI2wlmIljvkOjZJDtBkoAR/BWUc0zS5TngLuLQ94j1+EbpGyDnePy939yIFQ", - "w0oYC63RKPgF/RHqeI7pk5Vajq/OVnrp1vdaqebyJ7M5duws85OvAD3wl0Ibm6HFLbkE1+hbg0qkb13T", - "tATadVGkYgOiSHNcnPYSdlkhyjpNr37e75+7aX9oLhpTL/AWE5IctBZYHCPpuLxnavJt37vgF7TgF/zO", - "1jvtNLimbmLtyKU7xz/JuegxsH3sIEGAKeIY7tooSvcwyCjgfMgdI2k08mk52WdtGBymIox90EsthL2P", - "3fw0UnItURrAdISgWq2gCOnNgj1MRknkSiVXURWnqtqXM++EUeo6zDy3J2mdd8OHMSf8SNzPhCxgm4Y+", - "fhUg5G1kHSbcw0lWICldSVotlERN7OKPLSJd3Se2hfYDAJJO0G96xuzWO5l2qdlO3IASeOHfJAbC+vYf", - "y+GGeNTNx9ynO5lP9x8hHBBpStiosMkwDcEIA+ZVJYptz/BEo44qwfhR2uURaQtZix/sAAa6TtBJguuk", - "0vau1l7Bfopv3lP3KiPfa+9Y7Oib5z4Av6g1WjA6ns3DvO3NW23i2r//+cIqzVfgrVAZgXSrIXA5x6Ah", - "yopumBXkTlKI5RJi64u5ieWgA9xAx15MIN0EkaVNNLWQ9ssnKTI6QD0tjIdRlqaYBC2M2eTfDK1cQaaP", - "VEnNlRBtzQ1MVclw/e9hl/3My9o9MoQ2rXuuNzt1L98jdv1q8z3scOSDXq8OsAO7gpqn14A0mNL0N59M", - "lMD6numk+MfnZWcLj9ips/Qu3dHW+KIM48Tf3jKdogXdpdzmYLROEg6WKbtxkfZNcKcHuojvk/KhTRDF", - "YRkkkvfjqYQJJSyHV1GTi+IQ7b4BXgbixeXMPs5nt/MESN1mfsQDuH7VXKBJPKOnKVmGO449R6KcV5VW", - "V7zMvL/E2OWv1ZW//LF5cK/4xC+ZNGW/+ebsxSsP/sf5LC+B66zRBIyuCttV/zSrojIO+68SyvbtFZ2k", - "KYo2v8nIHPtYXGNm756yaVAUpfWfiY6i97lYph3eD/I+7+pDS9zj8gNV4/HT2jzJ4afr5MOvuCiDsTFA", - 
"O+KcjoubVlknyRXiAW7tLBT5fGV3ym4Gpzt9OlrqOsCTcK4fMTVl+sUhfeJKZEXe+YffufT0rdId5u8j", - "E5POQ7+fWOWEbMLjiK92qF/ZF6ZOGAlev65+dafxwYP4qD14MGe/lv5DBCD+vvC/4/viwYOk9TCpxnJM", - "ArVUkm/gfhNlMboRn/YBLuF62gV9drVpJEs1ToYNhZIXUED3tcfetRYen4X/pYAS3E8nUx7p8aYTumNg", - "ppygi7FIxMbJdEMlMw1Tsu9TjUGwjrSQ2fuSDGSMHR4hWW/QgJmZUuRp1w65MI69SnKmdI0ZNh7R1roR", - "azHimytrEY3lmk3JmdoDMpojiUyTTNva4m6h/PGupfhHDUwU7lWzFKDxXutddeFxgKMOBNK0XswPTHaq", - "dvjb6EH22JuCLmifEmSv/e55Y1MKC00V/TnSAzyeccC493hve/rw1EzRbOuuC+a0d8yU0umB0Xlj3cgc", - "yVLowmRLrX6DtCEE7UeJRBjB8ClQzfsbyJTnXp+lNEbltqJ7O/uh7Z7+Nh7b+Fu/hcOim6pjN7lM06f6", - "uI28yaPXpNM1eySPPcJiD4NuaMAIa8HjFTnDYhmU4H3EJZ0nygLRiTBLn8o4lvOUxm9PpYd5EP9a8usF", - "T9WIcW8hB1O0vR0/KatY6Bw2wDQ5Dmh2FnlwN20FZZKrQLc2iGFW2hu+a2jayS+a9gGDFBU/XebkplAa", - "lRimltdcUhVx14/4le9tgEzwrte10pgH0qRdugrIxSapjn337m2RD913CrESVCC7NhBVYPYDMUo2iVTk", - "q1g3mTs8as6X7OE8KgPvd6MQV8KIRQnY4hG1WHCD12VjDm+6uOWBtGuDzR9PaL6uZaGhsGtDiDWKNW9P", - "FPIax8QF2GsAyR5iu0dfsc/QJdOIK7jvsOiFoNnTR1+hQw398TB1y/oC5/tYdoE8Ozhrp+kYfVJpDMck", - "/ahp7+ulBvgNxm+HPaeJuk45S9jSXyiHz9KGS76CdHzG5gBM1Bd3E835PbxIsgaAsVrtmLDp+cFyx59G", - "Yr4d+yMwWK42G2E33nHPqI2jp7a8Mk0ahqNa/75eVIArfET/1yq4//V0XZ/4GcM3IzFb6KX8A9poY7TO", - "Gafkn6VoPdNDvU52HnILYwGtpm4W4cbN5ZaOsiQ6qi9ZpYW0qP+o7TL7i3sWa5479ncyBm62+PJJohBV", - "t1aLPA7wT453DQb0VRr1eoTsg8zi+7LPpJLZxnGU4n6bYyE6laOOummXzDG/0P1DT5V83SjZKLnVHXLj", - "Eae+FeHJPQPekhSb9RxFj0ev7JNTZq3T5MFrt0M/vX7hpYyN0qmCAe1x9xKHBqsFXGHEXHqT3Ji33Atd", - "TtqF20D/x/o/BZEzEsvCWU4+BCKL5r5geSfF//yyzXyOhlWKROzpAJVOaDu93u4Texsep3Xr22/JYQy/", - "jWBuMtpwlCFWRrzvyb2+6fNH+Av1QaI97ygcH/3KtHuDoxz/4AEC/eDB3IvBvz7ufib2/uBBOgFxUuXm", - "fm2xcJsXMfZN7eHXKqEAC1ULG4cinx8hoYAcu6TcB8cEF36oOetWiPv0UsTdxHelvU3Tp+Ddu7f4JeAB", - "/+gj4g9mlriBbZTC+GHvVshMkkzRfI/83Dn7Wm2nEk7vDgrE8ydA0QhKJqrncCWDCqBJc/1Bf5GIRt2o", - "CyiVe2TGRYFiff4/D57d4ud7sF2Lsvi5ze3Wu0g0l/k66SW8cB1/IRm9cwUTq0zWGVlzKaFMDkdv21/C", - "GzjxSv+7mjrPRsiJbfsVaGm5vcW1gHfBDECFCR16hS3dBDFWu2mzmrQM5UoVDOdpi1q0zHFYyjlVQjMR", - "34zDbmrr/VYxFtwnHFqKEt0w03ZjbJlpbkcSaGG981BfyI2D5ccNqRlodNCMiw1ezIZvqhLwZF6B5ivs", - "qiT0umMKNRw5qljBTOU+YUtMWKGYrbVkarmMlgHSCg3lbs4qbgwN8tAtC7Y49+zpo4cPk2ovxM6ElRIW", - "wzJ/bJfy6BSb0BdfZIlKARwF7GFYP7YUdczGDgnH15T8Rw3GpngqfqDIVbSSulub6kk2tU9P2HeY+cgR", - "cSfVPaorQxLhbkLNuioVL+aY3PjNN2cvGM1KfaiEPNWzXKG2rkv+SfPK9ASjIbPTSOac6ePsT+XhVm1s", - "1pSfTOUmdC3aApmi53ODerwYOyfsOalQmwL+NAnDFNl6A0VU7ZIe8Ugc7j/W8nyNusmOBDTOK6cXYg3s", - "rLXcRNGHTfUjZNgObl+LlUqxzpmya9DXwgBG5MMVdNMhNrlBvW48pEfsLk/XUhKlnBwhjDa1jo5FewCO", - "JNngVJCErIf4IzVTVI/52Lq0F9grHYvRK3Lbs/qH5HohxTZ76Y0LOZdKihxLIaQkaUzdNs1MOaFqRNq+", - "aGb+hCYOV7K0bhML7LE4Wmw3MEKPuKHJP/rqNpWog/60sPUl11ZgjedsUMxDpWtvEBPSgK9m5Ygo5pNK", - "J5yakoEQjQPFkWSEWZlGNJzfum8/eP03JsW4FBI1XR5t/n1GJqvSCLRMSyYsWykwfj3daB7z1vU5wSyN", - "BWzfn7xQK5FfiBWOQW50btnkMzoc6ix4kHqPTdf2mWvrc+c3P3fcwWjSs6ryk47XQU8KknYrRxGc8lsK", - "jiQRcpvx49H2kNte12+8Tx2hwRV6rUGF9/CAMJpa2t1RvnFvS6IobMEoojKZQFfIBBgvhAwm1PQFkSev", - "BNwYPK8j/UyuuaW3wySe9gZ4ORIAgRHKZIO/7VD9ygEOJbjGMMf4NrZlwEcYR9Oglfi53LFwKBx1R8LE", - "M142rtOJot4oVXkhqsDgol6Z7xTjcIw7CyGTHXQdDN9rumM1jmNvorEchYu6WIHNeFGkUlt9jV8Zfg1B", - "YrCFvG6KUDXRgd0c5UNq8xPlSpp6s2eu0OCW00V18xPUENfuDzuMmXYWO/w3VYFpfGe80/TRUbnBQ7o4", - "LjH/MMo4JfU6ms6MWGXTMYF3yu3R0U59M0Jv+98ppYdw3T9FNG6Py8V7lOJv37iLI07cO/BPp6ulyauL", - "vuAKv4eER01GyC5XwqtsUGcMvR5w8xJb1gM+NEwCfsXLkUj42FZC9yvZD8bi4fPR9A3c+vRclrO9LGg0", - "5RH5CvesL0MT4ph/MLkH353Vwq91L0LHbXffdyx15CPWMotRC93NjGjtBh9rRfv+aixFQqjTgd/jeiDe", - "i2fu08DDlVB18L4KPtDhSUi/+hQ8nbofI+tPRhb80VaLURvLG1+/lpbp3+Tf/0xWWAbS6t2fwOIy2PR+", - 
"UZmEtEvqqbYJa0ofTiqF2LkVp9SwSZVL8bJh0JURa+nQ0qD8zICsnk8RBwb4+DifnRdHXZipkjszGiV1", - "7F6I1dpixv6/AS9AvzpQkaCtQoBHrFJGtBVISzeYTwG7xuFOpgYbOAIWcUWF4VjBCfUKcotlZ1vnOg1w", - "TH0FN1kw+vx3ZYLx53QTk+ELEuyrQjCsNXvgjh8kToqSf1GdzpPpOffPGhdqigC75qZN19KLmZ4cublc", - "Qo5ZkfcmqvqPNcgoCdI86GUQlmWUt0o0cUyY1/t4rWML0L48Unvhierr3BqcsTj2S9jdM6xDDcnCoU0Q", - "300SByMGyAQWckiPKZK915gwDWUgFoJLsE/F3BbHGM35HKVdu+FcgSTdxdGmYtszZbro+aS5XNej0j5i", - "SM5YLqthzeTx98dzLFFtvIMcbxIPx690dj4snHPtExdjWrHGdhJSGIMJv4UcgjRLKS59/QDEClmqrrku", - "Qos7SQpFd5NIA71sZhZtAMfQySFRigFjofJSOTEiGwso68ZMNA6H9wx5hrYJfBCuJWgNRWMSKZWBzKoQ", - "8LEPjn2oIPfXGyHBjJY/IuBGU1+/bnN7Yxk4jqmuufd6jRfINGy4g05HGbjH59yH7Gf0PQThhzJgBzVM", - "Db0erkcbQneEGSAxpvol87fl4eD+myibhJSgs2B56qfjlt2MbJh3s6hzuqDjg9Eo5CbnztnDSpJ6mny4", - "yt4bIQqSv4TdKT2CQiHfsIMx0CQ5EehRwtHeJt+p+s2k4F7dCXh/bB65SqkyGzF2nA9ziPcp/lLkl4A5", - "ABsX95Ea7ewz1LE31uzr9S7kzK4qkFDcP2HsTFJQUTBsd8sL9iaX9+y++bc4a1FTWn+vVDt5J9PRGZhw", - "X9+Sm4Vh9vMwA47V3XIqGuRAhuqtHHO5ucbk/N0qnidTX+VDU3O/inxLVARFSia5IIvVMzzoKcURpkCI", - "cnWgIZMzb+liplQpX96bpGlwQ6UxFU+GAFmQU7IFNFD4wZMISNZFT5xCSn3nk96pJdPQGpFvmv1vWMI9", - "9aLvz9zM0uV3S6WhU4zd9aZMn03gC6bRxP8shNVc726So29QQn6gPRnF8kF3rMYTq11I6401xGFZqusM", - "mVXW1LlIPW1dO9O9jEPRtbafO9ULiPy6uPGC2o6tecFypTXkcY90vCdBtVEaslKhm1fKAr20Tu7eYJCX", - "ZKVaMVXlqgCqF5OmoLG5aik5ik0QedUkUUC0g9HC1Cei44lTujuV7EgZilqrI2rn50CR621WJ1p0RrbM", - "EY9lMD6Lk8cQNR7Cu6f2/1GVWs7RjfFKoK9LN2ifpM/K3TFNJoP4zF3EaYaYXWtVr9ZRQmd2LcoyKAzc", - "NujaP0DjUX4yNbojYcSWm+IJ2yhj/cuORjLNUK2L12e5klarsuwqgUgkXnnN9ku+Pctz+0KpywXPL+/j", - "O1Iq26y0mId45r4zXjuT7qXy6l54GZUPP5wal9qha5onkskMqcdSji6kHoH5/jDHOqzjPhsurL+uLvNK", - "PxvOJONWbUSepuF/Lu+2UZ+0FEtI5gijWoaU1QGbIaOOL4fGmQFZ0hDNIHmyGNsZ8zzNG3WRebj/osTb", - "H5ctwV8SIxfTkE96qSXLR2WrHgAIKYUa21pTAcRY8mm4ilpRagI0SfcBncjF0fPndrC5Ee4cKAu3Amrg", - "bdgA+Bk99ueUy408FxdqG77fb5O93Qj4j/upvMM8xlyqLlrS0uRUFRLDjHCEdErpvf5HbzDMfDHVC6kp", - "VjvxRo0AGPdL6sAwyTvpWDCWXJRQZKlah+eNTmgevWx9KFS/BLkwnpPnvA6lBt3YtQafqIREat21N1Xc", - "kZJqmg81t7KALVAcxW+gFdUQnEf2DiipxGDv8a2qrIQr6Lhr+ewpNYp24gpCX9N0ZgVAhda/vk4q5YcU", - "3+U9RYVfexZ5skzBblJzQYilnWIH1BJJJcpWZnRMzNSj5CC6EkXNO/gzx4ocXbWbO8oJVA1k8iy826ZO", - "8xON8DoMcBb6p0SZgIn30/jQ0Swojbp9DOigX2Jtxk69TLslxqmBGoMGzlY0hk8i8ZZvmIpfy3EF4JDk", - "2+fNxH0SSkaI/WYLOUo1Xb+72+OE4WDM9NJ+jYrgutnhmyuS/xAa3kvCo+OlnhoGfKDaHk1NoAsvsGMD", - "LDotndjrpGYsJ+j5v+d/c7aow0DuXU3VDeMX3HMIFjvMRN4YK7xAK5oLLfgXzn0iyv6jXESe1Ru+Y0rj", - "P+699o+al2K5wxNK4IduzKy5IyFvIiTbtfdXdBPvF0zmAbCgF1BhKlq3mDpmNNzOjRIB7a7AUIZGsQ2/", - "hHgb0CxPnCe3juWYerERxuBl19vOIRb84kMykQ0v4jcypjTsFvwOSW5d7//ZRm3FU4VMZFXJ81DL0hfT", - "6SjEqV5tIC67hs3+sL7h8ziQQFMDtyVaHeLAixso94703Ej5yo8VCumAPagNOqiRcqtlTNRR9qpB7AmI", - "nLSUu96Fqf4hA6DjioKHwI8LLH4a/CezjY4tYwr4fxa8j5RUjeGl6qmfAMudXBEJWEmvulDbTMPSHHKF", - "IMWqewjrNstEUE4KmWvghnxDzn/0T7Y2maaQ7glJ3ouN9a0ZpYClkC2zFLKqbeIFgDk15S5CWKyeRrSO", - "GHvGpAQnhl3x8scr0FoUYxvnTgcVH4yLGQSVvO+bePw3d+pwAGHa1w9GEkIbqRY1cxc4lUsix0JjuSy4", - "LuLmQrIctLv32TXfmZvbPhy0unbyxQHrB4+kmW58e2QHQdImQMqdN1/e0jLRAMjv0EQxwbSAHqwJswIp", - "RawasSQMYUjn4+DbrFQrjC8bIUCftRRtP/RYURIVtiQPHTePEb/B/mkwYbs/+FbhrFOm2H/OfkTU4YPn", - "Jyns3pNG2rR+wB95ZNJBCPQvV61bOG3OkP5TMZo+LUccpxmEuxDEEPaa3ENoPhixZHQ1uCO7iAZyH+Ab", - "q2unF8Lq2uBTkaD0hs3wbWv2OH6DaZ2cee4dd4ZKn8GjmJAy93G0R+qESJMc7oER8KhquT9b3WkbZwo3", - "zjHVw/ZHzmaVqrJ8ijcg1XQovELbQ9qFcYQ+InX1yLobxwnTVDnpZMTplDs5toDaaLmVQ3aZKt/3yB5T", - "aIxw0K6yXC2Rl1FNb9TDYIxHo7yY96OPugqbhkkwzjTktUaF5jXfHS5INZJL+OJvZ188evzL4y++ZK4B", - "K8QKTJuPulfQqfUYE7KvZ/m0PmKD5dn0JoS4dEJcsJSFcJtmU/xZI25r2mSTg3JWx2hCExdA4jgmCgnd", - 
"aK9wnNbp+8+1XalF3vmOpVDw+++ZVmWZrgfQiG4JVX9qtyJlv5P4K9BGGOsYYddWJ2zrK2vWqI7DrLBX", - "lGdEydyn7W+oQNgRZ5zUQsZcLZGfYdSvt28w2Fal51Vkk9i3Lv8uIo0YOmeg/8YCWKUqL0qLJUtBhLEl", - "Ooq59IpGdO+MvCcbZkt+lClC9D7JadKLSynv5/bdMp82zendJibEi3Aob0CaY5r08Yj2m3CSVpX+p+Ef", - "iRD9O+MazXJ/D16RfB/crFz7JNCG4doJ8kAARuIwOxF0UQhRlKJWk1Ye9ffB1NkXP162JtCDAQMISehw", - "ALw4sLJt1/i4e3D+4FyvLxukREt5P0YJneUfitUMrLe5SKIt8koKa8EQW1JDsTAKxDXPmvjWkVfJIAxW", - "K2WZe5mWZSJ8lvQmeKZiwnFPAn3Fy0/PNb4V2tgzxAcUr8eDZuIYyhjJhEpzswxuL/ikuaN4ybubWr7C", - "kN3/ALdHyXvOD+XNxYPbDLVeWMt8FW4FigJm1zgmuQM9+pItfBmGSkMuTN8MfR2EkyZkELRYetdL2NoD", - "MYqH1vmzsrcg42XwGWE/ROYkhWq7FsL2iP7BTGXk5CapPEV9A7JI4C/Fo+KyrQeui1um7L9ZQpAotdeR", - "CUGGBWmnLo+SXrhLpzYwXOfk27qD28RF3a5tajabyZn/3717axdTktCks/S77pgF507S9R+VrP93yH9D", - "OPJj+HlTFPPzWEZUyvo5krW5tx+1KA86iHRycH+cz1YgwQiDWaZ/8VVFPu1dGiCgmPzhUSVYb5NIhBCT", - "WGtn8miqKLv2hMTavlsiGzLGu+W1FnaHFWWDAk38kszU812T9cFnDWlsV/7us+oSmqrebY6I2oTb9TvF", - "S7yPyKQm3S2kyhP2DeV+9gflr/cW/waf/+VJ8fDzR/+2+MvDLx7m8OSLrx4+5F894Y+++vwRPP7LF08e", - "wqPll18tHhePnzxePHn85Msvvso/f/Jo8eTLr/7tnuNDDmQCNCR9fzr7P9lZuVLZ2avz7I0DtsUJr8T3", - "4PYG38pLhRUPHVJzPImw4aKcPQ0//a9wwk5ytWmHD7/OfOWe2drayjw9Pb2+vj6Ju5yuMCg8s6rO16dh", - "HqxD15FXXp033uTk94I72mqPcVM9KZzht9ffXLxhZ6/OT1qCmT2dPTx5ePLIFz2WvBKzp7PP8Sc8PWvc", - "91PMvHhqfFL106qitOof57NTT4f+rzXwEtOruD82YLXIwycNvNj5/5trvlqBPsFYAvrp6vFpkDhOP/i4", - "+Y/7vp3G3hanHzrpBYoDPRtvgqSd74VSl2hmDjLQPdPzjTiJ6zKfFw7F1BIdGsx5y+xCcV20486evk3p", - "V3zdsKpelCJndEUjjboNiEioSRrRsghUps3awu4tw3NM7GH21fsPX/zlY0qQ6gPy0hv9WiuHdxDFmCN0", - "lz8JcP2jBr1rAUML+CwGY2gSTOfO2lqsrx/NdsJ+8t4D+JX4RoitCiFKTdqx0GkEMDdECq4GC++xwhu6", - "0yE5PH74MJxuLztHZHXqqTVGd9e+MPC1OSaYvVP2OCH4uMVkiI8hxf5kKOGOw6aQnHy8MVXJhl+SZQWd", - "1Jj2UZMeo97DFZHcRDP4bQkM/HcsaDMhJJdmGgoeH4ccceQEBvfUWPlVClLteZehVOXij/PZkyOpYa8S", - "qpM9MgH+S146kKEISUMIgkefDoJzSV6U7mqhK/DjfPbFp8TBuXTMi5cMW0bFVxMULy+lupahpZNX6s2G", - "6x1KI3bKHvscN2gvDO2I7uny5O4Mv50RW8YyFBVo4R6FvJy9/3joejn9EIpu77+MOgWXvQ9w1GHiJbev", - "2ekCC21NbQomajy+FFRzmdMPeEJHfz/12vb0R1SYkSR2GlI8jbSkZB7pjx0UfrBbt5D9w7k20Xg5t/m6", - "rk4/4H9QqIpWRLmBT+1WnqJDz+mHDiL85wEiur+33eMWVxtVQABOLZdUqXzf59MP9G80UYcwW6GmK6B8", - "EzV6tob8cpa++3qJ06NejGROviihIOb0ZEIHqWzc6UYH+jWKH4b9+D0TSwb9KYQJMxxxbimt5CnW89y1", - "uAw/72Se/HG4zZ2UeiM/n4YnT0q07bb80Pmze+TMuraFuo5mQWUhabqHkLmPten/fXrNhXXPf5/JDQuA", - "Dztb4OWpL9vQ+7XNlDz4gumfox/jmKnkr6fco3pWKZMg29f8OrLwnWFjkhDA2K8VvijGbqdtthASKSi+", - "oVodAX0cysaDe8nJNegMF8wswywsmApCK17k3GDhaV8BZSCtf0weu08tbXzNCxYyaGSslT3O/Eu0s7T/", - "lkRw+s8/3fQXoK9EDuwNbCqluRbljv0km5CUGzPSb5E4Nc8vUUJvCJb8JzW/7ka56HSGgm6Bn5CwApjd", - "sjWXReljulWNlcscZaFZVEWOOe4CCgWuKqURAMocCAW5KpgTdtE4cqBbRB0eOQVcQakqtFtgPlyahKOT", - "Bxn64ougy//ns23mDvEKZObZSLZQxc5XhJlpfm23FJ494FUkHI4wsoHolvrqpZORRsGBOnxuFYixQg61", - "CI0q7u1794rFSuNewdDql56enmJEzVoZezpzj/Cu7in++L5BWCiQOau0uMJE/og0pYV7W5aZV960tbBm", - "j08ezj7+/wAAAP//hDzDXLsHAQA=", + "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuLnbg8TvZ2bd8LRLYkvKEAPgCckeLz", + "d79CN0CCJChRMxMnqdq/7BHxo9FoNPoXuj/McrWplARpzezph1nFNd+ABY1/8TxXtbSZKNxfBZhci8oK", + "JWdPwzdmrBZyNZvPhPu14nY9m88k30DbxvWfzzT8sxYaitlTq2uYz0y+hg13A9td5Vo3I22zlcr8EGc0", + "xPnz2cc9H3hRaDBmCOWPstwxIfOyLoBZzaXhuftk2LWwa2bXwjDfmQnJlASmlsyuO43ZUkBZmJOwyH/W", + "oHfRKv3k40v62IKYaVXCEM5narMQEgJU0ADVbAizihWwxEZrbpmbwcEaGlrFDHCdr9lS6QOgEhAxvCDr", + "zezp25kBWYDG3cpBXOF/lxrgV8gs1yuws/fz1OKWFnRmxSaxtHOPfQ2mLq1h2BbXuBJXIJnrdcJe1say", + "BTAu2etvn7HPP//8K7eQDbcWCk9ko6tqZ4/XRN1nT2cFtxA+D2mNlyuluSyypv3rb5/h/Bd+gVNbcWMg", + 
"fVjO3Bd2/nxsAaFjgoSEtLDCfehQv+uROBTtzwtYKg0T94Qa3+mmxPP/rruSc5uvKyWkTewLw6+MPid5", + "WNR9Hw9rAOi0rxymtBv07cPsq/cfHs0fPfz4L2/Psv/yf37x+ceJy3/WjHsAA8mGea01yHyXrTRwPC1r", + "Lof4eO3pwaxVXRZsza9w8/kGWb3vy1xfYp1XvKwdnYhcq7NypQzjnowKWPK6tCxMzGpZOjblRvPUzoRh", + "lVZXooBi7rjv9Vrka5ZzQ0NgO3YtytLRYG2gGKO19Or2HKaPMUocXDfCBy7oj4uMdl0HMAFb5AZZXioD", + "mVUHrqdw43BZsPhCae8qc9xlxd6sgeHk7gNdtog76Wi6LHfM4r4WjBvGWbia5kws2U7V7Bo3pxSX2N+v", + "xmFtwxzScHM696g7vGPoGyAjgbyFUiVwicgL526IMrkUq1qDYddrsGt/52kwlZIGmFr8A3Lrtv1/X/z4", + "A1OavQRj+Ape8fySgcxVAcUJO18yqWxEGp6WEIeu59g6PFypS/4fRjma2JhVxfPL9I1eio1IrOol34pN", + "vWGy3ixAuy0NV4hVTIOttRwDiEY8QIobvh1O+kbXMsf9b6ftyHKO2oSpSr5DhG349q8P5x4cw3hZsgpk", + "IeSK2a0clePc3IfBy7SqZTFBzLFuT6OL1VSQi6WAgjWj7IHET3MIHiGPg6cVviJwwiCj4DSzHABHwjZB", + "M+50uy+s4iuISOaE/eSZG3616hJkQ+hsscNPlYYroWrTdBqBEafeL4FLZSGrNCxFgsYuPDocg6E2ngNv", + "vAyUK2m5kFA45oxAKwvErEZhiibcr+8Mb/EFN/Dlk7E7vv06cfeXqr/re3d80m5jo4yOZOLqdF/9gU1L", + "Vp3+E/TDeG4jVhn9PNhIsXrjbpulKPEm+ofbv4CG2iAT6CAi3E1GrCS3tYan7+QD9xfL2IXlsuC6cL9s", + "6KeXdWnFhVi5n0r66YVaifxCrEaQ2cCaVLiw24b+ceOl2bHdJvWKF0pd1lW8oLyjuC527Pz52CbTmMcS", + "5lmj7caKx5ttUEaO7WG3zUaOADmKu4q7hpew0+Cg5fkS/9kukZ74Uv/q/qmq0vW21TKFWkfH/kpG84E3", + "K5xVVSly7pD42n92Xx0TAFIkeNviFC/Upx8iECutKtBW0KC8qrJS5bzMjOUWR/pXDcvZ09m/nLb2l1Pq", + "bk6jyV+4XhfYyYmsJAZlvKqOGOOVE33MHmbhGDR+QjZBbA+FJiFpEx0pCceCS7ji0p60KkuHHzQH+K2f", + "qcU3STuE754KNopwRg0XYEgCpob3DItQzxCtDNGKAumqVIvmh8/OqqrFIH4/qyrCB0qPIFAwg60w1tzH", + "5fP2JMXznD8/Yd/FY6MormS5c5cDiRrublj6W8vfYo1tya+hHfGeYbidSp+4rQlocGL+XVAcqhVrVTqp", + "5yCtuMZ/821jMnO/T+r85yCxGLfjxIWKlscc6Tj4S6TcfNajnCHheHPPCTvr970Z2bhR9hCMOW+xeNfE", + "g78ICxtzkBIiiCJq8tvDtea7mRcSMxT2hmTykwGikIqvhERo5059kmzDL2k/FOLdEQKYRi8iWiIJsjGh", + "epnTo/5kYGf5E1BramODJOok1VIYi3o1NmZrKFFw5jIQdEwqN6KMCRu+ZxENzNeaV0TL/guJXUKiPk+N", + "CNZbXrwT78QkzBG7jzYaoboxWz7IOpOQINfowfB1qfLLv3GzvoMTvghjDWkfp2Fr4AVotuZmnTg4Pdpu", + "R5tC364h0ixbRFOdNEt8oVbmDpZYqmNYV1U942Xpph6yrN5qceBJB7ksmWvMYCPQYO4VR7Kwk/7FvuH5", + "2okFLOdlOW9NRarKSriC0intQkrQc2bX3LaHH0cOeg2eIwOO2Vlg0Wq8mQlNbLqxRWhgG4430MZpM1XZ", + "7dNwUMM30JOC8EZUNVoRIkXj/HlYHVyBRJ7UDI3gN2tEa008+Imb23/CmaWixZEF0Ab3XYO/hl90gHat", + "2/tUtlMoXZDN2rrfhGa50jQE3fB+cvcf4LrtTNT5WaUh80NofgXa8NKtrreo+w353tXpPHAyC255dDI9", + "FaYVMOIc2A/FO9AJK82P+B9eMvfZSTGOklrqESiMqMidWtDF7FBFM7kGaG9VbEOmTFbx/PIoKJ+1k6fZ", + "zKST9w1ZT/0W+kU0O/RmKwpzV9uEg43tVfeEkO0qsKOBLLKX6URzTUHAG1UxYh89EIhT4GiEELW982vt", + "a7VNwfS12g6uNLWFO9kJN85kZv+12j73kCl9GPM49hSkuwVKvgGDt5uMGaebpfXLnS2Uvpk00btgJGu9", + "jYy7USNhat5DEjatq8yfzYTHghr0BmoDPPYLAf3hUxjrYOHC8t8AC8aNehdY6A5011hQm0qUcAekv04K", + "cQtu4PPH7OJvZ188evz3x1986Uiy0mql+YYtdhYM+8yb5ZixuxLuJ7UjlC7So3/5JPiouuOmxjGq1jls", + "eDUcinxfpP1SM+baDbHWRTOuugFwEkcEd7UR2hm5dR1oz2FRry7AWqfpvtJqeefccDBDCjps9KrSTrAw", + "XT+hl5ZOC9fkFLZW89MKW4IsKM7ArUMYpwNuFndCVGMbX7SzFMxjtICDh+LYbWqn2cVbpXe6vgvzBmit", + "dPIKrrSyKldl5uQ8oRIGile+BfMtwnZV/d8JWnbNDXNzo/eylsWIHcJu5fT7i4Z+s5UtbvbeYLTexOr8", + "vFP2pYv8VgupQGd2KxlSZ8c8stRqwzgrsCPKGt+BJflLbODC8k3143J5N9ZOhQMl7DhiA8bNxKiFk34M", + "5EpSMN8Bk40fdQp6+ogJXiY7DoDHyMVO5ugqu4tjO27N2giJfnuzk3lk2nIwllCsOmR5exPWGDpoqnsm", + "AY5Dxwv8jLb651Ba/q3Sb1rx9Tut6urO2XN/zqnL4X4x3htQuL7BDCzkquwGkK4c7CepNf4uC3rWGBFo", + "DQg9UuQLsVrbSF98pdVvcCcmZ0kBih/IWFS6PkOT0Q+qcMzE1uYORMl2sJbDObqN+RpfqNoyzqQqADe/", + "NmkhcyTkEGOdMETLxnIr2ieEYQtw1JXz2q22rhgGIA3ui7ZjxnM6oRmixoyEXzRxM9SKpqNwtlIDL3Zs", + "ASCZWvgYBx99gYvkGD1lg5jmRdwEv+jAVWmVgzFQZN4UfRC00I6uDrsHTwg4AtzMwoxiS65vDezl1UE4", + "L2GXYayfYZ99/7O5/zvAa5Xl5QHEYpsUevv2tCHU06bfR3D9yWOyI0sdUa0Tbx2DKMHCGAqPwsno/vUh", + 
"Guzi7dFyBRpDSn5Tig+T3I6AGlB/Y3q/LbR1NRLB7tV0J+G5DZNcqiBYpQYrubHZIbbsGnVsCW4FESdM", + "cWIceETwesGNpTAoIQu0adJ1gvOQEOamGAd4VA1xI/8cNJDh2Lm7B6WpTaOOmLqqlLZQpNaAHtnRuX6A", + "bTOXWkZjNzqPVaw2cGjkMSxF43tkeQ0Y/+C28b96j+5wcehTd/f8LonKDhAtIvYBchFaRdiNo3hHABGm", + "RTQRjjA9ymlCh+czY1VVOW5hs1o2/cbQdEGtz+xPbdshcZGTg+7tQoFBB4pv7yG/JsxS/PaaG+bhCC52", + "NOdQvNYQZncYMyNkDtk+ykcVz7WKj8DBQ1pXK80LyAoo+S4RHECfGX3eNwDueKvuKgsZBeKmN72l5BD3", + "uGdoheOZlPDI8AvL3RF0qkBLIL73gZELwLFTzMnT0b1mKJwruUVhPFw2bXViRLwNr5R1O+7pAUH2HH0K", + "wCN4aIa+OSqwc9bqnv0p/hOMn6CRI46fZAdmbAnt+EctYMQW7N84Reelx957HDjJNkfZ2AE+MnZkRwzT", + "r7i2IhcV6jrfw+7OVb/+BEnHOSvAclFCwaIPpAZWcX9GIaT9MW+mCk6yvQ3BHxjfEssJYTpd4C9hhzr3", + "K3qbEJk67kKXTYzq7icuGQIaIp6dCB43gS3Pbblzgppdw45dgwZm6gWFMAz9KVZVWTxA0j+zZ0bvnU36", + "Rve6iy9wqGh5qVgz0gn2w/empxh00OF1gUqpcoKFbICMJASTYkdYpdyuC//8KTyACZTUAdIzbXTNN9f/", + "PdNBM66A/aeqWc4lqly1hUamURoFBRQg3QxOBGvm9MGJLYaghA2QJolfHjzoL/zBA7/nwrAlXIc3g65h", + "Hx0PHqAd55UytnO47sAe6o7beeL6QMeVu/i8FtLnKYcjnvzIU3byVW/wxtvlzpQxnnDd8m/NAHoncztl", + "7TGNTIv2wnEn+XK68UGDdeO+X4hNXXJ7F14ruOJlpq5Aa1HAQU7uJxZKfnPFyx+bbvgeEnJHozlkOb7i", + "mzgWvHF96OGfG0dI4Q4wBf1PBQjOqdcFdTqgYraRqmKzgUJwC+WOVRpyoPduTnI0zVJPGEXC52suV6gw", + "aFWvfHArjYMMvzZkmtG1HAyRFKrsVmZo5E5dAD5MLTx5dOIUcKfS9S3kpMBc82Y+/8p1ys0c7UHfY5B0", + "ks1noxqvQ+pVq/EScrrvNidcBh15L8JPO/FEVwqizsk+Q3zF2+IOk9vc38Zk3w6dgnI4cRTx234cC/p1", + "6na5uwOhhwZiGioNBq+o2Exl6Ktaxm+0Q6jgzljYDC351PXvI8fv9ai+qGQpJGQbJWGXTEsiJLzEj8nj", + "hNfkSGcUWMb69nWQDvw9sLrzTKHG2+IXd7t/QvseK/Ot0nflEqUBJ4v3EzyQB93tfsqb+kl5WSZci/4F", + "Z58BmHkTrCs048aoXKDMdl6YuY8KJm+kf+7ZRf+r5l3KHZy9/rg9H1qcHABtxFBWjLO8FGhBVtJYXef2", + "neRoo4qWmgjiCsr4uNXyWWiSNpMmrJh+qHeSYwBfY7lKBmwsIWGm+RYgGC9NvVqBsT1dZwnwTvpWQrJa", + "Cotzbdxxyei8VKAxkuqEWm74ji0dTVjFfgWt2KK2XekfHygbK8rSO/TcNEwt30luWQncWPZSyDdbHC44", + "/cORlWCvlb5ssJC+3VcgwQiTpYPNvqOvGNfvl7/2Mf4Y7k6fQ9BpmzFh5pbZSZLyfz/796dvz7L/4tmv", + "D7Ov/sfp+w9PPt5/MPjx8ce//vX/dX/6/ONf7//7v6Z2KsCeej7rIT9/7jXj8+eo/kSh+n3YP5n9fyNk", + "liSyOJqjR1vsM0wV4Qnoftc4ZtfwTtqtdIR0xUtRON5yE3Lo3zCDs0ino0c1nY3oGcPCWo9UKm7BZViC", + "yfRY442lqGF8ZvqhOjol/dtzPC/LWtJWBumb3mGG+DK1nDfJCChP2VOGL9XXPAR5+j8ff/HlbN6+MG++", + "z+Yz//V9gpJFsU3lEShgm9IV40cS9wyr+M6ATXMPhD0ZSkexHfGwG9gsQJu1qD49pzBWLNIcLjxZ8jan", + "rTyXFODvzg+6OHfec6KWnx5uqwEKqOw6lb+oI6hhq3Y3AXphJ5VWVyDnTJzASd/mUzh90Qf1lcCXITBV", + "KzVFG2rOARFaoIoI6/FCJhlWUvTTe97gL39z5+qQHzgFV3/OVETvve++ecNOPcM09yilBQ0dJSFIqNL+", + "8WQnIMlxs/hN2Tv5Tj6HJVoflHz6Thbc8tMFNyI3p7UB/TUvuczhZKXY0/Ae8zm3/J0cSFqjiRWjR9Os", + "qhelyNllrJC05EnJsoYjvHv3lpcr9e7d+0FsxlB98FMl+QtNkDlBWNU286l+Mg3XXKd8X6ZJ9YIjUy6v", + "fbOSkK1qMpCGVEJ+/DTP41Vl+ikfhsuvqtItPyJD4xMauC1jxqrmPZoTUPyTXre/Pyh/MWh+HewqtQHD", + "ftnw6q2Q9j3L3tUPH36OL/vaHAi/+Cvf0eSugsnWldGUFH2jCi6c1EqMVc8qvkq52N69e2uBV7j7KC9v", + "0MZRlgy7dV4dhgcGOFS7gOaJ8+gGEBxHPw7GxV1Qr5DWMb0E/IRb2H2Afav9it7P33i7DrzB57VdZ+5s", + "J1dlHImHnWmyva2ckBWiMYxYobbqE+MtgOVryC99xjLYVHY373QPAT9e0AysQxjKZUcvDDGbEjooFsDq", + "quBeFOdy109rY+hFBQ76Gi5h90a1yZiOyWPTTatixg4qUmokXTpijY+tH6O/+T6qLDw09dlJ8PFmIIun", + "DV2EPuMHmUTeOzjEKaLopP0YQwTXCUQQ8Y+g4AYLdePdivRTyxMyB2nFFWRQipVYpNLw/sfQHxZgdVTp", + "Mw/6KORmQMPEkjlVfkEXq1fvNZcrcNezu1KV4SVlVU0GbaA+tAau7QK43Wvnl3FCigAdqpTX+PIaLXxz", + "twTYuv0WFi12Eq6dVoGGImrjo5dPxuPPCHAobghP6N5qCiejuq5HXSLjYLiVG+w2aq0PzYvpDOGi7xvA", + "lKXq2u2Lg0L5bJuU1CW6X2rDVzCiu8Teu4n5MDoePxzkkESSlEHUsi9qDCSBJMjUOHNrTp5hcF/cIUY1", + "sxeQGWYiB7H3GWESbY+wRYkCbBO5SnvPdceLSlmBx0BLsxbQshUFAxhdjMTHcc1NOI6YLzVw2UnS2W+Y", + "9mVfarrzKJYwSoraJJ4Lt2Gfgw70fp+gLmSlC6noYqV/Qlo5p3vh84XUdiiJomkBJaxo4dQ4EEqbMKnd", + 
"IAfHj8sl8pYsFZYYGagjAcDPAU5zecAY+UbY5BFSZByBjYEPODD7QcVnU66OAVL6hE88jI1XRPQ3pB/2", + "UaC+E0ZV5S5XMeJvzAMH8KkoWsmiF1GNwzAh58yxuSteOjbndfF2kEGGNFQoevnQfOjN/TFFY49riq78", + "o9ZEQsJNVhNLswHotKi9B+KF2mb0Qjmpiyy2C0fvybcL+F46dTApF909wxZqi+FceLVQrPwBWMbhCGBE", + "tpetMEiv2G9MziJg9k27X85NUaFBkvGG1oZcxgS9KVOPyJZj5PJZlF7uRgD0zFBtrQZvljhoPuiKJ8PL", + "vL3V5m3a1PAsLHX8x45QcpdG8De0j3UTwv2tTfw3nlwsnKhPkglvaFm6TYZC6lxR1sFjEhT2yaEDxB6s", + "vurLgUm0dmO9uniNsJZiJY75Dp2SQ7QZKAGV4KwjmmaXqUgBp8sD3uMXoVtkrMPd43J3Pwog1LASxkLr", + "NApxQb+HOZ5j+mSlluOrs5VeuvW9Vqq5/Mltjh07y/zkK8AI/KXQxmbocUsuwTX61qAR6VvXNC2BdkMU", + "qdiAKNIcF6e9hF1WiLJO06uf9/vnbtofmovG1Au8xYSkAK0FFsdIBi7vmZpi2/cu+AUt+AW/s/VOOw2u", + "qZtYO3LpzvEnORc9BraPHSQIMEUcw10bRekeBhk9OB9yx0gajWJaTvZ5GwaHqQhjH4xSC8/ex25+Gim5", + "ligNYPqFoFqtoAjpzYI/TEZJ5EolV1EVp6ralzPvhFHqOsw8tydpnQ/Dh7Eg/Ejcz4QsYJuGPtYKEPL2", + "ZR0m3MNJViApXUnaLJRETRzijy0iW90n9oX2HwAkg6Df9JzZbXQy7VKznbgBJfDC6yQGwvr2H8vhhnjU", + "zcfCpzuZT/cfIRwQaUrYqLDJMA3BCAPmVSWKbc/xRKOOGsH4UdblEWkLWYsf7AAGukHQSYLrpNL2odbe", + "wH6KOu+p08oo9toHFjv65rl/gF/UGj0YncjmYd72RlebuPbvf76wSvMVeC9URiDdaghczjFoiLKiG2YF", + "hZMUYrmE2PtibuI56AA3sLEXE0g3QWRpF00tpP3ySYqMDlBPC+NhlKUpJkELYz75N0MvV5DpI1NScyVE", + "W3MDV1Xyuf73sMt+5mXtlAyhTRue691O3cv3iF2/2nwPOxz5YNSrA+zArqDl6TUgDaYs/c0nEyWwvmc6", + "Kf5Rvexs4RE7dZbepTvaGl+UYZz421umU7Sgu5TbHIw2SMLBMmU3LtKxCe70QBfxfVI+tAmiOCyDRPJ+", + "PJUwoYTl8CpqclEcot03wMtAvLic2cf57HaRAKnbzI94ANevmgs0iWeMNCXPcCew50iU86rS6oqXmY+X", + "GLv8tbrylz82D+EVn1iTSVP2m2/OXrzy4H+cz/ISuM4aS8DoqrBd9adZFZVx2H+VULZvb+gkS1G0+U1G", + "5jjG4hoze/eMTYOiKG38THQUfczFMh3wfpD3+VAfWuKekB+omoif1udJAT/dIB9+xUUZnI0B2pHgdFzc", + "tMo6Sa4QD3DrYKEo5iu7U3YzON3p09FS1wGehHP9iKkp0xqH9IkrkRX54B9+59LTt0p3mL9/mZgMHvrt", + "xConZBMeR2K1Q/3KvjB1wkjw+mX1izuNDx7ER+3Bgzn7pfQfIgDx94X/HfWLBw+S3sOkGcsxCbRSSb6B", + "+80ri9GN+LQKuITraRf02dWmkSzVOBk2FEpRQAHd1x5711p4fBb+lwJKcD+dTFHS400ndMfATDlBF2Mv", + "EZsg0w2VzDRMyX5MNT6CdaSFzN6XZCBn7PAIyXqDDszMlCJPh3bIhXHsVVIwpWvMsPGItdaNWIuR2FxZ", + "i2gs12xKztQekNEcSWSaZNrWFncL5Y93LcU/a2CicFrNUoDGe6131QXlAEcdCKRpu5gfmPxU7fC3sYPs", + "8TcFW9A+I8he/93zxqcUFpoq+nNkBHg844Bx74ne9vThqZles627IZjT9JgppdMDo/POupE5kqXQhcmW", + "Wv0KaUcI+o8SiTCC41OgmfdXkKnIvT5LaZzKbUX3dvZD2z1dNx7b+FvrwmHRTdWxm1ym6VN93EbeROk1", + "6XTNHsljSlgcYdB9GjDCWvB4RcGwWAYlRB9xSeeJskB0XpilT2X8lvOUxm9PpYd58P615NcLnqoR43Qh", + "B1O0vZ04KatY6Bw2wDQ5Dmh2FkVwN20FZZKrQLc+iGFW2hvqNTTtZI2mVWCQomLVZU5hCqVRiWFqec0l", + "VRF3/Yhf+d4GyAXvel0rjXkgTTqkq4BcbJLm2Hfv3hb5MHynECtBBbJrA1EFZj8Qo2STSEW+inWTucOj", + "5nzJHs6jMvB+NwpxJYxYlIAtHlGLBTd4XTbu8KaLWx5IuzbY/PGE5utaFhoKuzaEWKNYo3uikNcEJi7A", + "XgNI9hDbPfqKfYYhmUZcwX2HRS8EzZ4++goDauiPh6lb1hc438eyC+TZIVg7TccYk0pjOCbpR01HXy81", + "wK8wfjvsOU3UdcpZwpb+Qjl8ljZc8hWk32dsDsBEfXE30Z3fw4skbwAYq9WOCZueHyx3/GnkzbdjfwQG", + "y9VmI+zGB+4ZtXH01JZXpknDcFTr39eLCnCFjxj/WoXwv56t6xOrMXwz8mYLo5R/QB9tjNY545T8sxRt", + "ZHqo18nOQ25hLKDV1M0i3Li53NJRlsRA9SWrtJAW7R+1XWZ/cWqx5rljfydj4GaLL58kClF1a7XI4wD/", + "5HjXYEBfpVGvR8g+yCy+L/tMKpltHEcp7rc5FqJTORqomw7JHIsL3T/0VMnXjZKNklvdITcecepbEZ7c", + "M+AtSbFZz1H0ePTKPjll1jpNHrx2O/TT6xdeytgonSoY0B53L3FosFrAFb6YS2+SG/OWe6HLSbtwG+h/", + "3/inIHJGYlk4y0lFIPJo7nss76T4n1+2mc/RsUovEXs2QKUT1k5vt/vE0YbHWd36/lsKGMNvI5ibjDYc", + "ZYiVkeh7Cq9v+vwe8UJ9kGjPOwbHR78w7XRwlOMfPECgHzyYezH4l8fdz8TeHzxIJyBOmtzcry0WbqMR", + "Y9/UHn6tEgawULWwCSjy+RESBsixS8p9cExw4Yeas26FuE8vRdzN+650tGn6FLx79xa/BDzgH31E/M7M", + "EjewfaUwfti7FTKTJFM036M4d86+VtuphNO7gwLx/AFQNIKSieY5XMmgAmjSXX8wXiSiUTfqAkrllMy4", + "KFBsz//z4Nktfr4H27Uoi5/b3G69i0Rzma+TUcIL1/HvJKN3rmBilck6I2suJZTJ4Ui3/XvQgRNa+j/U", + 
"1Hk2Qk5s269AS8vtLa4FvAtmACpM6NArbOkmiLHaTZvVpGUoV6pgOE9b1KJljsNSzqkSmon3zTjsprY+", + "bhXfgvuEQ0tRYhhm2m+MLTPN7UgCLax3HuoLuXGw/LghMwONDppxscGL2fBNVQKezCvQfIVdlYRed0yh", + "hiNHFSuYqdwnbIkJKxSztZZMLZfRMkBaoaHczVnFjaFBHrplwRbnnj199PBh0uyF2JmwUsJiWOaP7VIe", + "nWIT+uKLLFEpgKOAPQzrx5aijtnYIeH4mpL/rMHYFE/FD/RyFb2k7tamepJN7dMT9h1mPnJE3El1j+bK", + "kES4m1CzrkrFizkmN37zzdkLRrNSHyohT/UsV2it65J/0r0yPcFoyOw0kjln+jj7U3m4VRubNeUnU7kJ", + "XYu2QKboxdygHS/Gzgl7TibUpoA/TcIwRbbeQBFVuyQlHonD/cdanq/RNtmRgMZ55fRCrIGdtZ6b6PVh", + "U/0IGbaD29dipVKsc6bsGvS1MIAv8uEKuukQm9yg3jYe0iN2l6drKYlSTo4QRptaR8eiPQBHkmwIKkhC", + "1kP8kZYpqsd8bF3aC+yVfovRK3Lb8/qH5HohxTZ76Z0LOZdKihxLIaQkaUzdNs1NOaFqRNq/aGb+hCYO", + "V7K0bvMW2GNxtNhuYIQecUOXf/TVbSpRB/1pYetLrq3AGs/ZoJiHStfeISakAV/NyhFRzCeVTgQ1JR9C", + "NAEUR5IRZmUasXB+67794O3fmBTjUki0dHm0ef2MXFalEeiZlkxYtlJg/Hq6r3nMW9fnBLM0FrB9f/JC", + "rUR+IVY4BoXRuWVTzOhwqLMQQeojNl3bZ66tz53f/NwJB6NJz6rKTzpeBz0pSNqtHEVwKm4pBJJEyG3G", + "j0fbQ257Q7/xPnWEBlcYtQYV3sMDwmhqaXdH+cbplkRR2ILRi8pkAl0hE2C8EDK4UNMXRJ68EnBj8LyO", + "9DO55pZ0h0k87Q3wcuQBBL5QJh/8bYfqVw5wKME1hjnGt7EtAz7COJoGrcTP5Y6FQ+GoOxImnvGyCZ1O", + "FPVGqcoLUQU+LuqV+U4xDse4s/BksoOug8/3mu5YjePYm2gsR+GiLlZgM14UqdRWX+NXhl/DIzHYQl43", + "Raia14HdHOVDavMT5UqaerNnrtDgltNFdfMT1BDX7g87jJl2Fjv8N1WBaXxnfND00a9yQ4R0cVxi/uEr", + "45TU62g6M2KVTccE3im3R0c79c0Ive1/p5Qenuv+IV7j9rhcvEcp/vaNuzjixL2D+HS6Wpq8uhgLrvB7", + "SHjUZITsciW8ygZ1xjDqATcvsWU94EPDJOBXvBx5CR/7Suh+Jf/B2Hv4fDR9A7c+PZflbC8LGk15RLHC", + "Pe/L0IU4Fh9M4cF357Xwa92L0HHf3fcdTx3FiLXMYtRDdzMnWrvBx3rRvr8aS5EQ6nTg97geiI/imfs0", + "8HAlVB2ir0IMdFAJ6VefgqdT92Nk/cmXBb+312LUx/LG16+lZXqd/PufyQvLQFq9+wN4XAab3i8qk5B2", + "yTzVNmFN6cNJpRA7t+KUGjapcileNgy2MmItHVoalJ8ZkNXzKeLAAB8f57Pz4qgLM1VyZ0ajpI7dC7Fa", + "W8zY/zfgBehXByoStFUI8IhVyoi2AmnpBvMpYNc43MnUxwaOgEVcUWE4VghCvYLcYtnZNrhOAxxTX8FN", + "Fpw+/12ZYFydbt5k+IIE+6oQDGvNHrjjB4mTouRfVKfzZHrO/bMmhJpegF1z06Zr6b2Znvxyc7mEHLMi", + "701U9R9rkFESpHmwyyAsyyhvlWjeMWFe7+Otji1A+/JI7YUnqq9za3DG3rFfwu6eYR1qSBYObR7x3SRx", + "MGKAXGAhh/SYIdlHjQnTUAZiIYQE+1TMbXGM0ZzPUdq1G84VSNJdHG0qtj1TpoueT5rLdT0q7SM+yRnL", + "ZTWsmTyufzzHEtXGB8jxJvFwrKWz82HhnGufuBjTijW+k5DCGEz4LeQQpFlKcenrByBWyFN1zXURWtxJ", + "Uii6m0Qa6GUzs2gfcAyDHBKlGPAtVF4qJ0ZkYw/Kum8mmoDDe4YiQ9sEPgjXErSGonGJlMpAZlV48LEP", + "jn2ooPDXGyHBjJY/IuBGU1+/bnN7Yxk4jqmuuY96jRfINGy4g05HGbjH59yH7Gf0PTzCD2XADlqYGno9", + "XI82PN0RZoDEmOqXzN+Whx/338TYJKQEnQXPUz8dt+xmZMO8m0Wd0wUdH4zGIDc5d84eVpK00+TDVfZ0", + "hOiR/CXsTkkJCoV8ww7GQJPkRKBHCUd7m3yn5jeTgnt1J+D9vnnkKqXKbMTZcT7MId6n+EuRXwLmAGxC", + "3EdqtLPP0MbeeLOv17uQM7uqQEJx/4SxM0mPioJju1tesDe5vGf3zb/FWYua0vp7o9rJO5l+nYEJ9/Ut", + "uVkYZj8PM+BY3S2nokEOZKjeyrGQm2tMzt+t4nkyVSsfupr7VeRboiIoUjLJBXmsnuFBTxmOMAVClKsD", + "HZmceU8XM6VKxfLeJE2DGyqNqXgyBMiCnJItoIHCD55EQLIueuIUUuo7n/ROLZmG1ol80+x/wxLuKY2+", + "P3MzS5ffLZWGTjF215syfTYPXzCNJv5nIazmeneTHH2DEvID68kolg+GYzWRWO1C2misIQ7LUl1nyKyy", + "ps5FSrV17Uz3Mg5F19p+7lQvIIrr4sYLaju25gXLldaQxz3S7z0Jqo3SkJUKw7xSHuildXL3Bh95SVaq", + "FVNVrgqgejFpChqbq5aSo9gEUVRNEgVEO/hamPpEdDxxSnenkh8pQ1FrdUTt/Bzo5Xqb1YkWnZEvcyRi", + "GYzP4uQxRI2H8O6p/Z/mzUuxRboBnTryS2Z1DXPmW/RrZPuDzzWwjTCGQGlo6VqUJT4cF9vI89oELqRR", + "OyL2nmNY5ZXA2JtuEgGShit35zWZFWIecBGnPWJ2rVW9WkcJphs4g8qra68Qx6P8ZGoMj8IXZG6KJ2yj", + "jPWaJo3ULrkNOfssV9JqVZZdoxSJ6CtvaX/Jt2d5bl8odbng+eV91Gulss1Ki3l4X90PDmxn0r3UYt0L", + "OKNy5odT9VI7DJXzRDuZQfZY3NGF3SMw3x/moIdt7mfDhfXX1WWmaTXmTDJu1Ubk6TP154q2G42RS7Go", + "ZM4yqq1IWSawGR72+LJqgiuQRQ7RDJIni8OdMc8IvJMZ2Y37L0rg/XHZEjyjGbkoh8zFS1FZPirr9QBA", + "SOnps601FWSMJbGGq6gVpUpAF3kf0Im3CkYi3Q42N8KdA2XhVkANoh8bAD8j48OccstRJOVCbcP3+23y", + 
"uRsB/3E/lXeYx1iI10VLWpqCvEKimhGOkE5xvTce6g0+e19MjYpqiudOvOEjAMbjpDowTIqWOhaMJRcl", + "FFmq9uJ5Y6OaR5q2f5rVL4kujOfkOa9D6UM3dq3BJ04hEV93/V8Vd6SkmuZDS7IsYAv0ruNX0IpqGs4j", + "/wuUVPKwZwxQVVbCFXTCx3w2lxpFTXEFoa9pOrMCoEJvZN9GloqLiu/ynuHErz2LImumYDdpSSHE0k6x", + "A2aSpFFnKzM6JmbqUXIQXYmi5h38mWNFjq4Z0B3lBKoGOkIW9Mip0/xEI7wOA5yF/ilRJmDi/TQ+dDQL", + "SqNuHwM6GCdZm7FTL9NhknGqosbBgrMVjSOWSLzlG6bi13LcIDkk+VbdmrhPQskIsd9sIUepxus7UHiN", + "Z8RJ4bOeILVLgIK0AtclYW1fg2RSRSUmr7lpVJU2h2L4gSbGRkJ6bfoGTuU2mvH2O8twMGZ6ydRGFQnd", + "0OnNzfO/y0ncexBHx0vRiAH//G+P/StQt1c7sAGW8pZuP53sj0Ua/S3muficLeowUFmqa6oZGeuhzyH4", + "QYn6ggvIi+WiuZZD1Obcp/fsmzpEFK++4TumNP7jtM5/1rwUyx3yGQI/dGNmzR0JeccrRQT4KFA38X7x", + "ah4AC9YWFaaidYupY0bD7dwoEdDuIg/FfRTb8EuItwGDHYh/5tYxTlMv0HLhruzedg6x4BcfUrRseBFr", + "+pgosltGPaQOdr3/Z/sWLp4q5HerSp6HCqG+RFGXz2AV4EBcdg2b/Y8lh3wtkEBTWbglWh1e1xc3MJke", + "ybpSLxDGyq90wB5UXB1UnrnVMiZafns1NvY8M520lLvehalRNwOg4zqNh8CPy1Z+Gvwnc7iOLWMK+H8U", + "vI8Uqo3hpZq0nwDLnQwcCVjJWr1Q20zD0hwKMCFztVPndZu7I5hYhcw1cEMRN+c/esWzTVEqpFOEKSa0", + "8Wk2oxSwFLJllkJWtU3oMZipVO4ihMVGf0TriAttTEpwwuQVL3+8Aq1FMbZx7nRQSce4RERwdPi+CRNG", + "c6cOBxCm1eHwfWZrRo+buQucilBRuKaxXBZcF3FzIVkO2t377JrvzM09So1z4JBPiUfSTDdrQORdQtIm", + "QMqddwrf0t/TAMjv0PEzwWGDccEJZw2Zdqwa8c8MYfhTOGw2fJuVaoWvCEcOhM9Nix4+UgGVRDM4yWfT", + "1h3mMeJX2D8NpuX3jMgqnHXKFPvP/Y+4lahG/iSF3XvyyUbZf9ZJcbd0MANS5aoN/idiGZ7H1Etcn3wl", + "fo0bhM3wVCXQHkSbCCP+oa5dfGQXMQzCP+OOjeDTy511Iy1S733JMpChxcDsCe8H04ay89yHZw1NaQNT", + "AyFl7l9LH2lpI/t8uJdGwKPa9P6sd6dtQmbcOMfUiNv/PjqrVJXlU2I+qXJH4d0EHtIujCP0ETkBRtbd", + "hMeYppZNJ+9Rp6jNsWXyRovqHPJ2Vfk+pX/MTDTC0bsuCLVEXkaV29G6hS95GmPKvP/GrGsGa5gE40xD", + "Xms0E1/z3eGyYyMZoy/+dvbFo8d/f/zFl8w1YIVYgWmzjvfKdrVxgUL27T6fNhJwsDyb3oSQfYAQF/yP", + "4VFVsyn+rBG3NW1K0UHRsmPsy4kLIHEcE+WibrRXOE4b2v/H2q7UIu98x1Io+O33TKuyTFd9aOSqhAMl", + "tVuRC8VpIBVoI4x1jLDrARW2jYg2azQPYu7fK8omo2QOwX7sqUDYkZCr1ELGAmqRn+Hbbu81YrCtSs+r", + "yNOzb11eTyMLHQqNGBWzAFapyov2YslSEOELIh29rPWGT7SIRzGyDbOlaNkUIfrI8zTpxQWz93P7bjFX", + "m+b0bhMT4kU4lDcgzTH/xHjegptwkta0/4fhH4lEDHfGNZrl/ha8Iqkf3Kwo/yTQho/yE+SBAIy8tu28", + "k4weikWJiDV5CdCfEBzIffHjZetYPvgsBCEJHQ6AFz+fbds1Lxk8OL9zRt+XDVKipbwfo4TO8g+9yA2s", + "t7lIoi3yRhNrwRBbUkOxMHpubZ41r5hHtJLBY2etlGVOMy3LxCNpsuPgmYoJx6kE+oqXn55rfCu0sWeI", + "Dyhejz+Nil/KxkgmVJqb5el7wSfNHb2Kvbup5St8mP0f4PYoec/5obwTfnCboXEHK9avwq1Ab73ZNY5J", + "QVaPvmQLX2yj0pAL03fuXwfhpHkYClosfUArbO2Bl6iH1vmzsrcg42WIxGE/RO6txmfvIWyP6O/MVEZO", + "bpLKU9Q3IIsE/lI8Ki7Oe+C6uGVhhpulfYkSuB2Z9mVYdnjq8ii1ibt0agPDdU6+rTu4TVzU7dqm5iya", + "XN/h3bu3djEl1VC6FoPrjrmO7qQow1ElGX6DLEeEIz+GnzdFMT+P5b2l3K4jubl7+1GL8mDASifT+sf5", + "bAUSjDCYS/zvvnbMp71LAwSUeWF4VAnW26SLIcQk1tqZPJoqyqE+IX2675bIeY2vGvNaC7vDusHBgCb+", + "nszH9F2T28Pnhml8af7us+oSmtrtbSaQ2oTb9TvFS7yPyMUn3S2kyhP2DWX49gflr/cW/waf/+VJ8fDz", + "R/+2+MvDLx7m8OSLrx4+5F894Y+++vwRPP7LF08ewqPll18tHhePnzxePHn85Msvvso/f/Jo8eTLr/7t", + "nuNDDmQCNKT2fzr7P9lZuVLZ2avz7I0DtsUJr8T34PYGdeWlwrqWDqk5nkTYcFHOnoaf/lc4YSe52rTD", + "h19nvj7TbG1tZZ6enl5fX5/EXU5X+PQ/s6rO16dhHqw22JFXXp03MfoUh4M72lqPcVM9KZzht9ffXLxh", + "Z6/OT1qCmT2dPTx5ePLIl7aWvBKzp7PP8Sc8PWvc91PMr3lqfOr806qi5Pkf57NTT4f+rzXwEpPouD82", + "YLXIwycNvNj5/5trvlqBPsEXGvTT1ePTIHGcfvDZET7u+3YaR3+cfugkkSgO9GyiG5J+xxdKXaLbO8hA", + "90wvVuMkrr59XjgUU0sMsDDnLbMLJZTRrzx7+jZlX/FxklW9KEXO6IpGGnUbEJFQkxqkZRFoTJu15ftb", + "hueY2MPsq/cfvvjLx5Qg1QfkpXf6tV4OH3aLL7nwEcJJgOufNehdCxh65GcxGEOXYDpD2tayyhc38LOd", + "sJ98NAN+Jb7RRH36h19NcrnQaQQwN0QKrgYL77GOH4b3ITk8fvgwnG4vO0dkdeqpNUZ3178wiP05JmVB", + "p7h1QvBxi8kQH0OK/clQWiWHTSE5Rc5jSO2GX5JnBYPmmPZvYz1GfRwuIrl5I+K3JTDw37Bs0YSH1zTT", + 
"UPD4OOSIIycwhMvGxq9SkGnPhzCl6lN/nM+eHEkNe41QnRyhCfBf8tKBDEVIDUMQPPp0EJxLiup0Vwtd", + "gR/nsy8+JQ7OpWNevGTYMiqxm6B4eSnVtQwtnbxSbzZc71AasVP22GcyQn9haEd0T5cnd2f47YzYMhYb", + "qUALpxTycvb+46Hr5fRDKK2+/zLqlNX2MclRh4mX3L5mpwsspza1KZio8fhS0MxlTj/gCR39/dRb29Mf", + "0WBGkthpSOQ10pJStqQ/dlD4wW7dQvYP59pE4+Xc5uu6Ov2A/0GhKloRZYA+tVt5igFGpx86iPCfB4jo", + "/t52j1tcbVQBATi1XFI9+n2fTz/Qv9FEHcJshZqugPJN1OjZGvLLWfru66XHj3oxkjn5ooSCmNOTCR2k", + "snGnGx3o1yh+GPbj90wsGfSnECbMcMS5peShp1i1ddfiMvy8k3nyx+E2dxInjvx8GlSelGjbbfmh82f3", + "yJl1bQt1Hc2CxkKydA8hcx9r0//79JoL69R/n68Py7wPO1vg5akvztH7tc2HPfiCSb6jH+OXaMlfT7lH", + "9axSJkG2r/l15OE7w8YkIYCxXyvUKMZup222EBIpKL6hWhsBfRzKxoN7yck1GAwX3CzDXDuY8EMrXuTc", + "YHlxX+dmIK1/TB67Ty1tfM0LFvKkZKyVPc68JtpZ2n9LIjj9559u+gvQVyIH9gY2ldJci3LHfpLNE5kb", + "M9JvkTg1zy9RQm8IluInNb/uvrrR6bwP3TJOIQ0IMLtlay6L0r+UVzXWp3OUhW5RFQXmuAsolDGrlEYA", + "KD8kFBSqYE7YRRPIgWERdVByCriCUlXot8CsxzQJxyAPcvTFF0GX/89n28wd4hXIzLORbKGKna/7M9P8", + "2m7p0fuAV5FwOMLIBqJb6quXTkYahYDu8Lk1IMYGObQiNKa4t++dFov15L2BobUvPT09xRc+a2Xs6cwp", + "4V3bU/zxfYOwUAZ1VmlxheUaEGlKC6dblpk33rQVz2aPTx7OPv7/AAAA//8zEE3OoQkBAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/model/types.go b/daemon/algod/api/server/v2/generated/model/types.go index e2d32257ba..d03f3d99f0 100644 --- a/daemon/algod/api/server/v2/generated/model/types.go +++ b/daemon/algod/api/server/v2/generated/model/types.go @@ -798,6 +798,9 @@ type SimulateRequest struct { // ExtraOpcodeBudget Applies extra opcode budget during simulation for each transaction group. ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"` + // FixSigners If true, signers for transactions that are missing signatures will be fixed during evaluation. + FixSigners *bool `json:"fix-signers,omitempty"` + // Round If provided, specifies the round preceding the simulation. State changes through this round will be used to run this simulation. Usually only the 4 most recent rounds will be available (controlled by the node config value MaxAcctLookback). If not specified, defaults to the latest available round. Round *uint64 `json:"round,omitempty"` @@ -855,6 +858,9 @@ type SimulateTransactionResult struct { // ExecTrace The execution trace of calling an app or a logic sig, containing the inner app call trace in a recursive way. ExecTrace *SimulationTransactionExecTrace `json:"exec-trace,omitempty"` + // FixedSigner The account that needed to sign this transaction when no signature was provided and the provided signer was incorrect. + FixedSigner *string `json:"fixed-signer,omitempty"` + // LogicSigBudgetConsumed Budget used during execution of a logic sig transaction. LogicSigBudgetConsumed *uint64 `json:"logic-sig-budget-consumed,omitempty"` @@ -900,6 +906,9 @@ type SimulationEvalOverrides struct { // ExtraOpcodeBudget The extra opcode budget added to each transaction group during simulation ExtraOpcodeBudget *uint64 `json:"extra-opcode-budget,omitempty"` + // FixSigners If true, signers for transactions that are missing signatures will be fixed during evaluation. 
+ FixSigners *bool `json:"fix-signers,omitempty"` + // MaxLogCalls The maximum log calls one can make during simulation MaxLogCalls *uint64 `json:"max-log-calls,omitempty"` diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go index bf31180ecc..7e8da1ba91 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go @@ -334,65 +334,66 @@ var swaggerSpec = []string{ "9aLvz9zM0uV3S6WhU4zd9aZMn03gC6bRxP8shNVc726So29QQn6gPRnF8kF3rMYTq11I6401xGFZqusM", "mVXW1LlIPW1dO9O9jEPRtbafO9ULiPy6uPGC2o6tecFypTXkcY90vCdBtVEaslKhm1fKAr20Tu7eYJCX", "ZKVaMVXlqgCqF5OmoLG5aik5ik0QedUkUUC0g9HC1Cei44lTujuV7EgZilqrI2rn50CR621WJ1p0RrbM", - "EY9lMD6Lk8cQNR7Cu6f2/1GVWs7RjfFKoK9LN2ifpM/K3TFNJoP4zF3EaYaYXWtVr9ZRQmd2LcoyKAzc", - "NujaP0DjUX40NbojYcSWm+IJ2yhj/cuORjLNUK2L1ye5klarsuwqgUgkXnnN9nd8e5bn9oVSlwueX97H", - "d6RUtllpMQ/xzH1nvHYm3Uvl1b3wMioffjg1LrVD1zRPJJMZUo+lHF1IPQLz3WGOdVjHfTZcWH9dXeaV", - "fjacScat2og8TcN/Lu+2UZ+0FEtI5gijWoaU1QGbIaOOL4fGmQFZ0hDNIHmyGNsZ8zzNG3WRebj/osTb", - "H5ctwV8SIxfTkE96qSXLR2WrHgAIKYUa21pTAcRY8mm4ilpRagI0SfcBncjF0fPndrC5Ee4cKAu3Amrg", - "bdgA+Ak99ueUy408FxdqG77fb5O93Qj4D/upvMM8xlyqLlrS0uRUFRLDjHCEdErpvf5HrzHMfDHVC6kp", - "VjvxRo0AGPdL6sAwyTvpWDCWXJRQZKlah+eNTmgevWx9KFS/BLkwnpPnvA6lBt3YtQafqIREat21N1Xc", - "kZJqmg81t7KALVAcxa+gFdUQnEf2DiipxGDv8a2qrIQr6Lhr+ewpNYp24gpCX9N0ZgVAhda/vk4q5YcU", - "3+U9RYVfexZ5skzBblJzQYilnWIH1BJJJcpWZnRMzNSj5CC6EkXNO/gzx4ocXbWbO8oJVA1k8iy826ZO", - "8yON8CoMcBb6p0SZgIl30/jQ0Swojbp9DOigX2Jtxk69TLslxqmBGoMGzlY0hk8i8ZZvmIpfy3EF4JDk", - "2+fNxH0SSkaI/WoLOUo1Xb+72+OE4WDM9NJ+jYrgutnhmyuSfxca3kvCo+OlnhoGfKDaHk1NoAsvsGMD", - "LDotndjrpGYsJ+j5v+d/c7aow0DuXU3VDeMX3HMIFjvMRN4YK7xAK5oLLfgXzn0iyv6jXESe1Ru+Y0rj", - "P+699s+al2K5wxNK4IduzKy5IyFvIiTbtfdXdBPvF0zmAbCgF1BhKlq3mDpmNNzOjRIB7a7AUIZGsQ2/", - "hHgb0CxPnCe3juWYerERxuBl19vOIRb84kMykQ0v4jcypjTsFvwOSW5d7//ZRm3FU4VMZFXJ81DL0hfT", - "6SjEqV5tIC67hs3+sL7h8ziQQFMDtyVaHeLAixso94703Ej5yo8VCumAPagNOqiRcqtlTNRR9qpB7AmI", - "nLSUu96Fqf4hA6DjioKHwI8LLH4c/CezjY4tYwr4fxS8j5RUjeGl6qkfAcudXBEJWEmvulDbTMPSHHKF", - "IMWqewjrNstEUE4KmWvghnxDzn/wT7Y2maaQ7glJ3ouN9a0ZpYClkC2zFLKqbeIFgDk15S5CWKyeRrSO", - "GHvGpAQnhl3x8ocr0FoUYxvnTgcVH4yLGQSVvO+bePw3d+pwAGHa1w9GEkIbqRY1cxc4lUsix0JjuSy4", - "LuLmQrIctLv32TXfmZvbPhy0unbyxQHrB4+kmW58e2QHQdImQMqdN1/e0jLRAMjv0EQxwbSAHqwJswIp", - "RawasSQMYUjn4+DbrFQrjC8bIUCftRRtP/RYURIVtiQPHTePEb/C/mkwYbs/+FbhrFOm2H/OfkDU4YPn", - "Ryns3pNG2rR+wB95ZNJBCPQvV61bOG3OkP5TMZo+LUccpxmEuxDEEPaa3ENoPhixZHQ1uCO7iAZyH+Ab", - "q2unF8Lq2uBTkaD0hs3wbWv2OH6DaZ2cee4dd4ZKn8GjmJAy93G0R+qESJMc7oER8KhquT9b3WkbZwo3", - "zjHVw/ZHzmaVqrJ8ijcg1XQovELbQ9qFcYQ+InX1yLobxwnTVDnpZMTplDs5toDaaLmVQ3aZKt/3yB5T", - "aIxw0K6yXC2Rl1FNb9TDYIxHo7yY96OPugqbhkkwzjTktUaF5jXfHS5INZJL+OJvZ589evzz488+Z64B", - "K8QKTJuPulfQqfUYE7KvZ/m4PmKD5dn0JoS4dEJcsJSFcJtmU/xZI25r2mSTg3JWx2hCExdA4jgmCgnd", - "aK9wnNbp+4+1XalF3vmOpVDw2++ZVmWZrgfQiG4JVX9qtyJlv5P4K9BGGOsYYddWJ2zrK2vWqI7DrLBX", - "lGdEydyn7W+oQNgRZ5zUQsZcLZGfYdSvt28w2Fal51Vkk9i3Lv8uIo0YOmeg/8YCWKUqL0qLJUtBhLEl", - "Ooq59IpGdO+MvCcbZkt+lClC9D7JadKLSynv5/bdMp82zendJibEi3Aob0CaY5r08Yj2m3CSVpX+h+Ef", - "iRD9O+MazXJ/C16RfB/crFz7JNCG4doJ8kAARuIwOxF0UQhRlKJWk1Ye9ffB1NkXP75rTaAHAwYQktDh", - "AHhxYGXbrvFx9+D8zrlev2uQEi3l3RgldJZ/KFYzsN7mIom2yCsprAVDbEkNxcIoENc8a+JbR14lgzBY", - "rZRl7mValonwWdKb4JmKCcc9CfQVLz8+1/haaGPPEB9QvBoPmoljKGMkEyrNzTK4veCT5o7iJe9uavkS", - 
"Q3b/Dm6PkvecH8qbiwe3GWq9sJb5KtwKFAXMrnFMcgd69Dlb+DIMlYZcmL4Z+joIJ03IIGix9K6XsLUH", - "YhQPrfMnZW9BxsvgM8K+j8xJCtV2LYTtEf2dmcrIyU1SeYr6BmSRwF+KR8VlWw9cF7dM2X+zhCBRaq8j", - "E4IMC9JOXR4lvXCXTm1guM7Jt3UHt4mLul3b1Gw2kzP/v337xi6mJKFJZ+l33TELzp2k6z8qWf9vkP+G", - "cOTH8POmKOansYyolPVzJGtzbz9qUR50EOnk4P4wn61AghEGs0z/7KuKfNy7NEBAMfnDo0qw3iaRCCEm", - "sdbO5NFUUXbtCYm1fbdENmSMd8trLewOK8oGBZr4OZmp55sm64PPGtLYrvzdZ9UlNFW92xwRtQm36zeK", - "l3gfkUlNultIlSfsK8r97A/KX+8t/g0+/cuT4uGnj/5t8ZeHnz3M4clnXzx8yL94wh998ekjePyXz548", - "hEfLz79YPC4eP3m8ePL4yeeffZF/+uTR4snnX/zbPceHHMgEaEj6/nT2f7KzcqWys5fn2WsHbIsTXolv", - "we0NvpWXCiseOqTmeBJhw0U5exp++l/hhJ3katMOH36d+co9s7W1lXl6enp9fX0SdzldYVB4ZlWdr0/D", - "PFiHriOvvDxvvMnJ7wV3tNUe46Z6UjjDb6++unjNzl6en7QEM3s6e3jy8OSRL3oseSVmT2ef4k94eta4", - "76eYefHU+KTqp1Xl06onzWSvfC2eLsWFzghsk5fb7Tal6/bJ0U1c8fi8QNqyw5TuWJkL3aAQwMcPH4Zd", - "8TJPdPWcYsTC0/ezafXPh5PhzvezLSzq1UsHc0jy0aR988YJjzO0FxLCmv2iJzBfGdSsa3HFLczefZjP", - "qjqBzq8wHMHsw9k8ShVO0KiyaDA+wOjL+r8IRj/MZ6eeT86evnd/rYGXmP7H/bFxhJqHTxp4sfP/N9d8", - "tQJ94tfpfrp6fBok4tP3Pq/Dh33fTmNvoNP3nfQXxYGewdvlUJPT96Gw7/4BO0VdvZ9h1GEioPuanS6w", - "mM/UphCvbnwpSPPm9D0+Bkd/P/UavfRHfJQTtz8NaWRGWlLCgPTHDgrf261byP7hXJtovJzbfF1Xp+/x", - "P0i2H+i0l5DKN0OFBDhrm8+ZsIwvlMY6sTZfO24QClQKE7UcHPkz1+sZQRDqfaNryezpm2HUDA7Ewkh4", - "Xbq7oL3NOjO1Aguq9iOm0IhjnfatUPbmYfbFu/eP5o8efvgXJ3T5Pz/79MPEALNnzbjsopGoJjZ8d0uO", - "N9AftIukTWoY2FDg9bQwHhXht6o3EGuQcaAKXW/4odyODPjJHfL4bnbUBH//khcsBHPj3I8+3tznkvyD", - "ndBEwt2H+eyzj7n6c+lInpcMW0ZlhYdb/6O8lOpahpZOEq83G6534RibDlNgfrNR3uvdgPOZVDJK+SZX", - "JGaoVED9CL8xlt+A31y4Xv/NbzoNBxYnjF4izZ8vQR35mNBl0lTcgpAHM/iV8+KKyzyEsLSe8bhfJHl7", - "wmicL2sDy7oMyRKqUiypsrdSZZjI1FXlOM6Sm4ayvDu+e7xR7HkzNKtlriS58WDkQzBGYgw5GjTNpag6", - "XcTSUZWvOS0BfHgxbvo/a9C7dtc3wr3C2u0dOJr9liyc8HgHLLw70B2z8MdHstE//4r/a19aTx7+5eNB", - "EFKsvBYbULX9s16aF3SD3erS9DI8VQk4tVt5iq69p+87zxX/efBc6f7edo9bXG1UAeEJoZZLg6qVfZ9P", - "39O/0USwrUCLDUgqHu5/pZvjFEtX74Y/72Se/HG4jk722JGfT4N2L/VK7rZ83/mz+/Iz69oW6pqqLSbl", - "Fbw+eck2XPIVhT43CjF3D/oB2sS27Iequah8thbGsUiYqm2rsaQwBh8G3diU8UZrPItWQuIEaBzEWfjS", - "deXRBe7r9J0MZSMP2feqgKFslLoIPYydy7A5CqmKeLe9GIeM98NxBwWNmGSBH5KR+1ib/t+n11xYJ0H5", - "DLOI0WFnC7w89eWker+2FRwGX7AsRfRjHMud/PWUd89FV4Pitmys40C9kvrqNQgjjUIgRfjcGhJixTyS", - "S6OSf/PO7boBfRUoqdUzPz09xci6tTL2FCXRrg46/viu2ehQKLfZcPdtmyktVkLyMvNKsrYm3uzxycPZ", - "h/8fAAD//3HzSP3DCwEA", + "EY9lMD6Lk8cQNR7Cu6f2f5o3L8UW6QZ06sgvmdU1zJlv0a+R7Q8+18A2whgCpaGla1GWGDgutpHltXFc", + "SKN2ROw9R7fKK4G+N90kAiQNV+7OazIrxDzgIk57xOxaq3q1jhJMN3CGJ6+u/YM4HuVHU6N7FEaQuSme", + "sI0y1r80aaR2ya3L2Se5klarsuwqpUhEX3lN+3d8e5bn9oVSlwueX97Hd61UtllpMQ/x1X3nwHYm3Ust", + "1r2AMypnfjhVL7VDVzlPtJMZZI/FHV3YPQLz3WEOeljnfjZcWH9dXWaafsacScat2og8fab+XN52oz5y", + "KRaVzFlGtRUpywQ2w8MeX1aNcwWyyCGaQfJkcbgz5hmBNzIju3H/RQm8Py5bgmc0IxflkLl4KSrLR2W9", + "HgAIKYU+21pTQcZYEmu4ilpRqgQ0kfcBnXiroCfS7WBzI9w5UBZuBdTA+7EB8BNSPswptxx5Ui7UNny/", + "3yafuxHwH/ZTeYd5jLl4XbSkpcnJKySqGeEI6RTXe/2hXmPY+2KqV1RTPHfiDR8BMO4n1YFhkrfUsWAs", + "uSihyFK1F88bHdU8emn70Kx+SXRhPCfPeR1KH7qxaw0+cQqJ+Lpr/6q4IyXVNB9qkmUBW6C4jl9BK6pp", + "OI/sL1BSycOeMkBVWQlX0HEf89lcahQ1xRWEvqbpzAqACq2RfR1Zyi8qvst7ihO/9izyrJmC3aQmhRBL", + "O8UOqEmSSp2tzOiYmKlHyUF0JYqad/BnjhU5umpAd5QTqBq8EbLwjpw6zY80wqswwFnonxJlAibeTeND", + "R7OgNOr2MaCDfpK1GTv1Mu0mGacqagwsOFvRGGKJxFu+YSp+LccVkkOSb59bE/dJKBkh9qst5CjV+PcO", + "FP7FM2Kk8FlPkNolQEGvAtcloW1fg2RSRSUmr7lpniptDsXwA02MjYT0r+kbGJVbb8bb7yzDwZjpJVMb", + "fUjohk5vrp7/XU7i3oM4Ol6KRgz48L89+q9A3f7ZgQ2wlLd0++lkfyzS6G8xz8XnbFGHgcpSXVPNyPgd", + "+hyCHZSoL5iAvFgumms5eG3OfXrPvqpDRP7qG75jSuM/7tX5z5qXYrlDPkPgh27MrLkjIW94JY8A7wXq", + 
"Jt4vXs0DYEHbosJUtG4xdcxouJ0bJQLaXeShuI9iG34J8TagswPxz9w6xmnqBWou3JXd284hFvziQ4qW", + "DS/ilz4miuyWUQ+pg13v/9nGwsVThfxuVcnzUCHUlyjq8hmsAhyIy65hsz9YcsjXAgk0lYVbotUhur64", + "gcr0SNaVikAYK7/SAXtQcXVQeeZWy5io+e3V2NgTZjppKXe9C1O9bgZAx3UaD4Efl638OPhP5nAdW8YU", + "8P8oeB8pVBvDSzVpPwKWOxk4ErCStnqhtpmGpTnkYELqavec123ujqBiFTLXwA153Jz/4B+ebYpSId1D", + "mHxCG5tmM0oBSyFbZilkVdvEOwYzlcpdhLBY6Y9oHTGhjUkJTpi84uUPV6C1KMY2zp0OKukYl4gIhg7f", + "N6HCaO7U4QDCtG84jM9s1ehxM3eBUxEqctc0lsuC6yJuLiTLQbt7n13znbm5RakxDhyyKfFImulmDYis", + "S0jaBEi580bhW9p7GgD5HRp+Jhhs0C84Yawh1Y5VI/aZIQx/CoPNhm+zUq0winDkQPjctGjhoyegkqgG", + "J/ls2rrDPEb8CvunwbT8nhFZhbNOmWL/uf8BtxKfkT9KYfeefNJR9sM6ye+WDmZAqly1zv9ELMPzmIrE", + "9clX4mjcIGyGUJVAexBtIozYh7p68ZFdRDcIH8YdK8Gnlzvrelqk4n1JM5ChxsDsce8H07qy89y7Zw1V", + "aQNVAyFl7qOlj9S0kX4+3Esj4FFten/Wu9M2LjNunGNqxO2Pj84qVWX5FJ9PqtxReDOBh7QL4wh9REaA", + "kXU37jGmqWXTyXvUKWpzbJm80aI6h6xdVb7v0T+mJhrh6F0ThFoiL6PK7ajdwkieRpky78eYddVgDZNg", + "nGnIa41q4mu+O1x2bCRj9MXfzj579Pjnx599zlwDVogVmDbreK9sV+sXKGRf7/NxPQEHy7PpTQjZBwhx", + "wf4YgqqaTfFnjbitaVOKDoqWHaNfTlwAieOYKBd1o73CcVrX/j/WdqUWeec7lkLBb79nWpVluupDI1cl", + "DCip3YpMKO4FUoE2wljHCLsWUGFbj2izRvUg5v69omwySuYQ9MeeCoQdcblKLWTMoRb5GcZ2e6sRg21V", + "el5Flp596/LvNNLQodCIXjELYJWqvGgvliwFEUYQ6Siy1is+USMe+cg2zJa8ZVOE6D3P06QXF8zez+27", + "xVxtmtO7TUyIF+FQ3oA0x+wT43kLbsJJWtX+H4Z/JBIx3BnXaJb7W/CK5PvgZkX5J4E2DMpPkAcCMBJt", + "24mTjALFokTEmqwEaE8IBuS++PFda1g+GBaCkIQOB8CLw2fbdk0kgwfnd87o+12DlGgp78YoobP8QxG5", + "gfU2F0m0RV5pYi0YYktqKBZG4dbmWRPFPPIqGQQ7a6Uscy/TskwESZMeB89UTDjuSaCvePnxucbXQht7", + "hviA4tV4aFQcKRsjmVBpbpan7wWfNHcUFXt3U8uXGJj9d3B7lLzn/FDeCD+4zVC5gxXrV+FWoFhvdo1j", + "kpPVo8/ZwhfbqDTkwvSN+9dBOGkCQ0GLpXdoha09EIl6aJ0/KXsLMl4GTxz2fWTeamz2HsL2iP7OTGXk", + "5CapPEV9A7JI4C/Fo+LivAeui1sWZrhZ2pcogduRaV+GZYenLo9Sm7hLpzYwXOfk27qD28RF3a5tas6i", + "yfUd3r59YxdTUg2lazG47pjr6E6KMhxVkuE3yHJEOPJj+HlTFPPTWN5byu06kpu7tx+1KA86rHQyrX+Y", + "z1YgwQiDucR/9rVjPu5dGiCgzAvDo0qw3iZdDCEmsdbO5NFUUQ71CenTfbdEzmuMasxrLewO6wYHBZr4", + "OZmP6Zsmt4fPDdPY0vzdZ9UlNLXb20wgtQm36zeKl3gfkYlPultIlSfsK8rw7Q/KX+8t/g0+/cuT4uGn", + "j/5t8ZeHnz3M4clnXzx8yL94wh998ekjePyXz548hEfLz79YPC4eP3m8ePL4yeeffZF/+uTR4snnX/zb", + "PceHHMgEaEjt/3T2f7KzcqWys5fn2WsHbIsTXolvwe0NvpWXCutaOqTmeBJhw0U5exp++l/hhJ3katMO", + "H36d+fpMs7W1lXl6enp9fX0SdzldYeh/ZlWdr0/DPFhtsCOvvDxvfPTJDwd3tNUe46Z6UjjDb6++unjN", + "zl6en7QEM3s6e3jy8OSRL20teSVmT2ef4k94eta476eYX/PU+NT5p1Xlk+cnzXavfMWlLsWFzghsk33d", + "7TYlZfcp8E1c1/q8QNqyw8T9WH8N3bIQwMcPH4Zd8TJPdPWcYhzI0/ezaVXuh5Phzvdzaizq1UsHc0jl", + "0iT388YJjzO0XxLCmv2iJzBfGdSsa3HFLczefZjPqjqBzq8wyMPsw9k8SghP0KiyaDA+wOjL+r8IRj/M", + "Z6eeT86evnd/rYGXmOTJ/bFxhJqHTxp4sfP/N9d8tQJ94tfpfrp6fBok4tP3PnvHh33fTmPvpNP3nSQn", + "xYGewfvmUJPT96F88/4BO6V7vd9j1GEioPuanS6wZNPUphCvbnwpSPPm9D0+Bkd/P/UavfRHfJQTtz8N", + "yYJGWlJaiPTHDgrf261byP7hXJtovJzbfF1Xp+/xP0i2H+i0l5DKKkTlIjhrm8+ZsIwvlMZqwDZfO24Q", + "ypAKE7UcHPkz1+sZQRCquqOry+zpm2EsEg7Ewkh4Xbq7oL3NOjO1Aguq9iOm0IhjnfatUPbmYfbFu/eP", + "5o8efvgXJ3T5Pz/79MNET+5nzbjsopGoJjZ8d0uON9AftIukTWoY2FDg9bQwHmvit6o3EGuQcaDWYG/4", + "odyODPjJHfL4bg7cBH//khcshOzj3I8+3tznkvyVndBEwt2H+eyzj7n6c+lInpcMW0bFo4db/6O8lOpa", + "hpZOEq83G6534RibDlNgfrNR3uvdgPOZVDJK7CdXJGaoVNqEEX5jLL8Bv7lwvf6b33QaDixOGBNGmj9f", + "aDzyMaHLpKmrBiHbafBz58UVl3kIDGo99XG/SPL2hNE4g9YGlnUZUmJUpVhS/XalyjCRqavKcZwlNw1l", + "+fAA93ijiP5maFbLXEly48FIjGCMxMh8NGiaS1F1uoiloypfWZyigk7Cpv+zBr1rd30j3Cus3d6Bo9lv", + "ycIJj3fAwrsD3TELf3wkG/3zr/i/9qX15OFfPh4EIZHOa7EBVds/66V5QTfYrS5NL8NTLYhTu5Wn6Gp8", + "+r7zXPGfB8+V7u9t97jF1UYVEJ4Qark0qFrZ9/n0Pf0bTQTbCrTYgKQS8f5XujlOsUD5bvjzTubJH4fr", + 
"6OQIHvn5NGj3Uq/kbsv3nT+7Lz+zrm2hrqmmZlJeweuTl2zDJV9RQHmjEHP3oB+gTV/Mfqiai8rHkTKO", + "peBUbVuNJYVV+ODyxqaMN1rjWbQSEidA4yDOwpeuK48ucF+N8WQoG3nIvlcFDGWj1EXoYexchs1RSNU9", + "vO3FOGS8H447KGjEJAv8kIzcx9r0/z695sI6CcrnEUaMDjtb4OWpLxrW+7Wt0zH4gsVHoh/jCPnkr6e8", + "ey66GhS3ZWMdB+qV1FevQRhpFAI7wufWkBAr5pFcGpX8m3du1w3oq0BJrZ756ekpRvqtlbGnKIl2ddDx", + "x3fNRodyyM2Gu2/bTGmxEpKXmVeStZUPZ49PHs4+/P8AAAD//50s/LmpDQEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index 18ea670c4d..31d870e45e 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -746,306 +746,308 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9/XfbtrIo+q9g6d618nFFOUnTnt281XWfm6Stb/O1Yrf7nNPktRAJSdimAG4AlKXm", - "5X+/CzMACZKgRNmyk7T+KbFIAoPBYDDf82GUymUhBRNGj558GBVU0SUzTMFfNE1lKUzCM/tXxnSqeGG4", - "FKMn/hnRRnExH41H3P5aULMYjUeCLln9jv1+PFLs3yVXLBs9Mapk45FOF2xJ7cBmU9i3q5HWyVwmbohj", - "HOLk2ejjlgc0yxTTugvla5FvCBdpXmaMGEWFpql9pMkFNwtiFlwT9zHhgkjBiJwRs2i8TGac5Zme+EX+", - "u2RqE6zSTd6/pI81iImSOevC+VQup1wwDxWrgKo2hBhJMjaDlxbUEDuDhdW/aCTRjKp0QWZS7QAVgQjh", - "ZaJcjp78NtJMZEzBbqWMr+C/M8XYnywxVM2ZGb0fxxY3M0wlhi8jSztx2FdMl7nRBN6FNc75igliv5qQ", - "l6U2ZMoIFeTtD0/JV1999a1dyJIawzJHZL2rqmcP14Sfj56MMmqYf9ylNZrPpaIiS6r33/7wFOY/dQsc", - "+hbVmsUPy7F9Qk6e9S3AfxghIS4Mm8M+NKjffhE5FPXPUzaTig3cE3z5oJsSzv9JdyWlJl0UkgsT2RcC", - "Twk+jvKw4PNtPKwCoPF+YTGl7KC/PUi+ff/h4fjhg4//47fj5L/dn19/9XHg8p9W4+7AQPTFtFSKiXST", - "zBWjcFoWVHTx8dbRg17IMs/Igq5g8+kSWL37lthvkXWuaF5aOuGpksf5XGpCHRllbEbL3BA/MSlFbtmU", - "Hc1RO+GaFEqueMayseW+FwueLkhKNQ4B75ELnueWBkvNsj5ai69uy2H6GKLEwnUpfMCCPl9k1OvagQm2", - "Bm6QpLnULDFyx/XkbxwqMhJeKPVdpfe7rMjZghGY3D7AyxZwJyxN5/mGGNjXjFBNKPFX05jwGdnIklzA", - "5uT8HL53q7FYWxKLNNicxj1qD28f+jrIiCBvKmXOqADk+XPXRZmY8XmpmCYXC2YW7s5TTBdSaEbk9F8s", - "NXbb/8/p61dEKvKSaU3n7A1NzwkTqcxYNiEnMyKkCUjD0RLg0H7Ztw4HV+yS/5eWliaWel7Q9Dx+o+d8", - "ySOreknXfFkuiSiXU6bslvorxEiimCmV6AMIR9xBiku67k56pkqRwv7X0zZkOUttXBc53QDClnT93YOx", - "A0cTmuekYCLjYk7MWvTKcXbu3eAlSpYiGyDmGLunwcWqC5byGWcZqUbZAombZhc8XOwHTy18BeD4QXrB", - "qWbZAY5g6wjN2NNtn5CCzllAMhPyi2Nu8NTIcyYqQifTDTwqFFtxWerqox4YYertEriQhiWFYjMeobFT", - "hw7LYPAdx4GXTgZKpTCUC5ZZ5gxAS8OQWfXCFEy4Xd/p3uJTqtk3j/vu+PrpwN2fyfaub93xQbsNLyV4", - "JCNXp33qDmxcsmp8P0A/DOfWfJ7gz52N5PMze9vMeA430b/s/nk0lBqYQAMR/m7SfC6oKRV78k7ct3+R", - "hJwaKjKqMvvLEn96WeaGn/K5/SnHn17IOU9P+bwHmRWsUYULPlviP3a8ODs266he8ULK87IIF5Q2FNfp", - "hpw869tkHHNfwjyutN1Q8Thbe2Vk3y/MutrIHiB7cVdQ++I52yhmoaXpDP5Zz4Ce6Ez9af8pitx+bYpZ", - "DLWWjt2VDOYDZ1Y4Loqcp9Qi8a17bJ9aJsBQkaD1G0dwoT75EIBYKFkwZTgOSosiyWVK80QbamCk/6nY", - "bPRk9D+OavvLEX6uj4LJX9ivTuEjK7KiGJTQothjjDdW9NFbmIVl0PAI2ASyPRCauMBNtKTELQvO2YoK", - "M6lVlgY/qA7wb26mGt8o7SC+WypYL8IJvjhlGiVgfPGOJgHqCaCVAFpBIJ3nclr9cPe4KGoMwvPjokB8", - "gPTIOAhmbM210fdg+bQ+SeE8J88m5MdwbBDFpcg39nJAUcPeDTN3a7lbrLItuTXUI97RBLZTqondGo8G", - "K+YfguJArVjI3Eo9O2nFvvyTezckM/v7oI+/DBILcdtPXKBoOcyhjgO/BMrN3RbldAnHmXsm5Lj97eXI", - "xo6yhWD0SY3FQxMP/MINW+qdlBBAFFCT2x6qFN2MnJCYgLDXJZNfNEMKKeicC4B2bNUnQZb0HPdDAt4t", - "ITBd6UVISyhBViZUJ3M61E86dpYvgFpjG+slUSup5lwb0KvhZbJgOQjOVHiCDknlUpQxYMO3LKKC+ULR", - "AmnZPUGxiwvQ5/ElhPWKF+/AOzEKc8Dug40GqC7NlneyzigkwDVaMHyfy/T8J6oXBzjhUz9Wl/ZhGrJg", - 
"NGOKLKheRA5Oi7br0YbQt30RaJZMg6km1RJfyLk+wBJzuQ/rKoqnNM/t1F2W1VotDDzoIOc5sS8TtuRg", - "MHeKI1rYUf8iz2m6sGIBSWmej2tTkSySnK1YbpV2LgRTY2IW1NSHH0b2eg2cI80sszOMBKtxZiYwsanK", - "FqEYWVK4gZZWmyny5jcVB9V0yVpSENyIsgQrQqBonDzzq2MrJoAnVUMD+NUawVoTDj6xc7tHMLOQuDi0", - "ABrvvqvwV/GLBtD27fo+FfUUUmVoszb2N65IKhUOgTe8m9z+h1FVf4zUebdQLHFDKLpiStPcrq61qHsV", - "+R7qdO44mRk1NDiZjgrjChhyDvgOxDumIlaa1/AfmhP72EoxlpJq6uEgjMjAnZrhxWxRhTPZF8DeKskS", - "TZmkoOn5XlA+rSePs5lBJ+85Wk/dFrpFVDt0tuaZPtQ2wWB9e9U8IWi78uyoI4tsZTrBXEMQcCYLguyj", - "BQJyChgNESLXB7/WvpfrGEzfy3XnSpNrdpCdsOMMZvbfy/UzB5lUuzEPYw9Bul2goEum4XYTIeO0s9R+", - "ueOpVJeTJloXjCC1t5FQO2ogTI1bSIJXyyJxZzPiscAXWgPVAR7bhYD28DGMNbBwaug1YEHbUQ+BheZA", - "h8aCXBY8Zwcg/UVUiJtSzb56RE5/Ov764aPfH339jSXJQsm5oksy3RimyV1nliPabHJ2L6odgXQRH/2b", - "x95H1Rw3No6WpUrZkhbdodD3hdovvkbse12sNdEMq64AHMQRmb3aEO0E3boWtGdsWs5PmTFW032j5Ozg", - "3LAzQww6eOlNoaxgoZt+QictHWX2lSO2NooeFfAmExnGGdh1cG11wOX0IETVt/FZPUtGHEYztvNQ7LtN", - "9TSbcKvURpWHMG8wpaSKXsGFkkamMk+snMdlxEDxxr1B3Bt+u4r27wgtuaCa2LnBe1mKrMcOYdZi+P2F", - "Q5+tRY2brTcYrjeyOjfvkH1pIr/WQgqmErMWBKizYR6ZKbkklGTwIcgaPzKD8hdfslNDl8Xr2eww1k4J", - "A0XsOHzJtJ2J4BtW+tEslQKD+XaYbNyoQ9DTRoz3Mpl+ABxGTjciBVfZIY5tvzVryQX47fVGpIFpy8KY", - "s2zeIMurm7D60IFT3dERcCw6XsBjsNU/Y7mhP0h1VouvPypZFgdnz+05hy6HusU4b0Bmv/VmYC7meTOA", - "dG5hn8TW+EkW9LQyIuAaAHqgyBd8vjCBvvhGyWu4E6OzxACFB2gsyu03XZPRK5lZZmJKfQBRsh6s5nCW", - "bkO+RqeyNIQSITMGm1/quJDZE3IIsU4QomVCuRXsE1yTKbPUldLSrrYsCAQgde6L+sOEpnhCE0CN7gm/", - "qOJm8C2cDsPZcsVotiFTxgSRUxfj4KIvYJEUoqeMF9OciBvhFw24CiVTpjXLEmeK3gmafw+vDrMFTwA4", - "AFzNQrQkM6quDOz5aiec52yTQKyfJnd//lXf+wTwGmlovgOx8E4MvW17WhfqYdNvI7j25CHZoaUOqdaK", - "t5ZB5MywPhTuhZPe/WtD1NnFq6NlxRSElFwrxftJrkZAFajXTO9XhbYseiLYnZpuJTy7YYIK6QWr2GA5", - "1SbZxZbtSw1bgl1BwAljnBgG7hG8XlBtMAyKiwxsmnidwDwohNkp+gHuVUPsyL96DaQ7dmrvQaFLXakj", - "uiwKqQzLYmsAj2zvXK/YuppLzoKxK53HSFJqtmvkPiwF4ztkOQ0Y/qCm8r86j253ceBTt/f8JorKBhA1", - "IrYBcurfCrAbRvH2AMJ1jWgkHK5blFOFDo9H2siisNzCJKWovutD0ym+fWx+qd/tEhc6OfDeziTT4EBx", - "7zvILxCzGL+9oJo4OLyLHcw5GK/VhdkexkRzkbJkG+WDimffCo/AzkNaFnNFM5ZkLKebSHAAPib4eNsA", - "sOO1uisNSzAQN77pNSX7uMctQ0sYT8eERwJPSGqPoFUFagJxX+8YOWMwdow5OTq6Uw0Fc0W3yI8Hy8at", - "jowIt+FKGrvjjh4AZMfRhwDcg4dq6MujAj5Oat2zPcV/Me0mqOSI/SfZMN23hHr8vRbQYwt2OU7BeWmx", - "9xYHjrLNXja2g4/0Hdkew/QbqgxPeQG6zs9sc3DVrz1B1HFOMmYoz1lGggeoBhbh9wRDSNtjXk4VHGR7", - "64LfMb5FluPDdJrAn7MN6NxvMDchMHUcQpeNjGrvJyoIAOojnq0IHr7C1jQ1+cYKambBNuSCKUZ0OcUQ", - "hq4/xcgiCQeI+me2zOi8s1Hf6FZ38SkMFSwvFmuGOsF2+M5aikEDHU4XKKTMB1jIOsiIQjAodoQU0u46", - "d+lPPgHGU1IDSMe0wTVfXf93dAPNsALyX7IkKRWgcpWGVTKNVCAogABpZ7AiWDWnC06sMcRytmSoScKT", - "+/fbC79/3+0512TGLnzOoH2xjY7798GO80Zq0zhcB7CH2uN2Erk+wHFlLz6nhbR5yu6IJzfykJ180xq8", - "8nbZM6W1I1y7/CszgNbJXA9Ze0gjw6K9YNxBvpxmfFBn3bDvp3xZ5tQcwmvFVjRP5IopxTO2k5O7ibkU", - "z1c0f119BvmQLLU0mrIkhSy+gWOxM/sNJv7Zcbjg9gBj0P9QgNgJfnWKH+1QMetIVb5csoxTw/INKRRL", - "Gea7WclRV0udEIyETxdUzEFhULKcu+BWHAcYfqnRNKNK0RkiKlSZtUjAyB27AFyYmk95tOIUo1ala1vI", - "UYG5oNV8Lst1yM0c7EHbYxB1ko1HvRqvReqq1ngROc28zQGXQUPeC/BTTzzQlQKos7JPF1/httjDZDf3", - "ekz29dAxKLsTBxG/9cO+oF+rbuebAwg9OBBRrFBMwxUVmqk0PpWzMEfbhwputGHLriUfP/295/i97dUX", - "pci5YMlSCraJliXhgr2Eh9HjBNdkz8cgsPR929ZBGvC3wGrOM4Qar4pf2O32CW17rPQPUh3KJYoDDhbv", - "B3ggd7rb3ZSX9ZPSPI+4Fl0GZ5sB6HEVrMsVoVrLlIPMdpLpsYsKRm+kS/dsov9NlZdygLPXHrflQwuL", - "A4CNmOUFoSTNOViQpdBGlal5JyjYqIKlRoK4vDLeb7V86l+Jm0kjVkw31DtBIYCvslxFAzZmLGKm+YEx", - "b7zU5XzOtGnpOjPG3gn3FhekFNzAXEt7XBI8LwVTEEk1wTeXdENmliaMJH8yJcm0NE3pHxKUteF57hx6", - "dhoiZ+8ENSRnVBvykouzNQznnf7+yApmLqQ6r7AQv93nTDDNdRIPNvsRn0Jcv1v+wsX4Q7g7PvZBp3XF", - 
"hJFdZqNIyv93938/+e04+W+a/Pkg+fZ/Hb3/8PjjvfudHx99/O67/7/501cfv7v3v/9nbKc87LH0WQf5", - "yTOnGZ88A/UnCNVvw35j9v8lF0mUyMJojhZtkbtQKsIR0L2mccws2Dth1sIS0ormPLO85TLk0L5hOmcR", - "T0eLahob0TKG+bXuqVRcgcuQCJNpscZLS1Hd+Mx4ojo4JV3uOZyXWSlwK730jXmYPr5MzsZVMQKsU/aE", - "QKb6gvogT/fno6+/GY3rDPPq+Wg8ck/fRyiZZ+tYHYGMrWO6YpgkcUeTgm40M3HuAbBHQ+kwtiMcdsmW", - "U6b0ghc3zym04dM4h/MpS87mtBYnAgP87fkBF+fGeU7k7ObhNoqxjBVmEatf1BDU4K16NxlrhZ0USq6Y", - "GBM+YZO2zSez+qIL6ssZnfnAVCXlEG2oOgdIaJ4qAqyHCxlkWInRTyu9wV3++uDqkBs4Bld7zlhE750f", - "n5+RI8cw9R0saYFDB0UIIqq0S55sBCRZbhbmlL0T78QzNgPrgxRP3omMGno0pZqn+qjUTH1PcypSNplL", - "8sTnYz6jhr4THUmrt7BikDRNinKa85SchwpJTZ5YLKs7wrt3v9F8Lt+9e9+JzeiqD26qKH/BCRIrCMvS", - "JK7UT6LYBVUx35euSr3AyFjLa9usKGTLEg2kvpSQGz/O82hR6HbJh+7yiyK3yw/IULuCBnbLiDayykez", - "AopL6bX7+0q6i0HRC29XKTXT5I8lLX7jwrwnybvywYOvILOvroHwh7vyLU1uCjbYutJbkqJtVIGFo1oJ", - "sepJQecxF9u7d78ZRgvYfZCXl2DjyHMCnzWyDn2CAQxVL6BKce7dAIRj7+RgWNwpfuXLOsaXAI9gC5sJ", - "2FfaryB//tLbtSMHn5ZmkdizHV2VtiTud6aq9ja3QpaPxtB8DtqqK4w3ZSRdsPTcVSxjy8Jsxo3PfcCP", - "EzQ96+Aaa9lhhiFUUwIHxZSRssioE8Wp2LTL2mjMqIBB37JztjmTdTGmferYNMuq6L6DCpQaSJeWWMNj", - "68Zob76LKvOJpq46CSRverJ4UtGF/6b/IKPIe4BDHCOKRtmPPkRQFUEEEn8PCi6xUDvelUg/tjwuUiYM", - "X7GE5XzOp7EyvP/s+sM8rJYqXeVBF4VcDagJnxGryk/xYnXqvaJizuz1bK9UqWmOVVWjQRugDy0YVWbK", - "qNlq5xdhQQoPHaiUF5B5DRa+sV0CW9v95gYsdoJdWK0CDEX4jotenvTHnyHgLLskPP7zWlOY9Oq6DnWR", - "ioP+Vq6wW6m1LjQvpDOAC58vGZQslRd2XywU0lXbxKIuwf1SajpnPbpL6L0bWA+j4fGDQXZJJFEZRM7a", - "okZHEoiCjC8nds3RM8zsE3uIQc1sBWT6mdBB7HxGUETbIWyagwBbRa7i3lPV8KJiVeA+0OKshSlRi4Ie", - "jCZGwuO4oNofR6iX6rnsIOnsGsu+bCtNdxLEEgZFUavCc/42bHPQjt7vCtT5qnS+FF2o9A8oK2d1L0hf", - "iG2HFCCaZixnc1w4vuwJpS6YVG+QheP1bAa8JYmFJQYG6kAAcHMwq7ncJwR9I2TwCDEyDsCGwAcYmLyS", - "4dkU832AFK7gE/VjwxUR/M3iiX0YqG+FUVnYy5X3+BtTzwFcKYpasmhFVMMwhIsxsWxuRXPL5pwuXg/S", - "qZAGCkWrHpoLvbnXp2hscU3hlb/XmlBIuMxqQmnWAx0XtbdAPJXrBDOUo7rIdD219B7NXYB86djBxFp0", - "dzSZyjWEc8HVgrHyO2Dph8ODEdhe1lwDvcJ3fXIWArNt2u1ybowKNZCMM7RW5NIn6A2Zuke27COXu0F5", - "uUsB0DJD1b0anFlip/mgKZ50L/P6VhvXZVN9Wljs+Pcdoegu9eCvax9rFoT7qS78119czJ+oG6mE17Us", - "XaVCIX5cYNXBfQoUtsmhAcQWrL5py4FRtDZjvZp4DbAWYyWW+Xadkl20aZYzUIKThmianMciBawuz+Ae", - "P/WfBcY62D0qNveCAELF5lwbVjuNfFzQpzDHUyifLOWsf3WmUDO7vrdSVpc/us3hw8Yyb3wFEIE/40qb", - "BDxu0SXYl37QYET6wb4al0CbIYrYbIBncY4L056zTZLxvIzTq5v352d22lfVRaPLKdxiXGCA1hSaY0QD", - "l7dMjbHtWxf8Ahf8gh5svcNOg33VTqwsuTTn+ELORYuBbWMHEQKMEUd313pRuoVBBgnnXe4YSKNBTMtk", - "m7ehc5gyP/bOKDWf9t538+NI0bUEZQDjGYJyPmeZL2/m/WEiKCKXSzEPujgVxbaaeROCpeug8tyWonUu", - "DJ/1BeEH4n7CRcbWcehDrQAgrzProOAeTDJnAsuVxM1CUdSEIf7wRmCru2FfaDsBIBoEfdZyZtfRybhL", - "1XbCBuSMZk4n0cyvb/ux7G6IQ924L3y6Ufl0+xGCAYGmuAkam3TLEPQwYFoUPFu3HE84aq8RjO5lXe6R", - "toC1uMF2YKAZBB0luEYpbRdq7QzsR6DzHlmtDGOvXWCxpW+augT8rFTgwWhENnfrtle62sC1//zrqZGK", - "zpnzQiUI0pWGgOXsg4agKromhmM4ScZnMxZ6X/RlPAcN4Do29mwA6UaILO6iKbkw3zyOkdEO6qlh3I2y", - "OMVEaKHPJ3/W9XJ5mT4wJVVXQrA1l3BVRdP1f2ab5Feal1bJ4ErX4bnO7dS8fPfY9dXyZ7aBkXdGvVrA", - "duwKWJ7eMqDBmKW/eqSDAtZ3dKPEP6iXjS3cY6eO47t0oK1xTRn6ib++ZRpNC5pLucrBqIMkLCxDduM0", - "HptgTw9rIr5Nyrs2gWe7ZZBA3g+n4tq3sOxeRVUtil20e8Zo7okXljP6OB5dLRIgdpu5EXfg+k11gUbx", - "DJGm6BluBPbsiXJaFEquaJ64eIm+y1/Jlbv84XUfXnHDmkycss+eH79448D/OB6lOaMqqSwBvauC94ov", - "ZlXYxmH7VYLVvp2hEy1FweZXFZnDGIsLqOzdMjZ1mqLU8TPBUXQxF7N4wPtO3udCfXCJW0J+WFFF/NQ+", - "Twz4aQb50BXluXc2emh7gtNhccM660S5QjjAlYOFgpiv5KDspnO646ejpq4dPAnmeg2lKeMah3CFK4EV", - "ueAfenDp6QepGszfZSZGg4euT6yyQjbisSdW2/evbAtTE4KC1x/zP+xpvH8/PGr374/JH7l7EAAIv0/d", - "76Bf3L8f9R5GzViWSYCVStAlu1dlWfRuxM0q4IJdDLugj1fLSrKU/WRYUShGAXl0XzjsXSju8Jm5XzKW", - 
"M/vTZIiSHm46ojsEZsgJOu3LRKyCTJfYMlMTKdox1ZAEa0kLmL1ryYDO2O4REuUSHJiJznkaD+0QU23Z", - "q8BgSvsygZd7rLV2xJL3xOaKkgdj2deG1ExtARnMEUWmjpZtrXE3le54l4L/u2SEZ1armXGm4F5rXXVe", - "OYBROwJp3C7mBkY/VT38VewgW/xN3ha0zQiy1X/3rPIp+YXGmv7sGQEezthh3Fuitx19OGrGbLZFMwRz", - "mB4zpHW6Z3TOWdczR7QVOtfJTMk/WdwRAv6jSCEM7/jkYOb9k4lY5F6bpVRO5bqjez37ru0erhv3bfyV", - "dWG/6Krr2GUu0/ip3m8jL6P06ni5ZofkPiUsjDBopgb0sBY4XkEwLLRB8dFHVOB5wioQjQyz+KkMczmP", - "cPz6VDqYO/mvOb2Y0liPGKsLWZiC7W3ESRlJ/Md+A3RV4wBnJ0EEd/Uux0pyBVO1D6JblfaSeg1OO1ij", - "qRUYoKhQdRljmEKuZWSYUlxQgV3E7XfIr9zXmqEL3n51IRXUgdTxkK6MpXwZNce+e/dblnbDdzI+59gg", - "u9Qs6MDsBiJYbBKoyHWxrip3ONSczMiDcdAG3u1Gxldc82nO4I2H+MaUarguK3d49YldHhNmoeH1RwNe", - "X5QiUywzC42I1ZJUuicIeVVg4pSZC8YEeQDvPfyW3IWQTM1X7J7FohOCRk8efgsBNfjHg9gt6xqcb2PZ", - "GfBsH6wdp2OIScUxLJN0o8ajr2eKsT9Z/+2w5TThp0POErzpLpTdZ2lJBZ2zeH7GcgdM+C3sJrjzW3gR", - "6A1g2ii5IdzE52eGWv7Uk/Nt2R+CQVK5XHKzdIF7Wi4tPdXtlXFSPxz2+nf9ojxc/iHEvxY+/K9l67ph", - "NYYue3K2IEr5FfhoQ7SOCcXinzmvI9N9v05y4msLQwOtqm8W4sbOZZcOsiQEqs9IobgwYP8ozSz5h1WL", - "FU0t+5v0gZtMv3kcaUTV7NUi9gP8xvGumGZqFUe96iF7L7O4b8ldIUWytBwlu1fXWAhOZW+gbjwksy8u", - "dPvQQyVfO0rSS25lg9xowKmvRHhiy4BXJMVqPXvR494ru3HKLFWcPGhpd+iXty+clLGUKtYwoD7uTuJQ", - "zCjOVpAxF98kO+YV90Llg3bhKtB/2vgnL3IGYpk/y1FFIPBobkuWt1L8ry/ryufgWMVMxJYNUKqItdPZ", - "7W442nA/q1vbf4sBY/CsB3OD0QajdLHSE32P4fXVN58iXqgNEu55w+D48A+irA4Ocvz9+wD0/ftjJwb/", - "8aj5GNn7/fvxAsRRk5v9tcbCVTRi+Da2h9/LiAHMdy2sAopcfYSIAbLvkrIPLBOcuqHGpNkh7ualiMPk", - "d8WjTeOn4N273+CJxwP80UbEJ2aWsIF1lkL/YW92yIySTFY9D+LcKflerocSTusO8sTzGaCoByUDzXOw", - "kk4H0Ki7fme8SECjdtQpy6VVMsOmQKE9/8vBs138eAu2S55nv9a13VoXiaIiXUSjhKf2w99RRm9cwcgq", - "o31GFlQIlkeHQ932d68DR7T0f8mh8yy5GPhuuwMtLre1uBrwJpgeKD+hRS83uZ0gxGqzbFZVliGfy4zA", - "PHVTi5o5dls5x1poRvKbYdhlaVzcKuSCu4JDM55DGGbcbwxvJoqangJa0O/c9xey40D7cY1mBhydKUL5", - "Ei5mTZdFzuBkrpiic/hUCtb6HEqowchBxwqiC/sI3oSCFZKYUgkiZ7NgGUwYrli+GZOCao2DPLDLYmuY", - "e/Tk4YMHUbMXYGfAShGLfpmv66U8PIJX8IlrsoStAPYCdjesH2uK2mdju4Tjekr+u2TaxHgqPMDMVfCS", - "2lsb+0lWvU8n5EeofGSJuFHqHsyVvohws6BmWeSSZmMobnz2/PgFwVnxG2whj/0s52Cta5J/1L0yvMCo", - "r+zUUzln+DjbS3nYVWuTVO0nY7UJ7Rt1g0zeirkBO16InQl5hibUqoE/TkKgRLZasizodolKPBCH/Y8x", - "NF2AbbIhAfXzyuGNWD07qz03QfZh1f0IGLaF2/VixVasYyLNgqkLrhlk5LMVa5ZDrGqDOtu4L4/YXJ4q", - "hUBKmewhjFa9jvZFuwcOJVkfVBCFrIX4PS1T2I953760p/BVPBej1eS25fX3xfV8iW3y0jkXUiqk4Cm0", - "QohJ0lC6bZibckDXiLh/UY/cCY0crmhr3SoX2GGxt9muZ4QOcV2Xf/DUbipSB/5p2Nq1XJszox1nY9nY", - "d7p2DjEuNHPdrCwRhXxSqkhQUzQRogqg2JOMoCpTj4XzB/vslbN/Q1GMcy7A0uXQ5vQzdFnlmoNnWhBu", - "yFwy7dbTzObRv9lvJlClMWPr95MXcs7TUz6HMTCMzi4bY0a7Qx37CFIXsWnffWrfdbXzq58b4WA46XFR", - "uEn7+6BHBUmzFr0IjsUt+UCSALnV+OFoW8hta+g33KeW0NgKotZYAfdwhzCqXtrNUZ5b3RIpCt4gmFEZ", - "LaDLRQSMF1x4F2r8gkijVwJsDJzXnu90qqhB3WEQTztjNO9JgIAMZfTBX3WoducAixJYo5+jfxvrNuA9", - "jKN6oZb4qdgQfygsdQfCxFOaV6HTkabeIFU5ISqD5KJWm+8Y47CMO/Epkw107Uzfqz6Hbhz73kR9NQqn", - "ZTZnJqFZFitt9T08JfDUJ4mxNUvLqglVlR3YrFHepTY3USqFLpdb5vIvXHG6oG9+hBrC3v1+h6HSznQD", - "/8Y6MPXvjAua3jsr10dIZ/sV5u9mGcekXkvTiebzZDgm4E65OjrqqS9H6PX3B6V0n677WWTjtrhcuEcx", - "/vbcXhxh4d5OfDpeLVVdXYgFl/DcFzyqKkI2uRJcZZ0+YxD1AJsX2bIW8P7FKOArmvdkwoe+Erxf0X/Q", - "lw+f9pZvoMaV5zKUbGVBvSWPMFa45X3puhD74oMxPPhwXgu31q0I7ffd/dzw1GGMWM0sej10l3Oi1Ru8", - "rxft51VfiQTfpwOeh/1AXBTP2JWBZysuSx995WOgvUqIv7oSPI2+Hz3rj2YWfGqvRa+P5cz1r8VlOp38", - "51/RC0uYMGrzGXhcOpvebioTkXbRPFW/QqrWh4NaITZuxSE9bGLtUpxs6G1lyFoatNRpP9Mhq2dDxIEO", - "Pj6ORyfZXhdmrOXOCEeJHbsXfL4wULH/J0Yzpt7s6EhQdyGAI1ZIzesOpLkdzJWAXcBwk6HJBpaAedhR", - "oTuWD0JdsdRA29k6uE4xtk9/BTuZd/rcdiboV6ernAzXkGBbF4Jur9kdd3yncFJQ/Av7dE6G19w/rkKo", - 
"MQPsguq6XEsrZ3pw5uZsxlKoiry1UNU/F0wERZDG3i4DsMyCulW8ymOCut77Wx1rgLbVkdoKT9Bf58rg", - "9OWxn7PNHU0a1BBtHFol8V2mcDBgAF1gvoZ0nyHZRY1xXVEGYMGHBLtSzHVzjN6az0HZtUvO5UnSXhx1", - "KbYtU8abng+ay366V9lHSMnpq2XV7Zncr388gxbV2gXI0arwcKilk5Nu45wLV7gYyopVvhNfwphp/5uv", - "IYiz5Pzc9Q8ArKCn6oKqzL9xkKJQeDfxONCzamZeJ3B0gxwirRggFyrNpRUjkr6EsmbORBVweEdjZGhd", - "wAfgmjGlWFa5RHKpWWKkT/jYBsc2VGD466WQoHvbHyFwvaWv39a1vaENHIVS19RFvYYLJIotqYVOBRW4", - "++fchuyn+Nwn4fs2YDstTBW97u5H61N3uO4gMaT6GXG35e7k/ssYm7gQTCXe89Quxy2aFdmg7mZWpnhB", - "hwejMsgNrp2zhZVE7TRpd5UtHSFIkj9nmyNUgnwjX7+DIdAoOSHoQcHR1iYf1PymY3DPDwLep60jV0iZ", - "Jz3OjpNuDfE2xZ/z9JxBDcAqxL2nRzu5Czb2ypt9sdj4mtlFwQTL7k0IORaYVOQd2832gq3JxR2zbf41", - "zJqVWNbfGdUm70Q8OwMK7qsrcjM/zHYepplldVecCgfZUaF6LfpCbi6gOH+zi+dkqFbedTW3u8jXRIVQ", - "xGSSU/RYPYWDHjMcQQmEoFYHODIpcZ4uonMZi+W9TJkGO1QcU+FkAJBhYki1gAoKN3gUAdG+6JFTiKXv", - "XNE7OSOK1U7ky1b/67Zwj2n07ZmrWZr8biYVazRjt19jpc8q8QXKaMJ/ptwoqjaXqdHXaSHfsZ70Ynln", - "OFYViVUvpI7G6uIwz+VFAswqqfpcxFRb+55uXsa+6Vr9nT3VUxbEdVHtBLUNWdCMpFIploZfxPM9Eaql", - "VCzJJYR5xTzQM2Pl7iUkeQmSyzmRRSozhv1i4hTUN1cpBAWxiQVRNVEUIO1AtjB+E9DxwCntnYp+pARE", - "rfkevfNThpnrdVUnXHSCvsyeiGWmXRUnhyF8uQvvlt7/e3VqOYEwxhWHWJdm0j5Kn4W9Y6pKBuGZOw3L", - "DBGzULKcL4KCzuSC57k3GNhtUKVTQMNRftElhCNBxpad4jFZSm2cZocj6WqoOsTrbiqFUTLPm0YgFInn", - "zrL9kq6P09S8kPJ8StPze6BHCmmqlWZjn8/cDsarZ1KtUl7NCy/B9uG7S+PiexCa5ohkMENqsZS9G6kH", - "YL7fzbF227iPuwtrr6vJvOJqw7Eg1MglT+M0/GVFt/XGpMVYQrRGGPYyxKoO8Bow6vByqIIZgCV10cwE", - "jTZjOyaOpzmnLjAP+1+QeNvjkhlzl0TPxdTlk05qSdJe2aoFAECKqcamVNgAMZR8Kq4i51iaAFzSbUAH", - "cnGI/LkabHaEgwNl2JWA6kQbVgDeRWV/jLXcMHJxKtf++b262NulgP+4ncobzKMvpOq0Ji2FQVW+MEwP", - "R4iXlN4af3QGaebToVFIVbPagTdqAEB/XFIDhkHRSfuCMaM8Z1kS63V4UtmExoFm61Kh2i3IuXacPKWl", - "bzVoxy4Vc4VKUKRWTX9TQS0pyer1ruVWZGzNMI/iT6Yk9hAcB/4OlmOLwZbyLYskZyvWCNdy1VNKEO34", - "ivlvdfUxyRgrwPvXtknF4pDCu7xlqHBrT4JIliHYjVouELG4U2SHWSJqRFmLBI+JHnqULEQrnpW0gT+9", - "r8jRNLvZoxxBVUcmT7zeNnSaX3CEt36AY/99TJTxmHg/jA/tzYLiqNvGgHbGJZa679SLeFhiWBqocmjA", - "bFnl+EQSr/mGLuiF6DcAdkm+Vm8G7hOXIkDs8zVLQappxt1dHScEBiO6VfarVwRX1Q5f3pD8SWh4Kwn3", - "jhdTNTRziWpbLDWeLpzADi9A02lhxV4rNUM7Qcf/Hf8bk2npB7J6NXY3DDW4Z8x77KASeeWscAItry40", - "H184doUo20o5DyKrl3RDpIJ/rL7275LmfLaBE4rg+8+IXlBLQs5FiL5rF69oJ94umIw9YN4uIP1UuG4+", - "dMxguI0dJQDaXoG+DY0kS3rOwm0AtzxyntRYlqPL6ZJrDZddazu7WHCL98VEljQLdWQoadhs+O2L3Nqv", - "/586ayucylciK3Ka+l6WrplOwyCO/Wo9cZkFW25P6+uqx54Eqh64NdEqnweeXcK4t2fkRixWvq9RSAPs", - "Tm/QTo+UKy1joI2y1Q1iS0LkoKUceheGxod0gA47Cu4CP2yweDP4j1Yb7VvGEPA/F7z3tFQN4cXuqTeA", - "5UatiAisaFedynWi2EzvCoVAw6pVhFVdZcIbJ7lIFaMaY0NOXjuVrS6myYVVITF6sfK+VaNkbMZFzSy5", - "KEoT0QCgpqbYBAgLzdOA1h5nT5+UYMWwFc1fr5hSPOvbOHs6sPlg2MzAm+TdtxHlv7pTuwNwXWs/kEnI", - "6ky14DV7gWO7JAws1IaKjKosfJ0LkjJl731yQTf68r4PC60qrXyxw/tBA2mmmd8e+EGAtBGQfOPcl1f0", - "TFQA0gO6KAa4FiCCNeJWQKOIkT2ehC4M8XocdJ3kcg75ZT0E6KqWgu8HlRUpwGCL8tB+82j+J9s+DRRs", - "dwffSJh1yBTbz9lrQB0oPL8IbraeNLSmtRP+MCITD4KnfzGvw8Jxc7r0H8vRdGU5wjxNL9z5JAa/1xge", - "gvOxHk9G04Lbs4vgIHcJvqG5dngjrKYPPpYJijpsArqt3hL4zXQd5ExTF7jTNfp0lGJEytjl0e5pE0JL", - "sr8HesDDruXubDWnrYIp7Dj7dA/bnjmbFLJI0iHRgNjTIXMGbQdpE8Ye+gjM1T3rrgIndNXlpFERp9Hu", - "ZN8Gar3tVnb5ZYp0m5LdZ9Do4aBNY7mcAS/Dnt5gh4Ecj8p4MW5nHzUNNhWTIJQolpYKDJoXdLO7IVVP", - "LeHTn46/fvjo90dff0PsCyTjc6bretSthk51xBgXbTvLzcaIdZZn4pvg89IRcd5T5tNtqk1xZw25ra6L", - "TXbaWe1jCY1cAJHjGGkkdKm9gnHqoO/Pa7tiizz4jsVQcP17pmSex/sBVKJbxNQf263A2G8l/oIpzbWx", - "jLDpq+OmjpXVCzDHQVXYFdYZkSJ1ZfsrKuCmJxgntpC+UEvgZ5D16/wbhK2L3PEq9ElsW5fTi9AiBsEZ", - "EL8xZaSQhROl+YzEIILcEhXkXDpDI4R3BtGTFbPFOMoYIbqY5Djpha2Ut3P7ZptPE+f0dhMj4oU/lJcg", - 
"zT5Len9G+2U4SW1K/2z4RyRF/2Bco1rudfCKqH5wuXbtg0DrpmtHyAMA6MnDbGTQBSlEQYlahVZ5sN97", - "V2db/HhZu0B3JgwAJP6DHeCFiZX1e1WMuwPnE9d6fVkhJVjK+z5KaCx/V66mZ73VRRJskTNSGMM0siXZ", - "FQuDRFz9tMpv7dFKOmmwSkpDrGaa55H0WbSbwJkKCceqBGpF85vnGj9wpc0x4INlb/uTZsIcyhDJiEp9", - "uQpuL+iguYN8ycNNLd5Ayu4/md2j6D3nhnLu4s5tBlYv6GU+97cCZgGTCxgTw4EefkOmrg1DoVjKddsN", - "feGFkyplkCk+c6GXbG125CjuWuev0lyBjGc+ZoS8CtxJEsx2NYT1Ef3ETKXn5EapPEZ9HbKI4C/Go8K2", - "rTuuiyuW7L9cQZCgtNeeBUG6DWmHLg+LXthLp9Ssu87Bt3UDt5GLul7b0Go2gyv/v3v3m5kOKUITr9Jv", - "P4cqOAcp179Xsf5rqH+DOHJjuHljFPNrX0VUrPrZU7W5tR8lz3cGiDRqcH8cj+ZMMM01VJn+3XUVudm7", - "1EOAOfndo4qwXqWQCCImstbG5MFUQXXtAYW13WeRasiQ75aWipsNdJT1BjT+e7RSz49V1QdXNaTyXbm7", - "z8hzVnX1rmtElNrfrj9KmsN9hC41YW8hmU/Ic6z97A7Kd3em/8G++sfj7MFXD/9j+o8HXz9I2eOvv33w", - "gH77mD789quH7NE/vn78gD2cffPt9FH26PGj6eNHj7/5+tv0q8cPp4+/+fY/7lg+ZEFGQH3R9yej/0yO", - "87lMjt+cJGcW2BontOA/M7s3oCvPJHQ8tEhN4SSyJeX56In/6f/1J2ySymU9vP915Dr3jBbGFPrJ0dHF", - "xcUk/ORoDknhiZFlujjy80Afuoa88uakiibHuBfY0dp6DJvqSOEYnr19fnpGjt+cTGqCGT0ZPZg8mDx0", - "TY8FLfjoyegr+AlOzwL2/QgqLx5pV1T9qCiwrPrH8ejI0aH7a8FoDuVV7B9LZhRP/SPFaLZx/9cXdD5n", - "agK5BPjT6tGRlziOPri8+Y928qgrDqtsB6WVffOtopzmPPUVqrhGGzGGe+uwtSgaz0s9JlNsPusjSkUG", - "UT+Yiq7DBswnmcUlfn5S8zPfPxdctaMnv0VqGfk0BN/WNYzjCiK8/s/p61dEKuI0nzc0Pa9SMHzOTZ1n", - "FKbc2C8nnrT/XTK1qUnPMcXxqO7/zkS5tPzF5XIs9bxolvWsBa6YQaiDaz+zpZiA5qsqFzVPA+tfAEnN", - "oS3XfZB8+/7D1//4OBoACJRc0Qy6/P1B8/wPtKCxNYR5toJZxn1hRuO6agJ8UO/kGIxV1dPg8/qdZjXs", - "P4QU7I++bXCARfeB5rl9UQoW24P30IcOiAWO46MHDzwPchJ+AN2RO1Ojgd3+fQF4dCBUo3iSuMRAXV6F", - "j95WhREVLfAsuieYxOlcOPjSxLKkxwdcaLN845WX2x6us+jvaUaUS16FpTz8YpdyIjC80t45eDd+HI++", - "/oL35kRYnkNzAm8GTV67F80v4lzIC+HftHJRuVxStQGpx1S8sN2VhM41+E2BReLZDmpvifno/cfeW+8o", - "jCM8+tAonJNd6U5ER0qjp8+Oa/KO7uOcMBamSLkf7h4XBYRRnlbPj4sCe0ZDqADjcPuxNddG35uQH8Ov", - "G/4PhATdH97SYm+9qoWyb8zccIcHzRijl3YjJf32/v609/dx0w7CMyYMn3GQyWPANE7BVpg6AUlXvUC7", - "GStBgZx9Y4yr4shOtEhc462BY7gW7IfrKjegLgbO9D6mJe5k1Le468Fdn5gUwFtJTHVLu5thzb7OanWT", - "NK6Ma2TcX7jQ95Lmlk6C5bb6mZw8uxUG/1bCYFWPcY7SWVEcQDz0yRC7Xjn64GoMHkJqBPV4kLwYat7B", - "t0E8+90Wx7k3Icftdy7HVlyNxp2SoH3vVgb8HGRALHK5S/pzdPxJ5b4wlWqfzKaGwGJ/H/TxFy7o/Y2R", - "1SvZWUh3y3SXYJ8dec0x62tjq39JOc0h7VZC+1tLaFXl5CvJaGF465HL7A8ktisZ+NoGPG4qSaxZPTvg", - "bFD8AnLc8QiP61B+y2IwRtlFJ+uxVx7BGYt6JW7WuKNadkWsH1mow36/OXm2S7r6gkxBg5vgRm6B+N5c", - "Ny+Neibe3oxnYhhvevzg8c1BEO7CK2nID3CLXzOHvFaWFierfVnYNo50NJXrXVxJtNhSVS7NHtoGj6qq", - "Yo6D5/ZtjPG4C1m0zbZJ9ybke/dqXVnDZYnPpWVUPhuMqjl+ZHmdRQa54/98AuPfmZAfIMfR6DGEqkHy", - "BLzIhXny8NFXj90ril5gJFj7vek3j58cf/ede61QXBgIGUA9p/O6NurJguW5dB+4O6I7rn3w5D//678n", - "k8mdnWxVrr/fvMI+q58Lbx3H6u9VBNC3W1/4JsW0ddf/difqbsTD/71cR28Bub69hT7ZLWSx/5e4faZN", - "MnKKaGXsbHRiOeBthMdkn/to7O4fSNSoLpMJeSVdU6wypwprrkBBV03mJVVUGMayiadUyLLT2AQozTmU", - "B1BEM7ViKtG8KpxcKlYVBikUW0GEfV1ytAHBbkYPcbifLZN/SddBavy0uqaNdEsGs+eSrgl0eTBEMzPG", - "qmRr8t135MG41l7y3A6QVIiJMdclXY9u0OpXEdvQUjvPHHak2h3eC2MPsSDV0k9V7bBWNf7unPuLldyR", - "3N3GHohz7u34qR07oR3BtZ7aakFAwc5AbV5dFkW+qauyWinPi1BxFmdnGGoc+Ix9BDtN01EltI3e20N8", - "awS4EitpE9SebANyVvXRB9DLQ57RObeQc/f3cpcGviMll955JMmMmXTh0n1bqI+wJ+VSDvt505ILvrRQ", - "Phhfu1QDu9itKRx2/s0oJtkPaS4VZGKCA4+pCBG/9r3w7WM+w0Ljvv2ErxAIrilXq7lqt4nKNzbgdSH/", - "Piu4oI32obuhfFpP3hXIAC2H8H/eIng/BHeY43NX0QCPl1vEXyEpwKuSCXkl66Rz1KD+kq7H67zZr3tB", - "r6Rg6GO3ki/S4q07tRI7LONApPhqI6i/1M2eLiuCHPkqPVvlkJ/sSztkkSG3N1T8+RKv8J+itYwat4xd", - "22RnKYV6tCHM2b6IPQbCYieTT6nFfBJ++hmqNp+CY90Mi4FD6vmMEwvEYZkOFPBBYj6qWs73caAX9uVA", - "LsOaRoO5kZFVGBqLVA4iU5ZLMdefJyvaRh1xvESoBOtUYauSzvonf8Oz+9T1EfGt3F21KM1FyoiWSwYq", - 
"g5XRobcFBks+fvCPm4PQ8KXv2yzC9NZPzF2+fvDVzU1/ytSKp4ycsWUhFVU835BfRNUv5CrcThPq9jy0", - "BkeYAxfgbWpWFUvDEkhXYIKub3rcauzs1nVdRI1ylSwNU1gRr9VQiXeYdMweDAzjhZ36APJcLudfmjjn", - "sT60kPNTmueArl1OJhh4UJRynuN+siU3pm7DEN6u5DlNF9XejmvrXtVmztcPH7cqTsLIrucYpvprZvfZ", - "MBKsJrBWMIUNqg30QlpSCFhelrnhRd78purDCN11ImFISJthYf6TZ3516JyVs3roNv36auNu8Imd2z2C", - "mYXExVHFgHdXtpVWw6ZJA2jsM+XDr4PuQK7HkStmyFWrumQdO1MUjKr6Y6T8u4ViiRtC0RVTmsJhbS3q", - "3q2o/nmI6mtXzvgzEdSjPsqr8vrLX0WNKOoPZs2zj7vl8qAi8J4iOReBSB6yCzxrl5fFd0dRtPtcnzwL", - "E1VkVTPLCwg9oFgU7Zmr9b9GA10gUKRFzpweVgoE1JexdBKryyKRs3EVp2kVUjl7Qt6J+0QvqK+y7P58", - "9PU3PU4cO4+rPtd149QD2cc4zBBfzhftmTqsxFHh98lN7/Z+mzge8WwdKSUvMrYOupc0+/C6+/COJgXd", - "+IyOTjXFIl5RuVJMw2GXzF5TesGLm6/aqw2fxsuWe0tc1c/9RHxfGWSxtKyVGopPUa11PDKKsYwVZrGz", - "iDO8Ve8mc+WcuXaNd7DU7pjwCZtgJdq6IVk2Z+5ioiRndFZ1FpNySB5fwGcsoXmqCLAeLmSIJB2lH5B5", - "gShv3k5a57vhReeR1xaKP6kQZj6VEJa0pLAmWj6dTAYtG8ZB5FWhpJGpzDGMsiwKqUx1uvVkkOWB9Ql6", - "DcNDH+FeSZhb80zvdOmcwVsHsAE0KVt/MS6dM4+mmE8ntqhLlpat5xrC0s5kQTp95C0In5Sv3SqVMX7W", - "cv986d4f00t6B3YGpdSki7I4+gD/gdK6H+ucXWg6oo/MWhxBW8ejD1uja4Gl5lY2UdivpGHS7TSJjMbI", - "voDP694oP0jVbsC9M3q2hbRx+9LHFpUQhhthj9ejTf6tlbCtrrPWhl89GiQyYue8ViUpgkZ7Fe0GHXd8", - "lQlssxkh4dvopc9rQbU/ccZFRmiwjS1bU9UK3+sA//hiF/0pXJQ3H7L19Rd8zl5JQ06WRc6WTBiWXS3w", - "nbQ5nL89tl63+wkG7urvRsd37/zwxvc5PZUssvOC30PvCaoYMT8dVVBWyN7V16Pu3N7kn/dN/rTytoZk", - "eHsvfzn3svKZSLdX8Od/BX/1xa7mGmOYBl7Jl3AON6/hWhPf80LuCAPOhtUyHGzzK4Pq3V6l/kEq31fu", - "9hb/Qp2iuJODA7GGWGh2WWLdlIfIOvusoB9mZ8jziKWh76COq1gvDvUaZcqhO89JpscuqAyNE+4U3wo+", - "n7XgE+z1rdxza3r4wkwPPVKO0/rzfIigsa8AtFrKjHnHqpzNXH3kPumn2fTRkqc2dFkQ/HLSG4d9xpfs", - "1L75Gqc46BVbg90Si1rgWWRplkqR6QFRHG7Uy95D4GjqB+DGPZvVDnhYXOWkyaVJ9m1QfrFDCaSNfA3N", - "On2daIeMjK2IJcDJAcj26AP+C+a0QurIak49AXc25q7bFix8jeM2ACRvQAjFCtr+KzkjD7D+dSkgyb3u", - "yk1FRozaWEHVl/tTjOYkbSS3VnB0T85p78nZqQp0VtezprguIOsTesgIhlZhgZ9v/AA8pcKRfBdBRhJK", - "BJtTw1fMu/wnt8WoLn2buVJQWxjgmNAsw9NYbwJbMbUhupxqK+uIZo7SHd08L3swDLYumOL2iqZ57YBH", - "NeEIK01tiyM6xTeueGm1eBHWt1LNqEV/s7rqV3JGXvJUyeN8LqtYeL3Rhi07Pa/dp7/39CvwhoRuzKoU", - "ORcsWUoR68T8Gp6+hIexr6FaV9/HZ/Zh37et+7YJfwus5jxD7uSr4vczOf1XCnRprVaxQiqr3U43mH8B", - "9L/nUfKHZiPS7knaiDRwarmHwUBhc+bGz0c+HaHRqjn65ofGn64inXtTL0qTyYtgFrABYDjjkGJUIHzv", - "meRR29ya2ZNcX6/V7Tq9TQEeYmerehppwVs/7O/C+zdNwnbOmZBIXE7jiindUuRuM7H/UpnYg/d9L26M", - "Led3cbRSH1Z2eSUzhuPW6bj26MeaoAiZMdcZvyuyVGGR8ZQhf3/V77WSOFJazheGlAUxMpYuUn+Y0BSZ", - "bIKKUHzCoOwwqksw3YKuGKG5YjSzyisTRE7touubFBZJNRR+9jknLvgzKjQFcBVKpkxrliW+6csu0Px7", - "GKputuAJAAeAq1mIlmRG1ZWBPV/thPOcbRJQhjW5+/OvVrW+cXhRaNyOWCw3G0FvO+26C/Ww6bcRXHvy", - "kOwwoRupFlLk5LLImUuSi6BwL5z07l8bos4uXh0tkEXGr5ni/SRXI6AK1Gum96tCWxaJvb+7ID7Fp2d8", - "CZKYoEJ6C2RssJxqk+xiy/alcC3ariDghDFODAP3qKYvqDZvXb50BmUe8TqBeVDGtlP0A2xvUdQtIiP/", - "ig9jY6f2PhS61MSN4HOgWBZbg2DrLXO9YutqLqid4seukqzQFrhr5D4sBeM7ZAWdbwg1gd/fDhdZHFgq", - "qTNldFHZAKJGxDZATv1bAXZDh38PIFzXiEbCgUr+IeVMpcwZFZirKovCcguTlKL6rg9Np/j2sfmlfrdL", - "XFgLA+/tTDIdJsA5yC8QsxpMuQuqiYODLOm5y5Gbu06mXZjtYUygzFKyjfLBuGvfCo/AzkNaFnNFM5Zk", - "LKcRo8sv+Jjg420DwI578kxW0rBkCjVS4pteU7LqNSZVQ0sYT8eERwJPSGqPoFWeawJxX+8YOWMwdow5", - "OTq6Uw0Fc0W3yI8Hy8at7jFg2THsjjt6AJAdRx8CcA8eqqEvjwr4OKnNB+0p/otpN0ElR+w/yYbpviXU", - "4++1gLbhL7zAGjdFi723OHCUbfaysR18pO/IxkyNX6RboB3ldI1Jdk1Ta6AATi6j3B5dUG6SmVQoSCd0", - "ZpjaGTr/T8q949yn70pXdYXACO7edOMAkw/7yTkugiAQd11YEnGVpOwdRslDsuSiNPhElmaM5a8Vo+nC", - "Cu2hDRZHgo7ArkiTYnOqshy6xc6qe1MqLPpkWhc8AB3JR2xq/HbdP0g1qKh+s3Qk5YaUwvA8aCxU6e2f", - "n/Xy1iJxa5G4tUjcWiRuLRK3Folbi8StReLWInFrkbi1SNxaJP6+FolPVSYp8RKHr9gopEjawZS3sZR/", - 
"qary1VXlDSRgnbig3Lg2+b5KQb/dYg9DkGE0BxzwnPVHd2PQ6dnz4xdEy1KljKQWQi5IkVOrGrC1qZo2", - "T6lm3zz2qYZ4ddIldn6H+9W+8NUjcvrTsa84unCVMZvv3j3GeDWizSZn91xbNCYylER9fzQmLNJdezTq", - "rwTf3Nm1uuY5RMZr8hzefsZWLJcFU1jMkBhVsq7F54zR/KnDzQ6Dzz/t5C7U9g872h/jhtHLoW1JCy/m", - "+7VSTShmXJJnQQ7mHzOaa/ZHXxomjrekRay/cnXxoSkImMn3Mtu0TojdtSPYwObZqOuOckHVJlIlqpsC", - "0SYNIy27coTVtWV9PHh13C7RdslsF4XFpHUsgx8fvY/Ko2Vhqw3rDIWJurMWnYxiOabtWqijCsBBhQEh", - "TQL3hLzF7z5tGUCAyB2xmpl/NlGMzTcrpgHvWiXCsZ4vNZfAIz56euHsjy1hZ2XKCDea+AK7u6+X8Wid", - "2JHmTCSOASVTmW2SBvsaNW6hjGuqNVtOd99EIf+EE1ddPvbJ9nvq01wjz4LFbePJIdGsE8eAe7jzxrDB", - "vLnCFozo2HOA8etm0X1sNASBOP4UMyq1eN++TK+eZnPL+G4ZX3AaWxIBF64geZuJTK6R8amNKkU/z3u+", - "ZmlpgQtP8l2wzoNLjq1Nw8masWk5n1ttoeujgzY6MB6X4hOxQlzuUC64HwXh4FW3/KsmqbeH63KXIG/8", - "rq/MeA+2g4oNODOWBRUb7/JliebLMkccYlPpwzJarBkeKzFd2/76rNpvvMkvsN26q7b5O6KFXFBNcH9Z", - "RkqRuYynTm3rtRhe5wSHPluLmk1vrWmC642szs075Irwu9xMNdekYCoxa4EHqnGYXAcDPLmftJb27bVx", - "c9cGJqqzHgbbrcZfM4QD3R4q4GtwfQQ9l+rEvEYnJtpMJ2w8A4tGf4pL2JwJ3zxoYEln+GZ8SW1ucf5T", - "lheEkjTn4F2VQhtVpuadoOC/CRY26caeeEN1P+976l+JuxAjHj431DtBIcio8upEeeCMRVwYPzDmWawu", - "53OmLR8NCWjG2Dvh3uKClMJqYXJGljxVMsHUWnu+rOwywTeXdENmUNFEkj+ZkmRqb/1g19GWrA3Pcxfs", - "YqchcvZOUENyRrUhL7nlwHY4X06hCjlj5kKq8woL8V49cyaY5jqJG2Z+xKfQDsct3xsAwZiJj+s2Fjfb", - "B8fDzrNeyE+eQYwaVGPOuQ77L7ZhvzHf+JKLJEpkZwtGXLhYm7bIXagB5wjoXtNxZBbsnbC3n5EEOD41", - "lyOHtgeocxbxdLSoprERLUeRX+sg9e8gXIZEmMyt2+UvlEIa0IH3bMLGY3391t7v6WJpXLkMWoP2Xcj4", - "1LVP7HnJKRANI1mrwI1746wB8lb/xZdfVvLwuqRH48G0ye6AXXbVbJAHePMbPiY0l2KOdRWtdilhn7go", - "SgMB4NdpwGMrmidyxZTiGdMDV8qleL6i+evqs4/jEVuzNDGKpixBi8JQrJ3Zb5BOodGg4IbTPAGteihA", - "7AS/OsWPdtzHQbfR5ZJlnBqWb0ihWMoyLETGNan1+QkWaCDpgoo5XN1KlvMFvobjXDDFqsaMVoVuDxEv", - "BLMWCRal68J47Bo1h3V7GU0XkcYxcMFZnd0TVNboSTVwDxolR/uU9PGoV9C2SF3VoXOInCabGSBFNOSB", - "AD/1xIeo0XpL9LdE/6UTfaykIqBu1rJWIL7Cbblms9Z1FxC9QSvZJ6kufFui/69eot9zIE0oUbShg8R7", - "w1FNuCEXUBZpyoi9v0qwzruGe05fh0y74Ki7SpvatedLF5QLV1OnymsAOKxKvFxyY3x72msxbCIzA4um", - "RQdLS8XNBrQWWvDfz5n9/3sr9mumVl6hKVU+ejJaGFM8OTrKZUrzhdTmaPRxHD7TrYfvK/g/eF2kUHxl", - "9auPALZUfM6FvXMv6HzOVG1CHD2aPBh9/L8BAAD//3ZeiiwHvQEA", + "H4sIAAAAAAAC/+y9/XfbtrIo+q9g6d618nFFOUnTnt281XWfm6Stb/O1Yrf7nNPktRA5krBNAdwAKEvN", + "y/9+FwYACZKgRNmyk7T+KbFIAoPBYDDf82GUimUhOHCtRk8+jAoq6RI0SPyLpqkouU5YZv7KQKWSFZoJ", + "PnrinxGlJePz0XjEzK8F1YvReMTpEup3zPfjkYR/l0xCNnqiZQnjkUoXsKRmYL0pzNvVSOtkLhI3xLEd", + "4uTZ6OOWBzTLJCjVhfI1zzeE8TQvMyBaUq5oah4pcsH0gugFU8R9TBgnggMRM6IXjZfJjEGeqYlf5L9L", + "kJtglW7y/iV9rEFMpMihC+dTsZwyDh4qqICqNoRoQTKY4UsLqomZwcDqX9SCKKAyXZCZkDtAtUCE8AIv", + "l6Mnv40U8Awk7lYKbIX/nUmAPyHRVM5Bj96PY4ubaZCJZsvI0k4c9iWoMteK4Lu4xjlbASfmqwl5WSpN", + "pkAoJ29/eEq++uqrb81CllRryByR9a6qnj1ck/189GSUUQ3+cZfWaD4XkvIsqd5/+8NTnP/ULXDoW1Qp", + "iB+WY/OEnDzrW4D/MEJCjGuY4z40qN98ETkU9c9TmAkJA/fEvnzQTQnn/6S7klKdLgrBuI7sC8GnxD6O", + "8rDg8208rAKg8X5hMCXNoL89SL59/+Hh+OGDj//jt+Pkv92fX3/1ceDyn1bj7sBA9MW0lBJ4uknmEiie", + "lgXlXXy8dfSgFqLMM7KgK9x8ukRW774l5lvLOlc0Lw2dsFSK43wuFKGOjDKY0TLXxE9MSp4bNmVGc9RO", + "mCKFFCuWQTY23PdiwdIFSamyQ+B75ILluaHBUkHWR2vx1W05TB9DlBi4LoUPXNDni4x6XTswAWvkBkma", + "CwWJFjuuJ3/jUJ6R8EKp7yq132VFzhZAcHLzwF62iDtuaDrPN0TjvmaEKkKJv5rGhM3IRpTkAjcnZ+f4", + "vVuNwdqSGKTh5jTuUXN4+9DXQUYEeVMhcqAckefPXRdlfMbmpQRFLhagF+7Ok6AKwRUQMf0XpNps+/85", + "ff2KCEleglJ0Dm9oek6ApyKDbEJOZoQLHZCGoyXEofmybx0Ortgl/y8lDE0s1byg6Xn8Rs/ZkkVW9ZKu", + "2bJcEl4upyDNlvorRAsiQZeS9wFkR9xBiku67k56Jkue4v7X0zZkOUNtTBU53SDClnT93YOxA0cRmuek", + "AJ4xPid6zXvlODP3bvASKUqeDRBztNnT4GJVBaRsxiAj1ShbIHHT7IKH8f3gqYWvABw/SC841Sw7wOGw", + 
"jtCMOd3mCSnoHAKSmZBfHHPDp1qcA68InUw3+KiQsGKiVNVHPTDi1NslcC40JIWEGYvQ2KlDh2Ew9h3H", + "gZdOBkoF15RxyAxzRqCFBsusemEKJtyu73Rv8SlV8M3jvju+fjpw92eivetbd3zQbuNLiT2SkavTPHUH", + "Ni5ZNb4foB+Gcys2T+zPnY1k8zNz28xYjjfRv8z+eTSUCplAAxH+blJszqkuJTx5x++bv0hCTjXlGZWZ", + "+WVpf3pZ5pqdsrn5Kbc/vRBzlp6yeQ8yK1ijChd+trT/mPHi7Fivo3rFCyHOyyJcUNpQXKcbcvKsb5Pt", + "mPsS5nGl7YaKx9naKyP7fqHX1Ub2ANmLu4KaF89hI8FAS9MZ/rOeIT3RmfzT/FMUuflaF7MYag0duysZ", + "zQfOrHBcFDlLqUHiW/fYPDVMAKwiQes3jvBCffIhALGQogCpmR2UFkWSi5TmidJU40j/U8Js9GT0P45q", + "+8uR/VwdBZO/MF+d4kdGZLViUEKLYo8x3hjRR21hFoZB4yNkE5btodDEuN1EQ0rMsOAcVpTrSa2yNPhB", + "dYB/czPV+LbSjsV3SwXrRTixL05BWQnYvnhHkQD1BNFKEK0okM5zMa1+uHtcFDUG8flxUVh8oPQIDAUz", + "WDOl1T1cPq1PUjjPybMJ+TEcG0VxwfONuRysqGHuhpm7tdwtVtmW3BrqEe8ogtsp5MRsjUeDEfMPQXGo", + "VixEbqSenbRiXv7JvRuSmfl90MdfBomFuO0nLlS0HOasjoO/BMrN3RbldAnHmXsm5Lj97eXIxoyyhWDU", + "SY3FQxMP/sI0LNVOSgggCqjJbQ+Vkm5GTkhMUNjrkskvCiyFFHTOOEI7NuoTJ0t6bvdDIN4NIYCq9CJL", + "S1aCrEyoTuZ0qJ907CxfALXGNtZLokZSzZnSqFfjy2QBOQrOlHuCDknlUpQxYMO3LKKC+ULSwtKye2LF", + "LsZRn7cvWVivePEOvBOjMAfsPthohOrSbHkn64xCglyjBcP3uUjPf6JqcYATPvVjdWkfpyELoBlIsqBq", + "ETk4LdquRxtC3+ZFpFkyDaaaVEt8IebqAEvMxT6sqyie0jw3U3dZVmu1OPCgg5znxLxMYMnQYO4UR2th", + "t/oXeU7ThRELSErzfFybikSR5LCC3CjtjHOQY6IXVNeHH0f2eg2eIwWG2WkgwWqcmQlNbLKyRUggS4o3", + "0NJoM0Xe/KbioIouoSUF4Y0oSrQiBIrGyTO/OlgBR55UDY3gV2tEa004+MTM7R7hzFzYxVkLoPbuuwp/", + "Fb9oAG3eru9TXk8hZGZt1tr8xiRJhbRD2BveTW7+A1TWH1vqvFtISNwQkq5AKpqb1bUWda8i30Odzh0n", + "M6OaBifTUWFcAbOcA79D8Q5kxErzGv9Dc2IeGynGUFJNPQyFERG4UzN7MRtU2ZnMC2hvFWRpTZmkoOn5", + "XlA+rSePs5lBJ++5tZ66LXSLqHbobM0ydahtwsH69qp5QqztyrOjjiyylekEcw1BwJkoiGUfLRAsp8DR", + "LELE+uDX2vdiHYPpe7HuXGliDQfZCTPOYGb/vVg/c5AJuRvzOPYQpJsFcroEhbcbDxmnmaX2yx1Phbyc", + "NNG6YDipvY2EmlEDYWrcQhK+WhaJO5sRj4V9oTVQHeCxXQhoDx/DWAMLp5peAxaUGfUQWGgOdGgsiGXB", + "cjgA6S+iQtyUKvjqETn96fjrh49+f/T1N4YkCynmki7JdKNBkbvOLEeU3uRwL6odoXQRH/2bx95H1Rw3", + "No4SpUxhSYvuUNb3ZbVf+xox73Wx1kQzrroCcBBHBHO1WbQT69Y1oD2DaTk/Ba2NpvtGitnBuWFnhhh0", + "+NKbQhrBQjX9hE5aOsrMK0ew1pIeFfgm8MzGGZh1MGV0wOX0IETVt/FZPUtGHEYz2Hko9t2meppNuFVy", + "I8tDmDdASiGjV3AhhRapyBMj5zERMVC8cW8Q94bfrqL9u4WWXFBFzNzovSx51mOH0Gs+/P6yQ5+teY2b", + "rTeYXW9kdW7eIfvSRH6thRQgE73mBKmzYR6ZSbEklGT4IcoaP4K28hdbwqmmy+L1bHYYa6fAgSJ2HLYE", + "ZWYi9g0j/ShIBbfBfDtMNm7UIehpI8Z7mXQ/AA4jpxueoqvsEMe235q1ZBz99mrD08C0ZWDMIZs3yPLq", + "Jqw+dNip7qgIOAYdL/Ax2uqfQa7pD0Ke1eLrj1KUxcHZc3vOocuhbjHOG5CZb70ZmPF53gwgnRvYJ7E1", + "fpIFPa2MCHYNCD1S5As2X+hAX3wjxTXcidFZYoDiA2ssys03XZPRK5EZZqJLdQBRsh6s5nCGbkO+Rqei", + "1IQSLjLAzS9VXMjsCTnEWCcM0dKh3Ir2CabIFAx1pbQ0qy0LggFInfui/jChqT2hCaJG9YRfVHEz9i07", + "nQ1nyyXQbEOmAJyIqYtxcNEXuEiK0VPai2lOxI3wiwZchRQpKAVZ4kzRO0Hz79mrQ2/BEwKOAFezECXI", + "jMorA3u+2gnnOWwSjPVT5O7Pv6p7nwBeLTTNdyAW34mht21P60I9bPptBNeePCQ7a6mzVGvEW8MgctDQ", + "h8K9cNK7f22IOrt4dbSsQGJIybVSvJ/kagRUgXrN9H5VaMuiJ4LdqelGwjMbxikXXrCKDZZTpZNdbNm8", + "1LAlmBUEnDDGiXHgHsHrBVXahkExnqFN014nOI8VwswU/QD3qiFm5F+9BtIdOzX3IFelqtQRVRaFkBqy", + "2BrQI9s71ytYV3OJWTB2pfNoQUoFu0buw1IwvkOW04DxD6or/6vz6HYXhz51c89voqhsAFEjYhsgp/6t", + "ALthFG8PIEzViLaEw1SLcqrQ4fFIaVEUhlvopOTVd31oOrVvH+tf6ne7xGWdHPbezgQodKC49x3kFxaz", + "Nn57QRVxcHgXO5pzbLxWF2ZzGBPFeArJNspHFc+8FR6BnYe0LOaSZpBkkNNNJDjAPib28bYBcMdrdVdo", + "SGwgbnzTa0r2cY9bhhY4nooJjwSfkNQcQaMK1ATivt4xcgY4dow5OTq6Uw2Fc0W3yI+Hy7ZbHRkRb8OV", + "0GbHHT0gyI6jDwG4Bw/V0JdHBX6c1Lpne4r/AuUmqOSI/SfZgOpbQj3+XgvosQW7HKfgvLTYe4sDR9lm", + "LxvbwUf6jmyPYfoNlZqlrEBd52fYHFz1a08QdZyTDDRlOWQkeGDVwCL8ntgQ0vaYl1MFB9neuuB3jG+R", + "5fgwnSbw57BBnfuNzU0ITB2H0GUjo5r7iXKCgPqIZyOCh6/AmqY63xhBTS9gQy5AAlHl1IYwdP0pWhRJ", + 
"OEDUP7NlRuedjfpGt7qLT3GoYHmxWDOrE2yH76ylGDTQ4XSBQoh8gIWsg4woBINiR0ghzK4zl/7kE2A8", + "JTWAdEwbXfPV9X9HNdCMKyD/JUqSUo4qV6mhkmmEREEBBUgzgxHBqjldcGKNIchhCVaTxCf377cXfv++", + "23OmyAwufM6gebGNjvv30Y7zRijdOFwHsIea43YSuT7QcWUuPqeFtHnK7ognN/KQnXzTGrzydpkzpZQj", + "XLP8KzOA1slcD1l7SCPDor1w3EG+nGZ8UGfduO+nbFnmVB/CawUrmidiBVKyDHZycjcxE/z5iuavq88w", + "HxJSQ6MpJClm8Q0cC87MNzbxz4zDODMH2Ab9DwUITuxXp/ajHSpmHanKlkvIGNWQb0ghIQWb72YkR1Ut", + "dUJsJHy6oHyOCoMU5dwFt9pxkOGXyppmZMk7Q0SFKr3mCRq5YxeAC1PzKY9GnAJqVLq2hdwqMBe0ms9l", + "uQ65mYM9aHsMok6y8ahX4zVIXdUar0VOM29zwGXQkPcC/NQTD3SlIOqM7NPFV7gt5jCZzb0ek309dAzK", + "7sRBxG/9sC/o16jb+eYAQo8diEgoJCi8okIzlbJPxSzM0fahghulYdm15NtPf+85fm979UXBc8YhWQoO", + "m2hZEsbhJT6MHie8Jns+RoGl79u2DtKAvwVWc54h1HhV/OJut09o22OlfhDyUC5RO+Bg8X6AB3Knu91N", + "eVk/Kc3ziGvRZXC2GYAaV8G6TBKqlEgZymwnmRq7qGDrjXTpnk30v6nyUg5w9trjtnxoYXEAtBFDXhBK", + "0pyhBVlwpWWZ6necoo0qWGokiMsr4/1Wy6f+lbiZNGLFdEO94xQD+CrLVTRgYwYRM80PAN54qcr5HJRu", + "6TozgHfcvcU4KTnTONfSHJfEnpcCJEZSTeybS7ohM0MTWpA/QQoyLXVT+scEZaVZnjuHnpmGiNk7TjXJ", + "gSpNXjJ+tsbhvNPfH1kO+kLI8woL8dt9DhwUU0k82OxH+xTj+t3yFy7GH8Pd7WMfdFpXTBiZZTaKpPx/", + "d//3k9+Ok/+myZ8Pkm//19H7D48/3rvf+fHRx++++/+bP3318bt7//t/xnbKwx5Ln3WQnzxzmvHJM1R/", + "glD9Nuw3Zv9fMp5EiSyM5mjRFrmLpSIcAd1rGsf0At5xveaGkFY0Z5nhLZchh/YN0zmL9nS0qKaxES1j", + "mF/rnkrFFbgMiTCZFmu8tBTVjc+MJ6qjU9LlnuN5mZXcbqWXvm0epo8vE7NxVYzA1il7QjBTfUF9kKf7", + "89HX34zGdYZ59Xw0Hrmn7yOUzLJ1rI5ABuuYrhgmSdxRpKAbBTrOPRD2aCidje0Ih13CcgpSLVhx85xC", + "aTaNczifsuRsTmt+wm2Avzk/6OLcOM+JmN083FoCZFDoRax+UUNQw7fq3QRohZ0UUqyAjwmbwKRt88mM", + "vuiC+nKgMx+YKoUYog1V58ASmqeKAOvhQgYZVmL000pvcJe/Org65AaOwdWeMxbRe+fH52fkyDFMdceW", + "tLBDB0UIIqq0S55sBCQZbhbmlL3j7/gzmKH1QfAn73hGNT2aUsVSdVQqkN/TnPIUJnNBnvh8zGdU03e8", + "I2n1FlYMkqZJUU5zlpLzUCGpydMWy+qO8O7dbzSfi3fv3ndiM7rqg5sqyl/sBIkRhEWpE1fqJ5FwQWXM", + "96WqUi84sq3ltW1WK2SL0hpIfSkhN36c59GiUO2SD93lF0Vulh+QoXIFDcyWEaVFlY9mBBSX0mv295Vw", + "F4OkF96uUipQ5I8lLX5jXL8nybvywYOvMLOvroHwh7vyDU1uChhsXektSdE2quDCrVqJsepJQecxF9u7", + "d79poAXuPsrLS7Rx5DnBzxpZhz7BAIeqF1ClOPdugIVj7+RgXNyp/cqXdYwvAR/hFjYTsK+0X0H+/KW3", + "a0cOPi31IjFnO7oqZUjc70xV7W1uhCwfjaHYHLVVVxhvCiRdQHruKpbBstCbceNzH/DjBE3POpiytexs", + "hiFWU0IHxRRIWWTUieKUb9plbZTNqMBB38I5bM5EXYxpnzo2zbIqqu+gIqUG0qUh1vDYujHam++iynyi", + "qatOgsmbniyeVHThv+k/yFbkPcAhjhFFo+xHHyKojCDCEn8PCi6xUDPelUg/tjzGU+CarSCBnM3ZNFaG", + "959df5iH1VClqzzoopCrARVhM2JU+am9WJ16Lymfg7mezZUqFM1tVdVo0AbqQwugUk+B6q12fh4WpPDQ", + "oUp5gZnXaOEbmyXA2uw302ix43BhtAo0FNl3XPTypD/+zAIO2SXh8Z/XmsKkV9d1qItUHPS3coXdSq11", + "oXkhnSFc9vkSsGSpuDD7YqAQrtqmLeoS3C+lonPo0V1C793AehgNjx8OsksiicogYtYWNTqSQBRk+3Ji", + "1hw9w2CemEOMamYrINPPZB3EzmeERbQdwqY5CrBV5KrdeyobXlRbFbgPtDhrAclrUdCD0cRIeBwXVPnj", + "iPVSPZcdJJ1dY9mXbaXpToJYwqAoalV4zt+GbQ7a0ftdgTpflc6XoguV/gFl5YzuhekLse0QHEXTDHKY", + "24Xblz2h1AWT6g0ycLyezZC3JLGwxMBAHQgAbg4wmst9QqxvhAweIUbGAdgY+IADk1ciPJt8vg+Q3BV8", + "on5svCKCvyGe2GcD9Y0wKgpzubIef2PqOYArRVFLFq2IahyGMD4mhs2taG7YnNPF60E6FdJQoWjVQ3Oh", + "N/f6FI0tril75e+1JiskXGY1oTTrgY6L2lsgnop1YjOUo7rIdD019B7NXcB86djBtLXo7igyFWsM58Kr", + "xcbK74ClHw4PRmB7WTOF9Irf9clZFpht026Xc2NUqJBknKG1Ipc+QW/I1D2yZR+53A3Ky10KgJYZqu7V", + "4MwSO80HTfGke5nXt9q4Lpvq08Jix7/vCEV3qQd/XftYsyDcT3Xhv/7iYv5E3UglvK5l6SoVCu3Hha06", + "uE+BwjY5NIDYgtU3bTkwitZmrFcTrwHWYqzEMN+uU7KLNgU5oBKcNETT5DwWKWB0ecB7/NR/FhjrcPco", + "39wLAgglzJnSUDuNfFzQpzDHUyyfLMSsf3W6kDOzvrdCVJe/dZvjh41l3vgKMAJ/xqTSCXrcokswL/2g", + "0Ij0g3k1LoE2QxRtswGWxTkuTnsOmyRjeRmnVzfvz8/MtK+qi0aVU7zFGLcBWlNsjhENXN4ytY1t37rg", + "F3bBL+jB1jvsNJhXzcTSkEtzji/kXLQY2DZ2ECHAGHF0d60XpVsYZJBw3uWOgTQaxLRMtnkbOocp82Pv", + 
"jFLzae99N78dKbqWoAxgPENQzOeQ+fJm3h/GgyJyueDzoItTUWyrmTchtnQdVp7bUrTOheFDXxB+IO4n", + "jGewjkMfagUIeZ1ZhwX3cJI5cFuuJG4WiqImDPHHNwJb3Q37QtsJANEg6LOWM7uOTra7VG0nbkAONHM6", + "iQK/vu3HsrshDnXjvvDpRuXT7UcIB0SaYjpobNItQ9DDgGlRsGzdcjzZUXuNYHQv63KPtIWsxQ22AwPN", + "IOgowTVKabtQa2dgP0Kd98hoZTb22gUWG/qmqUvAz0qJHoxGZHO3bnulqw1c+8+/nmoh6RycFyqxIF1p", + "CFzOPmgIqqIropkNJ8nYbAah90VdxnPQAK5jY88GkG6EyOIumpJx/c3jGBntoJ4axt0oi1NMhBb6fPJn", + "XS+Xl+kDU1J1JQRbcwlXVTRd/2fYJL/SvDRKBpOqDs91bqfm5bvHrq+WP8MGR94Z9WoA27EraHl6C0iD", + "MUt/9UgFBazvqEaJf1QvG1u4x04dx3fpQFvjmjL0E399yzSaFjSXcpWDUQdJGFiG7MZpPDbBnB5oIr5N", + "yrs2gWW7ZZBA3g+nYsq3sOxeRVUtil20ewY098SLyxl9HI+uFgkQu83ciDtw/aa6QKN4xkhT6xluBPbs", + "iXJaFFKsaJ64eIm+y1+Klbv88XUfXnHDmkycss+eH79448D/OB6lOVCZVJaA3lXhe8UXsyrbxmH7VWKr", + "fTtDp7UUBZtfVWQOYywusLJ3y9jUaYpSx88ER9HFXMziAe87eZ8L9bFL3BLyA0UV8VP7PG3ATzPIh64o", + "y72z0UPbE5yOixvWWSfKFcIBrhwsFMR8JQdlN53THT8dNXXt4Ek412ssTRnXOLgrXImsyAX/0INLTz8I", + "2WD+LjMxGjx0fWKVEbItHntitX3/yrYwNSFW8Ppj/oc5jffvh0ft/v0x+SN3DwIA8fep+x31i/v3o97D", + "qBnLMAm0UnG6hHtVlkXvRtysAs7hYtgFfbxaVpKl6CfDikJtFJBH94XD3oVkDp+Z+yWDHMxPkyFKerjp", + "Ft0hMENO0GlfJmIVZLq0LTMVEbwdU41JsIa0kNm7lgzWGds9QrxcogMzUTlL46EdfKoMe+U2mNK8TPDl", + "HmutGbFkPbG5vGTBWOa1ITVTW0AGc0SRqaJlW2vcTYU73iVn/y6BsMxoNTMGEu+11lXnlQMctSOQxu1i", + "bmDrp6qHv4odZIu/yduCthlBtvrvnlU+Jb/QWNOfPSPAwxk7jHtL9LajD0fNNptt0QzBHKbHDGmd7hmd", + "c9b1zBFthc5UMpPiT4g7QtB/FCmE4R2fDM28fwKPRe61WUrlVK47utez79ru4bpx38ZfWRf2i666jl3m", + "Mo2f6v028jJKr4qXa3ZI7lPCwgiDZmpAD2vB4xUEw2IbFB99RLk9T7YKRCPDLH4qw1zOIzt+fSodzJ38", + "15xeTGmsR4zRhQxMwfY24qS0IP5jvwGqqnFgZydBBHf1LrOV5AqQtQ+iW5X2knqNnXawRlMrMEhRoeoy", + "tmEKuRKRYUp+QbntIm6+s/zKfa3AuuDNVxdCYh1IFQ/pyiBly6g59t2737K0G76TsTmzDbJLBUEHZjcQ", + "scUmkYpcF+uqcodDzcmMPBgHbeDdbmRsxRSb5oBvPLRvTKnC67Jyh1efmOUB1wuFrz8a8Pqi5JmETC+U", + "RawSpNI9UcirAhOnoC8AOHmA7z38ltzFkEzFVnDPYNEJQaMnD7/FgBr7x4PYLesanG9j2RnybB+sHadj", + "jEm1Yxgm6UaNR1/PJMCf0H87bDlN9tMhZwnfdBfK7rO0pJzOIZ6fsdwBk/0WdxPd+S28cOsNAKWl2BCm", + "4/ODpoY/9eR8G/ZnwSCpWC6ZXrrAPSWWhp7q9sp2Uj+c7fXv+kV5uPxDjH8tfPhfy9Z1w2oMXfbkbGGU", + "8iv00YZoHRNqi3/mrI5M9/06yYmvLYwNtKq+WRY3Zi6zdJQlMVB9RgrJuEb7R6lnyT+MWixpatjfpA/c", + "ZPrN40gjqmavFr4f4DeOdwkK5CqOetlD9l5mcd+Su1zwZGk4SnavrrEQnMreQN14SGZfXOj2oYdKvmaU", + "pJfcyga50YBTX4nw+JYBr0iK1Xr2ose9V3bjlFnKOHnQ0uzQL29fOCljKWSsYUB93J3EIUFLBivMmItv", + "khnzinsh80G7cBXoP238kxc5A7HMn+WoIhB4NLclyxsp/teXdeVzdKzaTMSWDVDIiLXT2e1uONpwP6tb", + "239rA8bwWQ/mBqMNR+lipSf63obXV998inihNkh2zxsGx4d/EGl0cJTj799HoO/fHzsx+I9HzceWvd+/", + "Hy9AHDW5mV9rLFxFI8ZvY3v4vYgYwHzXwiqgyNVHiBgg+y4p88AwwakbakyaHeJuXoo4TH5XPNo0fgre", + "vfsNn3g84B9tRHxiZokbWGcp9B/2ZofMKMlk1fMgzp2S78V6KOG07iBPPJ8BinpQMtA8hyvpdACNuut3", + "xosENGpGnUIujJIZNgUK7flfDp7N4sdbsF2yPPu1ru3Wukgk5ekiGiU8NR/+bmX0xhVsWWW0z8iCcg55", + "dDir2/7udeCIlv4vMXSeJeMD3213oLXLbS2uBrwJpgfKT2jQy3RuJgix2iybVZVlyOciIzhP3dSiZo7d", + "Vs6xFpqR/GYcdllqF7eKueCu4NCM5RiGGfcb45uJpLqngBb2O/f9hcw42H5cWTODHR0koWyJF7OiyyIH", + "PJkrkHSOnwoOrc+xhBqOHHSsIKowj/BNLFghiC4lJ2I2C5YBXDMJ+WZMCqqUHeSBWRasce7Rk4cPHkTN", + "XoidASu1WPTLfF0v5eERvmKfuCZLthXAXsDuhvVjTVH7bGyXcFxPyX+XoHSMp+IDm7mKXlJza9t+klXv", + "0wn5ESsfGSJulLpHc6UvItwsqFkWuaDZGIsbnz0/fkHsrPYb20Le9rOco7WuSf5R98rwAqO+slNP5Zzh", + "42wv5WFWrXRStZ+M1SY0b9QNMlkr5gbteCF2JuSZNaFWDfztJARLZMslZEG3S6vEI3GY/2hN0wXaJhsS", + "UD+vHN6I1bOz2nMTZB9W3Y+QYRu4XS9W24p1TIRegLxgCjAjH1bQLIdY1QZ1tnFfHrG5PFlybillsocw", + "WvU62hftHjgryfqggihkLcTvaZmy/Zj37Ut7il/FczFaTW5bXn9fXM+X2CYvnXMhpVxwlmIrhJgkjaXb", + "hrkpB3SNiPsX1cid0MjhirbWrXKBHRZ7m+16RugQ13X5B0/NplrqsH9qWLuWa3PQynE2yMa+07VziDGu", + 
"wHWzMkQU8kkhI0FN0USIKoBiTzLCqkw9Fs4fzLNXzv6NRTHOGUdLl0Ob08+syypXDD3TnDBN5gKUW08z", + "m0f9Zr6ZYJXGDNbvJy/EnKWnbI5j2DA6s2wbM9od6thHkLqITfPuU/Ouq51f/dwIB7OTHheFm7S/D3pU", + "kNRr3ovgWNySDyQJkFuNH462hdy2hn7jfWoIDVYYtQYF3sMdwqh6aTdHeW50S0tR+AaxGZXRArqMR8B4", + "wbh3ocYviDR6JeDG4Hnt+U6lkmqrOwziaWdA854ECMxQtj74qw7V7hxgUIJr9HP0b2PdBryHcVQv1BI/", + "5RviD4Wh7kCYeErzKnQ60tQbpSonRGWYXNRq8x1jHIZxJz5lsoGunel71efYjWPfm6ivRuG0zOagE5pl", + "sdJW3+NTgk99khisIS2rJlRVdmCzRnmX2txEqeCqXG6Zy79wxemCvvkRagh79/sdxko70w3+G+vA1L8z", + "Lmh676xcHyGd7VeYv5tlHJN6DU0nis2T4ZjAO+Xq6Kinvhyh198flNJ9uu5nkY3b4nLhHsX423NzcYSF", + "ezvx6fZqqerqYiy4wOe+4FFVEbLJlfAq6/QZw6gH3LzIlrWA9y9GAV/RvCcTPvSV2PvV+g/68uHT3vIN", + "VLvyXJqSrSyot+SRjRVueV+6LsS++GAbHnw4r4Vb61aE9vvufm546myMWM0sej10l3Oi1Ru8rxft51Vf", + "iQTfpwOfh/1AXBTP2JWBhxUTpY++8jHQXiW0v7oSPI2+Hz3rj2YWfGqvRa+P5cz1r7XLdDr5z79aLywB", + "ruXmM/C4dDa93VQmIu1a81T9CqlaHw5qhdi4FYf0sIm1S3GyobeVWdbSoKVO+5kOWT0bIg508PFxPDrJ", + "9rowYy13RnaU2LF7weYLjRX7fwKagXyzoyNB3YUAj1ghFKs7kOZmMFcCdoHDTYYmGxgCZmFHhe5YPgh1", + "BanGtrN1cJ0E2Ke/gpnMO31uOxP0q9NVToZrSLCtC0G31+yOO75TOCko/mX7dE6G19w/rkKobQbYBVV1", + "uZZWzvTgzM3ZDFKsiry1UNU/F8CDIkhjb5dBWGZB3SpW5TFhXe/9rY41QNvqSG2FJ+ivc2Vw+vLYz2Fz", + "R5EGNUQbh1ZJfJcpHIwYsC4wX0O6z5DsosaYqigDseBDgl0p5ro5Rm/N56Ds2iXn8iRpLo66FNuWKeNN", + "zwfNZT7dq+wjpuT01bLq9kzu1z+eYYtq5QLkaFV4ONTSyUm3cc6FK1yMZcUq34kvYQzK/+ZrCNpZcnbu", + "+gcgVqyn6oLKzL9xkKJQ9m5icaBn1cysTuDoBjlEWjFgLlSaCyNGJH0JZc2ciSrg8I6ykaF1AR+EawZS", + "Qla5RHKhINHCJ3xsg2MbKmz466WQoHrbH1ngektfv61re2MbOIqlrqmLeg0XSCQsqYFOBhW4++fchuyn", + "9rlPwvdtwHZamCp63d2P1qfuMNVBYkj1M+Juy93J/ZcxNjHOQSbe89Qux82bFdmw7mZWpvaCDg9GZZAb", + "XDtnCyuJ2mnS7ipbOkKQJH8OmyOrBPlGvn4HQ6Ct5GRBDwqOtjb5oOY3FYN7fhDwPm0duUKIPOlxdpx0", + "a4i3Kf6cpeeANQCrEPeeHu3kLtrYK2/2xWLja2YXBXDI7k0IOeY2qcg7tpvtBVuT8zt62/xrnDUrbVl/", + "Z1SbvOPx7AwsuC+vyM38MNt5mALD6q44lR1kR4XqNe8LubnA4vzNLp6ToVp519Xc7iJfE5WFIiaTnFqP", + "1VM86DHDEZZACGp1oCOTEufpIioXsVjey5RpMEPFMRVOhgBp4EOqBVRQuMGjCIj2RY+cQlv6zhW9EzMi", + "oXYiX7b6X7eFe0yjb89czdLkdzMhodGM3XxtK31WiS9YRhP/M2VaUrm5TI2+Tgv5jvWkF8s7w7GqSKx6", + "IXU0VheHeS4uEmRWSdXnIqbamvdU8zL2Tdfq78ypnkIQ10WVE9Q2ZEEzkgopIQ2/iOd7WqiWQkKSCwzz", + "inmgZ9rI3UtM8uIkF3MiilRkYPvFxCmob66Sc4piEwRRNVEUWNrBbGH7TUDHA6c0d6r1IyUoas336J2f", + "gs1cr6s62UUn1pfZE7EMylVxchiyL3fh3dL7P86bZ2yNdAMyduRnRMsSxsS90e6R7Q4+lUCWTCkLSkVL", + "FyzPMXGcrQPPaxW4EEdtj9h7gmGVK4axN80iAlYaLsydV1VWCHnAaVj2iOiFFOV8ERSYruD0Kq8snUIc", + "jvKLKjE8CjPIzBSPyVIo7TRNO1K95Drk7G4quJYiz5tGKSuiz52l/SVdH6epfiHE+ZSm5/dQr+VCVyvN", + "xj6/uh0cWM8kW6XFmhdwYtuZ7y7Va9/DUDlHtIMZZIvF7d3YPQDz/W4OutvmftxdWHtdTWYaV2OOOaFa", + "LFkaP1NfVrRdb4xcjEVFa5bZ3oq2ygS+hoc9vKyq4ApkkV00A6fR5nDHxDEC52RGdmP+ixJ4e1wyA8do", + "ei7KLnNxUlSS9sp6LQAQUpv6rEtpGzKGkljFVcTclkpAF3kb0IG3CkYiXQ02M8LBgdJwJaA60Y8VgHet", + "8WFsa8vZSMqpWPvn9+ric5cC/uN2Km8wj74Qr9OatKQN8vKFano4QrzE9dZ4qDNMe58OjYqqmucOvOED", + "APrjpBowDIqW2heMGWU5ZEms9+JJZaMaB5q2S81qt0RnynHylJa+9aEZu5TgCqdYEV82/V8FNaQkqte7", + "lmSewRpsXsefIIXtaTgO/C+Q25aHLWOAKJIcVtAIH3PVXEoUNdkK/Leq+phkAAV6I9s2slhcVHiXtwwn", + "bu1JEFkzBLtRS4pFrN0pssNMEjXqrHlij4kaepQMRCuWlbSBP7WvyNE0A5qjHEFVR0dIvB45dJpf7Ahv", + "/QDH/vuYKOMx8X4YH9qbBcVRt40B7YyTLFXfqefxMMmwVFHlYMHZssoRa0m85huqoBe83yDZJfla3Rq4", + "T0zwALHP15CiVOP0HcicxtPjpHBVT5DaOUBmtQLzScTavgBOuAhaTF5QVakqdQ1F/4OdGF9i3GnTl3Aq", + "19GMV99ZgoMR1Sqm1qtIyIpOL2+e/yQncetB7B0vRiMKXPrfFvuXp26nduAL2Mqbm/00sj82aXS3mOPi", + "YzIt/UB5Li5sz8hQD30G3g9qqc+7gJxYzqpr2Udtjl15z7apgwXx6ku6IULiP0br/HdJczbbIJ+x4PvP", + "iFpQQ0LO8WojAlwUqJl4u3g19oB5a4vwU9l1s6FjBsNtzCgB0OYi9819BFnScwi3AYMdLP9MtWGcqpyi", + 
"5cJc2a3t7GLBLd6XaFnSLNT0sVBks426Lx1svv5/6ly4cCpf363Iaeo7hLoWRU0+g12APXHpBSy3J0t2", + "+ZongaqzcE200mfXZ5cwme7JumIZCH3tVxpgdzqudjrPXGkZAy2/rR4bW9JMBy3l0LswNOqmA3TYp3EX", + "+GHbypvBf7SGa98yhoD/ueC9p1FtCK/tSXsDWG5U4IjAaq3VU7FOJMzUrgATa6426rysa3d4EyvjqQSq", + "bMTNyWuneNYlShk3irCNCa18mtUoGcwYr5kl40WpI3oMVirlmwBhodEf0drjQuuTEowwuaL56xVIybK+", + "jTOnw7Z0DFtEeEeH+zZiwqju1O4ATNU6HOZn1mb08DVzgdsmVDZcU2nKMyqz8HXGSQrS3Pvkgm7U5T1K", + "lXNgl0+JBtJMs2pA4F1C0raA5BvnFL6iv6cCkB7Q8TPAYYNxwRFnjTXtaNHjn+nC8EU4bJZ0neRijlmE", + "PQfC1aZFD59VAQVHM7iVz4at28+j2J+wfRosy+8YkRY465Aptp/717iVqEb+wpneevKtjbKd1mnjbu3B", + "9Ejl8zr43xJL9zzGMnFd8ZUwG9cLmz5VxdMeBJsIPf6hpl28ZxcxDMKlcYdG8OHtzpqRFrF8X2sZSNBi", + "oLaE94OqQ9lp6sKzuqa0jqnBImXssqX3tLRZ+7y/l3rAs73p3VlvTluFzJhx9ukRtz0/OilEkaRDYj5t", + "547MuQkcpE0Ye+gjcAL0rLsKj1FVL5tG3aNGU5t92+T1NtXZ5e0q0m1Kf5+ZqIejN10QYoa8zHZuR+sW", + "ZvJUxpRxO8esaQarmAShREJaSjQTX9DN7rZjPRWjT386/vrho98fff0NMS+QjM1B1VXHW2276rhAxtt2", + "n5uNBOwsT8c3wVcfsIjz/kefVFVtijtrltuquqRop2nZPvblyAUQOY6RdlGX2iscpw7t/7y2K7bIg+9Y", + "DAXXv2dS5Hm860MlV0UcKLHdClwoRgMpQCqmtGGETQ8o03VEtFqgeRBr/65sNRnBU/D2Y0cFTPeEXMUW", + "0hdQi/wMc7ud14jAusgdr7Kenm3rcnqatdCh0IhRMVMghSicaM9mJAYRZhDJILPWGT7RIh7EyFbM1kbL", + "xgjRRZ7HSS9smL2d2zebueo4pzebGBEv/KG8BGn2+Sf66xZchpPUpv3Phn9ECjEcjGtUy70OXhHVDy7X", + "lH8QaN2k/Ah5IAA92baNPMkgUSwoRCytlwD9Cd6B3BY/XtaO5Z1pIQiJ/2AHeGH6bP1elcngwPnEFX1f", + "VkgJlvK+jxIay9+VketZb3WRBFvkjCZag7JsSXTFwiDdWj2tsph7tJJOsrMUQhOjmeZ5JEna2nHwTIWE", + "Y1QCuaL5zXONH5hU+hjxAdnb/tSoMFM2RLJFpbpcnb4XdNDcQVbs4abmbzAx+59g9ih6z7mhnBO+c5uh", + "cQc71s/9rWBzvckFjmmDrB5+Q6au2UYhIWWq7dy/8MJJlRgKks1cQCus9Y5M1F3r/FXoK5DxzEfikFeB", + "e6vy2TsI6yP6iZlKz8mNUnmM+jpkEcFfjEeFzXl3XBdXbMxwubIvQQG3Pcu+dNsOD12eLW1iLp1SQXed", + "g2/rBm4jF3W9tqE1iwb3d3j37jc9HVJqKN6LwXyOtY4O0pRhr5YM11DlyOLIjeHmjVHMr311b21t157a", + "3K39KFm+M2ClUWn943g0Bw6KKawl/rvrHXOzd6mHwFZe6B5VC+tVysVYxETW2pg8mCqooT6gfLr7LFLz", + "GrMa01IyvcG+wd6Axn6P1mP6sart4WrDVL40d/dpcQ5V7/a6Ekip/O36o6A53kfWxcfNLSTyCXluK3y7", + "g/Ldnel/wFf/eJw9+Orhf0z/8eDrByk8/vrbBw/ot4/pw2+/egiP/vH14wfwcPbNt9NH2aPHj6aPHz3+", + "5utv068eP5w+/ubb/7hj+JAB2QLqS/s/Gf1ncpzPRXL85iQ5M8DWOKEF+xnM3qCuPBPY19IgNcWTCEvK", + "8tET/9P/60/YJBXLenj/68j1ZxottC7Uk6Oji4uLSfjJ0RxT/xMtynRx5OfBboMNeeXNSRWjb+NwcEdr", + "6zFuqiOFY3z29vnpGTl+czKpCWb0ZPRg8mDy0LW25rRgoyejr/AnPD0L3PcjrK95pFzp/KOisMXzP45H", + "R44O3V8LoDkW0TF/LEFLlvpHEmi2cf9XF3Q+BznBDA370+rRkZc4jj646ggfzeRR16CtpR4U0PbBhkU5", + "zVnq65AxZW3ENohehQ1krfG8VGMytS2GfZwuzzAKyRYcUGGb7ZPM4NJ+flLzM98lGV3Hoye/RSpW+eQO", + "37w3jCsLIs7+z+nrV0RI4jSfNzQ9rxJbfCZTnb0VJjKZLyeetP9dgtzUpOeY4nhUd/kHXi4Nf3EZMks1", + "L5rFW2uBK2YQ6uDaz2woJqD5qpZJzdPQ+hdAUnNow3UfJN++//D1Pz6OBgCChXUUYC/HP2ie/2EtaLDG", + "4NlWcM24L+xpXNfGwA/qnRyjsap6Gnxev9Osef4HFxz+6NsGB1h0H2iemxcFh9gevMdug0gseBwfPXjg", + "eZCT8APojtyZCmYZVObfOhCqUTxJXGKgLq+yj95W5S8lLexZPPYhwpuicuHYlyaGJT0+4EKbRTqvvNz2", + "cJ1Ff08zIl2KMi7l4Re7lBNuwz3NnWPvxo/j0ddf8N6ccMNzaE7wzaCVb/ei+YWfc3HB/ZtGLiqXSyo3", + "KPXoihe2e8/QuUK/KbJIe7aDCmt8Pnr/sffWOwrjGo8+NMojZVe6E60jpdG5acc1eUf1cU4cyyaeuR/u", + "HhcFhnWeVs+Pi8J2BsdQAWB4+8GaKa3uTciP4dcN/4eFxLo/GnH/vlG2b7/dcIcHLTejl3aj8MDt/f1p", + "7+/jph2EZcA1mzGUyWPANE7BVpg6AUlXvUC7eUBBGaR9Y56rEthOtEhce7WBY7hG+4frHTig+omd6X1M", + "S9zJqG9x14O7PjEpgLeSmOrGhTfDmn013eomaVwZ18i4v3Ch7yXNDZ0Ey211rTl5disM/q2Ewarq5txK", + "Z0VxAPHQJ2fseuXog6skeQipEdXjQfJiqHkH3wbx9XdbHOfehBy337kcW3GVOHdKgua9Wxnwc5ABbSnT", + "XdKfo+NPKveFqV37ZFo1BBbz+6CPv3BB72+MrF7JzkC6W6a7BPvsyGuOWV8bW/1LymkOabcS2t9aQqvq", + "Y19JRgvDW49cpYFAYruSga9twGO6ksSaNdIDzoYlRTDn3h7hcR3Kb1iMjVF20clq7JVHdMZavdJu1rij", + 
"WnZFrB8h1GG/35w82yVdfUGmoMGtjiO3QHxvrpuXRj0Tb2/GMzGMNz1+8PjmIAh34ZXQ5Ae8xa+ZQ14r", + "S4uT1b4sbBtHOpqK9S6uxFtsqSpCZw5tg0dVtUbHwXPzto3xuItZvc3mWPcm5Hv3al3pw2Wtz4VhVD4b", + "jMq5/cjwOoMMcsf/+QTHvzMhP2COo1ZjDFXD5Al8kXH95OGjrx67VyS9sJFg7fem3zx+cvzdd+61QjKu", + "MWTA6jmd15WWTxaQ58J94O6I7rjmwZP//K//nkwmd3ayVbH+fvPKdtP9XHjrOFbVsCKAvt36wjcppq27", + "Lsc7UXcjHv7vxTp6C4j17S30yW4hg/2/xO0zbZKRU0QrY2ej384BbyN7TPa5j8bu/sFEjeoymZBXwrU+", + "K3MqbQ0YLJOryLykknINkE08pWKWnbLF6tKcYXkASRTIFchEsaocdSmhKlRSSFhhhH1dyLUBwW5Gj3G4", + "ny2Tf0nXQWr8tLqmtXBLRrPnkq4J9vLQRIEe2yppa/Ldd+TBuNZe8twMkFSIiTHXJV2PbtDqVxHb0NI/", + "zxx2hNwd3otjD7Eg1dJPVUOyVjX+7pz7i5XcLbm7jT0Q59zb8VM7dkI7gmswttWCYAU7jRWPVVkU+aau", + "dWukPC9CxVmcmWGoceAz9hHsNE1HldA2em8P8a0R4EqspE1Qe7INzFlVRx9QLw95RufcYs7d38tdGviO", + "pFh655EgM9DpwqX7tlAfYU/SpRz286Yl42xpoHwwvnapBnexW+M47O+cUZtkP6SFWJCJiQ48kBEifo3/", + "oTnWzWMzW77dN/XwFQvRNeUqYFdNVa3ybdssu5B/nxVc0EaT2N1QPq0n7wpkiJZD+D9vEbwfgjvM8bmr", + "aGCPl1vEXyEpwKuSCXkl6qRzq0H9JV2P13mzX/eCXgkO1sduJF9Li7fu1ErsMIzDIsVXG7H6S91C67Ii", + "yJGv0rNVDvnJvLRDFhlye2PFny/xCv8pWsuoccuYtU12llKoRxvCnM2LtudBWOxk8im1mE/CTz9D1eZT", + "cKybYTF4SD2fcWIBPyzTwQI+lpiPCl9tqY8DvTAvB3KZrWk0mBtpUYWhQaRyEJlCLvhcfZ6saBt1xPES", + "oRJbp8q2Tumsf/I3PLtPXV8T37DfVYtSjKdAlFgCqgxGRndFpy2E/7g5CDVb+u7cPExv/cTc5esHX93c", + "9KcgVywFcgbLQkgqWb4hv/Cqf8lVuJ0i1O15aA2OMAfG0dvUrCqWhiWQrsAEXXf8uNXY2a3ruojKylWi", + "1CBtRbxWmyrWYdIxezAyjBdm6gPIc7mYf2ninMf60ELOT2meI7p2OZlw4EFRynlu9xOWTOu6LUR4u5Ln", + "NF1UezuurXtV8z5fP3zcqjiJI7tObjbVX4HZZw0kWE1grQBp25Br7M20pBiwvCxzzYq8+U3V3RK7/UTC", + "kCxtho0CTp751VnnrJjVQ7fp11cbd4NPzNzuEc7MhV0clYC8u7KttBpITRpA275XPvw66Fbkei65YoZM", + "tqpL1rEzRQFU1h9byr9bSEjcEJKuQCqKh7W1qHu3ovrnIaqvXTnjz0RQj/oor8rrL38VNaKoP+g1yz7u", + "lsuDisB7iuSMByJ5yC7sWbu8LL47iqLdPfzkWZioIqqaWV5A6AHFoGjPXK3/NRroAsEiLWLm9LCSW0B9", + "GUsnsbosEjEbV3GaRiEVsyfkHb9P1IL6Ksvuz0dff9PjxDHzuOpzXTdOPZB5bIcZ4sv5oj1Th5U4Kvw+", + "uend3m8TxyOWrSOl5HkG66B7SbO7sbsP7yhS0I3P6OhUUyziFZUrxTQcdgnmmlILVtx81V6l2TRettxb", + "4qou+Sf8+8oga0vLGqmh+BTVWscjLQEyKPRiZxFnfKveTXDlnJlyjXdsqd0xYROY2Eq0dYO0bA7uYqIk", + "BzqrOp0JMSSPL+AzhtA8VQRYDxcyRJKO0g/KvEiUN28nrfPd7EXnkdcWij+pEKY/lRCWtKSwJlo+nUyG", + "LRvGQeRVIYUWqchtGGVZFELq6nSrySDLA/QJeg3DQx/hXkmYW7NM7XTpnOFbB7ABNClbfTEunTOPpphP", + "J7aoS5aWrecawtLOREE63fkNCJ+Ur90qlTF+1nL/fOneH91Legd2BqVUp4uyOPqA/8HSuh/rnF1sOqKO", + "9JofYZvJow9bo2uRpeZGNpG2X0nDpNtpWhmNkX2Bn9e9UX4Qst0QfGf0bAtp4/alb1tmYhhuhD1ejzb5", + "t1bCtrrOWht+9WiQyIid81qVpAga7VW0G3Tc8VUmbJvNCAnfRi99Xguq/YkzxjNCg21s2Zqq1vxeB/jH", + "F7voT+GivPmQra+/4HP2SmhysixyWALXkF0t8J20OZy/PbZet/sJBu7q70bHd+/88Mb3OT2VLLLzgt9D", + "7wmqGIGfjkosK2Tu6utRd25v8s/7Jn9aeVtDMry9l7+ce1n6TKTbK/jzv4K/+mJXc40xTAOv5Es4h5vX", + "cK2J73khd4QBZ8NqGQ62+ZVR9W6vUv0gpO8rd3uLf6FOUbuTgwOxhlhodlli3ZSHyDr7rKAfZmfI84il", + "oe+gjqtYL4b1GkXKsDvPSabGLqjMGifcKb4VfD5rwSfY61u559b08IWZHnqkHKf15/kQQWNfAWi1FBl4", + "x6qYzVx95D7pp9n00ZCn0nRZEPvlpDcO+4wt4dS8+dpOcdArtga7JRa1wDPIUpAKnqkBURxu1MveQ+ho", + "6gfgxj2b1Q54WFzlpMmlSfZtUH6xQwmkjXyFzTp9nWiHjAxWxBDg5ABke/TB/ovmtEKoyGpOPQF3Nuau", + "2xZb+NqO2wCQvEEh1FbQ9l+JGXlg61+XHJPc667clGdEy40RVH25Pwk0J2kjubWCo3tyTntPzk5VoLO6", + "njXFdQFRn9BDRjC0Cgv8fOMH4CnljuS7CNKCUMJhTjVbgXf5T26LUV36NnOloLYwwDGhWWZPY70JsAK5", + "IaqcKiPr8GaO0h3VPC97MAxYFyCZuaJpXjvgrZpwZCtNbYsjOrVvXPHSavEiW99KNqMW/c3qql+JGXnJ", + "UimO87moYuHVRmlYdnpeu09/7+lX4A0J3ZhVwXPGIVkKHuvE/BqfvsSHsa+xWlffx2fmYd+3rfu2CX8L", + "rOY8Q+7kq+L3Mzn9Vwp0aa1WQiGk0W6nG5t/gfS/51Hyh2bD0+5J2vA0cGq5h8FAYXPmxs9HPh2h0ao5", + "+uaHxp+uIp17Uy1KnYmLYBa0AdhwxiHFqFD43jPJo7a5NbMnmbpeq9t1epsCPMTOVvU00oK3ftjfhfdv", + 
"moTtnDMhkbicxhVI1VLkbjOx/1KZ2IP3fS9ubFvO7+JopTqs7PJKZGDHrdNxzdGPNUHhIgPXGb8rslRh", + "kfGUIX9/1e+1kjhSWs4XmpQF0SKWLlJ/mNDUMtnEKkLxCYOyw1ZdwukWdAWE5hJoZpRX4ERMzaLrmxQX", + "SRUWfvY5Jy74Myo0BXAVUqSgFGSJb/qyCzT/ng1V11vwhIAjwNUsRAkyo/LKwJ6vdsJ5DpsElWFF7v78", + "q1GtbxxeKzRuR6wtNxtBbzvtugv1sOm3EVx78pDsbEK3pVpMkRPLIgeXJBdB4V446d2/NkSdXbw6WjCL", + "jF0zxftJrkZAFajXTO9XhbYsEnN/d0F8ap+esSVKYpxy4S2QscFyqnSyiy2bl8K1KLOCgBPGODEO3KOa", + "vqBKv3X50hmWebTXCc5jZWwzRT/A5ha1ukVk5F/tw9jYqbkPuSoVcSP4HCjIYmvgsN4y1ytYV3Nh7RQ/", + "dpVkZW2Bu0buw1IwvkNW0PmGUB34/c1wkcWhpZI6U0YXlQ0gakRsA+TUvxVgN3T49wDCVI1oSzhYyT+k", + "nKkQOVBuc1VFURhuoZOSV9/1oenUvn2sf6nf7RKXrYVh7+1MgAoT4BzkFxazCk25C6qIg4Ms6bnLkZu7", + "TqZdmM1hTLDMUrKN8tG4a94Kj8DOQ1oWc0kzSDLIacTo8ot9TOzjbQPgjnvyTFZCQzLFGinxTa8pWfYa", + "k6qhBY6nYsIjwSckNUfQKM81gbivd4ycAY4dY06Oju5UQ+Fc0S3y4+Gy7Vb3GLDMGGbHHT0gyI6jDwG4", + "Bw/V0JdHBX6c1OaD9hT/BcpNUMkR+0+yAdW3hHr8vRbQNvyFF1jjpmix9xYHjrLNXja2g4/0HdmYqfGL", + "dAu0o5yuMcmuaWoNFMDJZZTbowvKdDIT0grSCZ1pkDtD5/9JmXec+/Rd4aquEBzB3ZtuHGTyYT85x0Us", + "CMRdF4ZEXCUpc4dR8pAsGS+1fSJKPbblryXQdGGE9tAGa0fCjsCuSJOEOZVZjt1iZ9W9KaQt+qRbFzwC", + "HclHbGr8Zt0/CDmoqH6zdCRlmpRcszxoLFTp7Z+f9fLWInFrkbi1SNxaJG4tErcWiVuLxK1F4tYicWuR", + "uLVI3Fok/r4WiU9VJinxEoev2MgFT9rBlLexlH+pqvLVVeUNJGiduKBMuzb5vkpBv91iD0OQBpojDlgO", + "/dHdNuj07PnxC6JEKVMgqYGQcVLk1KgGsNZV0+YpVfDNY59qaK9OurSd3/F+NS989Yic/nTsK44uXGXM", + "5rt3j228GlF6k8M91xYNeGYlUd8fDbhBumuPRv2V4Js7u1bXLMfIeEWe49vPYAW5KEDaYoZEyxK6Fp8z", + "oPlTh5sdBp9/msldqO0fZrQ/xg2jl0PbkhZezPdrpYpQm3FJngU5mH/MaK7gj740TDvekhax/srVxWdN", + "QchMvhfZpnVCzK4d4QY2z0Zdd5RxKjeRKlHdFIg2aWhh2JUjrK4t6+PBq+N2ibZLZrsoLCat2zL48dH7", + "qDxaFrbasM5QNlF31qKTUSzHtF0LdVQBOKgwIKZJ2D0hb+13n7YMIELkjljNzD+bKMbmmxXTwHeNEuFY", + "z5eaS+ARHz29ePbHhrCzMgXCtCK+wO7u62U8WidmpDnwxDGgZCqyTdJgX6PGLZQxRZWC5XT3TRTyTzxx", + "1eVjnmy/pz7NNfIsWNw2nhwSzTpxDLiHO280DObNFbZwRMeeA4xfN4vuY6MhCMTxp5hRqcX79mV69TSb", + "W8Z3y/iC09iSCBh3BcnbTGRyjYxPbmTJ+3ne8zWkpQEuPMl30TqPLjlY64aTNYNpOZ8bbaHro8M2Ojge", + "E/wTsUK73KFccD8KsoNX3fKvmqTeHq7LXYK88bu+MuM93A7KN+jMWBaUb7zLFxLFlmVucWibSh+W0dqa", + "4bES07Xtr8+q/cab/ALbrbtqm79btJALqojdX8hIyTOX8dSpbb3mw+uc2KHP1rxm01trmtj1Rlbn5h1y", + "RfhdbqaaK1KATPSa2wPVOEyug4E9uZ+0lvbttXFz14ZNVIceBtutxl8zhAPdHjLga3h9BD2X6sS8Ricm", + "2kwnbDxDi0Z/ikvYnMm+edDAks7wzfiS2tzi/KeQF4SSNGfoXRVcaVmm+h2n6L8JFjbpxp54Q3U/73vq", + "X4m7ECMePjfUO04xyKjy6kR54AwiLowfADyLVeV8Dsrw0ZCAZgDvuHuLcVJyo4WJGVmyVIrEptaa82Vk", + "l4l9c0k3ZIYVTQT5E6QgU3PrB7tubclKszx3wS5mGiJm7zjVJAeqNHnJDAc2w/lyClXIGegLIc8rLMR7", + "9cyBg2IqiRtmfrRPsR2OW743AKIx0z6u21jcbB8cDzvLeiE/eYYxaliNOWcq7L/Yhv3GfONLxpMokZ0t", + "gLhwsTZtkbtYA84R0L2m40gv4B03t58WBDk+1Zcjh7YHqHMW7eloUU1jI1qOIr/WQerfQbgMiTCZW7fL", + "XyiFNKAD79nEjbf19Vt7v6eLpXHlArYG7buQ7VPXPrHnJadANIxkrQI37o2zBshb/RdfflnJw+uSHo0H", + "0ya7A3bZVbNBHuLNb/iY0Fzwua2raLRLgfvEeFFqDAC/TgMerGieiBVIyTJQA1fKBH++ovnr6rOP4xGs", + "IU20pCkk1qIwFGtn5htLp9hokDPNaJ6gVj0UIDixX53aj3bcx0G30eUSMkY15BtSSEghs4XImCK1Pj+x", + "BRpIuqB8jle3FOV8YV+z41yAhKoxo1Gh20PEC8GseWKL0nVhPHaNmsO6vUDTRaRxDF5wRmf3BJU1elIN", + "3INGydE+JX086hW0DVJXdeicRU6TzQyQIhryQICfeuJD1Gi9Jfpbov/SiT5WUhFRN2tZKyy+wm25ZrPW", + "dRcQvUEr2SepLnxbov+vXqLfcyBFKJG0oYPEe8NRRZgmF1gWaQrE3F8lWuddwz2nr2OmXXDUXaVN5drz", + "pQvKuKupU+U1IBxGJV4umda+Pe21GDYtM0OLpkEHpKVkeoNaCy3Y7+dg/v/eiP0K5MorNKXMR09GC62L", + "J0dHuUhpvhBKH40+jsNnqvXwfQX/B6+LFJKtjH71EcEWks0ZN3fuBZ3PQdYmxNGjyYPRx/8bAAD//1zt", + "z0/tvgEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go 
b/daemon/algod/api/server/v2/generated/participating/private/routes.go index 24f66d0cde..981fddde61 100644 --- a/daemon/algod/api/server/v2/generated/participating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/private/routes.go @@ -398,68 +398,70 @@ var swaggerSpec = []string{ "78/czNLld0uloVOM3fWmTJ/NwxdMo4n/WQirud7dJEffoIT8wHoyiuWD4VhNJFa7kDYaa4jDslTXGTKr", "rKlzkVJtXTvTvYxD0bW2nzvVC4jiurjxgtqOrXnBcqU15HGP9HtPgmqjNGSlwjCvlAd6aZ3cvcFHXpKV", "asVUlasCqF5MmoLG5qql5Cg2QRRVk0QB0Q6+FqY+ER1PnNLdqeRHylDUWh1ROz8HerneZnWiRWfkyxyJ", - "WAbjszh5DFHjIbx7av8fVanlHMMYrwTGunQf7ZP0Wbk7pslkEJ+5izjNELNrrerVOkrozK5FWQaDgdsG", - "XXsFNB7lJ1NjOBK+2HJTPGUbZazX7Ggk0wzVhnjdz5W0WpVl1whEIvHKW7a/59uzPLcvlbpc8PzyAeqR", - "UtlmpcU8vGfuB+O1M+leKq/uhZdR+fDDqXGpHYameSKZzJB6LOXoQuoRmO8Pc6zDNu6z4cL66+oyr7Ta", - "cCYZt2oj8jQN/76i20Zj0lIsIZkjjGoZUlYHbIaMOr4cmmAGZElDNIPkyWJsZ8zzNO/URebh/osSb39c", - "tgR/SYxcTEM+6aWWLB+VrXoAIKT01NjWmgogxpJPw1XUilIToEu6D+hELo6RP7eDzY1w50BZuBVQg2jD", - "BsD7pOzPKZcbRS4u1DZ8f9Ame7sR8B/3U3mHeYyFVF20pKUpqCokhhnhCOmU0nvjj97gM/PF1Cikpljt", - "xBs1AmA8LqkDw6TopGPBWHJRQpGlah2eNzaheaTZ+qdQ/RLkwnhOnvM6lBp0Y9cafKISEql1199UcUdK", - "qmk+tNzKArZA7yh+Aa2ohuA88ndASSUGe8q3qrISrqATruWzp9Qo2okrCH1N05kVABV6//o2qVQcUnyX", - "9wwVfu1ZFMkyBbtJywUhlnaKHTBLJI0oW5nRMTFTj5KD6EoUNe/gzxwrcnTNbu4oJ1A1kMmzoLdNneYn", - "GuF1GOAs9E+JMgET76fxoaNZUBp1+xjQwbjE2oydepkOS4xTAzUODZytaByfROIt3zAVv5bjBsAhybfq", - "zcR9EkpGiP16CzlKNd24u9vjhOFgzPTSfo2K4LrZ4Zsbkn8TGt5LwqPjpVQNA/6h2h5LTaALL7BjAyw6", - "LZ3Y66RmLCfo+b/nf3O2qMNATq+m6oaxBvcCgscOM5E3zgov0IrmQgvxhXOfiLKvlIsosnrDd0xp/Mfp", - "a/+oeSmWOzyhBH7oxsyaOxLyLkLyXft4RTfxfsFkHgALdgEVpqJ1i6ljRsPt3CgR0O4KDGVoFNvwS4i3", - "Ad3yxHly61iOqRcbYQxedr3tHGLBLz4kE9nwItaRMaVht+B3SHLrev/P9tVWPFXIRFaVPA+1LH0xnY5B", - "nOrVBuKya9jsf9Y3VI8DCTQ1cFui1eEdeHED496RkRupWPmxQiEdsAe1QQc1Um61jIk2yl41iD0PIict", - "5a53YWp8yADouKLgIfDjAoufBv/JbKNjy5gC/j8L3kdKqsbwUvXUT4DlTq6IBKxkV12obaZhaQ6FQpBh", - "1SnCus0yEYyTQuYauKHYkPMfvcrWJtMU0qmQFL3YeN+aUQpYCtkySyGr2iY0AMypKXcRwmLzNKJ1xNkz", - "JiU4MeyKlz9egdaiGNs4dzqo+GBczCCY5H3fhPLf3KnDAYRptR98SQjtS7WombvAqVwSBRYay2XBdRE3", - "F5LloN29z675ztzc9+Gg1bWTLw54P3gkzXTft0d+ECRtAqTcefflLT0TDYD8Dl0UE1wLGMGacCuQUcSq", - "EU/CEIZ0Pg6+zUq1wvdlIwTos5ai74eUFSXRYEvy0HHzGPEL7J8GE7b7g28Vzjpliv3n7EdEHSo8P0lh", - "9540sqb1H/xRRCYdhED/ctWGhdPmDOk/9UbTp+WI32kG4S48Ygh7TeEhNB+MeDK6FtyRXUQHuX/gG5tr", - "pxfC6vrgUy9BSYfNULc1ewK/wbRBzjz3gTtDo89AKSakzP072iNtQmRJDvfACHhUtdyfre60TTCFG+eY", - "6mH7X85mlaqyfEo0INV0KLxB20PahXGEPiJz9ci6m8AJ01Q56WTE6ZQ7ObaA2mi5lUN+mSrfp2SPGTRG", - "OGjXWK6WyMuopjfaYfCNR2O8mPdfH3UNNg2TYJxpyGuNBs1rvjtckGokl/DFX84+f/zkr08+/4K5BqwQ", - "KzBtPupeQac2YkzIvp3l08aIDZZn05sQ3qUT4oKnLDy3aTbFnzXitqZNNjkoZ3WMJTRxASSOY6KQ0I32", - "Csdpg77/ubYrtcg737EUCn79PdOqLNP1ABrRLWHqT+1WZOx3En8F2ghjHSPs+uqEbWNlzRrNcZgV9ory", - "jCiZ+7T9DRUIOxKMk1rIWKgl8jN89ev9Gwy2Vel5Ffkk9q3L60VkEcPgDIzfWACrVOVFabFkKYjwbYmO", - "3lx6QyOGd0bRkw2zpTjKFCH6mOQ06cWllPdz+26ZT5vm9G4TE+JFOJQ3IM0xS/r4i/abcJLWlP5Pwz8S", - "T/TvjGs0y/01eEVSP7hZufZJoA2fayfIAwEYeYfZeUEXPSGKUtRqssqj/T64Ovvix/etC/TggwGEJHQ4", - "AF78sLJt18S4e3B+41yv3zdIiZbyfowSOss/9FYzsN7mIom2yBsprAVDbEkNxcLoIa553rxvHdFKBs9g", - "tVKWOc20LBPPZ8lugmcqJhynEugrXn56rvGN0MaeIT6geD3+aCZ+QxkjmVBpbpbB7SWfNHf0XvLuppav", - "8Mnuf4Lbo+Q954fy7uLBbYZWL6xlvgq3Ar0CZtc4JoUDPf6CLXwZhkpDLkzfDX0dhJPmySBosfShl7C1", - "B94oHlrnz8regoyXIWaE/RC5kxSa7VoI2yP6GzOVkZObpPIU9Q3IIoG/FI+Ky7YeuC5umbL/ZglBotRe", - "RyYEGRaknbo8SnrhLp3awHCdk2/rDm4TF3W7tqnZbCZn/n/37q1dTElCk87S77pjFpw7Sdd/VLL+XyH/", - 
"DeHIj+HnTVHMz2MZUSnr50jW5t5+1KI8GCDSycH9cT5bgQQjDGaZ/quvKvJp79IAAb3JHx5VgvU2iUQI", - "MYm1diaPpoqya09IrO27JbIh43u3vNbC7rCibDCgib8mM/V822R98FlDGt+Vv/usuoSmqnebI6I24Xb9", - "VvES7yNyqUl3C6nyhH1NuZ/9QfnzvcW/wWd/elo8+uzxvy3+9OjzRzk8/fzLR4/4l0/54y8/ewxP/vT5", - "00fwePnFl4snxZOnTxZPnzz94vMv88+ePl48/eLLf7vn+JADmQANSd+fzf53dlauVHb26jx744BtccIr", - "8R24vUFdeamw4qFDao4nETZclLNn4af/FU7YSa427fDh15mv3DNbW1uZZ6en19fXJ3GX0xU+Cs+sqvP1", - "aZgH69B15JVX5000OcW94I621mPcVE8KZ/jt9dcXb9jZq/OTlmBmz2aPTh6dPPZFjyWvxOzZ7DP8CU/P", - "Gvf9FDMvnhqfVP20qnxa9aSb7LWvxdOluNAZgW3ycrvdpnTdPjm6iSsenxdIW3aY0h0rc2EYFAL45NGj", - "sCte5omunlN8sfDsw2xa/fPhZLjz/WwLi3r1ysEcknw0ad+8c8LjDP2FhLBmv0gF5iuDlnUtrriF2fuP", - "81lVJ9D5NT5HMPtwNo9ShRM0qiwajA8w+qr+/wSjH+ezU88nZ88+uL/WwEtM/+P+2DhCzcMnDbzY+f+b", - "a75agT7x63Q/XT05DRLx6Qef1+Hjvm+ncTTQ6YdO+oviQM8Q7XKoyemHUNh3/4Cdoq4+zjDqMBHQfc1O", - "F1jMZ2pTiFc3vhSkeXP6AZXB0d9PvUUv/RGVcuL2pyGNzEhLShiQ/thB4Qe7dQvZP5xrE42Xc5uv6+r0", - "A/4HyTZaEeUfPbVbeYpBA6cfOojwnweI6P7edo9bXG1UAQE4tVxSNeR9n08/0L/RRLCtQAunEWHOH/8r", - "5WY7xaJ4u+HPO5knfxyuo5OXat/NUmu0TwoTImm66ayS10c/R5a5LbOblnGjn5lrKOwNb/V9K/s4nz29", - "Q67czWeaAOYrXrDw/Brnfvzp5j6XFNHrxBwSxxCCp58Ogs72se9gx35Qln2DdoOP89nnn3InzqXTYnjJ", - "sGVUlHh4RH6Sl1Jdy9DSyfH1ZsP1bvLx6V+jTg5smskVCSqKnuR3j9pZUQyInvQZMPYrhbfrGMY2ZlV5", - "j2GLtFadE9ItYWgPGqDqDdXm7iW5o1RMQZCQqoBZrGhZXcPHW/KEXqwR1/Y8Yd5EOz0G+S9DGfEI1GTG", - "tn4kBo08VMUPkXBb6b6Njf+Dp/zBUxqe8vmjzz7d9Begr0QO7A1sKqW5FuWO/SSbRxc35nFnRZFMc9k9", - "+gd53Hy2zXJVwApk5hlYtlDFztcNmXUmuASy3AwEmdNg6ehoDCPcM9hQUtJKGwo8e/Y25aL3paerelGK", - "nJGVF80cToePrBBN3sEu85tH2zpgP4nc1qwQZd28gbfXyr8xHV4o7H6cGcL8Q+PFgwdR2B27FrJQ11iS", - "H8H9Rw3I5z28YZpZAsAo3nRYxqV1XjkAB2CNzYderynY2TP5S36zuUt+7NTvb3llHbxMm7xe/3Hx4w/R", - "SzSyNFBwCr6DItLFoHWtMBj7mmN0IpV7e042oHLHpEL/Vm06laZO/riH/uD9t+f93zaJXqnGlMXiMUOW", - "FN0FJ5ME3iRv/9D509stZhQanErP6n5nnK2wPuDwglrs2PmLgfZK3fpXwlc7bNq7FRL8vg/iUYx/hL3s", - "E2ncQlbKNgHStKg/hMw/hMxbKa6TD88U3TVpWaKqnXygj81DAc7OIxRMsYzhRgNQptifftPjeycbP7Rt", - "pWxZlAoaChZ9oMwEfTT/wSL+YBG3YxHfQuIw4qn1TCNBdMfZuqYyDExAU3TC/YLUEZrXJdfRY9BDJuwz", - "HDGtCv4qXONTG+ySuCJ7HcawCwreTGzg3drw/mB5f7C83w/LOzvMaLqCya2tXpew2/CqsXWZdW0LdR15", - "yBEWCrwe+vhI8e//fXrNhc2WSvvCInxpQQ87W+Dlqa8i3Pu1Ldw3+ILVCKMf4xReyV9Peddp2XWcO9Y7", - "1nHgVU999Y7jkUbh/Xz43MaPxfFYyPabSKy37x3LNqCvwo3Qhhc9Oz3FhCprZezp7OP8Qy/0KP74viGP", - "D8094snkI9KF0mIlJC8zHxvRlkKfPTl5NPv4/wIAAP//Vd+WO7oZAQA=", + "WAbjszh5DFHjIbx7av+nefNSbJFuQKeO/JJZXcOc+Rb9Gtn+4HMNbCOMIVAaWroWZYkPx8U28rw2gQtp", + "1I6IvecYVnklMPamm0SApOHK3XlNZoWYB1zEaY+YXWtVr9ZRgukGzqDy6torxPEoP5kaw6PwBZmb4inb", + "KGO9pkkjtUtuQ87u50parcqya5QiEX3lLe3f8+1ZntuXSl0ueH75APVaqWyz0mIe3lf3gwPbmXQvtVj3", + "As6onPnhVL3UDkPlPNFOZpA9Fnd0YfcIzPeHOehhm/vZcGH9dXWZaVqNOZOMW7URefpM/b6i7UZj5FIs", + "KpmzjGorUpYJbIaHPb6smuAKZJFDNIPkyeJwZ8wzAu9kRnbj/osSeH9ctgTPaEYuyiFz8VJUlo/Kej0A", + "EFJ6+mxrTQUZY0ms4SpqRakS0EXeB3TirYKRSLeDzY1w50BZuBVQg+jHBsD7ZHyYU245iqRcqG34/qBN", + "Pncj4D/up/IO8xgL8bpoSUtTkFdIVDPCEdIprvfGQ73BZ++LqVFRTfHciTd8BMB4nFQHhknRUseCseSi", + "hCJL1V48b2xU80jT9k+z+iXRhfGcPOd1KH3oxq41+MQpJOLrrv+r4o6UVNN8aEmWBWyB3nX8AlpRTcN5", + "5H+Bkkoe9owBqspKuIJO+JjP5lKjqCmuIPQ1TWdWAFTojezbyFJxUfFd3jOc+LVnUWTNFOwmLSmEWNop", + "dsBMkjTqbGVGx8RMPUoOoitR1LyDP3OsyNE1A7qjnEDVQEfIgh45dZqfaITXYYCz0D8lygRMvJ/Gh45m", + "QWnU7WNAB+MkazN26mU6TDJOVdQ4WHC2onHEEom3fMNU/FqOGySHJN+qWxP3SSgZIfbrLeQo1Xh9Bwqv", + "8Yw4KXzWE6R2CVCQVuC6JKzta5BMqqjE5DU3jarS5lAMP9DE2EhIr03fwKncRjPefmcZDsZML5naqCKh", + "Gzq9uXn+NzmJew/i6HgpGjHgn//tsX8F6vZqBzbAUt7S7aeT/bFIo7/FPBefs0UdBipLdU01I2M99AUE", + 
"PyhRX3ABebFcNNdyiNqc+/SefVOHiOLVN3zHlMZ/nNb5j5qXYrlDPkPgh27MrLkjIe94pYgAHwXqJt4v", + "Xs0DYMHaosJUtG4xdcxouJ0bJQLaXeShuI9iG34J8TZgsAPxz9w6xmnqBVou3JXd284hFvziQ4qWDS9i", + "TR8TRXbLqIfUwa73/2zfwsVThfxuVcnzUCHUlyjq8hmsAhyIy65hs/+x5JCvBRJoKgu3RKvD6/riBibT", + "I1lX6gXCWPmVDtiDiquDyjO3WsZEy2+vxsaeZ6aTlnLXuzA16mYAdFyn8RD4cdnKT4P/ZA7XsWVMAf+f", + "Be8jhWpjeKkm7SfAcicDRwJWslYv1DbTsDSHAkzIXO3Ued3m7ggmViFzDdxQxM35j17xbFOUCukUYYoJ", + "bXyazSgFLIVsmaWQVW0TegxmKpW7CGGx0R/ROuJCG5MSnDB5xcsfr0BrUYxtnDsdVNIxLhERHB2+b8KE", + "0dypwwGEaXU4fJ/ZmtHjZu4CpyJUFK5pLJcF10XcXEiWg3b3PrvmO3Nzj1LjHDjkU+KRNNPNGhB5l5C0", + "CZBy553Ct/T3NADyO3T8THDYYFxwwllDph2rRvwzQxh+Fw6bDd9mpVrhK8KRA+Fz06KHj1RAJdEMTvLZ", + "tHWHeYz4BfZPg2n5PSOyCmedMsX+c/8jbiWqkT9JYfeefLJR9p91UtwtHcyAVLlqg/+JWIbnMfUS1ydf", + "iV/jBmEzPFUJtAfRJsKIf6hrFx/ZRQyD8M+4YyP49HJn3UiL1HtfsgxkaDEwe8L7wbSh7Dz34VlDU9rA", + "1EBImfvX0kda2sg+H+6lEfCoNr0/691pm5AZN84xNeL2v4/OKlVl+ZSYT6rcUXg3gYe0C+MIfUROgJF1", + "N+Expqll08l71Clqc2yZvNGiOoe8XVW+T+kfMxONcPSuC0ItkZdR5Xa0buFLnsaYMu+/MeuawRomwTjT", + "kNcazcTXfHe47NhIxuiLv5x9/vjJX598/gVzDVghVmDarOO9sl1tXKCQfbvPp40EHCzPpjchZB8gxAX/", + "Y3hU1WyKP2vEbU2bUnRQtOwY+3LiAkgcx0S5qBvtFY7Thvb/c21XapF3vmMpFPz6e6ZVWaarPjRyVcKB", + "ktqtyIXiNJAKtBHGOkbY9YAK20ZEmzWaBzH37xVlk1Eyh2A/9lQg7EjIVWohYwG1yM/wbbf3GjHYVqXn", + "VeTp2bcur6eRhQ6FRoyKWQCrVOVFe7FkKYjwBZGOXtZ6wydaxKMY2YbZUrRsihB95Hma9OKC2fu5fbeY", + "q01zereJCfEiHMobkOaYf2I8b8FNOElr2v+n4R+JRAx3xjWa5f4avCKpH9ysKP8k0IaP8hPkgQCMvLbt", + "vJOMHopFiYg1eQnQnxAcyH3x4/vWsXzwWQhCEjocAC9+Ptu2a14yeHB+44y+3zdIiZbyfowSOss/9CI3", + "sN7mIom2yBtNrAVDbEkNxcLoubV53rxiHtFKBo+dtVKWOc20LBOPpMmOg2cqJhynEugrXn56rvGN0Mae", + "IT6geD3+NCp+KRsjmVBpbpan7yWfNHf0Kvbuppav8GH2f4Lbo+Q954fyTvjBbYbGHaxYvwq3Ar31Ztc4", + "JgVZPf6CLXyxjUpDLkzfuX8dhJPmYShosfQBrbC1B16iHlrnz8regoyXIRKH/RC5txqfvYewPaK/MVMZ", + "OblJKk9R34AsEvhL8ai4OO+B6+KWhRlulvYlSuB2ZNqXYdnhqcuj1Cbu0qkNDNc5+bbu4DZxUbdrm5qz", + "aHJ9h3fv3trFlFRD6VoMrjvmOrqTogxHlWT4FbIcEY78GH7eFMX8PJb3lnK7juTm7u1HLcqDASudTOsf", + "57MVSDDCYC7xv/raMZ/2Lg0QUOaF4VElWG+TLoYQk1hrZ/JoqiiH+oT06b5bIuc1vmrMay3sDusGBwOa", + "+GsyH9O3TW4Pnxum8aX5u8+qS2hqt7eZQGoTbtdvFS/xPiIXn3S3kCpP2NeU4dsflD/fW/wbfPanp8Wj", + "zx7/2+JPjz5/lMPTz7989Ih/+ZQ//vKzx/DkT58/fQSPl198uXhSPHn6ZPH0ydMvPv8y/+zp48XTL778", + "t3uODzmQCdCQ2v/Z7H9nZ+VKZWevzrM3DtgWJ7wS34HbG9SVlwrrWjqk5ngSYcNFOXsWfvpf4YSd5GrT", + "Dh9+nfn6TLO1tZV5dnp6fX19Enc5XeHT/8yqOl+fhnmw2mBHXnl13sToUxwO7mhrPcZN9aRwht9ef33x", + "hp29Oj9pCWb2bPbo5NHJY1/aWvJKzJ7NPsOf8PSscd9PMb/mqfGp80+ryifPT7rtXvuKS12KC50R2Cb7", + "utttSsruU+CbuK71eYG0ZYeJ+7H+GoZlIYBPHj0Ku+JlnujqOcV3IM8+zKZVuR9Ohjvfz6mxqFevHMwh", + "lUuT3M87JzzO0H9JCGv2i1RgvjJoWdfiiluYvf84n1V1Ap1f4yMPsw9n8yghPEGjyqLB+ACjr+r/TzD6", + "cT479Xxy9uyD+2sNvMQkT+6PjSPUPHzSwIud/7+55qsV6BO/TvfT1ZPTIBGffvDZOz7u+3YaRyedfugk", + "OSkO9AzRN4eanH4I5Zv3D9gp3evjHqMOEwHd1+x0gSWbpjaFeHXjS0GaN6cfUBkc/f3UW/TSH1EpJ25/", + "GpIFjbSktBDpjx0UfrBbt5D9w7k20Xg5t/m6rk4/4H+QbKMVUZbZU7uVpxjEcPqhgwj/eYCI7u9t97jF", + "1UYVEIBTyyXVvN73+fQD/RtNBNsKtHAaEWZ28r9SBr5TLH24G/68k3nyx+E6OtnH9t0stUb7pDAhsqeb", + "tCx5ffQzoZnbMrtpeVX6+deGwt7wVt+3so/z2dM75MrdrLUJYL7iBQuP7HHux59u7nNJEcZOzCFxDCF4", + "+ukg6Gwf+w527Adl2TdoN/g4n33+KXfiXDothpcMW0alp4dH5Cd5KdW1DC2dHF9vNlzvJh+f/jXq5MCm", + "mVyRoKIo8UL3qJ0VxYDoSZ8BY79SeLuOYWxjVpX3GLZIa9U5Id0ShvagAareUAX2XipDSrgVBAmpCpjF", + "ipbVNXy8JU/oxRpxbc8T5k200+Ojg2UoFh+BmszL14/EoJGHqvghEj5/ESZtY/X/4Cl/8JSGp3z+6LNP", + "N/0F6CuRA3sDm0pprkW5Yz/J5hHIjXncWVEkk5l2j/5BHjefbbNcFbACmXkGli1UsfPVYWadCS6BLDcD", + "QeY0WDo6GsMI9ww2lJS00oYmz569Tbno/VO7ql6UImdk5UUzh9PhIytEk12yy/zm0bYO2E8igzkrRFk3", + 
"mQXstfIvd4cXCrsf59sw/9B48eBBFHbHroUs1PWDkwDuP2pAPu/hDdPMEgBG8abDYj2t88oBOABrbD70", + "ek3Bzp7JX/KbzV3yY6d+f8sr6+Bl2mRv+4+LH3+IXsaRpYGCU/BdFpEuBtFrhcHh1xyjE6mo33OyAZU7", + "fOFpua1Np57YyR/30B+8//a8/9smnS9VErNYImjIkqK74GSSwJvk7R86f3q7xYxCg1NJeN3vjLMVVoEc", + "XlCLHTt/MdBeqVv/Svhqh017t0KC3/dBPIrxj7CXfSKNW8hK2SZAmhb1h5D5h5B5K8V18uGZorsmLUtU", + "m5UP9LF5KLPaeYSCibQx3GgAyhT70296fO9k44e2rZQtixJ+Q8GiD5QpoY/mP1jEHyzidiziW0gcRjy1", + "nmkkiO44W9dUhoFpfYpOuF+QOkLzuuQ6epx6yIR9hiOmVcFfhWt8aoNdEldkr8MYdkHBm4kNvFsb3h8s", + "7w+W9/theWeHGU1XMLm11esSdhteNbYus65toa4jDznCQoHXQx8fKf79v0+vubDZUmlfPoYvLehhZwu8", + "PPW1onu/tuUZB1+w5mT0Y5wYLfnrKe86LbuOc8d6xzoOvOqpr95xPNIovOcPn9v4sTgeC9l+E4n19r1j", + "2Qb0VbgR2vCiZ6enmOBlrYw9nX2cf+iFHsUf3zfk8aG5RzyZfES6UFqshORl5mMj2oL3sycnj2Yf/18A", + "AAD//wm+5emgGwEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go index 4548badf2c..0ada10df52 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go @@ -178,16 +178,16 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL var swaggerSpec = []string{ "H4sIAAAAAAAC/+x9/XPctpLgv4Kb3Srb2qEkfyT74qvUnmInedrYsctSsvvW8iUYsmcGTyTAB4DzEZ//", - "9ys0ABIkwRmOJNvJbn6yNSSBRqPR6O9+P0lFUQoOXKvJ0/eTkkpagAaJf9E0FRXXCcvMXxmoVLJSM8En", + "9ys0ABIkwRmOpNjxbn6yNSSBRqPR6O9+P0lFUQoOXKvJ0/eTkkpagAaJf9E0FRXXCcvMXxmoVLJSM8En", "T/0zorRkfDGZTpj5taR6OZlOOC2gecd8P51I+EfFJGSTp1pWMJ2odAkFNQPrbWnerkfaJAuRuCHO7BDn", "zycfdjygWSZBqT6Ur3i+JYyneZUB0ZJyRVPzSJE100uil0wR9zFhnAgORMyJXrZeJnMGeaaO/SL/UYHc", "Bqt0kw8v6UMDYiJFDn04n4lixjh4qKAGqt4QogXJYI4vLakmZgYDq39RC6KAynRJ5kLuAdUCEcILvCom", "T99OFPAMJO5WCmyF/51LgN8g0VQuQE/eTWOLm2uQiWZFZGnnDvsSVJVrRfBdXOOCrYAT89UxeVkpTWZA", "KCdvvntGHj9+/JVZSEG1hswR2eCqmtnDNdnPJ08nGdXgH/dpjeYLISnPkvr9N989w/kv3ALHvkWVgvhh", - "OTNPyPnzoQX4DyMkxLiGBe5Di/rNF5FD0fw8g7mQMHJP7Mt3uinh/J91V1Kq02UpGNeRfSH4lNjHUR4W", + "OTNPyPnzoQX4DyMkxLiGBe5Di/rNF5FD0fw8g7mQMHJP7Mt3uinh/J90V1Kq02UpGNeRfSH4lNjHUR4W", "fL6Lh9UAtN4vDaakGfTtafLVu/cPpw9PP/zT27Pkv9yfXzz+MHL5z+px92Ag+mJaSQk83SYLCRRPy5Ly", "Pj7eOHpQS1HlGVnSFW4+LZDVu2+J+dayzhXNK0MnLJXiLF8IRagjowzmtMo18ROTiueGTZnRHLUTpkgp", - "xYplkE0N910vWbokKVV2CHyPrFmeGxqsFGRDtBZf3Y7D9CFEiYHrRvjABf1+kdGsaw8mYIPcIElzoSDR", + "xYplkE0N910vWbokKVV2CHyPrFmeGxqsFGRDtBZf3Y7D9CFEiYHrRvjABf1xkdGsaw8mYIPcIElzoSDR", "Ys/15G8cyjMSXijNXaUOu6zI5RIITm4e2MsWcccNTef5lmjc14xQRSjxV9OUsDnZioqscXNydo3fu9UY", "rBXEIA03p3WPmsM7hL4eMiLImwmRA+WIPH/u+ijjc7aoJCiyXoJeujtPgioFV0DE7O+QarPt/37x6kci", "JHkJStEFvKbpNQGeigyyY3I+J1zogDQcLSEOzZdD63BwxS75vythaKJQi5Km1/EbPWcFi6zqJd2woioI", @@ -202,212 +202,213 @@ var swaggerSpec = []string{ "k2DyF+arC/zIiKxWDEpoWR4wxmsj+qgdzMIwaHyEbMKyPRSaGLebaEiJGRacw4pyfdyoLC1+UB/gt26m", "Bt9W2rH47qhggwgn9sUZKCsB2xfvKRKgniBaCaIVBdJFLmb1D/fPyrLBID4/K0uLD5QegaFgBhumtHqA", "y6fNSQrnOX9+TL4Px0ZRXPB8ay4HK2qYu2Hubi13i9W2JbeGZsR7iuB2CnlstsajwYj5d0FxqFYsRW6k", - "nr20Yl7+q3s3JDPz+6iP/xgkFuJ2mLhQ0XKYszoO/hIoN/c7lNMnHGfuOSZn3W9vRjZmlB0Eo84bLN41", - "8eAvTEOh9lJCAFFATW57qJR0O3FCYoLCXp9MflJgKaSkC8YR2qlRnzgp6LXdD4F4N4QAqtaLLC1ZCbI2", - "oTqZ06H+uGdn+QNQa2xjvSRqJNWcKY16Nb5MlpCj4Ey5J+iQVG5EGSM2fMciapjXkpaWlt0TK3Yxjvq8", - "fcnCesuLd+SdGIU5YPfBRiNUN2bLe1lnFBLkGh0YvslFev1XqpZ3cMJnfqw+7eM0ZAk0A0mWVC0jB6dD", - "281oY+jbvIg0S2bBVMf1El+IhbqDJebiENZVls9onpup+yyrs1oceNRBznNiXiZQMDSYO8XRWtit/kW+", - 
"penSiAUkpXk+bUxFokxyWEFulHbGOcgp0Uuqm8OPI3u9Bs+RAsPsNJBgNc7MhCY2WdsiJJCC4g1UGG2m", - "zNvf1BxU0QI6UhDeiKJCK0KgaJw/96uDFXDkSfXQCH69RrTWhIMfm7ndI5yZC7s4awHU3n1X46/mFy2g", - "zdvNfcqbKYTMrM1am9+YJKmQdgh7w7vJzX+AyuZjS533SwmJG0LSFUhFc7O6zqIe1OR7V6dzz8nMqKbB", - "yXRUGFfALOfA71C8Axmx0rzC/9CcmMdGijGU1FAPQ2FEBO7UzF7MBlV2JvMC2lsFKawpk5Q0vT4IymfN", - "5HE2M+rkfWutp24L3SLqHbrcsEzd1TbhYEN71T4h1nbl2VFPFtnJdIK5xiDgUpTEso8OCJZT4GgWIWJz", - "59faN2ITg+kbseldaWIDd7ITZpzRzP4bsXnuIBNyP+Zx7DFINwvktACFtxsPGaeZpfHLnc2EvJk00blg", - "OGm8jYSaUQNhatpBEr5alYk7mxGPhX2hM1AT4LFbCOgOH8NYCwsXmn4ELCgz6l1goT3QXWNBFCXL4Q5I", - "fxkV4mZUweNH5OKvZ188fPTLoy++NCRZSrGQtCCzrQZF7juzHFF6m8ODqHaE0kV89C+feB9Ve9zYOEpU", - "MoWClv2hrO/Lar/2NWLe62OtjWZcdQ3gKI4I5mqzaCfWrWtAew6zanEBWhtN97UU8zvnhr0ZYtDhS69L", - "aQQL1fYTOmnpJDOvnMBGS3pS4pvAMxtnYNbBlNEBi9mdENXQxmfNLBlxGM1g76E4dJuaabbhVsmtrO7C", - "vAFSChm9gksptEhFnhg5j4mIgeK1e4O4N/x2ld3fLbRkTRUxc6P3suLZgB1Cb/j4+8sOfbnhDW523mB2", - "vZHVuXnH7Esb+Y0WUoJM9IYTpM6WeWQuRUEoyfBDlDW+B23lL1bAhaZF+Wo+vxtrp8CBInYcVoAyMxH7", - "hpF+FKSC22C+PSYbN+oY9HQR471MehgAh5GLLU/RVXYXx3bYmlUwjn57teVpYNoyMOaQLVpkeXsT1hA6", - "7FT3VAQcg44X+Bht9c8h1/Q7IS8b8fV7Karyztlzd86xy6FuMc4bkJlvvRmY8UXeDiBdGNiPY2v8LAt6", - "VhsR7BoQeqTIF2yx1IG++FqKj3AnRmeJAYoPrLEoN9/0TUY/iswwE12pOxAlm8EaDmfoNuRrdCYqTSjh", + "nr20Yl7+q3s3JDPz+6iPPw8SC3E7TFyoaDnMWR0HfwmUm/sdyukTjjP3HJOz7rc3Ixszyg6CUecNFu+a", + "ePAXpqFQeykhgCigJrc9VEq6nTghMUFhr08mPymwFFLSBeMI7dSoT5wU9Nruh0C8G0IAVetFlpasBFmb", + "UJ3M6VB/3LOzfAbUGttYL4kaSTVnSqNejS+TJeQoOFPuCToklRtRxogN37GIGua1pKWlZffEil2Moz5v", + "X7Kw3vLiHXknRmEO2H2w0QjVjdnyXtYZhQS5RgeGb3KRXv+VquUdnPCZH6tP+zgNWQLNQJIlVcvIwenQ", + "djPaGPo2LyLNklkw1XG9xBdioe5gibk4hHWV5TOa52bqPsvqrBYHHnWQ85yYlwkUDA3mTnG0Fnarf5Fv", + "abo0YgFJaZ5PG1ORKJMcVpAbpZ1xDnJK9JLq5vDjyF6vwXOkwDA7DSRYjTMzoYlN1rYICaSgeAMVRpsp", + "8/Y3NQdVtICOFIQ3oqjQihAoGufP/epgBRx5Uj00gl+vEa014eDHZm73CGfmwi7OWgC1d9/V+Kv5RQto", + "83Zzn/JmCiEza7PW5jcmSSqkHcLe8G5y8x+gsvnYUuf9UkLihpB0BVLR3Kyus6gHNfne1encczIzqmlw", + "Mh0VxhUwyznwOxTvQEasNK/wPzQn5rGRYgwlNdTDUBgRgTs1sxezQZWdybyA9lZBCmvKJCVNrw+C8lkz", + "eZzNjDp531rrqdtCt4h6hy43LFN3tU042NBetU+ItV15dtSTRXYynWCuMQi4FCWx7KMDguUUOJpFiNjc", + "+bX2jdjEYPpGbHpXmtjAneyEGWc0s/9GbJ47yITcj3kcewzSzQI5LUDh7cZDxmlmafxyZzMhbyZNdC4Y", + "ThpvI6Fm1ECYmnaQhK9WZeLOZsRjYV/oDNQEeOwWArrDxzDWwsKFpr8DFpQZ9S6w0B7orrEgipLlcAek", + "v4wKcTOq4PEjcvHXsy8ePvrl0RdfGpIspVhIWpDZVoMi951Zjii9zeFBVDtC6SI++pdPvI+qPW5sHCUq", + "mUJBy/5Q1vdltV/7GjHv9bHWRjOuugZwFEcEc7VZtBPr1jWgPYdZtbgArY2m+1qK+Z1zw94MMejwpdel", + "NIKFavsJnbR0kplXTmCjJT0p8U3gmY0zMOtgyuiAxexOiGpo47Nmlow4jGaw91Acuk3NNNtwq+RWVndh", + "3gAphYxewaUUWqQiT4ycx0TEQPHavUHcG367yu7vFlqypoqYudF7WfFswA6hN3z8/WWHvtzwBjc7bzC7", + "3sjq3Lxj9qWN/EYLKUEmesMJUmfLPDKXoiCUZPghyhrfg7byFyvgQtOifDWf3421U+BAETsOK0CZmYh9", + "w0g/ClLBbTDfHpONG3UMerqI8V4mPQyAw8jFlqfoKruLYztszSoYR7+92vI0MG0ZGHPIFi2yvL0Jawgd", + "dqp7KgKOQccLfIy2+ueQa/qdkJeN+Pq9FFV55+y5O+fY5VC3GOcNyMy33gzM+CJvB5AuDOzHsTV+kgU9", + "q40Idg0IPVLkC7ZY6kBffC3F73AnRmeJAYoPrLEoN9/0TUY/iswwE12pOxAlm8EaDmfoNuRrdCYqTSjh", "IgPc/ErFhcyBkEOMdcIQLR3KrWifYIrMwFBXSiuz2qokGIDUuy+aDxOa2hOaIGrUQPhFHTdj37LT2XC2", "XALNtmQGwImYuRgHF32Bi6QYPaW9mOZE3Ai/aMFVSpGCUpAlzhS9FzT/nr069A48IeAIcD0LUYLMqbw1", - "sNervXBewzbBWD9F7v/ws3rwGeDVQtN8D2LxnRh6u/a0PtTjpt9FcN3JQ7KzljpLtUa8NQwiBw1DKDwI", - "J4P714Wot4u3R8sKJIaUfFSK95PcjoBqUD8yvd8W2qociGB3arqR8MyGccqFF6xig+VU6WQfWzYvtWwJ", - "ZgUBJ4xxYhx4QPB6QZW2YVCMZ2jTtNcJzmOFMDPFMMCDaogZ+WevgfTHTs09yFWlanVEVWUppIYstgb0", - "yA7O9SNs6rnEPBi71nm0IJWCfSMPYSkY3yHLacD4B9W1/9V5dPuLQ5+6uee3UVS2gGgQsQuQC/9WgN0w", - 
"incAEKYaRFvCYapDOXXo8HSitChLwy10UvH6uyE0Xdi3z/RPzbt94rJODntvZwIUOlDc+w7ytcWsjd9e", - "UkUcHN7FjuYcG6/Vh9kcxkQxnkKyi/JRxTNvhUdg7yGtyoWkGSQZ5HQbCQ6wj4l9vGsA3PFG3RUaEhuI", - "G9/0hpJ93OOOoQWOp2LCI8EnJDVH0KgCDYG4r/eMnAGOHWNOjo7u1UPhXNEt8uPhsu1WR0bE23AltNlx", - "Rw8IsuPoYwAewEM99M1RgR8nje7ZneJvoNwEtRxx+CRbUENLaMY/aAEDtmCX4xSclw5773DgKNscZGN7", - "+MjQkR0wTL+mUrOUlajr/ADbO1f9uhNEHeckA01ZDhkJHlg1sAy/JzaEtDvmzVTBUba3Pvg941tkOT5M", - "pw38NWxR535tcxMCU8dd6LKRUc39RDlBQH3EsxHBw1dgQ1Odb42gppewJWuQQFQ1syEMfX+KFmUSDhD1", - "z+yY0Xlno77Rne7iCxwqWF4s1szqBLvhu+woBi10OF2gFCIfYSHrISMKwajYEVIKs+vMpT/5BBhPSS0g", - "HdNG13x9/d9TLTTjCsjfREVSylHlqjTUMo2QKCigAGlmMCJYPacLTmwwBDkUYDVJfHJ01F340ZHbc6bI", - "HNY+Z9C82EXH0RHacV4LpVuH6w7soea4nUeuD3RcmYvPaSFdnrI/4smNPGYnX3cGr71d5kwp5QjXLP/W", - "DKBzMjdj1h7SyLhoLxx3lC+nHR/UWzfu+wUrqpzqu/BawYrmiViBlCyDvZzcTcwE/3ZF81f1Z5gPCamh", - "0RSSFLP4Ro4Fl+Ybm/hnxmGcmQNsg/7HAgTn9qsL+9EeFbOJVGVFARmjGvItKSWkYPPdjOSo6qUeExsJ", - "ny4pX6DCIEW1cMGtdhxk+JWyphlZ8d4QUaFKb3iCRu7YBeDC1HzKoxGngBqVrmshtwrMmtbzuSzXMTdz", - "sAddj0HUSTadDGq8BqmrRuO1yGnnbY64DFryXoCfZuKRrhREnZF9+vgKt8UcJrO5H8dk3wwdg7I/cRDx", - "2zwcCvo16na+vQOhxw5EJJQSFF5RoZlK2adiHuZo+1DBrdJQ9C359tNfBo7fm0F9UfCccUgKwWEbLUvC", - "OLzEh9HjhNfkwMcosAx929VBWvB3wGrPM4Yab4tf3O3uCe16rNR3Qt6VS9QOOFq8H+GB3Otud1Pe1E9K", - "8zziWnQZnF0GoKZ1sC6ThColUoYy23mmpi4q2HojXbpnG/2v67yUOzh73XE7PrSwOADaiCEvCSVpztCC", - "LLjSskr1FadoowqWGgni8sr4sNXymX8lbiaNWDHdUFecYgBfbbmKBmzMIWKm+Q7AGy9VtViA0h1dZw5w", - "xd1bjJOKM41zFea4JPa8lCAxkurYvlnQLZkbmtCC/AZSkFml29I/JigrzfLcOfTMNETMrzjVJAeqNHnJ", - "+OUGh/NOf39kOei1kNc1FuK3+wI4KKaSeLDZ9/YpxvW75S9djD+Gu9vHPui0qZgwMctsFUn5v/f/7enb", - "s+S/aPLbafLVv5y8e//kw4Oj3o+PPnz99f9r//T4w9cP/u2fYzvlYY+lzzrIz587zfj8Oao/Qah+F/ZP", - "Zv8vGE+iRBZGc3Roi9zHUhGOgB60jWN6CVdcb7ghpBXNWWZ4y03IoXvD9M6iPR0dqmltRMcY5td6oFJx", - "Cy5DIkymwxpvLEX14zPjierolHS553he5hW3W+mlb5uH6ePLxHxaFyOwdcqeEsxUX1If5On+fPTFl5Np", - "k2FeP59MJ+7puwgls2wTqyOQwSamK4ZJEvcUKelWgY5zD4Q9GkpnYzvCYQsoZiDVkpWfnlMozWZxDudT", - "lpzNacPPuQ3wN+cHXZxb5zkR808Pt5YAGZR6Gatf1BLU8K1mNwE6YSelFCvgU8KO4bhr88mMvuiC+nKg", - "cx+YKoUYow3V58ASmqeKAOvhQkYZVmL000lvcJe/unN1yA0cg6s7Zyyi9973316SE8cw1T1b0sIOHRQh", - "iKjSLnmyFZBkuFmYU3bFr/hzmKP1QfCnVzyjmp7MqGKpOqkUyG9oTnkKxwtBnvp8zOdU0yvek7QGCysG", - "SdOkrGY5S8l1qJA05GmLZfVHuLp6S/OFuLp614vN6KsPbqoof7ETJEYQFpVOXKmfRMKaypjvS9WlXnBk", - "W8tr16xWyBaVNZD6UkJu/DjPo2WpuiUf+ssvy9wsPyBD5QoamC0jSos6H80IKC6l1+zvj8JdDJKuvV2l", - "UqDIrwUt3zKu35Hkqjo9fYyZfU0NhF/dlW9oclvCaOvKYEmKrlEFF27VSoxVT0q6iLnYrq7eaqAl7j7K", - "ywXaOPKc4GetrEOfYIBDNQuoU5wHN8DCcXByMC7uwn7lyzrGl4CPcAvbCdi32q8gf/7G27UnB59WepmY", - "sx1dlTIk7nemrva2MEKWj8ZQbIHaqiuMNwOSLiG9dhXLoCj1dtr63Af8OEHTsw6mbC07m2GI1ZTQQTED", - "UpUZdaI45dtuWRtlMypw0DdwDdtL0RRjOqSOTbusiho6qEipgXRpiDU8tm6M7ua7qDKfaOqqk2DypieL", - "pzVd+G+GD7IVee/gEMeIolX2YwgRVEYQYYl/AAU3WKgZ71akH1se4ylwzVaQQM4WbBYrw/sffX+Yh9VQ", - "pas86KKQ6wEVYXNiVPmZvVidei8pX4C5ns2VKhTNbVXVaNAG6kNLoFLPgOqddn4eFqTw0KFKucbMa7Tw", - "Tc0SYGP2m2m02HFYG60CDUX2HRe9fDwcf2YBh+yG8PjPG03heFDXdaiLVBz0t3KN3VqtdaF5IZ0hXPZ5", - "AViyVKzNvhgohKu2aYu6BPdLpegCBnSX0Hs3sh5Gy+OHg+yTSKIyiJh3RY2eJBAF2b6cmDVHzzCYJ+YQ", - "o5rZCcj0M1kHsfMZYRFth7BZjgJsHblq957KlhfVVgUeAi3OWkDyRhT0YLQxEh7HJVX+OGK9VM9lR0ln", - "H7Hsy67SdOdBLGFQFLUuPOdvwy4H7en9rkCdr0rnS9GFSv+IsnJG98L0hdh2CI6iaQY5LOzC7cueUJqC", - "Sc0GGThezefIW5JYWGJgoA4EADcHGM3liBDrGyGjR4iRcQA2Bj7gwORHEZ5NvjgESO4KPlE/Nl4Rwd8Q", - "T+yzgfpGGBWluVzZgL8x9RzAlaJoJItORDUOQxifEsPmVjQ3bM7p4s0gvQppqFB06qG50JsHQ4rGDteU", - "vfIPWpMVEm6ymlCa9UDHRe0dEM/EJrEZylFdZLaZGXqP5i5gvnTsYNpadPcUmYkNhnPh1WJj5ffAMgyH", - 
"ByOwvWyYQnrF74bkLAvMrml3y7kxKlRIMs7QWpPLkKA3ZuoB2XKIXO4H5eVuBEDHDNX0anBmib3mg7Z4", - "0r/Mm1tt2pRN9WlhseM/dISiuzSAv759rF0Q7q9N4b/h4mL+RH2SSnh9y9JtKhTaj0tbdfCQAoVdcmgB", - "sQOrr7tyYBSt7VivNl4DrMVYiWG+fadkH20KckAlOGmJpsl1LFLA6PKA9/iF/yww1uHuUb59EAQQSlgw", - "paFxGvm4oM9hjqdYPlmI+fDqdCnnZn1vhKgvf+s2xw9by/zkK8AI/DmTSifocYsuwbz0nUIj0nfm1bgE", - "2g5RtM0GWBbnuDjtNWyTjOVVnF7dvD88N9P+WF80qprhLca4DdCaYXOMaODyjqltbPvOBb+wC35B72y9", - "406DedVMLA25tOf4g5yLDgPbxQ4iBBgjjv6uDaJ0B4MMEs773DGQRoOYluNd3obeYcr82Huj1Hza+9DN", - "b0eKriUoAxjPEBSLBWS+vJn3h/GgiFwu+CLo4lSWu2rmHRNbug4rz+0oWufC8GEoCD8Q9xPGM9jEoQ+1", - "AoS8yazDgns4yQK4LVcSNwtFUROG+OMbga3uE/tCuwkA0SDoy44zu4lOtrtUbyduQA40czqJAr++3cey", - "vyEOddOh8OlW5dPdRwgHRJpiOmhs0i9DMMCAaVmybNNxPNlRB41g9CDr8oC0hazFDbYHA+0g6CjBtUpp", - "u1BrZ2A/QZ33xGhlNvbaBRYb+qapS8DPKokejFZkc79ue62rjVz7Dz9faCHpApwXKrEg3WoIXM4haAiq", - "oiuimQ0nydh8DqH3Rd3Ec9ACrmdjz0aQboTI4i6ainH95ZMYGe2hngbG/SiLU0yEFoZ88pd9L5eX6QNT", - "Un0lBFtzA1dVNF3/B9gmP9O8MkoGk6oJz3Vup/ble8Cur4ofYIsj7416NYDt2RW0PL0BpMGYpb9+pIIC", - "1vdUq8Q/qpetLTxgp87iu3RHW+OaMgwTf3PLtJoWtJdym4PRBEkYWMbsxkU8NsGcHmgjvkvK+zaBZftl", - "kEDeD6diyrew7F9FdS2KfbR7CTT3xIvLmXyYTm4XCRC7zdyIe3D9ur5Ao3jGSFPrGW4F9hyIclqWUqxo", - "nrh4iaHLX4qVu/zxdR9e8Yk1mThlX3579uK1A//DdJLmQGVSWwIGV4XvlX+YVdk2DruvElvt2xk6raUo", - "2Py6InMYY7HGyt4dY1OvKUoTPxMcRRdzMY8HvO/lfS7Uxy5xR8gPlHXET+PztAE/7SAfuqIs985GD+1A", - "cDoublxnnShXCAe4dbBQEPOV3Cm76Z3u+OloqGsPT8K5XmFpyrjGwV3hSmRFLviH3rn09J2QLebvMhOj", - "wUMfT6wyQrbF40Cstu9f2RWmjokVvH5d/GpO49FReNSOjqbk19w9CADE32fud9Qvjo6i3sOoGcswCbRS", - "cVrAgzrLYnAjPq0CzmE97oI+WxW1ZCmGybCmUBsF5NG9dthbS+bwmblfMsjB/HQ8RkkPN92iOwRmzAm6", - "GMpErINMC9syUxHBuzHVmARrSAuZvWvJYJ2x/SPEqwIdmInKWRoP7eAzZdgrt8GU5mWCLw9Ya82IFRuI", - "zeUVC8Yyr42pmdoBMpgjikwVLdva4G4m3PGuOPtHBYRlRquZM5B4r3WuOq8c4Kg9gTRuF3MDWz9VM/xt", - "7CA7/E3eFrTLCLLTf/e89in5hcaa/hwYAR7O2GPcO6K3HX04arbZbMt2COY4PWZM63TP6JyzbmCOaCt0", - "ppK5FL9B3BGC/qNIIQzv+GRo5v0NeCxyr8tSaqdy09G9mX3fdo/XjYc2/ta6sF903XXsJpdp/FQftpE3", - "UXpVvFyzQ/KQEhZGGLRTAwZYCx6vIBgW26D46CPK7XmyVSBaGWbxUxnmcp7Y8ZtT6WDu5b/mdD2jsR4x", - "RhcyMAXb24qT0oL4j/0GqLrGgZ2dBBHc9bvMVpIrQTY+iH5V2hvqNXba0RpNo8AgRYWqy9SGKeRKRIap", - "+Jpy20XcfGf5lftagXXBm6/WQmIdSBUP6cogZUXUHHt19TZL++E7GVsw2yC7UhB0YHYDEVtsEqnIdbGu", - "K3c41JzPyek0aAPvdiNjK6bYLAd846F9Y0YVXpe1O7z+xCwPuF4qfP3RiNeXFc8kZHqpLGKVILXuiUJe", - "HZg4A70G4OQU33v4FbmPIZmKreCBwaITgiZPH36FATX2j9PYLesanO9i2RnybB+sHadjjEm1Yxgm6UaN", - "R1/PJcBvMHw77DhN9tMxZwnfdBfK/rNUUE4XEM/PKPbAZL/F3UR3fgcv3HoDQGkptoTp+PygqeFPAznf", - "hv1ZMEgqioLpwgXuKVEYemraK9tJ/XC217/rF+Xh8g8x/rX04X8dW9cnVmNoMZCzhVHKP6KPNkTrlFBb", - "/DNnTWS679dJzn1tYWygVffNsrgxc5mloyyJgepzUkrGNdo/Kj1P/mLUYklTw/6Oh8BNZl8+iTSiavdq", - "4YcB/snxLkGBXMVRLwfI3sss7ltynwueFIajZA+aGgvBqRwM1I2HZA7Fhe4eeqzka0ZJBsmtapEbDTj1", - "rQiP7xjwlqRYr+cgejx4ZZ+cMisZJw9amR366c0LJ2UUQsYaBjTH3UkcErRksMKMufgmmTFvuRcyH7UL", - "t4H+88Y/eZEzEMv8WY4qAoFHc1eyvJHif37ZVD5Hx6rNROzYAIWMWDud3e4TRxseZnXr+m9twBg+G8Dc", - "aLThKH2sDETf2/D6+pvPES/UBcnuecvg+PBXIo0OjnL80RECfXQ0dWLwr4/ajy17PzqKFyCOmtzMrw0W", - "bqMR47exPfxGRAxgvmthHVDk6iNEDJBDl5R5YJjgzA01Je0OcZ9eirib/K54tGn8FFxdvcUnHg/4RxcR", - "n5lZ4gY2WQrDh73dITNKMln9PIhzp+QbsRlLOJ07yBPP7wBFAygZaZ7DlfQ6gEbd9XvjRQIaNaPOIBdG", - "yQybAoX2/D8Ons3ipzuwXbE8+7mp7da5SCTl6TIaJTwzH/5iZfTWFWxZZbTPyJJyDnl0OKvb/uJ14IiW", - "/ncxdp6C8ZHvdjvQ2uV2FtcA3gbTA+UnNOhlOjcThFhtl82qyzLkC5ERnKdpatEwx34r51gLzUh+Mw5b", - "VNrFrWIuuCs4NGc5hmHG/cb4ZiKpHiighf3OfX8hMw62H1fWzGBHB0koK/BiVrQoc8CTuQJJF/ip4ND5", - "HEuo4chBxwqiSvMI38SCFYLoSnIi5vNgGcA1k5Bvp6SkStlBTs2yYINzT54+PD2Nmr0QOyNWarHol/mq", - 
"WcrDE3zFPnFNlmwrgIOA3Q/rh4aiDtnYPuG4npL/qEDpGE/FBzZzFb2k5ta2/STr3qfH5HusfGSIuFXq", - "Hs2Vvohwu6BmVeaCZlMsbnz57dkLYme139gW8raf5QKtdW3yj7pXxhcY9ZWdBirnjB9ndykPs2qlk7r9", - "ZKw2oXmjaZDJOjE3aMcLsXNMnlsTat3A305CsES2LCALul1aJR6Jw/xHa5ou0TbZkoCGeeX4RqyenTWe", - "myD7sO5+hAzbwO16sdpWrFMi9BLkminAjHxYQbscYl0b1NnGfXnE9vJkxbmllOMDhNG619GhaPfAWUnW", - "BxVEIesg/kDLlO3HfGhf2gv8Kp6L0Wly2/H6++J6vsQ2eemcCynlgrMUWyHEJGks3TbOTTmia0Tcv6gm", - "7oRGDle0tW6dC+ywONhs1zNCh7i+yz94ajbVUof9U8PGtVxbgFaOs0E29Z2unUOMcQWum5UhopBPChkJ", - "aoomQtQBFAeSEVZlGrBwfmee/ejs31gU45pxtHQ5tDn9zLqscsXQM80J02QhQLn1tLN51FvzzTFWacxg", - "8+74hViw9IItcAwbRmeWbWNG+0Od+QhSF7Fp3n1m3nW18+ufW+FgdtKzsnSTDvdBjwqSesMHERyLW/KB", - "JAFy6/HD0XaQ287Qb7xPDaHBCqPWoMR7uEcYdS/t9ijfGt3SUhS+QWxGZbSALuMRMF4w7l2o8QsijV4J", - "uDF4Xge+U6mk2uoOo3jaJdB8IAECM5StD/62Q3U7BxiU4Br9HMPb2LQBH2Ac9QuNxE/5lvhDYag7ECae", - "0bwOnY409UapyglRGSYXddp8xxiHYdyJT5lsoWtv+l79OXbjOPQmGqpROKuyBeiEZlmstNU3+JTgU58k", - "BhtIq7oJVZ0d2K5R3qc2N1EquKqKHXP5F245XdA3P0INYe9+v8NYaWe2xX9jHZiGd8YFTR+clesjpLPD", - "CvP3s4xjUq+h6USxRTIeE3in3B4dzdQ3I/Tm+zuldJ+u+7vIxu1wuXCPYvztW3NxhIV7e/Hp9mqp6+pi", - "LLjA577gUV0Rss2V8Crr9RnDqAfcvMiWdYD3L0YBX9F8IBM+9JXY+9X6D4by4dPB8g1Uu/JcmpKdLGiw", - "5JGNFe54X/ouxKH4YBsefHdeC7fWnQgd9t390PLU2RixhlkMeuhu5kRrNvhQL9oPq6ESCb5PBz4P+4G4", - "KJ6pKwMPKyYqH33lY6C9Smh/dSV4Wn0/BtYfzSz43F6LQR/Lpetfa5fpdPIffrZeWAJcy+3vwOPS2/Ru", - "U5mItGvNU80rpG59OKoVYutWHNPDJtYuxcmG3lZmWUuLlnrtZ3pk9XyMONDDx4fp5Dw76MKMtdyZ2FFi", - "x+4FWyw1Vuz/K9AM5Os9HQmaLgR4xEqhWNOBNDeDuRKwSxzueGyygSFgFnZU6I/lg1BXkGpsO9sE10mA", - "Q/ormMm80+fPzgTD6nSdk+EaEuzqQtDvNbvnju8VTgqKf9k+ncfja+6f1SHUNgNsTVVTrqWTMz06c3M+", - "hxSrIu8sVPUfS+BBEaSpt8sgLPOgbhWr85iwrvfhVscGoF11pHbCE/TXuTU4Q3ns17C9p0iLGqKNQ+sk", - "vpsUDkYMWBeYryE9ZEh2UWNM1ZSBWPAhwa4Uc9McY7Dmc1B27YZzeZI0F0dTim3HlPGm56PmMp8eVPYR", - "U3KGaln1eyYP6x/PsUW1cgFytC48HGrp5LzfOGftChdjWbHad+JLGIPyv/kagnaWnF27/gGIFeupWlOZ", - "+TfupCiUvZtYHOh5PTNrEjj6QQ6RVgyYC5XmwogRyVBCWTtnog44vKdsZGhTwAfhmoOUkNUukVwoSLTw", - "CR+74NiFChv+eiMkqMH2Rxa4wdLXb5ra3tgGjmKpa+qiXsMFEgkFNdDJoAL38Jy7kP3MPvdJ+L4N2F4L", - "U02v+/vR+tQdpnpIDKl+TtxtuT+5/ybGJsY5yMR7nrrluHm7IhvW3cyq1F7Q4cGoDXKja+fsYCVRO03a", - "X2VHRwiS5K9he2KVIN/I1+9gCLSVnCzoQcHRzibfqflNxeBe3Al4n7eOXClEngw4O877NcS7FH/N0mvA", - "GoB1iPtAj3ZyH23stTd7vdz6mtllCRyyB8eEnHGbVOQd2+32gp3J+T29a/4NzppVtqy/M6odX/F4dgYW", - "3Je35GZ+mN08TIFhdbecyg6yp0L1hg+F3KyxOH+7i+fxWK2872rudpFviMpCEZNJLqzH6hke9JjhCEsg", - "BLU60JFJifN0EZWLWCzvTco0mKHimAonQ4A08DHVAmoo3OBRBET7okdOoS1954reiTmR0DiRb1r9r9/C", - "PabRd2euZ2nzu7mQ0GrGbr62lT7rxBcso4n/mTEtqdzepEZfr4V8z3oyiOW94Vh1JFazkCYaq4/DPBfr", - "BJlVUve5iKm25j3Vvox907XmO3OqZxDEdVHlBLUtWdKMpEJKSMMv4vmeFqpCSEhygWFeMQ/0XBu5u8Ak", - "L05ysSCiTEUGtl9MnIKG5qo4pyg2QRBVE0WBpR3MFrbfBHQ8ckpzp1o/UoKi1uKA3vkp2Mz1pqqTXXRi", - "fZkDEcugXBUnhyH7ch/eHb3/D+rUco5hjCuGsS7tpH0rfZbmjqkrGYRn7iIsM0T0UopqsQwKOpM1y3Nv", - "MDDbICungIaj/KQqDEfCjC0zxRNSCKWdZmdHUvVQTYjX/VRwLUWet41AViReOMv2S7o5S1P9QojrGU2v", - "H6AeyYWuV5pNfT5zNxivmUl2Snm1L7zEtg/fXxrXvoehaY5IRjOkDks5uJF6AOa7/Rxrv437rL+w7rra", - "zCuuNpxxQrUoWBqn4T9WdNtgTFqMJURrhNlehraqA76GjDq8HOpgBmRJfTQDp9FmbGfE8TTn1EXmYf6L", - "Em93XDIHd0kMXEx9PumkliQdlK06ACCkNtVYV9I2QAwln5qriIUtTYAu6S6gI7k4Rv7cDjYzwp0DpeFW", - "QPWiDWsA71tlf2prudnIxZnY+OcPmmJvNwL+w24qbzGPoZCqi4a0pA2q8oVhBjhCvKT0zvijS0wzn42N", - "Qqqb1Y68UQMAhuOSWjCMik46FIw5ZTlkSazX4XltE5oGmq1Lheq2IGfKcfKUVr7VoBm7kuAKlViRWrb9", - "TSU1pCTq1/uWW57BBmwexW8ghe0hOA38HZDbFoMd5VuUSQ4raIVrueopFYp2bAX+W1V/TDKAEr1/XZtU", - "LA4pvMs7hgq39iSIZBmD3ajlwiLW7hTZY5aIGlE2PLHHRI09SgaiFcsq2sKfOlTkaJvdzFGOoKonkyde", - 
"bxs7zU92hDd+gDP/fUyU8Zh4N44PHcyC4qjbxYD2xiVWaujU83hYYlgaqHZo4GxZ7fi0JN7wDVXSNR82", - "APZJvlFvRu4TEzxA7LcbSFGqacfd3R4nBAcjqlP2a1AEl/UO39yQ/FloeCcJD44XUzUUuES1HZYaTxdO", - "YMcXsOk0N2KvkZqxnaDj/47/Tcms8gMZvdp2Nww1uOfgPXZYibx2VjiBltUXmo8vnLpClF2lnAWR1QXd", - "EiHxH6Ov/aOiOZtv8YRa8P1nRC2pISHnIrS+axevaCbeLZhMPWDeLiD8VHbdbOyYwXBbM0oAtLkCfRsa", - "QQp6DeE2oFvecp5UG5ajqlnBlMLLrrOdfSy4xftiIgXNQh0ZSxq2G377Irfm6//dZG2FU/lKZGVOU9/L", - "0jXTaRnEbb9aT1x6CcXutL6+euxJoO6B2xCt9Hng2Q2MewdGbsRi5YcahbTA7vUG7fVIudUyRtooO90g", - "diREjlrKXe/C2PiQHtBhR8F94IcNFj8N/qPVRoeWMQb83wveB1qqhvDa7qmfAMutWhERWK1ddSY2iYS5", - "2hcKYQ2rRhGWTZUJb5xkPJVAlY0NOX/lVLammCbjRoW00Yu1960eJYM54w2zZLysdEQDwJqafBsgLDRP", - "I1oHnD1DUoIRw1Y0f7UCKVk2tHHmdNjmg2EzA2+Sd99GlP/6Tu0PwFSj/WAmITSZasFr5gK37ZJsYKHS", - "lGdUZuHrjJMUpLn3yZpu1c19HwZaWRn5Yo/3gwbSTDu/PfCDIGlbQPKtc1/e0jNRA0jv0EUxwrWAEawR", - "t4I1imgx4EnowxCvx0E3SS4WmF82QICuain6fqyyIjgabK08dNg8iv0Gu6fBgu3u4GuBs46ZYvc5e4Wo", - "Q4XnJ870zpNmrWndhD8bkWkPgqd/vmjCwu3m9Ok/lqPpynKEeZpeuPNJDH6vbXiInQ8GPBltC+7ALqKD", - "3CX4huba8Y2w2j74WCao1WET1G3VjsBvUE2QM01d4E7f6NNTii1Spi6P9kCbkLUk+3tgADzbtdydrfa0", - "dTCFGeeQ7mG7M2eTUpRJOiYa0PZ0yJxB20HahnGAPgJz9cC668AJVXc5aVXEabU7ObSB2mC7lX1+mTLd", - "pWQPGTQGOGjbWC7myMtsT2+0w2COR228mHazj9oGm5pJEEokpJVEg+aabvc3pBqoJXzx17MvHj765dEX", - "XxLzAsnYAlRTj7rT0KmJGGO8a2f5tDFiveXp+Cb4vHSLOO8p8+k29aa4s2a5rWqKTfbaWR1iCY1cAJHj", - "GGkkdKO9wnGaoO/f13bFFnnnOxZDwcffMynyPN4PoBbdIqb+2G4Fxn4j8ZcgFVPaMMK2r47pJlZWLdEc", - "h1VhV7bOiOCpK9tfUwHTA8E4sYUMhVoiP8OsX+ffILApc8errE9i17qcXmQtYhicgfEbMyClKJ0ozeYk", - "BhHmlsgg59IZGjG8M4ierJmtjaOMEaKLSY6TXthKeTe3b7f51HFObzYxIl74Q3kD0hyypA9ntN+EkzSm", - "9N8N/4ik6N8Z16iX+zF4RVQ/uFm79lGg9dO1I+SBAAzkYbYy6IIUoqBErbRWebTfe1dnV/x42bhA9yYM", - "ICT+gz3ghYmVzXt1jLsD5zPXen1ZIyVYyrshSmgtf1+upme99UUSbJEzUmgNyrIl0RcLg0Rc9azObx3Q", - "SnppsFIITYxmmueR9FlrN8EzFRKOUQnkiuafnmt8x6TSZ4gPyN4MJ82EOZQhki0q1c0quL2go+YO8iXv", - "bmr+GlN2/wPMHkXvOTeUcxf3bjO0emEv84W/FWwWMFnjmDYc6OGXZObaMJQSUqa6bui1F07qlEGQbO5C", - "L2Gj9+Qo7lvnz0LfgoznPmaE/Bi4kwSa7RoImyP6mZnKwMmNUnmM+npkEcFfjEeFbVv3XBe3LNl/s4Ig", - "QWmvAwuC9BvSjl2eLXphLp1KQX+do2/rFm4jF3WztrHVbEZX/r+6eqtnY4rQxKv0m8+xCs6dlOs/qFj/", - "R6h/Y3HkxnDzxijm56GKqLbq50DV5s5+VCzfGyDSqsH9YTpZAAfFFFaZ/sV1Ffm0d6mHwObk94+qhfU2", - "hUQsYiJrbU0eTBVU1x5RWNt9FqmGjPluaSWZ3mJHWW9AY79EK/V8X1d9cFVDat+Vu/u0uIa6q3dTI6JS", - "/nb9XtAc7yPrUuPmFhL5MfnW1n52B+Xre7N/hcd/eZKdPn74r7O/nH5xmsKTL746PaVfPaEPv3r8EB79", - "5Ysnp/Bw/uVXs0fZoyePZk8ePfnyi6/Sx08ezp58+dW/3jN8yIBsAfVF359O/jM5yxciOXt9nlwaYBuc", - "0JL9AGZvUFeeC+x4aJCa4kmEgrJ88tT/9H/8CTtORdEM73+duM49k6XWpXp6crJer4/DT04WmBSeaFGl", - "yxM/D/aha8krr8/raHIb94I72liPcVMdKZzhszffXlySs9fnxw3BTJ5OTo9Pjx+6pseclmzydPIYf8LT", - "s8R9P8HKiyfKFVU/KUtbVv3DdHLi6ND9tQSaY3kV80cBWrLUP5JAs637v1rTxQLkMeYS2J9Wj068xHHy", - "3uXNf9j17CSMtjh53yovkO350kcT7Hvl5L1vnLp7wFbTTBfHZRAXdSN+D9oV27H2hUilBvQmuNGnRGHV", - "dPNTKZkwZ3JqLtgM0NeOIWMSy0drWfHUOmDtFMDxvy/P/hOd0C/P/pN8TU6nLvxdodISm97m29bEdJ5Z", - "sPuxf+qb7Vldy6JxWE+evo0ZklyDtLKa5SwlVhbBw2goLTgr9YgNL0Sr4aTpYN9wdsOtT5Ov3r3/4i8f", - "YhJjT/6tkRSUdwhRr4Xve4lIK+jm6yGUbVw8tBn3HxXIbbOIgm4mIcB9L2mk5pVPV/Htf8N4vyAS8N8v", - "Xv1IhCROQ35N0+s6VcfnZjX5aGFqlvlyCGJ3eYZAA68Kcw+5nJ9CLcp2+dcaze+wVx4Ciizj0emp55NO", - "CwkO6Ik798FMHdNVn9Aw9CUwRvYToRWBDU11viVUBbEHGAno+1p2EqpEmbTCuneaP/szui2JxsQfmosd", - "qU8uNM33wHfZ6QHYQocLoynNRbo/+bmHjCgE72KiQri1nkb+3N3/HrvblzxIKcyZZhjr3Fw5/jprAenk", - "zXzrwR0oM3FM/iYqlA+N5F9piHVAxxms38PN6ariBMFpTSILPjk66i786KgJpZvDGpks5fhiFx1HR8dm", - "p54cyMp22qJbRWRHnZ1Dhutt1ku6qSORKeGCJxwWVLMVkECpfHL68A+7wnNuY7+NQGwF9w/TyRd/4C07", - 
"50awoTnBN+1qHv9hV3MBcsVSIJdQlEJSyfIt+YnXwfVBg+0++/uJX3Ox5h4RRietioLKrROiac1zKh50", - "fdnJf3r1bRpBG7koXSiMd0ER1cq0vgYeX0zeffA6wEjdY9drJzPsfzj2VQgVlmHtBL0P6uQ92s8Hfz9x", - "TtD4Q/RjWAX5xFfeG3jT1liKP2xpRe/1xixk93DmnWC8lOp0WZUn7/E/qOsGK7Il20/0hp9gnOXJ+xYi", - "3OMeItq/N5+Hb6wKkYEHTsznCvW4XY9P3tt/g4lgU4Jk5jrCMonuV1vO9gT7CG/7P295Gv2xv45WKc+B", - "n0+8qSWmUrfffN/6s01TalnpTKyDWdBJYT1sfcjMw0p1/z5ZU6aNkOQqSNK5Btn/WAPNT1y7mM6vTYX2", - "3hMsOx/82BGrSmFLyLQ12jd0fdnKTJS2dMM3Ag0VQwx3k8wYRy4UcsnG9Ggf9lWkHm+8XIKNsfXe24gM", - "qgWZSUGzlCrsZ+8aK/V04w+31L+6lSbOI745BBPNDf1ihIafHO912OC4Y4TMYF/I+XM/YZPU9dEFsx5E", - "39CM+JpDCXlJc7PhkJEzJ/63sPGxharPLwV9ZrHlk8kZ3/jDpwjFAmwtBVHGS7gEHdDGCBVGizQMYAE8", - "cSwomYls65pUTSRd642tGNFlbie0fWO0DZFU0kINPbwDK+Xv2zS5zyL5pyHwT0Pgn6aiPw2Bf+7un4bA", - "kYbAP81kf5rJ/keayQ6xjcXETGf+GZY2sWs2bc1r9T7adCeoWXy7lhXTtUzWShXFRghMHxNyieVUqLkl", - "YAWS5iSlykpXrmZXgRGcWBELsqdXPGlBYuMkzcT3m//aANWr6vT0MZDTB91vlGZ5HvLm/rco7+Ijm0Py", - "NbmaXE16I0koxAoym/AaVse2X+0d9n/V477qldXHzHKsV+MLZxFVzecsZRblueALQheiCa7G8qBc4BOQ", - "BjjbnIgwPXXJKMxVGnW9y9tFvNuSe18COG+2cG9IQYdc4tEEhvAODCX4lzFxBP+jpfSbVoi6LSPdOXaP", - "q/7JVT4FV/nsfOWP7qQNTIv/LcXMJ6dP/rALCg3RPwpNvsPEgduJY65sZRrt0XRTQcsXX/Hmvib4OAzm", - "xVu0DuN9+85cBArkyl+wTWzq05MTrMa1FEqfTMz1145bDR++q2F+72+nUrIVNgFG66aQbME4zRMX+Jk0", - "8aePjk8nH/5/AAAA//9iOfQe9x8BAA==", + "sNervXBewzbBWD9F7v/ws3rwCeDVQtN8D2LxnRh6u/a0PtTjpt9FcN3JQ7KzljpLtUa8NQwiBw1DKDwI", + "J4P714Wot4u3R8sKJIaU/K4U7ye5HQHVoP7O9H5baKtyIILdqelGwjMbxikXXrCKDZZTpZN9bNm81LIl", + "mBUEnDDGiXHgAcHrBVXahkExnqFN014nOI8VwswUwwAPqiFm5J+9BtIfOzX3IFeVqtURVZWlkBqy2BrQ", + "Izs414+wqecS82DsWufRglQK9o08hKVgfIcspwHjH1TX/lfn0e0vDn3q5p7fRlHZAqJBxC5ALvxbAXbD", + "KN4BQJhqEG0Jh6kO5dShw9OJ0qIsDbfQScXr74bQdGHfPtM/Ne/2ics6Oey9nQlQ6EBx7zvI1xazNn57", + "SRVxcHgXO5pzbLxWH2ZzGBPFeArJLspHFc+8FR6BvYe0KheSZpBkkNNtJDjAPib28a4BcMcbdVdoSGwg", + "bnzTG0r2cY87hhY4nooJjwSfkNQcQaMKNATivt4zcgY4dow5OTq6Vw+Fc0W3yI+Hy7ZbHRkRb8OV0GbH", + "HT0gyI6jjwF4AA/10DdHBX6cNLpnd4q/gXIT1HLE4ZNsQQ0toRn/oAUM2IJdjlNwXjrsvcOBo2xzkI3t", + "4SNDR3bAMP2aSs1SVqKu8wNs71z1604QdZyTDDRlOWQkeGDVwDL8ntgQ0u6YN1MFR9ne+uD3jG+R5fgw", + "nTbw17BFnfu1zU0ITB13octGRjX3E+UEAfURz0YED1+BDU11vjWCml7ClqxBAlHVzIYw9P0pWpRJOEDU", + "P7NjRuedjfpGd7qLL3CoYHmxWDOrE+yG77KjGLTQ4XSBUoh8hIWsh4woBKNiR0gpzK4zl/7kE2A8JbWA", + "dEwbXfP19X9PtdCMKyB/ExVJKUeVq9JQyzRCoqCAAqSZwYhg9ZwuOLHBEORQgNUk8cnRUXfhR0duz5ki", + "c1j7nEHzYhcdR0dox3ktlG4drjuwh5rjdh65PtBxZS4+p4V0ecr+iCc38pidfN0ZvPZ2mTOllCNcs/xb", + "M4DOydyMWXtII+OivXDcUb6cdnxQb9247xesqHKq78JrBSuaJ2IFUrIM9nJyNzET/NsVzV/Vn2E+JKSG", + "RlNIUsziGzkWXJpvbOKfGYdxZg6wDfofCxCc268u7Ed7VMwmUpUVBWSMasi3pJSQgs13M5Kjqpd6TGwk", + "fLqkfIEKgxTVwgW32nGQ4VfKmmZkxXtDRIUqveEJGrljF4ALU/Mpj0acAmpUuq6F3Cowa1rP57Jcx9zM", + "wR50PQZRJ9l0MqjxGqSuGo3XIqedtzniMmjJewF+molHulIQdUb26eMr3BZzmMzm/j4m+2boGJT9iYOI", + "3+bhUNCvUbfz7R0IPXYgIqGUoPCKCs1Uyj4V8zBH24cKbpWGom/Jt5/+MnD83gzqi4LnjENSCA7baFkS", + "xuElPoweJ7wmBz5GgWXo264O0oK/A1Z7njHUeFv84m53T2jXY6W+E/KuXKJ2wNHi/QgP5F53u5vypn5S", + "mucR16LL4OwyADWtg3WZJFQpkTKU2c4zNXVRwdYb6dI92+h/Xeel3MHZ647b8aGFxQHQRgx5SShJc4YW", + "ZMGVllWqrzhFG1Ww1EgQl1fGh62Wz/wrcTNpxIrphrriFAP4astVNGBjDhEzzXcA3nipqsUClO7oOnOA", + "K+7eYpxUnGmcqzDHJbHnpQSJkVTH9s2Cbsnc0IQW5DeQgswq3Zb+MUFZaZbnzqFnpiFifsWpJjlQpclL", + "xi83OJx3+vsjy0GvhbyusRC/3RfAQTGVxIPNvrdPMa7fLX/pYvwx3N0+9kGnTcWEiVlmq0jK/73/b0/f", + "niX/RZPfTpOv/uXk3fsnHx4c9X589OHrr/9f+6fHH75+8G//HNspD3ssfdZBfv7cacbnz1H9CUL1u7B/", + "NPt/wXgSJbIwmqNDW+Q+lopwBPSgbRzTS7jiesMNIa1ozjLDW25CDt0bpncW7enoUE1rIzrGML/WA5WK", + 
"W3AZEmEyHdZ4YymqH58ZT1RHp6TLPcfzMq+43Uovfds8TB9fJubTuhiBrVP2lGCm+pL6IE/356MvvpxM", + "mwzz+vlkOnFP30UomWWbWB2BDDYxXTFMkrinSEm3CnSceyDs0VA6G9sRDltAMQOplqz8+JxCaTaLczif", + "suRsTht+zm2Avzk/6OLcOs+JmH98uLUEyKDUy1j9opaghm81uwnQCTsppVgBnxJ2DMddm09m9EUX1JcD", + "nfvAVCnEGG2oPgeW0DxVBFgPFzLKsBKjn056g7v81Z2rQ27gGFzdOWMRvfe+//aSnDiGqe7ZkhZ26KAI", + "QUSVdsmTrYAkw83CnLIrfsWfwxytD4I/veIZ1fRkRhVL1UmlQH5Dc8pTOF4I8tTnYz6nml7xnqQ1WFgx", + "SJomZTXLWUquQ4WkIU9bLKs/wtXVW5ovxNXVu15sRl99cFNF+YudIDGCsKh04kr9JBLWVMZ8X6ou9YIj", + "21peu2a1QraorIHUlxJy48d5Hi1L1S350F9+WeZm+QEZKlfQwGwZUVrU+WhGQHEpvWZ/fxTuYpB07e0q", + "lQJFfi1o+ZZx/Y4kV9Xp6WPM7GtqIPzqrnxDk9sSRltXBktSdI0quHCrVmKselLSRczFdnX1VgMtcfdR", + "Xi7QxpHnBD9rZR36BAMcqllAneI8uAEWjoOTg3FxF/YrX9YxvgR8hFvYTsC+1X4F+fM33q49Ofi00svE", + "nO3oqpQhcb8zdbW3hRGyfDSGYgvUVl1hvBmQdAnptatYBkWpt9PW5z7gxwmannUwZWvZ2QxDrKaEDooZ", + "kKrMqBPFKd92y9oom1GBg76Ba9heiqYY0yF1bNplVdTQQUVKDaRLQ6zhsXVjdDffRZX5RFNXnQSTNz1Z", + "PK3pwn8zfJCtyHsHhzhGFK2yH0OIoDKCCEv8Ayi4wULNeLci/djyGE+Ba7aCBHK2YLNYGd7/6PvDPKyG", + "Kl3lQReFXA+oCJsTo8rP7MXq1HtJ+QLM9WyuVKFobquqRoM2UB9aApV6BlTvtPPzsCCFhw5VyjVmXqOF", + "b2qWABuz30yjxY7D2mgVaCiy77jo5ePh+DMLOGQ3hMd/3mgKx4O6rkNdpOKgv5Vr7NZqrQvNC+kM4bLP", + "C8CSpWJt9sVAIVy1TVvUJbhfKkUXMKC7hN67kfUwWh4/HGSfRBKVQcS8K2r0JIEoyPblxKw5eobBPDGH", + "GNXMTkCmn8k6iJ3PCItoO4TNchRg68hVu/dUtryotirwEGhx1gKSN6KgB6ONkfA4LqnyxxHrpXouO0o6", + "+x3LvuwqTXcexBIGRVHrwnP+Nuxy0J7e7wrU+ap0vhRdqPSPKCtndC9MX4hth+AommaQw8Iu3L7sCaUp", + "mNRskIHj1XyOvCWJhSUGBupAAHBzgNFcjgixvhEyeoQYGQdgY+ADDkx+FOHZ5ItDgOSu4BP1Y+MVEfwN", + "8cQ+G6hvhFFRmsuVDfgbU88BXCmKRrLoRFTjMITxKTFsbkVzw+acLt4M0quQhgpFpx6aC715MKRo7HBN", + "2Sv/oDVZIeEmqwmlWQ90XNTeAfFMbBKboRzVRWabmaH3aO4C5kvHDqatRXdPkZnYYDgXXi02Vn4PLMNw", + "eDAC28uGKaRX/G5IzrLA7Jp2t5wbo0KFJOMMrTW5DAl6Y6YekC2HyOV+UF7uRgB0zFBNrwZnlthrPmiL", + "J/3LvLnVpk3ZVJ8WFjv+Q0couksD+Ovbx9oF4f7aFP4bLi7mT9RHqYTXtyzdpkKh/bi0VQcPKVDYJYcW", + "EDuw+rorB0bR2o71auM1wFqMlRjm23dK9tGmIAdUgpOWaJpcxyIFjC4PeI9f+M8CYx3uHuXbB0EAoYQF", + "Uxoap5GPC/oU5niK5ZOFmA+vTpdybtb3Roj68rduc/ywtcyPvgKMwJ8zqXSCHrfoEsxL3yk0In1nXo1L", + "oO0QRdtsgGVxjovTXsM2yVhexenVzfvDczPtj/VFo6oZ3mKM2wCtGTbHiAYu75jaxrbvXPALu+AX9M7W", + "O+40mFfNxNKQS3uOz+RcdBjYLnYQIcAYcfR3bRClOxhkkHDe546BNBrEtBzv8jb0DlPmx94bpebT3odu", + "fjtSdC1BGcB4hqBYLCDz5c28P4wHReRywRdBF6ey3FUz75jY0nVYeW5H0ToXhg9DQfiBuJ8wnsEmDn2o", + "FSDkTWYdFtzDSRbAbbmSuFkoipowxB/fCGx1H9kX2k0AiAZBX3ac2U10st2lejtxA3KgmdNJFPj17T6W", + "/Q1xqJsOhU+3Kp/uPkI4INIU00Fjk34ZggEGTMuSZZuO48mOOmgEowdZlwekLWQtbrA9GGgHQUcJrlVK", + "24VaOwP7Ceq8J0Yrs7HXLrDY0DdNXQJ+Vkn0YLQim/t122tdbeTaf/j5QgtJF+C8UIkF6VZD4HIOQUNQ", + "FV0RzWw4Scbmcwi9L+omnoMWcD0bezaCdCNEFnfRVIzrL5/EyGgP9TQw7kdZnGIitDDkk7/se7m8TB+Y", + "kuorIdiaG7iqoun6P8A2+ZnmlVEymFRNeK5zO7Uv3wN2fVX8AFsceW/UqwFsz66g5ekNIA3GLP31IxUU", + "sL6nWiX+Ub1sbeEBO3UW36U72hrXlGGY+JtbptW0oL2U2xyMJkjCwDJmNy7isQnm9EAb8V1S3rcJLNsv", + "gwTyfjgVU76FZf8qqmtR7KPdS6C5J15czuTDdHK7SIDYbeZG3IPr1/UFGsUzRppaz3ArsOdAlNOylGJF", + "88TFSwxd/lKs3OWPr/vwio+sycQp+/LbsxevHfgfppM0ByqT2hIwuCp8r/xsVmXbOOy+Smy1b2fotJai", + "YPPrisxhjMUaK3t3jE29pihN/ExwFF3MxTwe8L6X97lQH7vEHSE/UNYRP43P0wb8tIN86Iqy3DsbPbQD", + "wem4uHGddaJcIRzg1sFCQcxXcqfspne646ejoa49PAnneoWlKeMaB3eFK5EVueAfeufS03dCtpi/y0yM", + "Bg/9fmKVEbItHgditX3/yq4wdUys4PXr4ldzGo+OwqN2dDQlv+buQQAg/j5zv6N+cXQU9R5GzViGSaCV", + "itMCHtRZFoMb8XEVcA7rcRf02aqoJUsxTIY1hdooII/utcPeWjKHz8z9kkEO5qfjMUp6uOkW3SEwY07Q", + "xVAmYh1kWtiWmYoI3o2pxiRYQ1rI7F1LBuuM7R8hXhXowExUztJ4aAefKcNeuQ2mNC8TfHnAWmtGrNhA", + "bC6vWDCWeW1MzdQOkMEcUWSqaNnWBncz4Y53xdk/KiAsM1rNnIHEe61z1XnlAEftCaRxu5gb2PqpmuFv", + 
"YwfZ4W/ytqBdRpCd/rvntU/JLzTW9OfACPBwxh7j3hG97ejDUbPNZlu2QzDH6TFjWqd7RuecdQNzRFuh", + "M5XMpfgN4o4Q9B9FCmF4xydDM+9vwGORe12WUjuVm47uzez7tnu8bjy08bfWhf2i665jN7lM46f6sI28", + "idKr4uWaHZKHlLAwwqCdGjDAWvB4BcGw2AbFRx9Rbs+TrQLRyjCLn8owl/PEjt+cSgdzL/81p+sZjfWI", + "MbqQgSnY3laclBbEf+w3QNU1DuzsJIjgrt9ltpJcCbLxQfSr0t5Qr7HTjtZoGgUGKSpUXaY2TCFXIjJM", + "xdeU2y7i5jvLr9zXCqwL3ny1FhLrQKp4SFcGKSui5tirq7dZ2g/fydiC2QbZlYKgA7MbiNhik0hFrot1", + "XbnDoeZ8Tk6nQRt4txsZWzHFZjngGw/tGzOq8Lqs3eH1J2Z5wPVS4euPRry+rHgmIdNLZRGrBKl1TxTy", + "6sDEGeg1ACen+N7Dr8h9DMlUbAUPDBadEDR5+vArDKixf5zGblnX4HwXy86QZ/tg7TgdY0yqHcMwSTdq", + "PPp6LgF+g+HbYcdpsp+OOUv4prtQ9p+lgnK6gHh+RrEHJvst7ia68zt44dYbAEpLsSVMx+cHTQ1/Gsj5", + "NuzPgkFSURRMFy5wT4nC0FPTXtlO6oezvf5dvygPl3+I8a+lD//r2Lo+shpDi4GcLYxS/hF9tCFap4Ta", + "4p85ayLTfb9Ocu5rC2MDrbpvlsWNmcssHWVJDFSfk1IyrtH+Uel58hejFkuaGvZ3PARuMvvySaQRVbtX", + "Cz8M8I+OdwkK5CqOejlA9l5mcd+S+1zwpDAcJXvQ1FgITuVgoG48JHMoLnT30GMlXzNKMkhuVYvcaMCp", + "b0V4fMeAtyTFej0H0ePBK/volFnJOHnQyuzQT29eOCmjEDLWMKA57k7ikKAlgxVmzMU3yYx5y72Q+ahd", + "uA30nzb+yYucgVjmz3JUEQg8mruS5Y0U//PLpvI5OlZtJmLHBihkxNrp7HYfOdrwMKtb139rA8bw2QDm", + "RqMNR+ljZSD63obX1998inihLkh2z1sGx4e/Eml0cJTjj44Q6KOjqRODf33UfmzZ+9FRvABx1ORmfm2w", + "cBuNGL+N7eE3ImIA810L64AiVx8hYoAcuqTMA8MEZ26oKWl3iPv4UsTd5HfFo03jp+Dq6i0+8XjAP7qI", + "+MTMEjewyVIYPuztDplRksnq50GcOyXfiM1YwuncQZ54/gAoGkDJSPMcrqTXATTqrt8bLxLQqBl1Brkw", + "SmbYFCi0538+eDaLn+7AdsXy7OemtlvnIpGUp8tolPDMfPiLldFbV7BlldE+I0vKOeTR4axu+4vXgSNa", + "+t/F2HkKxke+2+1Aa5fbWVwDeBtMD5Sf0KCX6dxMEGK1XTarLsuQL0RGcJ6mqUXDHPutnGMtNCP5zThs", + "UWkXt4q54K7g0JzlGIYZ9xvjm4mkeqCAFvY79/2FzDjYflxZM4MdHSShrMCLWdGizAFP5gokXeCngkPn", + "cyyhhiMHHSuIKs0jfBMLVgiiK8mJmM+DZQDXTEK+nZKSKmUHOTXLgg3OPXn68PQ0avZC7IxYqcWiX+ar", + "ZikPT/AV+8Q1WbKtAA4Cdj+sHxqKOmRj+4Tjekr+owKlYzwVH9jMVfSSmlvb9pOse58ek++x8pEh4lap", + "ezRX+iLC7YKaVZkLmk2xuPHlt2cviJ3VfmNbyNt+lgu01rXJP+peGV9g1Fd2GqicM36c3aU8zKqVTur2", + "k7HahOaNpkEm68TcoB0vxM4xeW5NqHUDfzsJwRLZsoAs6HZplXgkDvMfrWm6RNtkSwIa5pXjG7F6dtZ4", + "boLsw7r7ETJsA7frxWpbsU6J0EuQa6YAM/JhBe1yiHVtUGcb9+UR28uTFeeWUo4PEEbrXkeHot0DZyVZ", + "H1QQhayD+AMtU7Yf86F9aS/wq3guRqfJbcfr74vr+RLb5KVzLqSUC85SbIUQk6SxdNs4N+WIrhFx/6Ka", + "uBMaOVzR1rp1LrDD4mCzXc8IHeL6Lv/gqdlUSx32Tw0b13JtAVo5zgbZ1He6dg4xxhW4blaGiEI+KWQk", + "qCmaCFEHUBxIRliVacDC+Z159qOzf2NRjGvG0dLl0Ob0M+uyyhVDzzQnTJOFAOXW087mUW/NN8dYpTGD", + "zbvjF2LB0gu2wDFsGJ1Zto0Z7Q915iNIXcSmefeZedfVzq9/boWD2UnPytJNOtwHPSpI6g0fRHAsbskH", + "kgTIrccPR9tBbjtDv/E+NYQGK4xagxLv4R5h1L2026N8a3RLS1H4BrEZldECuoxHwHjBuHehxi+INHol", + "4MbgeR34TqWSaqs7jOJpl0DzgQQIzFC2PvjbDtXtHGBQgmv0cwxvY9MGfIBx1C80Ej/lW+IPhaHuQJh4", + "RvM6dDrS1BulKidEZZhc1GnzHWMchnEnPmWyha696Xv159iN49CbaKhG4azKFqATmmWx0lbf4FOCT32S", + "GGwgreomVHV2YLtGeZ/a3ESp4KoqdszlX7jldEHf/Ag1hL37/Q5jpZ3ZFv+NdWAa3hkXNH1wVq6PkM4O", + "K8zfzzKOSb2GphPFFsl4TOCdcnt0NFPfjNCb7++U0n267h8iG7fD5cI9ivG3b83FERbu7cWn26ulrquL", + "seACn/uCR3VFyDZXwqus12cMox5w8yJb1gHevxgFfEXzgUz40Fdi71frPxjKh08HyzdQ7cpzaUp2sqDB", + "kkc2Vrjjfem7EIfig2148N15LdxadyJ02Hf3Q8tTZ2PEGmYx6KG7mROt2eBDvWg/rIZKJPg+Hfg87Afi", + "onimrgw8rJiofPSVj4H2KqH91ZXgafX9GFh/NLPgU3stBn0sl65/rV2m08l/+Nl6YQlwLbd/AI9Lb9O7", + "TWUi0q41TzWvkLr14ahWiK1bcUwPm1i7FCcbeluZZS0tWuq1n+mR1fMx4kAPHx+mk/PsoAsz1nJnYkeJ", + "HbsXbLHUWLH/r0AzkK/3dCRouhDgESuFYk0H0twM5krALnG447HJBoaAWdhRoT+WD0JdQaqx7WwTXCcB", + "DumvYCbzTp8/OxMMq9N1ToZrSLCrC0G/1+yeO75XOCko/mX7dB6Pr7l/VodQ2wywNVVNuZZOzvTozM35", + "HFKsiryzUNV/LIEHRZCm3i6DsMyDulWszmPCut6HWx0bgHbVkdoJT9Bf59bgDOWxX8P2niItaog2Dq2T", + "+G5SOBgxYF1gvob0kCHZRY0xVVMGYsGHBLtSzE1zjMGaz0HZtRvO5UnSXBxNKbYdU8abno+ay3x6UNlH", + 
"TMkZqmXV75k8rH88xxbVygXI0brwcKilk/N+45y1K1yMZcVq34kvYQzK/+ZrCNpZcnbt+gcgVqynak1l", + "5t+4k6JQ9m5icaDn9cysSeDoBzlEWjFgLlSaCyNGJEMJZe2ciTrg8J6ykaFNAR+Eaw5SQla7RHKhINHC", + "J3zsgmMXKmz4642QoAbbH1ngBktfv2lqe2MbOIqlrqmLeg0XSCQU1EAngwrcw3PuQvYz+9wn4fs2YHst", + "TDW97u9H61N3mOohMaT6OXG35f7k/psYmxjnIBPveeqW4+btimxYdzOrUntBhwejNsiNrp2zg5VE7TRp", + "f5UdHSFIkr+G7YlVgnwjX7+DIdBWcrKgBwVHO5t8p+Y3FYN7cSfgfdo6cqUQeTLg7Djv1xDvUvw1S68B", + "awDWIe4DPdrJfbSx197s9XLra2aXJXDIHhwTcsZtUpF3bLfbC3Ym5/f0rvk3OGtW2bL+zqh2fMXj2RlY", + "cF/ekpv5YXbzMAWG1d1yKjvIngrVGz4UcrPG4vztLp7HY7Xyvqu520W+ISoLRUwmubAeq2d40GOGIyyB", + "ENTqQEcmJc7TRVQuYrG8NynTYIaKYyqcDAHSwMdUC6ihcINHERDtix45hbb0nSt6J+ZEQuNEvmn1v34L", + "95hG3525nqXN7+ZCQqsZu/naVvqsE1+wjCb+Z8a0pHJ7kxp9vRbyPevJIJb3hmPVkVjNQpporD4O81ys", + "E2RWSd3nIqbamvdU+zL2Tdea78ypnkEQ10WVE9S2ZEkzkgopIQ2/iOd7WqgKISHJBYZ5xTzQc23k7gKT", + "vDjJxYKIMhUZ2H4xcQoamqvinKLYBEFUTRQFlnYwW9h+E9DxyCnNnWr9SAmKWosDeuenYDPXm6pOdtGJ", + "9WUORCyDclWcHIbsy314d/T+j/PmOdsg3YCMHfk50bKCKXFvdHtku4NPJZCCKWVBqWlpzfIcE8fZJvC8", + "1oELcdQOiL3nGFa5Yhh70y4iYKXh0tx5dWWFkAdchGWPiF5KUS2WQYHpGk6v8srKKcThKD+pCsOjMIPM", + "TPGEFEJpp2nakZolNyFn91PBtRR53jZKWRF94SztL+nmLE31CyGuZzS9foB6LRe6Xmk29fnV3eDAZibZ", + "KS3WvoAT2858f6le+x6GyjmiHc0gOyzu4MbuAZjv9nPQ/Tb3s/7CuutqM9O4GnPGCdWiYGn8TH1e0XaD", + "MXIxFhWtWWZ7K9oqE/gaHvbwsqqDK5BF9tEMnEabw50RxwickxnZjfkvSuDdcckcHKMZuCj7zMVJUUk6", + "KOt1AEBIbeqzrqRtyBhKYjVXEQtbKgFd5F1AR94qGIl0O9jMCHcOlIZbAdWLfqwBvG+ND1NbW85GUs7E", + "xj9/0BSfuxHwH3ZTeYt5DIV4XTSkJW2Qly9UM8AR4iWud8ZDXWLa+2xsVFTdPHfkDR8AMBwn1YJhVLTU", + "oWDMKcshS2K9F89rG9U00LRdala3JTpTjpOntPKtD83YlQRXOMWK+LLt/yqpISVRv963JPMMNmDzOn4D", + "KWxPw2ngf4HctjzsGANEmeSwglb4mKvmUqGoyVbgv1X1xyQDKNEb2bWRxeKiwru8Yzhxa0+CyJox2I1a", + "Uixi7U6RPWaSqFFnwxN7TNTYo2QgWrGsoi38qUNFjrYZ0BzlCKp6OkLi9cix0/xkR3jjBzjz38dEGY+J", + "d+P40MEsKI66XQxob5xkpYZOPY+HSYalimoHC86W1Y5YS+IN31AlXfNhg2Sf5Bt1a+Q+McEDxH67gRSl", + "GqfvQOY0ngEnhat6gtTOATKrFZhPItb2JXDCRdBick1Vrao0NRT9D3ZifIlxp03fwKncRDPefmcJDkZU", + "p5jaoCIhazq9uXn+k5zEnQdxcLwYjShw6X877F+eup3agS9gK29u9tPI/tik0d1ijotPyazyA+W5WNue", + "kaEe+hy8H9RSn3cBObGc1deyj9qcuvKeXVMHC+LVC7olQuI/Ruv8R0VzNt8in7Hg+8+IWlJDQs7xaiMC", + "XBSomXi3eDX1gHlri/BT2XWzsWMGw23NKAHQ5iL3zX0EKeg1hNuAwQ6Wf6baME5VzdByYa7sznb2seAW", + "70u0FDQLNX0sFNluo+5LB5uv/3eTCxdO5eu7lTlNfYdQ16KozWewC7AnLr2EYneyZJ+veRKoOws3RCt9", + "dn12A5PpgawrloEw1H6lBXav42qv88ytljHS8tvpsbEjzXTUUu56F8ZG3fSADvs07gM/bFv5cfAfreE6", + "tIwx4P9R8D7QqDaE1/ak/QhYblXgiMBqrdUzsUkkzNW+ABNrrjbqvGxqd3gTK+OpBKpsxM35K6d4NiVK", + "GTeKsI0JrX2a9SgZzBlvmCXjZaUjegxWKuXbAGGh0R/ROuBCG5ISjDC5ovmrFUjJsqGNM6fDtnQMW0R4", + "R4f7NmLCqO/U/gBMNToc5mc2ZvTwNXOB2yZUNlxTacozKrPwdcZJCtLc+2RNt+rmHqXaObDPp0QDaaZd", + "NSDwLiFpW0DyrXMK39LfUwNI79DxM8Jhg3HBEWeNNe1oMeCf6cPwWThsCrpJcrHALMKBA+Fq06KHz6qA", + "gqMZ3Mpn49bt51HsN9g9DZbld4xIC5x1zBS7z/0r3EpUI3/iTO88+dZG2U3rtHG39mB6pPJFE/xviaV/", + "HmOZuK74SpiN64VNn6riaQ+CTYQB/1DbLj6wixgG4dK4QyP4+HZn7UiLWL6vtQwkaDFQO8L7QTWh7DR1", + "4Vl9U1rP1GCRMnXZ0gda2qx93t9LA+DZ3vTurLenrUNmzDiH9IjbnR+dlKJM0jExn7ZzR+bcBA7SNowD", + "9BE4AQbWXYfHqLqXTavuUaupzaFt8gab6uzzdpXpLqV/yEw0wNHbLggxR15mO7ejdQszeWpjyrSbY9Y2", + "g9VMglAiIa0kmonXdLu/7dhAxeiLv5598fDRL4+++JKYF0jGFqCaquOdtl1NXCDjXbvPx40E7C1PxzfB", + "Vx+wiPP+R59UVW+KO2uW26qmpGivadkh9uXIBRA5jpF2UTfaKxynCe3/Y21XbJF3vmMxFPz+eyZFnse7", + "PtRyVcSBEtutwIViNJASpGJKG0bY9oAy3UREqyWaB7H278pWkxE8BW8/dlTA9EDIVWwhQwG1yM8wt9t5", + "jQhsytzxKuvp2bUup6dZCx0KjRgVMwNSitKJ9mxOYhBhBpEMMmud4RMt4kGMbM1sbbRsjBBd5Hmc9MKG", + "2bu5fbuZq45zerOJEfHCH8obkOaQf2K4bsFNOElj2v/D8I9IIYY74xr1cn8PXhHVD27WlH8UaP2k/Ah5", + 
"IAAD2batPMkgUSwoRCytlwD9Cd6B3BU/XjaO5b1pIQiJ/2APeGH6bPNencngwPnEFX1f1kgJlvJuiBJa", + "y9+XketZb32RBFvkjCZag7JsSfTFwiDdWj2rs5gHtJJesrMUQhOjmeZ5JEna2nHwTIWEY1QCuaL5x+ca", + "3zGp9BniA7I3w6lRYaZsiGSLSnWzOn0v6Ki5g6zYu5uav8bE7P8As0fRe84N5ZzwvdsMjTvYsX7hbwWb", + "603WOKYNsnr4JZm5ZhulhJSprnN/7YWTOjEUJJu7gFbY6D2ZqPvW+bPQtyDjuY/EIT8G7q3aZ+8gbI7o", + "J2YqAyc3SuUx6uuRRQR/MR4VNufdc13csjHDzcq+BAXcDiz70m87PHZ5trSJuXQqBf11jr6tW7iNXNTN", + "2sbWLBrd3+Hq6q2ejSk1FO/FYD7HWkd30pThoJYMv0OVI4sjN4abN0YxPw/VvbW1XQdqc3f2o2L53oCV", + "VqX1D9PJAjgoprCW+C+ud8zHvUs9BLbyQv+oWlhvUy7GIiay1tbkwVRBDfUR5dPdZ5Ga15jVmFaS6S32", + "DfYGNPZLtB7T93VtD1cbpvalubtPi2uoe7c3lUAq5W/X7wXN8T6yLj5ubiGRH5NvbYVvd1C+vjf7V3j8", + "lyfZ6eOH/zr7y+kXpyk8+eKr01P61RP68KvHD+HRX754cgoP519+NXuUPXryaPbk0ZMvv/gqffzk4ezJ", + "l1/96z3DhwzIFlBf2v/p5D+Ts3whkrPX58mlAbbBCS3ZD2D2BnXlucC+lgapKZ5EKCjLJ0/9T//Hn7Dj", + "VBTN8P7XievPNFlqXaqnJyfr9fo4/ORkgan/iRZVujzx82C3wZa88vq8jtG3cTi4o431GDfVkcIZPnvz", + "7cUlOXt9ftwQzOTp5PT49Piha23NackmTyeP8Sc8PUvc9xOsr3miXOn8k7K0xfM/TCcnjg7dX0ugORbR", + "MX8UoCVL/SMJNNu6/6s1XSxAHmOGhv1p9ejESxwn7111hA+7np2E0R8n71tFJLI9X/rohn2vnLz37XF3", + "D9hqjeriygziom7N70G7kkrWvhCpx4HeBDf6lCisjW9+KiUT5kxOzQWbAfr+MYRNYpFwLSueWoewnQI4", + "/vfl2X+iU/zl2X+Sr8np1CUVKFRaYtPbrOqamM4zC3Y/FlF9sz2rK5Y0DvTJ07cxQ5ILCC2rWc5SYmUR", + "PIyG0oKzUo/Y8EK0Gk5U3cO84eyGW58mX717/8VfPsQkxp78WyMpKOLR8uwK390UkVbQzddDKNu4KHMz", + "7j8qkNtmEQXdTEKA+17SSGUznwTkmzyH8YdBZOK/X7z6kQhJnIb8mqbXdQKUz3hrsvzChDfz5RDE7vIM", + "gQZeFeYecplUhVqU7SK/NZrfYUdEBBRZxqPTU88nnRYSHNATd+6DmTqmqz6hYShOYIzsp7srAhua6nxL", + "qApiITAy0Xcv7aSpiTJpBcvvNH/2Z3RbEs00ODTjPlKFXmia74HvstPpsYUOF9ZTmot0f4p7DxlRCN7F", + "RIVwaz2N/Lm7/z12ty95kFKYM80w9rq5cvx11gLSyZv51oM7UEzkmPxNVCgfGsm/0hDrc48zWL+Hm9PV", + "PgqC5Zr0IHxydNRd+NFRE9o3hzUyWcrxxS46jo6OzU49OZCV7bRFt0oFjzo7hwzX26yXdFNHRlPCBU84", + "LKhmKyCBUvnk9OFnu8JzbmPRjUBsBfcP08kXn/GWnXMj2NCc4Jt2NY8/29VcgFyxFMglFKWQVLJ8S37i", + "dbB/0Ea9z/5+4tdcrLlHhNFJq6KgcuuEaFrznIoHvX128p9eFaNG0EYuShcK411QRLUyra90yBeTdx+8", + "DjBS99j12skMu1yOfRVChWVYO0Hvgzp5j/bzwd9PnBM0/hD9GFZBPvH1FQfetJW04g9bWtF7vTEL2T2c", + "eScYL6U6XVblyXv8D+q6wYpsYf4TveEnGPd58r6FCPe4h4j2783n4RurQmTggRPzuUI9btfjk/f232Ai", + "2JQgmbmOsBim+9UWLT7BbtHb/s9bnkZ/7K+jVbB14OcTb2qJqdTtN9+3/mzTlFpWOhPrYBZ0UlgPWx8y", + "87BS3b9P1pRpIyS5OqF0rkH2P9ZA8xPXFKjza1OHv/cEmwsEP3bEqlLYQkFtjfYNXV+28j2lLYjxjUBD", + "xRDD3SQzxpELhVyyMT3ah30VqccbL5dgY2y99zYig2pBZlLQLKVKmz9c+6yebvzhlvpXt37HecQ3h2Ci", + "uaFfctLwk+O9Dhscd4yQGewLOX/uJ2ySzH53wawH0Tc0I76yVEJe0txsOGTkzIn/LWz83kLVp5eCPrHY", + "8tHkjG/84VOEYpm9loIo44Vxgj53Y4QKo0UaBrAAnjgWlMxEtnWtyCaSrvXG1uHoMrcT2r4x2oZIKmmh", + "hh7egZXyj22a3GeR/NMQ+Kch8E9T0Z+GwD93909D4EhD4J9msj/NZP8jzWSH2MZiYqYz/wxLm9gbnbbm", + "tXofbXpQ1Cy+XSGM6Voma6WKYrsLpo8JucTyLtTcErACSXOSUmWlK1eKqMAITqwzBtnTK560ILFxkmbi", + "+81/bYDqVXV6+hjI6YPuN0qzPA95c/9blHfxkc0h+ZpcTa4mvZEkFGIFmU14DWug26/2Dvu/6nFf9Zon", + "YKY71s/x5ciIquZzljKL8lzwBaEL0QRXY9FVLvAJSAOcbUFFmJ66ZBTmMqBdh/p2qfa25N6XAM6bLdwb", + "UtAhl3g0gSG8A0MJ/mVMHMH/aCn9phWrbstId47d46p/cpWPwVU+OV/53J20gWnxv6WY+eT0yWe7oNAQ", + "/aPQ5DtMHLidOOaKgabRTlw3FbR8MRhv7muCj8NgXrxF6zDet+/MRaBArvwF28SmPj05wepgS6H0ycRc", + "f+241fDhuxrm9/52KiVbYatntG4KyRaM0zxxgZ9JE3/66Ph08uH/BwAA///VdB8P3SEBAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index f338c38b1f..4a97d94d07 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -1210,6 +1210,7 @@ type PreEncodedSimulateTxnResult struct { 
LogicSigBudgetConsumed *uint64 `codec:"logic-sig-budget-consumed,omitempty"` TransactionTrace *model.SimulationTransactionExecTrace `codec:"exec-trace,omitempty"` UnnamedResourcesAccessed *model.SimulateUnnamedResourcesAccessed `codec:"unnamed-resources-accessed,omitempty"` + FixedSigner *string `codec:"fixed-signer,omitempty"` } // PreEncodedSimulateTxnGroupResult mirrors model.SimulateTransactionGroupResult @@ -1246,6 +1247,7 @@ type PreEncodedSimulateRequest struct { AllowUnnamedResources bool `codec:"allow-unnamed-resources,omitempty"` ExtraOpcodeBudget uint64 `codec:"extra-opcode-budget,omitempty"` ExecTraceConfig simulation.ExecTraceConfig `codec:"exec-trace-config,omitempty"` + FixSigners bool `codec:"fix-signers,omitempty"` } // SimulateTransaction simulates broadcasting a raw transaction to the network, returning relevant simulation results. diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go index ee306a62b4..6f36784ee4 100644 --- a/daemon/algod/api/server/v2/utils.go +++ b/daemon/algod/api/server/v2/utils.go @@ -473,13 +473,20 @@ func convertTxnTrace(txnTrace *simulation.TransactionTrace) *model.SimulationTra } func convertTxnResult(txnResult simulation.TxnResult) PreEncodedSimulateTxnResult { - return PreEncodedSimulateTxnResult{ + result := PreEncodedSimulateTxnResult{ Txn: ConvertInnerTxn(&txnResult.Txn), AppBudgetConsumed: omitEmpty(txnResult.AppBudgetConsumed), LogicSigBudgetConsumed: omitEmpty(txnResult.LogicSigBudgetConsumed), TransactionTrace: convertTxnTrace(txnResult.Trace), UnnamedResourcesAccessed: convertUnnamedResourcesAccessed(txnResult.UnnamedResourcesAccessed), } + + if !txnResult.FixedSigner.IsZero() { + fixedSigner := txnResult.FixedSigner.String() + result.FixedSigner = &fixedSigner + } + + return result } func convertUnnamedResourcesAccessed(resources *simulation.ResourceTracker) *model.SimulateUnnamedResourcesAccessed { @@ -588,6 +595,7 @@ func convertSimulationResult(result simulation.Result) PreEncodedSimulateRespons MaxLogSize: result.EvalOverrides.MaxLogSize, MaxLogCalls: result.EvalOverrides.MaxLogCalls, ExtraOpcodeBudget: omitEmpty(result.EvalOverrides.ExtraOpcodeBudget), + FixSigners: omitEmpty(result.EvalOverrides.FixSigners), } } @@ -614,6 +622,7 @@ func convertSimulationRequest(request PreEncodedSimulateRequest) simulation.Requ AllowUnnamedResources: request.AllowUnnamedResources, ExtraOpcodeBudget: request.ExtraOpcodeBudget, TraceConfig: request.ExecTraceConfig, + FixSigners: request.FixSigners, } } diff --git a/ledger/simulation/simulation_eval_test.go b/ledger/simulation/simulation_eval_test.go index 41482e72e2..0399c8215a 100644 --- a/ledger/simulation/simulation_eval_test.go +++ b/ledger/simulation/simulation_eval_test.go @@ -8896,3 +8896,451 @@ func TestUnnamedResourcesCrossProductLimits(t *testing.T) { }) } } + +func TestFixSigners(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + t.Run("AllowEmptySignatures=false", func(t *testing.T) { + t.Parallel() + env := simulationtesting.PrepareSimulatorTest(t) + defer env.Close() + + sender := env.Accounts[0] + + txn := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + }).SignedTxn() + + simRequest := simulation.Request{ + TxnGroups: [][]transactions.SignedTxn{ + {txn}, + }, + AllowEmptySignatures: false, + FixSigners: true, + } + + _, err := simulation.MakeSimulator(env.Ledger, false).Simulate(simRequest) + require.ErrorAs(t, err, &simulation.InvalidRequestError{}) + 
require.ErrorContains(t, err, "FixSigners requires AllowEmptySignatures to be enabled") + }) + + type testInputs struct { + txgroup []transactions.SignedTxn + sender simulationtesting.Account + other simulationtesting.Account + innerRekeyAddr basics.Address + } + + makeTestInputs := func(env *simulationtesting.Environment) testInputs { + sender := env.Accounts[0] + other := env.Accounts[1] + + innerRekeyAddr := env.Accounts[2].Addr + innerProgram := fmt.Sprintf(`#pragma version 9 + txn ApplicationID + bz end + + // Rekey to the the innerRekeyAddr + itxn_begin + int pay + itxn_field TypeEnum + txn ApplicationArgs 0 + itxn_field Sender + addr %s + itxn_field RekeyTo + itxn_submit + + end: + int 1 + `, innerRekeyAddr) + + innerAppID := env.CreateApp(sender.Addr, simulationtesting.AppParams{ + ApprovalProgram: innerProgram, + ClearStateProgram: "#pragma version 9\nint 1", + }) + + outerProgram := fmt.Sprintf(`#pragma version 9 + txn ApplicationID + bz end + + // Rekey to inner app + itxn_begin + int pay + itxn_field TypeEnum + txn ApplicationArgs 0 + itxn_field Sender + addr %s + itxn_field RekeyTo + itxn_submit + + // Call inner app + itxn_begin + int appl + itxn_field TypeEnum + int %d + itxn_field ApplicationID + txn ApplicationArgs 0 + itxn_field ApplicationArgs + itxn_submit + + end: + int 1`, innerAppID.Address(), innerAppID) + + appID := env.CreateApp(sender.Addr, simulationtesting.AppParams{ + ApprovalProgram: outerProgram, + ClearStateProgram: "#pragma version 9\nint 1", + }) + + env.TransferAlgos(sender.Addr, appID.Address(), 1_000_000) + + // rekey to EOA + pay0 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + RekeyTo: other.Addr, + }) + // rekey to outer app, which rekeys to inner app, which rekeys to another app + pay1 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + RekeyTo: appID.Address(), + }) + // app rekeys to random address + appCall := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: other.Addr, + ApplicationID: appID, + ApplicationArgs: [][]byte{sender.Addr[:]}, + ForeignApps: []basics.AppIndex{innerAppID}, + }) + // rekey back to sender (original address) + pay2 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + RekeyTo: sender.Addr, + }) + // send txn from sender + pay3 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: sender.Addr, + Receiver: sender.Addr, + }) + + txgroup := txntest.Group(&pay0, &pay1, &appCall, &pay2, &pay3) + + return testInputs{ + txgroup: txgroup, + sender: sender, + other: other, + innerRekeyAddr: innerRekeyAddr, + } + } + + // Convenience function for getting the expected app call result. This is a function instead of + // a variable because it's used by multiple tests, and the expected result is modified with the + // input transactions before comparison by each test. 
+ expectedAppCallResultFn := func() simulation.TxnResult { + return simulation.TxnResult{ + AppBudgetConsumed: ignoreAppBudgetConsumed, + Txn: transactions.SignedTxnWithAD{ + ApplyData: transactions.ApplyData{ + EvalDelta: transactions.EvalDelta{ + InnerTxns: []transactions.SignedTxnWithAD{ + {}, + { + ApplyData: transactions.ApplyData{ + EvalDelta: transactions.EvalDelta{ + InnerTxns: []transactions.SignedTxnWithAD{ + {}, + }, + }, + }, + }, + }, + }, + }, + }, + } + } + + t.Run("no signatures", func(t *testing.T) { + t.Parallel() + simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { + inputs := makeTestInputs(&env) + + // Do not sign any of the transactions + + return simulationTestCase{ + input: simulation.Request{ + TxnGroups: [][]transactions.SignedTxn{inputs.txgroup}, + AllowEmptySignatures: true, + FixSigners: true, + }, + expected: simulation.Result{ + Version: simulation.ResultLatestVersion, + LastRound: env.TxnInfo.LatestRound(), + EvalOverrides: simulation.ResultEvalOverrides{ + AllowEmptySignatures: true, + FixSigners: true, + }, + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + {}, // pay0 + { // pay1 + FixedSigner: inputs.other.Addr, + }, + // appCall + expectedAppCallResultFn(), + { // pay2 + FixedSigner: inputs.innerRekeyAddr, + }, + {}, // pay3 + }, + AppBudgetConsumed: ignoreAppBudgetConsumed, + AppBudgetAdded: 2800, + }, + }, + }, + } + }) + }) + + t.Run("sign pay after outer rekey", func(t *testing.T) { + t.Parallel() + simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { + inputs := makeTestInputs(&env) + + // Sign txn 1, payment after the outer rekey, with the wrong AuthAddr. This renders the + // group invalid, since the AuthAddr will not be corrected if a signature is provided. + inputs.txgroup[1] = inputs.txgroup[1].Txn.Sign(inputs.sender.Sk) + + return simulationTestCase{ + input: simulation.Request{ + TxnGroups: [][]transactions.SignedTxn{inputs.txgroup}, + AllowEmptySignatures: true, + FixSigners: true, + }, + expectedError: fmt.Sprintf("should have been authorized by %s but was actually authorized by %s", inputs.other.Addr, inputs.sender.Addr), + expected: simulation.Result{ + Version: simulation.ResultLatestVersion, + LastRound: env.TxnInfo.LatestRound(), + EvalOverrides: simulation.ResultEvalOverrides{ + AllowEmptySignatures: true, + FixSigners: true, + }, + TxnGroups: []simulation.TxnGroupResult{ + { + FailedAt: simulation.TxnPath{1}, + Txns: []simulation.TxnResult{ + {}, // pay0 + {}, // pay1, does NOT contain FixedSigner + {}, // appCall + {}, // pay2 + {}, // pay3 + }, + AppBudgetConsumed: 0, + // This is here even though we don't make it to the app call because + // pooled app budget is determined before the group is evaluated. + AppBudgetAdded: 700, + }, + }, + }, + } + }) + }) + + t.Run("sign pay after inner rekey", func(t *testing.T) { + t.Parallel() + simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { + inputs := makeTestInputs(&env) + + // Sign txn 3, payment after the inner rekey, with the wrong AuthAddr. This renders the + // group invalid, since the AuthAddr will not be corrected if a signature is provided. 
+ inputs.txgroup[3] = inputs.txgroup[3].Txn.Sign(inputs.other.Sk) + + return simulationTestCase{ + input: simulation.Request{ + TxnGroups: [][]transactions.SignedTxn{inputs.txgroup}, + AllowEmptySignatures: true, + FixSigners: true, + }, + expectedError: fmt.Sprintf("should have been authorized by %s but was actually authorized by %s", inputs.innerRekeyAddr, inputs.other.Addr), + expected: simulation.Result{ + Version: simulation.ResultLatestVersion, + LastRound: env.TxnInfo.LatestRound(), + EvalOverrides: simulation.ResultEvalOverrides{ + AllowEmptySignatures: true, + FixSigners: true, + }, + TxnGroups: []simulation.TxnGroupResult{ + { + FailedAt: simulation.TxnPath{3}, + Txns: []simulation.TxnResult{ + {}, // pay0 + { // pay1 + FixedSigner: inputs.other.Addr, + }, + // appCall + expectedAppCallResultFn(), + {}, // pay2, does NOT contained FixedSigner + {}, // pay3 + }, + AppBudgetConsumed: ignoreAppBudgetConsumed, + AppBudgetAdded: 2800, + }, + }, + }, + } + }) + }) + + // Edge case tests below + + t.Run("sender account is empty", func(t *testing.T) { + t.Parallel() + simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { + sender := env.Accounts[0] + + appID := env.CreateApp(sender.Addr, simulationtesting.AppParams{ + ApprovalProgram: "#pragma version 9\nint 1", + ClearStateProgram: "#pragma version 9\nint 1", + }) + + var noBalanceAccount1 basics.Address + crypto.RandBytes(noBalanceAccount1[:]) + + var noBalanceAccount2 basics.Address + crypto.RandBytes(noBalanceAccount2[:]) + + noBalPay1 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: noBalanceAccount1, + Receiver: noBalanceAccount1, + Fee: 0, + Note: []byte{1}, + }) + appCall := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: sender.Addr, + ApplicationID: appID, + Fee: env.TxnInfo.CurrentProtocolParams().MinTxnFee * 3, + }) + noBalPay2 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: noBalanceAccount2, + Receiver: noBalanceAccount2, + Fee: 0, + Note: []byte{2}, + }) + txgroup := txntest.Group(&noBalPay1, &appCall, &noBalPay2) + + // Testing that our ledger lookup of accounts to retreive their AuthAddr does not crash + // and burn when the account is empty. 
+ + return simulationTestCase{ + input: simulation.Request{ + TxnGroups: [][]transactions.SignedTxn{txgroup}, + AllowEmptySignatures: true, + FixSigners: true, + }, + expected: simulation.Result{ + Version: simulation.ResultLatestVersion, + LastRound: env.TxnInfo.LatestRound(), + EvalOverrides: simulation.ResultEvalOverrides{ + AllowEmptySignatures: true, + FixSigners: true, + }, + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + {}, // noBalPay1 + { // appCall + AppBudgetConsumed: ignoreAppBudgetConsumed, + }, + {}, // noBalPay2 + }, + AppBudgetAdded: 700, + AppBudgetConsumed: ignoreAppBudgetConsumed, + }, + }, + }, + } + }) + }) + + t.Run("fixed AuthAddr is sender address", func(t *testing.T) { + t.Parallel() + simulationTest(t, func(env simulationtesting.Environment) simulationTestCase { + acct0 := env.Accounts[0] + acct1 := env.Accounts[1] + acct2 := env.Accounts[2] + + appID := env.CreateApp(acct0.Addr, simulationtesting.AppParams{ + ApprovalProgram: "#pragma version 9\nint 1", + ClearStateProgram: "#pragma version 9\nint 1", + }) + + pay1 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: acct1.Addr, + Receiver: acct1.Addr, + Note: []byte{1}, + }) + appCall := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.ApplicationCallTx, + Sender: acct0.Addr, + ApplicationID: appID, + }) + pay2 := env.TxnInfo.NewTxn(txntest.Txn{ + Type: protocol.PaymentTx, + Sender: acct1.Addr, + Receiver: acct1.Addr, + Note: []byte{2}, + }) + txgroup := txntest.Group(&pay1, &appCall, &pay2) + + txgroup[0].AuthAddr = acct2.Addr + txgroup[2].AuthAddr = acct2.Addr + + return simulationTestCase{ + input: simulation.Request{ + TxnGroups: [][]transactions.SignedTxn{txgroup}, + AllowEmptySignatures: true, + FixSigners: true, + }, + expected: simulation.Result{ + Version: simulation.ResultLatestVersion, + LastRound: env.TxnInfo.LatestRound(), + EvalOverrides: simulation.ResultEvalOverrides{ + AllowEmptySignatures: true, + FixSigners: true, + }, + TxnGroups: []simulation.TxnGroupResult{ + { + Txns: []simulation.TxnResult{ + { // pay1 + FixedSigner: acct1.Addr, + }, + { // appCall + AppBudgetConsumed: ignoreAppBudgetConsumed, + }, + { // pay2 + FixedSigner: acct1.Addr, + }, + }, + AppBudgetAdded: 700, + AppBudgetConsumed: ignoreAppBudgetConsumed, + }, + }, + }, + } + }) + }) +} diff --git a/ledger/simulation/simulator.go b/ledger/simulation/simulator.go index c7c722686d..d4ed4e4e94 100644 --- a/ledger/simulation/simulator.go +++ b/ledger/simulation/simulator.go @@ -41,6 +41,7 @@ type Request struct { AllowUnnamedResources bool ExtraOpcodeBudget uint64 TraceConfig ExecTraceConfig + FixSigners bool } // simulatorLedger patches the ledger interface to use a constant latest round. @@ -142,7 +143,7 @@ var proxySigner = crypto.PrivateKey{ // check verifies that the transaction is well-formed and has valid or missing signatures. // An invalid transaction group error is returned if the transaction is not well-formed or there are invalid signatures. // To make things easier, we support submitting unsigned transactions and will respond whether signatures are missing. 
-func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.SignedTxn, tracer logic.EvalTracer, overrides ResultEvalOverrides) error { +func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.SignedTxnWithAD, tracer logic.EvalTracer, overrides ResultEvalOverrides) error { proxySignerSecrets, err := crypto.SecretKeyToSignatureSecrets(proxySigner) if err != nil { return err @@ -158,7 +159,8 @@ func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.Sig // denoting that a LogicSig's delegation signature is omitted, e.g. by setting all the bits of // the signature. txnsToVerify := make([]transactions.SignedTxn, len(txgroup)) - for i, stxn := range txgroup { + for i, stxnad := range txgroup { + stxn := stxnad.SignedTxn if stxn.Txn.Type == protocol.StateProofTx { return errors.New("cannot simulate StateProof transactions") } @@ -181,16 +183,14 @@ func (s Simulator) check(hdr bookkeeping.BlockHeader, txgroup []transactions.Sig return err } -func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, stxns []transactions.SignedTxn, tracer logic.EvalTracer) (*ledgercore.ValidatedBlock, error) { +func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, group []transactions.SignedTxnWithAD, tracer logic.EvalTracer) (*ledgercore.ValidatedBlock, error) { // s.ledger has 'StartEvaluator' because *data.Ledger is embedded in the simulatorLedger // and data.Ledger embeds *ledger.Ledger - eval, err := s.ledger.StartEvaluator(hdr, len(stxns), 0, tracer) + eval, err := s.ledger.StartEvaluator(hdr, len(group), 0, tracer) if err != nil { return nil, err } - group := transactions.WrapSignedTxnsWithAD(stxns) - err = eval.TransactionGroup(group) if err != nil { return nil, EvalFailureError{SimulatorError{err}} @@ -208,7 +208,7 @@ func (s Simulator) evaluate(hdr bookkeeping.BlockHeader, stxns []transactions.Si return &vb, nil } -func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer logic.EvalTracer, overrides ResultEvalOverrides) (*ledgercore.ValidatedBlock, error) { +func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxnWithAD, tracer logic.EvalTracer, overrides ResultEvalOverrides) (*ledgercore.ValidatedBlock, error) { prevBlockHdr, err := s.ledger.BlockHdr(s.ledger.start) if err != nil { return nil, err @@ -216,6 +216,48 @@ func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer l nextBlock := bookkeeping.MakeBlock(prevBlockHdr) hdr := nextBlock.BlockHeader + if overrides.FixSigners { + // Map of rekeys for senders in the group + staticRekeys := make(map[basics.Address]basics.Address) + + for i := range txgroup { + stxn := &txgroup[i].SignedTxn + sender := stxn.Txn.Sender + + if authAddr, ok := staticRekeys[sender]; ok && txnHasNoSignature(*stxn) { + // If there is a static rekey for the sender set the auth addr to that address + stxn.AuthAddr = authAddr + if stxn.AuthAddr == sender { + stxn.AuthAddr = basics.Address{} + } + } else { + // Otherwise lookup the sender's account and set the txn auth addr to the account's auth addr + if txnHasNoSignature(*stxn) { + var data ledgercore.AccountData + data, _, _, err = s.ledger.LookupAccount(s.ledger.start, sender) + if err != nil { + return nil, err + } + + stxn.AuthAddr = data.AuthAddr + if stxn.AuthAddr == sender { + stxn.AuthAddr = basics.Address{} + } + } + } + + // Stop processing transactions after the first application because auth addr correction will be done in AfterProgram + if stxn.Txn.Type == protocol.ApplicationCallTx { + break + } + + 
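+ // For example: if txn 0 is an unsigned payment that rekeys A to B, the
+ // staticRekeys entry recorded just below lets an unsigned txn 1 from the
+ // same sender A pick up AuthAddr B on the next iteration of this loop.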
if stxn.Txn.RekeyTo != (basics.Address{}) { + staticRekeys[sender] = stxn.Txn.RekeyTo + } + } + + } + // check that the transaction is well-formed and mark whether signatures are missing err = s.check(hdr, txgroup, tracer, overrides) if err != nil { @@ -239,6 +281,14 @@ func (s Simulator) simulateWithTracer(txgroup []transactions.SignedTxn, tracer l // Simulate simulates a transaction group using the simulator. Will error if the transaction group is not well-formed. func (s Simulator) Simulate(simulateRequest Request) (Result, error) { + if simulateRequest.FixSigners && !simulateRequest.AllowEmptySignatures { + return Result{}, InvalidRequestError{ + SimulatorError{ + errors.New("FixSigners requires AllowEmptySignatures to be enabled"), + }, + } + } + if simulateRequest.Round != 0 { s.ledger.start = simulateRequest.Round } else { @@ -246,11 +296,6 @@ func (s Simulator) Simulate(simulateRequest Request) (Result, error) { s.ledger.start = s.ledger.Ledger.Latest() } - simulatorTracer, err := makeEvalTracer(s.ledger.start, simulateRequest, s.developerAPI) - if err != nil { - return Result{}, err - } - if len(simulateRequest.TxnGroups) != 1 { return Result{}, InvalidRequestError{ SimulatorError{ @@ -259,7 +304,14 @@ func (s Simulator) Simulate(simulateRequest Request) (Result, error) { } } - block, err := s.simulateWithTracer(simulateRequest.TxnGroups[0], simulatorTracer, simulatorTracer.result.EvalOverrides) + group := transactions.WrapSignedTxnsWithAD(simulateRequest.TxnGroups[0]) + + simulatorTracer, err := makeEvalTracer(s.ledger.start, group, simulateRequest, s.developerAPI) + if err != nil { + return Result{}, err + } + + block, err := s.simulateWithTracer(group, simulatorTracer, simulatorTracer.result.EvalOverrides) if err != nil { var verifyError *verify.TxGroupError switch { @@ -304,5 +356,25 @@ func (s Simulator) Simulate(simulateRequest Request) (Result, error) { } simulatorTracer.result.TxnGroups[0].AppBudgetConsumed = totalCost + // Set the FixedSigner for each transaction that had a signer change during evaluation + for i := range simulatorTracer.result.TxnGroups[0].Txns { + sender := simulatorTracer.result.TxnGroups[0].Txns[i].Txn.Txn.Sender + inputSigner := simulatorTracer.result.TxnGroups[0].Txns[i].Txn.AuthAddr + if inputSigner.IsZero() { + // A zero AuthAddr indicates the sender is the signer + inputSigner = sender + } + + actualSigner := simulatorTracer.groups[0][i].SignedTxn.AuthAddr + if actualSigner.IsZero() { + // A zero AuthAddr indicates the sender is the signer + actualSigner = sender + } + + if inputSigner != actualSigner { + simulatorTracer.result.TxnGroups[0].Txns[i].FixedSigner = actualSigner + } + } + return *simulatorTracer.result, nil } diff --git a/ledger/simulation/simulator_test.go b/ledger/simulation/simulator_test.go index 95cafe12e9..e5a126a29a 100644 --- a/ledger/simulation/simulator_test.go +++ b/ledger/simulation/simulator_test.go @@ -142,7 +142,7 @@ int 1`, mockTracer := &mocktracer.Tracer{} s.ledger.start = s.ledger.Ledger.Latest() // Set starting round for simulation - block, err := s.simulateWithTracer(txgroup, mockTracer, ResultEvalOverrides{}) + block, err := s.simulateWithTracer(transactions.WrapSignedTxnsWithAD(txgroup), mockTracer, ResultEvalOverrides{}) require.NoError(t, err) evalBlock := block.Block() diff --git a/ledger/simulation/trace.go b/ledger/simulation/trace.go index 09d51f3193..a1cb311089 100644 --- a/ledger/simulation/trace.go +++ b/ledger/simulation/trace.go @@ -45,6 +45,10 @@ type TxnResult struct { // // In that case, it will 
be populated with the unnamed resources accessed by this transaction. UnnamedResourcesAccessed *ResourceTracker + + // If the signer needed to be changed, this will be the address of the required signer + // This will only be present if FixSigners is true in the EvalOverrides + FixedSigner basics.Address } // TxnGroupResult contains the simulation result for a single transaction group @@ -90,6 +94,7 @@ type ResultEvalOverrides struct { MaxLogCalls *uint64 MaxLogSize *uint64 ExtraOpcodeBudget uint64 + FixSigners bool } // LogBytesLimit hardcode limit of how much bytes one can log per transaction during simulation (with AllowMoreLogging) @@ -206,6 +211,7 @@ func makeSimulationResult(lastRound basics.Round, request Request, developerAPI AllowEmptySignatures: request.AllowEmptySignatures, ExtraOpcodeBudget: request.ExtraOpcodeBudget, AllowUnnamedResources: request.AllowUnnamedResources, + FixSigners: request.FixSigners, }.AllowMoreLogging(request.AllowMoreLogging) if err := validateSimulateRequest(request, developerAPI); err != nil { diff --git a/ledger/simulation/tracer.go b/ledger/simulation/tracer.go index b867cf6aac..94fa1394f7 100644 --- a/ledger/simulation/tracer.go +++ b/ledger/simulation/tracer.go @@ -98,14 +98,16 @@ type evalTracer struct { // scratchSlots are the scratch slots changed on current opcode (currently either `store` or `stores`). // NOTE: this field scratchSlots is used only for scratch change exposure. scratchSlots []uint64 + + groups [][]transactions.SignedTxnWithAD } -func makeEvalTracer(lastRound basics.Round, request Request, developerAPI bool) (*evalTracer, error) { +func makeEvalTracer(lastRound basics.Round, group []transactions.SignedTxnWithAD, request Request, developerAPI bool) (*evalTracer, error) { result, err := makeSimulationResult(lastRound, request, developerAPI) if err != nil { return nil, err } - return &evalTracer{result: &result}, nil + return &evalTracer{result: &result, groups: [][]transactions.SignedTxnWithAD{group}}, nil } // handleError is responsible for setting the failedAt field properly. 
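Taken together, these pieces expose an opt-in signer-fixing mode: Request.FixSigners turns it on, ResultEvalOverrides.FixSigners echoes it back in the response, and TxnResult.FixedSigner reports the corrected signer per transaction. A minimal sketch of how a caller outside the simulation package might consume it (the helper name and the prepared Simulator/txgroup values are illustrative assumptions, not part of this patch):

	// fixedSignersFor returns, per group index, the address that must sign each
	// unsigned transaction when it differs from the sender. Sketch only.
	func fixedSignersFor(s simulation.Simulator, txgroup []transactions.SignedTxn) (map[int]basics.Address, error) {
		res, err := s.Simulate(simulation.Request{
			TxnGroups:            [][]transactions.SignedTxn{txgroup},
			AllowEmptySignatures: true, // required whenever FixSigners is set
			FixSigners:           true,
		})
		if err != nil {
			return nil, err // InvalidRequestError if AllowEmptySignatures is missing
		}
		fixed := make(map[int]basics.Address)
		for i, txnRes := range res.TxnGroups[0].Txns {
			if !txnRes.FixedSigner.IsZero() {
				fixed[i] = txnRes.FixedSigner // sign this txn with FixedSigner, not the sender
			}
		}
		return fixed, nil
	}

This mirrors what TestFixSigners and the REST-level fix-signers/fixed-signer fields above exercise.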
@@ -512,4 +514,44 @@ func (tracer *evalTracer) AfterProgram(cx *logic.EvalContext, pass bool, evalErr } else { tracer.handleError(evalError) } + + // Since an app could rekey multiple accounts, we need to go over the + // rest of the txngroup and make sure all the auth addrs are correct + if tracer.result.EvalOverrides.FixSigners && len(tracer.relativeCursor) == 1 { + knownAuthAddrs := make(map[basics.Address]basics.Address) + // iterate over all txns in the group after this one + for i := groupIndex + 1; i < len(cx.TxnGroup); i++ { + stxn := &tracer.groups[0][i] + sender := stxn.Txn.Sender + + // If we don't already know the auth addr, get it from the ledger + if _, authAddrKnown := knownAuthAddrs[sender]; !authAddrKnown { + // Get the auth addr from the ledger + data, err := cx.Ledger.AccountData(sender) + if err != nil { + panic(err) + } + + knownAuthAddrs[sender] = data.AuthAddr + } + + // Fix the current auth addr if this txn doesn't have a signature + if txnHasNoSignature(stxn.SignedTxn) { + stxn.AuthAddr = knownAuthAddrs[sender] + if stxn.AuthAddr == sender { + stxn.AuthAddr = basics.Address{} + } + } + + // If this is an appl, we can break since we know AfterProgram will be called afterwards + if stxn.Txn.Type == protocol.ApplicationCallTx { + break + } + + // If this is a rekey, save the auth addr for the sender + if stxn.Txn.RekeyTo != (basics.Address{}) { + knownAuthAddrs[sender] = stxn.Txn.RekeyTo + } + } + } } diff --git a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go index a30ce06b4f..66601c1737 100644 --- a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go +++ b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go @@ -2695,3 +2695,86 @@ int 1 } a.Equal(expectedResult, resp) } + +func TestSimulateWithFixSigners(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + a := require.New(fixtures.SynchronizedTest(t)) + var localFixture fixtures.RestClientFixture + localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) + defer localFixture.Shutdown() + + testClient := localFixture.LibGoalClient + + _, err := testClient.WaitForRound(1) + a.NoError(err) + + wh, err := testClient.GetUnencryptedWalletHandle() + a.NoError(err) + addresses, err := testClient.ListAddresses(wh) + a.NoError(err) + _, senderAddress := helper.GetMaxBalAddr(t, testClient, addresses) + if senderAddress == "" { + t.Error("no addr with funds") + } + a.NoError(err) + + rekeyTxn, err := testClient.ConstructPayment(senderAddress, senderAddress, 0, 1, nil, "", [32]byte{}, 0, 0) + a.NoError(err) + + var authAddr basics.Address + crypto.RandBytes(authAddr[:]) + rekeyTxn.RekeyTo = authAddr + + txn, err := testClient.ConstructPayment(senderAddress, senderAddress, 0, 1, nil, "", [32]byte{}, 0, 0) + a.NoError(err) + + gid, err := testClient.GroupID([]transactions.Transaction{rekeyTxn, txn}) + a.NoError(err) + + rekeyTxn.Group = gid + txn.Group = gid + + simulateRequest := v2.PreEncodedSimulateRequest{ + TxnGroups: []v2.PreEncodedSimulateRequestTransactionGroup{ + { + Txns: []transactions.SignedTxn{{Txn: rekeyTxn}, {Txn: txn}}, + }, + }, + AllowEmptySignatures: true, + FixSigners: true, + } + result, err := testClient.SimulateTransactions(simulateRequest) + a.NoError(err) + + allowEmptySignatures := true + fixSigners := true + authAddrStr := authAddr.String() + expectedResult := v2.PreEncodedSimulateResponse{ + Version: 2, + LastRound: result.LastRound, + TxnGroups: []v2.PreEncodedSimulateTxnGroupResult{ + { + Txns: 
[]v2.PreEncodedSimulateTxnResult{ + { + Txn: v2.PreEncodedTxInfo{ + Txn: transactions.SignedTxn{Txn: rekeyTxn}, + }, + }, + { + Txn: v2.PreEncodedTxInfo{ + Txn: transactions.SignedTxn{Txn: txn}, + }, + FixedSigner: &authAddrStr, + }, + }, + }, + }, + EvalOverrides: &model.SimulationEvalOverrides{ + AllowEmptySignatures: &allowEmptySignatures, + FixSigners: &fixSigners, + }, + } + a.Equal(expectedResult, result) +} From 02961e3adf8f6de7941d27ce4c073ad8eb8d89b8 Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Mon, 17 Jun 2024 15:20:48 +0000 Subject: [PATCH 11/82] Bump Version, Remove buildnumber.dat and genesistimestamp.dat files. --- buildnumber.dat | 1 - config/version.go | 2 +- genesistimestamp.dat | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 buildnumber.dat delete mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat deleted file mode 100644 index 573541ac97..0000000000 --- a/buildnumber.dat +++ /dev/null @@ -1 +0,0 @@ -0 diff --git a/config/version.go b/config/version.go index 8716fbcdcb..5a1b31ce0e 100644 --- a/config/version.go +++ b/config/version.go @@ -33,7 +33,7 @@ const VersionMajor = 3 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 25 +const VersionMinor = 26 // Version is the type holding our full version information. type Version struct { diff --git a/genesistimestamp.dat b/genesistimestamp.dat deleted file mode 100644 index c72c6a7795..0000000000 --- a/genesistimestamp.dat +++ /dev/null @@ -1 +0,0 @@ -1558657885 From 052f832ac13402c6f3b8dc3760970c1a4ced9e1b Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:12:48 -0400 Subject: [PATCH 12/82] catchup: do not loop forever if there is no peers (#6037) * Add context cancellation check to fetchRound peers retrieval loop * This prevented some e2e tests to finish when a other nodes quit but the last node fell into catchup mode --- catchup/service.go | 7 +++++++ .../features/catchup/basicCatchup_test.go | 17 ----------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index b1720c4fce..b00a787ee3 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -760,6 +760,13 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy psp, getPeerErr := ps.getNextPeer() if getPeerErr != nil { s.log.Debugf("fetchRound: was unable to obtain a peer to retrieve the block from") + select { + case <-s.ctx.Done(): + logging.Base().Debugf("fetchRound was asked to quit while collecting peers") + return + default: + } + s.net.RequestConnectOutgoing(true, s.ctx.Done()) continue } diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index 938313206d..2e3ac87943 100644 --- a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -128,15 +128,6 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) a.Empty(cfg.NetworkProtocolVersion) cfg.NetworkProtocolVersion = ledgerNodeDowngradeTo - cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test - cfg.SaveToDisk(dir) - } else { - // TODO: remove when TestCatchupOverGossip is fixed - dir, err := fixture.GetNodeDir("Node") - a.NoError(err) - cfg, err := config.LoadConfigFromDisk(dir) - a.NoError(err) - 
cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test cfg.SaveToDisk(dir) } @@ -147,14 +138,6 @@ func runCatchupOverGossip(t fixtures.TestingTB, a.NoError(err) a.Empty(cfg.NetworkProtocolVersion) cfg.NetworkProtocolVersion = fetcherNodeDowngradeTo - cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test - cfg.SaveToDisk(dir) - } else { - // TODO: remove when TestCatchupOverGossip is fixed - dir := fixture.PrimaryDataDir() - cfg, err := config.LoadConfigFromDisk(dir) - a.NoError(err) - cfg.BaseLoggerDebugLevel = 5 // debug logging while debugging this test cfg.SaveToDisk(dir) } From 052ceb2d9f81b131eae0ee0ee57e2c41d2d731a2 Mon Sep 17 00:00:00 2001 From: John Lee Date: Fri, 21 Jun 2024 14:17:38 -0400 Subject: [PATCH 13/82] CICD: remove mac AMD64 support from circleci (#6036) --- .circleci/config.yml | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8208fe063b..f431e91160 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -45,30 +45,18 @@ executors: machine: image: << pipeline.parameters.ubuntu_image >> resource_class: arm.large - mac_amd64_medium: + mac_arm64_medium: macos: xcode: 14.2.0 - resource_class: macos.x86.medium.gen2 + resource_class: macos.m1.medium.gen1 environment: HOMEBREW_NO_AUTO_UPDATE: "true" - mac_amd64_large: + mac_arm64_large: macos: xcode: 14.2.0 - # Since they removed the large class for amd64, we will use medium here too. - resource_class: macos.x86.medium.gen2 - environment: - HOMEBREW_NO_AUTO_UPDATE: "true" - mac_arm64: &executor-mac-arm64 - machine: true - resource_class: algorand/macstadium-m1 + resource_class: macos.m1.large.gen1 environment: HOMEBREW_NO_AUTO_UPDATE: "true" - # these are required b/c jobs explicitly assign sizes to the executors - # for `mac_arm64` there is only one size - mac_arm64_medium: - <<: *executor-mac-arm64 - mac_arm64_large: - <<: *executor-mac-arm64 slack-fail-stop-step: &slack-fail-post-step post-steps: @@ -86,7 +74,7 @@ workflows: name: << matrix.platform >>_build_nightly matrix: &matrix-nightly parameters: - platform: ["amd64", "arm64", "mac_amd64", "mac_arm64"] + platform: ["amd64", "arm64", "mac_arm64"] filters: &filters-nightly branches: only: @@ -137,7 +125,7 @@ workflows: name: << matrix.platform >>_<< matrix.job_type >>_verification matrix: parameters: - platform: ["amd64", "arm64", "mac_amd64", "mac_arm64"] + platform: ["amd64", "arm64", "mac_arm64"] job_type: ["test_nightly", "integration_nightly", "e2e_expect_nightly"] requires: - << matrix.platform >>_<< matrix.job_type >> From 24382d85ad35d5a2f200434936fd643a909b6eab Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 25 Jun 2024 13:05:27 -0400 Subject: [PATCH 14/82] node: close ledger and part keys on node shutdown (#6039) --- data/pools/transactionPool.go | 29 +++++++++++++++++++++++++++++ ledger/notifier.go | 2 ++ node/node.go | 18 ++++++++++++++---- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index 687a3db80c..afe12f2363 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -95,6 +95,11 @@ type TransactionPool struct { // stateproofOverflowed indicates that a stateproof transaction was allowed to // exceed the txPoolMaxSize. This flag is reset to false OnNewBlock stateproofOverflowed bool + + // shutdown is set to true when the pool is being shut down. 
It is checked in exported methods + // to prevent pool operations like remember and recomputing the block evaluator + // from using down stream resources like ledger that may be shutting down. + shutdown bool } // BlockEvaluator defines the block evaluator interface exposed by the ledger package. @@ -113,6 +118,8 @@ type VotingAccountSupplier interface { VotingAccountsForRound(basics.Round) []basics.Address } +var errPoolShutdown = errors.New("transaction pool is shutting down") + // MakeTransactionPool makes a transaction pool. func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local, log logging.Logger, vac VotingAccountSupplier) *TransactionPool { if cfg.TxPoolExponentialIncreaseFactor < 1 { @@ -430,6 +437,10 @@ func (pool *TransactionPool) ingest(txgroup []transactions.SignedTxn, params poo return ErrNoPendingBlockEvaluator } + if pool.shutdown { + return errPoolShutdown + } + if !params.recomputing { // Make sure that the latest block has been processed by OnNewBlock(). // If not, we might be in a race, so wait a little bit for OnNewBlock() @@ -441,6 +452,10 @@ func (pool *TransactionPool) ingest(txgroup []transactions.SignedTxn, params poo if pool.pendingBlockEvaluator == nil { return ErrNoPendingBlockEvaluator } + // recheck if the pool is shutting down since TimedWait above releases the lock + if pool.shutdown { + return errPoolShutdown + } } err := pool.checkSufficientFee(txgroup) @@ -529,6 +544,10 @@ func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledgercor pool.mu.Lock() defer pool.mu.Unlock() + if pool.shutdown { + return + } + defer pool.cond.Broadcast() if pool.pendingBlockEvaluator == nil || block.Round() >= pool.pendingBlockEvaluator.Round() { // Adjust the pool fee threshold. The rules are: @@ -1010,3 +1029,13 @@ func (pool *TransactionPool) AssembleDevModeBlock() (assembled *ledgercore.Unfin assembled, err = pool.AssembleBlock(pool.pendingBlockEvaluator.Round(), time.Now().Add(pool.proposalAssemblyTime)) return } + +// Shutdown stops the transaction pool from accepting new transactions and blocks. +// It takes the pool.mu lock in order to ensure there is no pending remember or block operations in flight +// and sets the shutdown flag to true. 
+func (pool *TransactionPool) Shutdown() { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.shutdown = true +} diff --git a/ledger/notifier.go b/ledger/notifier.go index aabf62d080..f97e1c77e6 100644 --- a/ledger/notifier.go +++ b/ledger/notifier.go @@ -74,6 +74,8 @@ func (bn *blockNotifier) worker() { func (bn *blockNotifier) close() { bn.mu.Lock() + bn.pendingBlocks = nil + bn.listeners = nil if bn.running { bn.running = false bn.cond.Broadcast() diff --git a/node/node.go b/node/node.go index d1c6cc4b82..6c77b4fbc0 100644 --- a/node/node.go +++ b/node/node.go @@ -152,6 +152,7 @@ type AlgorandFullNode struct { tracer messagetracer.MessageTracer stateProofWorker *stateproof.Worker + partHandles []db.Accessor } // TxnWithStatus represents information about a single transaction, @@ -418,6 +419,12 @@ func (node *AlgorandFullNode) Stop() { defer func() { node.mu.Unlock() node.waitMonitoringRoutines() + + // oldKeyDeletionThread uses accountManager registry so must be stopped before accountManager is closed + node.accountManager.Registry().Close() + for h := range node.partHandles { + node.partHandles[h].Close() + } }() node.net.ClearHandlers() @@ -430,6 +437,7 @@ func (node *AlgorandFullNode) Stop() { node.stateProofWorker.Stop() node.txHandler.Stop() node.agreementService.Shutdown() + node.agreementService.Accessor.Close() node.catchupService.Stop() node.txPoolSyncerService.Stop() node.blockService.Stop() @@ -441,7 +449,9 @@ func (node *AlgorandFullNode) Stop() { node.lowPriorityCryptoVerificationPool.Shutdown() node.cryptoPool.Shutdown() node.log.Debug("crypto worker pools have stopped") + node.transactionPool.Shutdown() node.cancelCtx() + node.ledger.Close() } // note: unlike the other two functions, this accepts a whole filename @@ -987,12 +997,12 @@ func (node *AlgorandFullNode) loadParticipationKeys() error { // These files are not ephemeral and must be deleted eventually since // this function is called to load files located in the node on startup added := node.accountManager.AddParticipation(part, false) - if added { - node.log.Infof("Loaded participation keys from storage: %s %s", part.Address(), info.Name()) - } else { + if !added { part.Close() continue } + node.log.Infof("Loaded participation keys from storage: %s %s", part.Address(), info.Name()) + node.partHandles = append(node.partHandles, handle) err = insertStateProofToRegistry(part, node) if err != nil { return err @@ -1024,7 +1034,7 @@ func (node *AlgorandFullNode) txPoolGaugeThread(done <-chan struct{}) { defer node.monitoringRoutinesWaitGroup.Done() ticker := time.NewTicker(10 * time.Second) defer ticker.Stop() - for true { + for { select { case <-ticker.C: txPoolGauge.Set(uint64(node.transactionPool.PendingCount())) From c99a1c91cc7c11449b77557a1d81e669c92ca87b Mon Sep 17 00:00:00 2001 From: ohill <145173879+ohill@users.noreply.github.com> Date: Wed, 26 Jun 2024 10:36:48 -0400 Subject: [PATCH 15/82] build: add universal Mac make target (#6023) --- Makefile | 40 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 8301771718..7ad7f219f8 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,13 @@ endif SRCPATH := $(shell pwd) ARCH := $(shell ./scripts/archtype.sh) OS_TYPE := $(shell ./scripts/ostype.sh) +# overrides for cross-compiling platform-specific binaries +ifdef CROSS_COMPILE_ARCH + ARCH := $(CROSS_COMPILE_ARCH) + GO_INSTALL := CGO_ENABLED=1 GOOS=$(OS_TYPE) GOARCH=$(ARCH) go build -o $(GOPATH1)/bin-$(OS_TYPE)-$(ARCH) +else + GO_INSTALL := go 
install +endif S3_RELEASE_BUCKET = $$S3_RELEASE_BUCKET GOLANG_VERSIONS := $(shell ./scripts/get_golang_version.sh all) @@ -153,10 +160,35 @@ crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a: cp -R crypto/libsodium-fork/. crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork cd crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork && \ ./autogen.sh --prefix $(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH) && \ - ./configure --disable-shared --prefix="$(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH)" && \ + ./configure --disable-shared --prefix="$(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH)" $(EXTRA_CONFIGURE_FLAGS) && \ $(MAKE) && \ $(MAKE) install +universal: +ifeq ($(OS_TYPE),darwin) + # build amd64 Mac binaries + mkdir -p $(GOPATH1)/bin-darwin-amd64 + CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=12.0" --host=x86_64-apple-darwin' $(MAKE) + + # build arm64 Mac binaries + mkdir -p $(GOPATH1)/bin-darwin-arm64 + CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=12.0" --host=aarch64-apple-darwin' $(MAKE) + + # lipo together + mkdir -p $(GOPATH1)/bin-darwin-universal + for binary in $$(ls $(GOPATH1)/bin-darwin-arm64); do \ + if [ -f $(GOPATH1)/bin-darwin-amd64/$$binary ]; then \ + lipo -create -output $(GOPATH1)/bin-darwin-universal/$$binary \ + $(GOPATH1)/bin-darwin-arm64/$$binary \ + $(GOPATH1)/bin-darwin-amd64/$$binary; \ + else \ + echo "Warning: Binary $$binary exists in arm64 but not in amd64"; \ + fi \ + done +else + $(error OS_TYPE must be darwin for universal builds) +endif + deps: ./scripts/check_deps.sh @@ -212,11 +244,11 @@ ${GOCACHE}/file.txt: touch "${GOCACHE}"/file.txt buildsrc: check-go-version crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a node_exporter NONGO_BIN ${GOCACHE}/file.txt - go install $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./... + $(GO_INSTALL) $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./... buildsrc-special: cd tools/block-generator && \ - go install $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./... + $(GO_INSTALL) $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./... 
check-go-version: ./scripts/check_golang_version.sh build @@ -331,7 +363,7 @@ dump: $(addprefix gen/,$(addsuffix /genesis.dump, $(NETWORKS))) install: build scripts/dev_install.sh -p $(GOPATH1)/bin -.PHONY: default fmt lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_kmd_swagger +.PHONY: default fmt lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_kmd_swagger universal ###### TARGETS FOR CICD PROCESS ###### include ./scripts/release/mule/Makefile.mule From f523300433d101b45b19ae119d1dec95008a6a96 Mon Sep 17 00:00:00 2001 From: Henrik Soerensen Date: Wed, 26 Jun 2024 16:19:29 -0400 Subject: [PATCH 16/82] Metrics: Add algod version to metrics (#6003) Co-authored-by: cce <51567+cce@users.noreply.github.com> --- daemon/algod/server.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 12cbf3e968..13432957c3 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -28,6 +28,7 @@ import ( "os" "os/signal" "path/filepath" + "runtime" "strings" "syscall" "time" @@ -230,6 +231,16 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes NodeExporterPath: cfg.NodeExporterPath, }) + var currentVersion = config.GetCurrentVersion() + var algodBuildInfoGauge = metrics.MakeGauge(metrics.MetricName{Name: "algod_build_info", Description: "Algod build info"}) + algodBuildInfoGauge.SetLabels(1, map[string]string{ + "version": currentVersion.String(), + "goarch": runtime.GOARCH, + "goos": runtime.GOOS, + "commit": currentVersion.CommitHash, + "channel": currentVersion.Channel, + }) + var serverNode ServerNode if cfg.EnableFollowMode { var followerNode *node.AlgorandFollowerNode From d3831cdf780ee1fc05c4a5860699a70671b8f265 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 26 Jun 2024 17:31:00 -0400 Subject: [PATCH 17/82] ledger: restore block listeners on reloadLedger (#6041) --- catchup/fetcher_test.go | 3 +- catchup/pref_test.go | 4 +-- daemon/algod/api/server/v2/test/helpers.go | 2 +- data/common_test.go | 2 +- data/datatest/fabricateLedger.go | 2 +- data/ledger.go | 3 +- data/txHandler_test.go | 18 +++++------ ledger/ledger.go | 9 ++++++ ledger/ledger_test.go | 35 ++++++++++++++++++++++ node/assemble_test.go | 4 +-- node/follower_node.go | 8 ++--- node/node.go | 10 ++----- node/node_test.go | 2 +- rpcs/blockService_test.go | 2 +- 14 files changed, 69 insertions(+), 35 deletions(-) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index dab8da4688..e219852bf4 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -66,8 +66,7 @@ func buildTestLedger(t *testing.T, blk bookkeeping.Block) (ledger *data.Ledger, cfg := config.GetDefaultLocal() cfg.Archival = true ledger, err = data.LoadLedger( - log, t.Name(), inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash, - nil, cfg, + log, t.Name(), inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash, cfg, ) if err != nil { t.Fatal("couldn't build ledger", err) diff --git a/catchup/pref_test.go b/catchup/pref_test.go index 38b2a9d16e..a72ed855ec 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -62,7 +62,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { for i := 0; i < b.N; i++ { inMem := true prefix := b.Name() + "empty" + strconv.Itoa(i) 
- local, err := data.LoadLedger(logging.TestingLog(b), prefix, inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) + local, err := data.LoadLedger(logging.TestingLog(b), prefix, inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, cfg) require.NoError(b, err) // Make Service @@ -150,7 +150,7 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da cfg := config.GetDefaultLocal() cfg.Archival = true prefix := t.Name() + "empty" - emptyLedger, err = data.LoadLedger(logging.TestingLog(t), prefix, inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) + emptyLedger, err = data.LoadLedger(logging.TestingLog(t), prefix, inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, cfg) require.NoError(t, err) ledger, err = datatest.FabricateLedger(logging.TestingLog(t), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks)) diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index ad028fc8ae..5ba5256d63 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -313,7 +313,7 @@ func testingenvWithBalances(t testing.TB, minMoneyAtStart, maxMoneyAtStart, numA const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - ledger, err := data.LoadLedger(logging.Base(), t.Name(), inMem, protocol.ConsensusFuture, bootstrap, genesisID, genesisHash, nil, cfg) + ledger, err := data.LoadLedger(logging.Base(), t.Name(), inMem, protocol.ConsensusFuture, bootstrap, genesisID, genesisHash, cfg) if err != nil { panic(err) } diff --git a/data/common_test.go b/data/common_test.go index 6079f4a226..6f947067fa 100644 --- a/data/common_test.go +++ b/data/common_test.go @@ -121,7 +121,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*L const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - ledger, err := LoadLedger(logging.Base(), t.Name(), inMem, protocol.ConsensusCurrentVersion, bootstrap, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(logging.Base(), t.Name(), inMem, protocol.ConsensusCurrentVersion, bootstrap, genesisID, genesisHash, cfg) if err != nil { panic(err) } diff --git a/data/datatest/fabricateLedger.go b/data/datatest/fabricateLedger.go index 9ad7bed4fc..cfde34af8a 100644 --- a/data/datatest/fabricateLedger.go +++ b/data/datatest/fabricateLedger.go @@ -38,7 +38,7 @@ func FabricateLedger(log logging.Logger, ledgerName string, accounts []account.P const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - ledger, err := data.LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genesis, "", crypto.Digest{}, nil, cfg) + ledger, err := data.LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genesis, "", crypto.Digest{}, cfg) if err != nil { return nil, err } diff --git a/data/ledger.go b/data/ledger.go index fa3b958373..579d324d0b 100644 --- a/data/ledger.go +++ b/data/ledger.go @@ -81,7 +81,7 @@ type roundSeed struct { func LoadLedger[T string | ledger.DirsAndPrefix]( log logging.Logger, dir T, memory bool, genesisProto protocol.ConsensusVersion, genesisBal bookkeeping.GenesisBalances, genesisID string, genesisHash crypto.Digest, - blockListeners []ledgercore.BlockListener, cfg config.Local, + cfg config.Local, ) (*Ledger, error) { if genesisBal.Balances == nil { genesisBal.Balances = 
make(map[basics.Address]basics.AccountData) @@ -115,7 +115,6 @@ func LoadLedger[T string | ledger.DirsAndPrefix]( } l.Ledger = ll - l.RegisterBlockListeners(blockListeners) return l, nil } diff --git a/data/txHandler_test.go b/data/txHandler_test.go index 896fbb161d..d395779f33 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -109,7 +109,7 @@ func BenchmarkTxHandlerProcessing(b *testing.B) { cfg.Archival = true cfg.TxBacklogReservedCapacityPerPeer = 1 cfg.IncomingConnectionsLimit = 10 - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(b, err) defer ledger.Close() @@ -1027,7 +1027,7 @@ func TestTxHandlerProcessIncomingCacheTxPoolDrop(t *testing.T) { cfg.Archival = true cfg.EnableTxBacklogRateLimiting = false cfg.TxIncomingFilteringFlags = 3 // txFilterRawMsg + txFilterCanonical - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() @@ -1196,7 +1196,7 @@ func incomingTxHandlerProcessing(maxGroupSize, numberOfTransactionGroups int, t cfg := config.GetDefaultLocal() cfg.Archival = true cfg.EnableTxBacklogRateLimiting = false - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() @@ -1641,7 +1641,7 @@ func (g *txGenerator) makeLedger(tb testing.TB, cfg config.Local, log logging.Lo ledgerName := fmt.Sprintf("%s-in_mem-w_inv=%d", namePrefix, ivrString) ledgerName = strings.Replace(ledgerName, "#", "-", 1) const inMem = true - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(tb, err) return ledger } @@ -2183,7 +2183,7 @@ func TestTxHandlerRememberReportErrorsWithTxPool(t *testing.T) { //nolint:parall cfg := config.GetDefaultLocal() cfg.Archival = true cfg.TxPoolSize = config.MaxTxGroupSize + 1 - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() @@ -2419,7 +2419,7 @@ func TestTxHandlerRestartWithBacklogAndTxPool(t *testing.T) { //nolint:parallelt const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Ledger.Close() @@ -2524,7 +2524,7 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) { cfg.TxBacklogServiceRateWindowSeconds = 1 cfg.TxBacklogAppTxPerSecondRate = 3 cfg.TxBacklogSize = 3 - ledger, err := LoadLedger(log, ledgerName, inMem, 
protocol.ConsensusCurrentVersion, bookkeeping.GenesisBalances{}, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, bookkeeping.GenesisBalances{}, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() @@ -2636,7 +2636,7 @@ func TestTxHandlerAppRateLimiter(t *testing.T) { cfg.TxBacklogAppTxRateLimiterMaxSize = 100 cfg.TxBacklogServiceRateWindowSeconds = 1 cfg.TxBacklogAppTxPerSecondRate = 3 - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() @@ -2705,7 +2705,7 @@ func TestTxHandlerCapGuard(t *testing.T) { cfg.IncomingConnectionsLimit = 1 cfg.TxBacklogSize = 3 - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) defer ledger.Close() diff --git a/ledger/ledger.go b/ledger/ledger.go index 2cc1b36ee1..7459c23037 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -210,6 +210,10 @@ func (l *Ledger) reloadLedger() error { l.trackerMu.Lock() defer l.trackerMu.Unlock() + // save block listeners to recover them later + blockListeners := make([]ledgercore.BlockListener, 0, len(l.notifier.listeners)) + blockListeners = append(blockListeners, l.notifier.listeners...) + // close the trackers. l.trackers.close() @@ -256,6 +260,9 @@ func (l *Ledger) reloadLedger() error { return err } + // restore block listeners since l.notifier might not survive a reload + l.notifier.register(blockListeners) + // post-init actions if trackerDBInitParams.VacuumOnStartup || l.cfg.OptimizeAccountsDatabaseOnStartup { err = l.accts.vacuumDatabase(context.Background()) @@ -423,6 +430,8 @@ func (l *Ledger) Close() { // RegisterBlockListeners registers listeners that will be called when a // new block is added to the ledger. 
func (l *Ledger) RegisterBlockListeners(listeners []ledgercore.BlockListener) { + l.trackerMu.RLock() + defer l.trackerMu.RUnlock() l.notifier.register(listeners) } diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 2a9666688b..968e6d8b21 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -3422,5 +3422,40 @@ func TestLedgerRetainMinOffCatchpointInterval(t *testing.T) { } }() } +} + +type testBlockListener struct { + id int +} + +func (t *testBlockListener) OnNewBlock(bookkeeping.Block, ledgercore.StateDelta) {} + +// TestLedgerRegisterBlockListeners ensures that the block listeners survive reloadLedger +func TestLedgerRegisterBlockListeners(t *testing.T) { + partitiontest.PartitionTest(t) + + genBalances, _, _ := ledgertesting.NewTestGenesis() + var genHash crypto.Digest + crypto.RandBytes(genHash[:]) + cfg := config.GetDefaultLocal() + l := newSimpleLedgerFull(t, genBalances, protocol.ConsensusCurrentVersion, genHash, cfg) + defer l.Close() + l.RegisterBlockListeners([]ledgercore.BlockListener{&testBlockListener{1}, &testBlockListener{2}}) + l.RegisterBlockListeners([]ledgercore.BlockListener{&testBlockListener{3}}) + + require.Equal(t, 3, len(l.notifier.listeners)) + var ids []int + for _, bl := range l.notifier.listeners { + ids = append(ids, bl.(*testBlockListener).id) + } + require.Equal(t, []int{1, 2, 3}, ids) + + l.reloadLedger() + + ids = nil + for _, bl := range l.notifier.listeners { + ids = append(ids, bl.(*testBlockListener).id) + } + require.Equal(t, []int{1, 2, 3}, ids) } diff --git a/node/assemble_test.go b/node/assemble_test.go index 51ff7d8edc..d2bf4dd7f7 100644 --- a/node/assemble_test.go +++ b/node/assemble_test.go @@ -83,7 +83,7 @@ func BenchmarkAssembleBlock(b *testing.B) { const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - ledger, err := data.LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := data.LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(b, err) l := ledger @@ -212,7 +212,7 @@ func TestAssembleBlockTransactionPoolBehind(t *testing.T) { const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - ledger, err := data.LoadLedger(log, "ledgerName", inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + ledger, err := data.LoadLedger(log, "ledgerName", inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) l := ledger diff --git a/node/follower_node.go b/node/follower_node.go index 8483f14679..e475b25481 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -116,17 +116,13 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo DBFilePrefix: config.LedgerFilenamePrefix, ResolvedGenesisDirs: node.genesisDirs, } - node.ledger, err = data.LoadLedger(node.log, ledgerPaths, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) + node.ledger, err = data.LoadLedger(node.log, ledgerPaths, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, cfg) if err != nil { log.Errorf("Cannot initialize ledger (%v): %v", ledgerPaths, err) return nil, err } - blockListeners := []ledgercore.BlockListener{ - node, - } - - node.ledger.RegisterBlockListeners(blockListeners) + node.ledger.RegisterBlockListeners([]ledgercore.BlockListener{node}) if cfg.IsGossipServer() { rpcs.MakeHealthService(node.net) diff 
--git a/node/node.go b/node/node.go index 6c77b4fbc0..58160ff3e0 100644 --- a/node/node.go +++ b/node/node.go @@ -223,7 +223,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd DBFilePrefix: config.LedgerFilenamePrefix, ResolvedGenesisDirs: node.genesisDirs, } - node.ledger, err = data.LoadLedger(node.log, ledgerPaths, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, []ledgercore.BlockListener{}, cfg) + node.ledger, err = data.LoadLedger(node.log, ledgerPaths, false, genesis.Proto, genalloc, node.genesisID, node.genesisHash, cfg) if err != nil { log.Errorf("Cannot initialize ledger (%v): %v", ledgerPaths, err) return nil, err @@ -246,12 +246,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.transactionPool = pools.MakeTransactionPool(node.ledger.Ledger, cfg, node.log, node) - blockListeners := []ledgercore.BlockListener{ - node.transactionPool, - node, - } - - node.ledger.RegisterBlockListeners(blockListeners) + node.ledger.RegisterBlockListeners([]ledgercore.BlockListener{node.transactionPool, node}) txHandlerOpts := data.TxHandlerOpts{ TxPool: node.transactionPool, ExecutionPool: node.lowPriorityCryptoVerificationPool, @@ -1211,6 +1206,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo return } defer node.mu.Unlock() + // start node.transactionPool.Reset() node.catchupService.Start() diff --git a/node/node_test.go b/node/node_test.go index dabb7958a5..54f2e1e6cc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -168,7 +168,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP cfg, err := config.LoadConfigFromDisk(rootDirectory) require.NoError(t, err) cfg.Archival = true - _, err = data.LoadLedger(logging.Base().With("name", nodeID), ledgerFilenamePrefix, inMem, g.Proto, bootstrap, g.ID(), g.Hash(), nil, cfg) + _, err = data.LoadLedger(logging.Base().With("name", nodeID), ledgerFilenamePrefix, inMem, g.Proto, bootstrap, g.ID(), g.Hash(), cfg) require.NoError(t, err) } diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go index 3aab7c4abb..7b1f756f08 100644 --- a/rpcs/blockService_test.go +++ b/rpcs/blockService_test.go @@ -520,7 +520,7 @@ func makeLedger(t *testing.T, namePostfix string) *data.Ledger { prefix := t.Name() + namePostfix ledger, err := data.LoadLedger( log, prefix, inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash, - nil, cfg, + cfg, ) require.NoError(t, err) return ledger From 63c0d5bd60fbe50b17788ed619f089d0288b214d Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Thu, 27 Jun 2024 15:54:11 -0400 Subject: [PATCH 18/82] testing: set pprof labels for goroutines that use the same code for different cases (#4350) --- agreement/demux.go | 2 ++ network/p2pNetwork.go | 4 ++-- network/wsNetwork.go | 10 ++++++---- node/node.go | 7 ++++--- util/execpool/backlog.go | 11 +++++++---- util/execpool/pool.go | 11 +++++++---- util/process.go | 7 +++++++ 7 files changed, 35 insertions(+), 17 deletions(-) diff --git a/agreement/demux.go b/agreement/demux.go index 33e15e6cfd..2f0e9b269b 100644 --- a/agreement/demux.go +++ b/agreement/demux.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/logspec" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util" ) const ( @@ -113,6 +114,7 @@ func (d *demux) tokenizeMessages(ctx context.Context, net Network, tag protocol. 
defer func() { close(decoded) }() + util.SetGoroutineLabels("tokenizeTag", string(tag)) for { select { case raw, ok := <-networkMessages: diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index c5422af849..6301b1b521 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -138,11 +138,11 @@ func (n *P2PNetwork) Start() { for i := 0; i < incomingThreads; i++ { n.wg.Add(1) // We pass the peersConnectivityCheckTicker.C here so that we don't need to syncronize the access to the ticker's data structure. - go n.handler.messageHandlerThread(&n.wg, n.wsPeersConnectivityCheckTicker.C, n) + go n.handler.messageHandlerThread(&n.wg, n.wsPeersConnectivityCheckTicker.C, n, "network", "P2PNetwork") } n.wg.Add(1) - go n.broadcaster.broadcastThread(&n.wg, n) + go n.broadcaster.broadcastThread(&n.wg, n, "network", "P2PNetwork") n.service.DialPeersUntilTargetCount(n.config.GossipFanout) n.wg.Add(1) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 6fc97def5e..4a491f4f9f 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -747,10 +747,10 @@ func (wn *WebsocketNetwork) Start() { for i := 0; i < incomingThreads; i++ { wn.wg.Add(1) // We pass the peersConnectivityCheckTicker.C here so that we don't need to syncronize the access to the ticker's data structure. - go wn.handler.messageHandlerThread(&wn.wg, wn.peersConnectivityCheckTicker.C, wn) + go wn.handler.messageHandlerThread(&wn.wg, wn.peersConnectivityCheckTicker.C, wn, "network", "WebsocketNetwork") } wn.wg.Add(1) - go wn.broadcaster.broadcastThread(&wn.wg, wn) + go wn.broadcaster.broadcastThread(&wn.wg, wn, "network", "WebsocketNetwork") if wn.prioScheme != nil { wn.wg.Add(1) go wn.prioWeightRefresh() @@ -1129,8 +1129,9 @@ func (wn *WebsocketNetwork) maybeSendMessagesOfInterest(peer *wsPeer, messagesOf } } -func (wn *msgHandler) messageHandlerThread(wg *sync.WaitGroup, peersConnectivityCheckCh <-chan time.Time, net networkPeerManager) { +func (wn *msgHandler) messageHandlerThread(wg *sync.WaitGroup, peersConnectivityCheckCh <-chan time.Time, net networkPeerManager, profLabels ...string) { defer wg.Done() + util.SetGoroutineLabels(append(profLabels, "func", "msgHandler.messageHandlerThread")...) for { select { @@ -1231,8 +1232,9 @@ func (wn *msgHandler) sendFilterMessage(msg IncomingMessage, net networkPeerMana } } -func (wn *msgBroadcaster) broadcastThread(wg *sync.WaitGroup, net networkPeerManager) { +func (wn *msgBroadcaster) broadcastThread(wg *sync.WaitGroup, net networkPeerManager, profLabels ...string) { defer wg.Done() + util.SetGoroutineLabels(append(profLabels, "func", "msgHandler.broadcastThread")...) 
slowWritingPeerCheckTicker := time.NewTicker(wn.slowWritingPeerMonitorInterval) defer slowWritingPeerCheckTicker.Stop() diff --git a/node/node.go b/node/node.go index 58160ff3e0..384bd258f6 100644 --- a/node/node.go +++ b/node/node.go @@ -216,9 +216,9 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd } node.net = p2pNode - node.cryptoPool = execpool.MakePool(node) - node.lowPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.LowPriority, node) - node.highPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.HighPriority, node) + node.cryptoPool = execpool.MakePool(node, "worker", "cryptoPool") + node.lowPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.LowPriority, node, "worker", "lowPriorityCryptoVerificationPool") + node.highPriorityCryptoVerificationPool = execpool.MakeBacklog(node.cryptoPool, 2*node.cryptoPool.GetParallelism(), execpool.HighPriority, node, "worker", "highPriorityCryptoVerificationPool") ledgerPaths := ledger.DirsAndPrefix{ DBFilePrefix: config.LedgerFilenamePrefix, ResolvedGenesisDirs: node.genesisDirs, @@ -1061,6 +1061,7 @@ func (node *AlgorandFullNode) OnNewBlock(block bookkeeping.Block, delta ledgerco // don't have to delete key for each block we received. func (node *AlgorandFullNode) oldKeyDeletionThread(done <-chan struct{}) { defer node.monitoringRoutinesWaitGroup.Done() + for { select { case <-done: diff --git a/util/execpool/backlog.go b/util/execpool/backlog.go index 44728d1d9e..c98a2fd427 100644 --- a/util/execpool/backlog.go +++ b/util/execpool/backlog.go @@ -19,6 +19,8 @@ package execpool import ( "context" "sync" + + "github.com/algorand/go-algorand/util" ) // A backlog for an execution pool. The typical usage of this is to @@ -47,7 +49,7 @@ type BacklogPool interface { } // MakeBacklog creates a backlog -func MakeBacklog(execPool ExecutionPool, backlogSize int, priority Priority, owner interface{}) BacklogPool { +func MakeBacklog(execPool ExecutionPool, backlogSize int, priority Priority, owner interface{}, profLabels ...string) BacklogPool { if backlogSize < 0 { return nil } @@ -59,7 +61,7 @@ func MakeBacklog(execPool ExecutionPool, backlogSize int, priority Priority, own bl.ctx, bl.ctxCancel = context.WithCancel(context.Background()) if bl.pool == nil { // create one internally. - bl.pool = MakePool(bl) + bl.pool = MakePool(bl, append(profLabels, "execpool", "internal")...) } if backlogSize == 0 { // use the number of cpus in the system. @@ -68,7 +70,7 @@ func MakeBacklog(execPool ExecutionPool, backlogSize int, priority Priority, own bl.buffer = make(chan backlogItemTask, backlogSize) bl.wg.Add(1) - go bl.worker() + go bl.worker(profLabels) return bl } @@ -129,10 +131,11 @@ func (b *backlog) Shutdown() { } } -func (b *backlog) worker() { +func (b *backlog) worker(profLabels []string) { var t backlogItemTask var ok bool defer b.wg.Done() + util.SetGoroutineLabels(profLabels...) for { diff --git a/util/execpool/pool.go b/util/execpool/pool.go index caa7353ac7..426edd10cb 100644 --- a/util/execpool/pool.go +++ b/util/execpool/pool.go @@ -20,6 +20,8 @@ import ( "context" "runtime" "sync" + + "github.com/algorand/go-algorand/util" ) // The list of all valid priority values. When adding new ones, add them before numPrios. @@ -68,7 +70,7 @@ type enqueuedTask struct { } // MakePool creates a pool. 
-func MakePool(owner interface{}) ExecutionPool { +func MakePool(owner interface{}, profLabels ...string) ExecutionPool { p := &pool{ inputs: make([]chan enqueuedTask, numPrios), numCPUs: runtime.NumCPU(), @@ -82,9 +84,8 @@ func MakePool(owner interface{}) ExecutionPool { p.wg.Add(p.numCPUs) for i := 0; i < p.numCPUs; i++ { - go p.worker() + go p.worker(profLabels) } - return p } @@ -136,12 +137,14 @@ func (p *pool) Shutdown() { // worker function blocks until a new task is pending on any of the channels and execute the above task. // the implementation below would give higher priority for channels that are on higher priority slot. -func (p *pool) worker() { +func (p *pool) worker(profLabels []string) { var t enqueuedTask var ok bool lowPrio := p.inputs[LowPriority] highPrio := p.inputs[HighPriority] defer p.wg.Done() + util.SetGoroutineLabels(profLabels...) + for { select { diff --git a/util/process.go b/util/process.go index e7ce85ed92..c872b63fe5 100644 --- a/util/process.go +++ b/util/process.go @@ -17,9 +17,11 @@ package util import ( + "context" "io" "os" "os/exec" + "runtime/pprof" "sync" "time" ) @@ -73,3 +75,8 @@ func ExecAndCaptureOutput(command string, args ...string) (string, string, error return string(outputStdout), string(outputStderr), err } + +// SetGoroutineLabels sets profiler labels for identifying goroutines using the pprof package. +func SetGoroutineLabels(args ...string) { + pprof.SetGoroutineLabels(pprof.WithLabels(context.Background(), pprof.Labels(args...))) +} From c8407abca80f4682aac43a5ccc8cd524051f4f63 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:16:40 -0400 Subject: [PATCH 19/82] network: p2p traffic exchange for algorand node (#5939) Functional p2p support for gossip network: * DHT advertisement and peers capabilities * Hybrid networking when a node handles both legacy websocket and libp2p networks * HTTP over p2p support for catching up from p2p nodes * P2P network cluster test scenarios * libp2p and DHT metrics and logging handling Co-authored-by: Eric Warehime Co-authored-by: cce <51567+cce@users.noreply.github.com> --- agreement/fuzzer/networkFacade_test.go | 21 +- agreement/gossip/network_test.go | 9 +- catchup/fetcher_test.go | 18 +- catchup/ledgerFetcher.go | 9 +- catchup/ledgerFetcher_test.go | 56 +- catchup/universalFetcher.go | 7 +- cmd/algod/main.go | 4 +- cmd/goal/node.go | 4 +- components/mocks/mockNetwork.go | 37 +- config/config.go | 1 + config/localTemplate.go | 24 +- config/local_defaults.go | 5 +- daemon/algod/server.go | 11 +- data/txHandler.go | 207 +++- data/txHandler_test.go | 3 +- go.mod | 31 +- go.sum | 93 ++ installer/config.json.example | 5 +- logging/log.go | 9 +- netdeploy/remote/nodeConfig.go | 1 + netdeploy/remote/nodecfg/nodeConfigurator.go | 35 + netdeploy/remote/nodecfg/nodeDir.go | 47 + network/addr.go | 64 +- network/addr/addr.go | 88 ++ network/{ => addr}/addr_test.go | 9 +- network/connPerfMon_test.go | 2 +- network/gossipNode.go | 95 +- network/hybridNetwork.go | 233 +++++ network/{ => limitcaller}/dialer.go | 10 +- .../rateLimitingTransport.go | 53 +- network/multiplexer.go | 96 +- network/netidentity.go | 46 +- network/netidentity_test.go | 4 +- network/netprio.go | 2 +- network/p2p/capabilities.go | 179 ++++ network/p2p/capabilities_test.go | 354 +++++++ network/p2p/dht/dht.go | 75 ++ network/p2p/dht/dht_test.go | 64 ++ network/p2p/dnsaddr/resolve.go | 10 +- network/p2p/dnsaddr/resolveController.go | 20 +- network/p2p/dnsaddr/resolve_test.go | 
60 +- network/p2p/http.go | 92 ++ network/p2p/logger.go | 123 +++ network/p2p/p2p.go | 203 +++- network/p2p/p2p_test.go | 163 +++- network/p2p/peerID.go | 41 + network/p2p/peerstore/peerstore.go | 90 +- network/p2p/peerstore/peerstore_test.go | 195 ++-- network/p2p/peerstore/utils.go | 4 +- network/p2p/peerstore/utils_test.go | 3 +- network/p2p/pubsub.go | 6 +- network/p2p/streams.go | 70 +- network/p2p/testing/httpNode.go | 122 +++ network/p2pNetwork.go | 617 ++++++++++-- network/p2pNetwork_test.go | 891 +++++++++++++++++- network/{ => phonebook}/phonebook.go | 13 +- network/{ => phonebook}/phonebook_test.go | 2 +- network/requestLogger_test.go | 16 +- network/requestTracker.go | 5 +- network/requestTracker_test.go | 24 +- network/websocketProxy_test.go | 7 +- network/wsNetwork.go | 207 ++-- network/wsNetwork_test.go | 205 ++-- network/wsPeer.go | 26 +- network/wsPeer_test.go | 12 +- node/follower_node.go | 22 +- node/node.go | 47 +- node/node_test.go | 407 ++++++-- rpcs/blockService.go | 40 +- rpcs/blockService_test.go | 60 +- rpcs/healthService_test.go | 3 +- rpcs/httpTxSync.go | 18 +- rpcs/ledgerService.go | 10 +- rpcs/ledgerService_test.go | 59 ++ rpcs/txService_test.go | 101 +- rpcs/txSyncer_test.go | 25 +- test/heapwatch/agreement-log.py | 187 ++++ test/heapwatch/block_history_plot.py | 30 +- test/heapwatch/client_ram_report.py | 4 + test/heapwatch/metrics_aggs.py | 175 ++++ test/heapwatch/metrics_delta.py | 75 +- test/heapwatch/metrics_lib.py | 272 ++++++ test/heapwatch/metrics_viz.py | 191 ++-- test/heapwatch/requirements.txt | 3 + test/heapwatch/topology-extract-p2p.py | 104 ++ test/heapwatch/topology-extract-ws.py | 115 +++ test/heapwatch/topology-viz.py | 75 ++ test/testdata/configs/config-v34.json | 5 +- .../hello-world-small-p2p/genesis.json | 30 + .../recipes/hello-world-small-p2p/net.json | 107 +++ .../recipes/hello-world-small-p2p/recipe.json | 7 + .../hello-world-small-p2p/topology.json | 20 + .../recipes/hello-world-tiny-p2p/genesis.json | 30 + .../hosttemplates.json | 0 .../recipes/hello-world-tiny-p2p/net.json | 101 ++ .../recipes/hello-world-tiny-p2p/recipe.json | 7 + .../hello-world-tiny-p2p/topology.json | 20 + .../recipes/hello-world/genesis.json | 2 +- .../recipes/scenario1s-p2p/Makefile | 23 + .../recipes/scenario1s-p2p/README.md | 16 + .../scenario1s-p2p/copy-node-configs.py | 55 ++ .../recipes/scenario1s-p2p/recipe.json | 7 + .../recipes/scenario1s/Makefile | 4 +- tools/block-generator/go.mod | 23 + tools/block-generator/go.sum | 96 ++ tools/debug/algodump/main.go | 6 +- tools/debug/transplanter/main.go | 6 +- util/metrics/counter.go | 9 +- util/metrics/gauge.go | 9 +- util/metrics/opencensus.go | 172 ++++ util/metrics/opencensus_test.go | 147 +++ util/metrics/prometheus.go | 106 +++ util/metrics/prometheus_test.go | 148 +++ util/metrics/registry.go | 6 + 114 files changed, 7391 insertions(+), 1032 deletions(-) create mode 100644 network/addr/addr.go rename network/{ => addr}/addr_test.go (95%) create mode 100644 network/hybridNetwork.go rename network/{ => limitcaller}/dialer.go (91%) rename network/{ => limitcaller}/rateLimitingTransport.go (55%) create mode 100644 network/p2p/capabilities.go create mode 100644 network/p2p/capabilities_test.go create mode 100644 network/p2p/dht/dht.go create mode 100644 network/p2p/dht/dht_test.go create mode 100644 network/p2p/http.go create mode 100644 network/p2p/logger.go create mode 100644 network/p2p/testing/httpNode.go rename network/{ => phonebook}/phonebook.go (96%) rename network/{ => phonebook}/phonebook_test.go 
(99%) create mode 100644 test/heapwatch/agreement-log.py create mode 100644 test/heapwatch/metrics_aggs.py create mode 100644 test/heapwatch/metrics_lib.py create mode 100644 test/heapwatch/topology-extract-p2p.py create mode 100644 test/heapwatch/topology-extract-ws.py create mode 100644 test/heapwatch/topology-viz.py create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-small-p2p/genesis.json create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-small-p2p/net.json create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-small-p2p/recipe.json create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-small-p2p/topology.json create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/genesis.json rename test/testdata/deployednettemplates/recipes/{hello-world => hello-world-tiny-p2p}/hosttemplates.json (100%) create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/net.json create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/recipe.json create mode 100644 test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/topology.json create mode 100644 test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile create mode 100644 test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md create mode 100644 test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py create mode 100644 test/testdata/deployednettemplates/recipes/scenario1s-p2p/recipe.json create mode 100644 util/metrics/opencensus.go create mode 100644 util/metrics/opencensus_test.go create mode 100644 util/metrics/prometheus.go create mode 100644 util/metrics/prometheus_test.go diff --git a/agreement/fuzzer/networkFacade_test.go b/agreement/fuzzer/networkFacade_test.go index 131361e3e7..804fb1e7ff 100644 --- a/agreement/fuzzer/networkFacade_test.go +++ b/agreement/fuzzer/networkFacade_test.go @@ -70,9 +70,16 @@ type NetworkFacade struct { rand *rand.Rand timeoutAtInitOnce sync.Once timeoutAtInitWait sync.WaitGroup - peerToNode map[network.Peer]int + peerToNode map[*facadePeer]int } +type facadePeer struct { + id int + net network.GossipNode +} + +func (p *facadePeer) GetNetwork() network.GossipNode { return p.net } + // MakeNetworkFacade creates a facade with a given nodeID. 
func MakeNetworkFacade(fuzzer *Fuzzer, nodeID int) *NetworkFacade { n := &NetworkFacade{ @@ -83,12 +90,12 @@ func MakeNetworkFacade(fuzzer *Fuzzer, nodeID int) *NetworkFacade { eventsQueues: make(map[string]int), eventsQueuesCh: make(chan int, 1000), rand: rand.New(rand.NewSource(int64(nodeID))), - peerToNode: make(map[network.Peer]int, fuzzer.nodesCount), + peerToNode: make(map[*facadePeer]int, fuzzer.nodesCount), debugMessages: false, } n.timeoutAtInitWait.Add(1) for i := 0; i < fuzzer.nodesCount; i++ { - n.peerToNode[network.Peer(new(int))] = i + n.peerToNode[&facadePeer{id: i, net: n}] = i } return n } @@ -179,7 +186,7 @@ func (n *NetworkFacade) WaitForEventsQueue(cleared bool) { func (n *NetworkFacade) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, exclude network.Peer) error { excludeNode := -1 if exclude != nil { - excludeNode = n.peerToNode[exclude] + excludeNode = n.peerToNode[exclude.(*facadePeer)] } return n.broadcast(tag, data, excludeNode, "NetworkFacade service-%v Broadcast %v %v\n") } @@ -240,7 +247,7 @@ func (n *NetworkFacade) PushDownstreamMessage(newMsg context.CancelFunc) bool { func (n *NetworkFacade) Address() (string, bool) { return "mock network", true } // Start - unused function -func (n *NetworkFacade) Start() {} +func (n *NetworkFacade) Start() error { return nil } // Stop - unused function func (n *NetworkFacade) Stop() {} @@ -341,8 +348,8 @@ func (n *NetworkFacade) ReceiveMessage(sourceNode int, tag protocol.Tag, data [] n.pushPendingReceivedMessage() } -func (n *NetworkFacade) Disconnect(sender network.Peer) { - sourceNode := n.peerToNode[sender] +func (n *NetworkFacade) Disconnect(sender network.DisconnectablePeer) { + sourceNode := n.peerToNode[sender.(*facadePeer)] n.fuzzer.Disconnect(n.nodeID, sourceNode) } diff --git a/agreement/gossip/network_test.go b/agreement/gossip/network_test.go index c168b77cce..a3c5328716 100644 --- a/agreement/gossip/network_test.go +++ b/agreement/gossip/network_test.go @@ -18,7 +18,6 @@ package gossip import ( "context" - "net" "net/http" "sync" "sync/atomic" @@ -136,7 +135,7 @@ func (w *whiteholeNetwork) Relay(ctx context.Context, tag protocol.Tag, data []b func (w *whiteholeNetwork) BroadcastSimple(tag protocol.Tag, data []byte) error { return w.Broadcast(context.Background(), tag, data, true, nil) } -func (w *whiteholeNetwork) Disconnect(badnode network.Peer) { +func (w *whiteholeNetwork) Disconnect(badnode network.DisconnectablePeer) { return } func (w *whiteholeNetwork) DisconnectPeers() { @@ -156,11 +155,11 @@ func (w *whiteholeNetwork) GetPeers(options ...network.PeerOption) []network.Pee } func (w *whiteholeNetwork) RegisterHTTPHandler(path string, handler http.Handler) { } -func (w *whiteholeNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) { +func (w *whiteholeNetwork) GetHTTPRequestConnection(request *http.Request) (conn network.DeadlineSettableConn) { return nil } -func (w *whiteholeNetwork) Start() { +func (w *whiteholeNetwork) Start() error { w.quit = make(chan struct{}) go func(w *whiteholeNetwork) { w.domain.messagesMu.Lock() @@ -216,7 +215,7 @@ func (w *whiteholeNetwork) Start() { atomic.AddUint32(&w.lastMsgRead, 1) } }(w) - return + return nil } func (w *whiteholeNetwork) getMux() *network.Multiplexer { return w.mux diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index e219852bf4..52b0b32a8f 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -21,7 +21,6 @@ import ( "net" "net/http" "net/url" - "strings" "testing" 
"github.com/gorilla/mux" @@ -173,8 +172,8 @@ func (b *basicRPCNode) GetPeers(options ...network.PeerOption) []network.Peer { return b.peers } -func (b *basicRPCNode) SubstituteGenesisID(rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", "test genesisID", -1) +func (b *basicRPCNode) GetGenesisID() string { + return "test genesisID" } type httpTestPeerSource struct { @@ -191,8 +190,8 @@ func (s *httpTestPeerSource) RegisterHandlers(dispatch []network.TaggedMessageHa s.dispatchHandlers = append(s.dispatchHandlers, dispatch...) } -func (s *httpTestPeerSource) SubstituteGenesisID(rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", "test genesisID", -1) +func (s *httpTestPeerSource) GetGenesisID() string { + return "test genesisID" } // implement network.HTTPPeer @@ -201,8 +200,13 @@ type testHTTPPeer string func (p *testHTTPPeer) GetAddress() string { return string(*p) } + func (p *testHTTPPeer) GetHTTPClient() *http.Client { - return &http.Client{} + return &http.Client{ + Transport: &network.HTTPPAddressBoundTransport{ + Addr: p.GetAddress(), + InnerTransport: http.DefaultTransport}, + } } func (p *testHTTPPeer) GetHTTPPeer() network.HTTPPeer { return p @@ -238,6 +242,8 @@ func (p *testUnicastPeer) GetAddress() string { return "test" } +func (p *testUnicastPeer) GetNetwork() network.GossipNode { return p.gn } + func (p *testUnicastPeer) Request(ctx context.Context, tag protocol.Tag, topics network.Topics) (resp *network.Response, e error) { responseChannel := make(chan *network.Response, 1) diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go index 2f87b5d576..916627db8f 100644 --- a/catchup/ledgerFetcher.go +++ b/catchup/ledgerFetcher.go @@ -23,7 +23,6 @@ import ( "fmt" "io" "net/http" - "path" "strconv" "time" @@ -74,13 +73,7 @@ func makeLedgerFetcher(net network.GossipNode, accessor ledger.CatchpointCatchup } func (lf *ledgerFetcher) requestLedger(ctx context.Context, peer network.HTTPPeer, round basics.Round, method string) (*http.Response, error) { - parsedURL, err := network.ParseHostOrURL(peer.GetAddress()) - if err != nil { - return nil, err - } - - parsedURL.Path = lf.net.SubstituteGenesisID(path.Join(parsedURL.Path, "/v1/{genesisID}/ledger/"+strconv.FormatUint(uint64(round), 36))) - ledgerURL := parsedURL.String() + ledgerURL := network.SubstituteGenesisID(lf.net, "/v1/{genesisID}/ledger/"+strconv.FormatUint(uint64(round), 36)) lf.log.Debugf("ledger %s %#v peer %#v %T", method, ledgerURL, peer, peer) request, err := http.NewRequestWithContext(ctx, method, ledgerURL, nil) if err != nil { diff --git a/catchup/ledgerFetcher_test.go b/catchup/ledgerFetcher_test.go index 6bbde32120..a080aca31e 100644 --- a/catchup/ledgerFetcher_test.go +++ b/catchup/ledgerFetcher_test.go @@ -17,6 +17,7 @@ package catchup import ( + "archive/tar" "context" "fmt" "net" @@ -30,6 +31,8 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/logging" + p2ptesting "github.com/algorand/go-algorand/network/p2p/testing" + "github.com/algorand/go-algorand/rpcs" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -125,7 +128,7 @@ func TestLedgerFetcherErrorResponseHandling(t *testing.T) { } } -func TestLedgerFetcherHeadLedger(t *testing.T) { +func TestLedgerFetcher(t *testing.T) { partitiontest.PartitionTest(t) // create a dummy server. 
@@ -136,16 +139,19 @@ func TestLedgerFetcherHeadLedger(t *testing.T) { listener, err := net.Listen("tcp", "localhost:") var httpServerResponse = 0 - var contentTypes = make([]string, 0) require.NoError(t, err) go s.Serve(listener) defer s.Close() defer listener.Close() mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - for _, contentType := range contentTypes { - w.Header().Add("Content-Type", contentType) + if req.Method == http.MethodHead { + w.WriteHeader(httpServerResponse) + } else { + w.Header().Add("Content-Type", rpcs.LedgerResponseContentType) + w.WriteHeader(httpServerResponse) + wtar := tar.NewWriter(w) + wtar.Close() } - w.WriteHeader(httpServerResponse) }) successPeer := testHTTPPeer(listener.Addr().String()) lf := makeLedgerFetcher(&mocks.MockNetwork{}, &mocks.MockCatchpointCatchupAccessor{}, logging.TestingLog(t), &dummyLedgerFetcherReporter{}, config.GetDefaultLocal()) @@ -157,7 +163,7 @@ func TestLedgerFetcherHeadLedger(t *testing.T) { // headLedger parseURL failure parseFailurePeer := testHTTPPeer("foobar") err = lf.headLedger(context.Background(), &parseFailurePeer, basics.Round(0)) - require.Equal(t, fmt.Errorf("could not parse a host from url"), err) + require.ErrorContains(t, err, "could not parse a host from url") // headLedger 404 response httpServerResponse = http.StatusNotFound @@ -169,8 +175,46 @@ func TestLedgerFetcherHeadLedger(t *testing.T) { err = lf.headLedger(context.Background(), &successPeer, basics.Round(0)) require.NoError(t, err) + httpServerResponse = http.StatusOK + err = lf.downloadLedger(context.Background(), &successPeer, basics.Round(0)) + require.NoError(t, err) + // headLedger 500 response httpServerResponse = http.StatusInternalServerError err = lf.headLedger(context.Background(), &successPeer, basics.Round(0)) require.Equal(t, fmt.Errorf("headLedger error response status code %d", http.StatusInternalServerError), err) } + +func TestLedgerFetcherP2P(t *testing.T) { + partitiontest.PartitionTest(t) + + mux := http.NewServeMux() + nodeA := p2ptesting.MakeHTTPNode(t) + nodeA.RegisterHTTPHandler("/v1/ledger/0", mux) + var httpServerResponse = 0 + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + if req.Method == http.MethodHead { + w.WriteHeader(httpServerResponse) + } else { + w.Header().Add("Content-Type", rpcs.LedgerResponseContentType) + w.WriteHeader(httpServerResponse) + wtar := tar.NewWriter(w) + wtar.Close() + } + }) + + nodeA.Start() + defer nodeA.Stop() + + successPeer := nodeA.GetHTTPPeer() + lf := makeLedgerFetcher(nodeA, &mocks.MockCatchpointCatchupAccessor{}, logging.TestingLog(t), &dummyLedgerFetcherReporter{}, config.GetDefaultLocal()) + + // headLedger 200 response + httpServerResponse = http.StatusOK + err := lf.headLedger(context.Background(), successPeer, basics.Round(0)) + require.NoError(t, err) + + httpServerResponse = http.StatusOK + err = lf.downloadLedger(context.Background(), successPeer, basics.Round(0)) + require.NoError(t, err) +} diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 31d82adc98..27b970fc26 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -219,13 +219,8 @@ type HTTPFetcher struct { // getBlockBytes gets a block. 
// Core piece of FetcherClient interface func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { - parsedURL, err := network.ParseHostOrURL(hf.rootURL) - if err != nil { - return nil, err - } + blockURL := rpcs.FormatBlockQuery(uint64(r), "", hf.net) - parsedURL.Path = rpcs.FormatBlockQuery(uint64(r), parsedURL.Path, hf.net) - blockURL := parsedURL.String() hf.log.Debugf("block GET %#v peer %#v %T", blockURL, hf.peer, hf.peer) request, err := http.NewRequest("GET", blockURL, nil) if err != nil { diff --git a/cmd/algod/main.go b/cmd/algod/main.go index 0f93ed447f..09770cb6e0 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -33,7 +33,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" - "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/network/addr" "github.com/algorand/go-algorand/protocol" toolsnet "github.com/algorand/go-algorand/tools/network" "github.com/algorand/go-algorand/util" @@ -282,7 +282,7 @@ func run() int { // make sure that the format of each entry is valid: for idx, peer := range peerOverrideArray { - addr, addrErr := network.ParseHostOrURLOrMultiaddr(peer) + addr, addrErr := addr.ParseHostOrURLOrMultiaddr(peer) if addrErr != nil { fmt.Fprintf(os.Stderr, "Provided command line parameter '%s' is not a valid host:port pair\n", peer) return 1 diff --git a/cmd/goal/node.go b/cmd/goal/node.go index 17de96a81a..2db08fd4e5 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -39,7 +39,7 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/libgoal" - "github.com/algorand/go-algorand/network" + naddr "github.com/algorand/go-algorand/network/addr" "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/tokens" @@ -751,7 +751,7 @@ func verifyPeerDialArg() bool { // make sure that the format of each entry is valid: for _, peer := range strings.Split(peerDial, ";") { - _, err := network.ParseHostOrURLOrMultiaddr(peer) + _, err := naddr.ParseHostOrURLOrMultiaddr(peer) if err != nil { reportErrorf("Provided peer '%s' is not a valid peer address : %v", peer, err) return false diff --git a/components/mocks/mockNetwork.go b/components/mocks/mockNetwork.go index 25486cb050..f933a553a9 100644 --- a/components/mocks/mockNetwork.go +++ b/components/mocks/mockNetwork.go @@ -18,7 +18,7 @@ package mocks import ( "context" - "net" + "errors" "net/http" "github.com/algorand/go-algorand/network" @@ -28,6 +28,7 @@ import ( // MockNetwork is a dummy network that doesn't do anything type MockNetwork struct { network.GossipNode + GenesisID string } // Broadcast - unused function @@ -46,7 +47,8 @@ func (network *MockNetwork) Address() (string, bool) { } // Start - unused function -func (network *MockNetwork) Start() { +func (network *MockNetwork) Start() error { + return nil } // Stop - unused function @@ -58,7 +60,7 @@ func (network *MockNetwork) RequestConnectOutgoing(replace bool, quit <-chan str } // Disconnect - unused function -func (network *MockNetwork) Disconnect(badpeer network.Peer) { +func (network *MockNetwork) Disconnect(badpeer network.DisconnectablePeer) { } // DisconnectPeers - unused function @@ -74,11 +76,6 @@ func (network *MockNetwork) 
GetPeers(options ...network.PeerOption) []network.Pe return nil } -// GetRoundTripper -- returns the network round tripper -func (network *MockNetwork) GetRoundTripper() http.RoundTripper { - return http.DefaultTransport -} - // Ready - always ready func (network *MockNetwork) Ready() chan struct{} { c := make(chan struct{}) @@ -94,6 +91,14 @@ func (network *MockNetwork) RegisterHandlers(dispatch []network.TaggedMessageHan func (network *MockNetwork) ClearHandlers() { } +// RegisterProcessors - empty implementation. +func (network *MockNetwork) RegisterProcessors(dispatch []network.TaggedMessageProcessor) { +} + +// ClearProcessors - empty implementation +func (network *MockNetwork) ClearProcessors() { +} + // RegisterHTTPHandler - empty implementation func (network *MockNetwork) RegisterHTTPHandler(path string, handler http.Handler) { } @@ -102,11 +107,19 @@ func (network *MockNetwork) RegisterHTTPHandler(path string, handler http.Handle func (network *MockNetwork) OnNetworkAdvance() {} // GetHTTPRequestConnection - empty implementation -func (network *MockNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) { +func (network *MockNetwork) GetHTTPRequestConnection(request *http.Request) (conn network.DeadlineSettableConn) { return nil } -// SubstituteGenesisID - empty implementation -func (network *MockNetwork) SubstituteGenesisID(rawURL string) string { - return rawURL +// GetGenesisID - empty implementation +func (network *MockNetwork) GetGenesisID() string { + if network.GenesisID == "" { + return "mocknet" + } + return network.GenesisID +} + +// GetHTTPClient returns a http.Client with a suitable for the network +func (network *MockNetwork) GetHTTPClient(address string) (*http.Client, error) { + return nil, errors.New("not implemented") } diff --git a/config/config.go b/config/config.go index a8beb05800..2d5d0bdbfe 100644 --- a/config/config.go +++ b/config/config.go @@ -268,6 +268,7 @@ const ( dnssecSRV = 1 << iota dnssecRelayAddr dnssecTelemetryAddr + dnssecTXT ) const ( diff --git a/config/localTemplate.go b/config/localTemplate.go index 309ffcb798..314b83a78b 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -362,8 +362,9 @@ type Local struct { // 0x01 (dnssecSRV) - validate SRV response // 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution // 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution + // 0x08 (dnssecTXT) - validate TXT response // ... - DNSSecurityFlags uint32 `version[6]:"1"` + DNSSecurityFlags uint32 `version[6]:"1" version[34]:"9"` // EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message. EnablePingHandler bool `version[6]:"true"` @@ -596,9 +597,19 @@ type Local struct { // When it exceeds this capacity, it redirects the block requests to a different node BlockServiceMemCap uint64 `version[28]:"500000000"` - // EnableP2P turns on the peer to peer network + // EnableP2P turns on the peer to peer network. + // When both EnableP2P and EnableP2PHybridMode (below) are set, EnableP2PHybridMode takes precedence. EnableP2P bool `version[31]:"false"` + // EnableP2PHybridMode turns on both websockets and P2P networking. + EnableP2PHybridMode bool `version[34]:"false"` + + // P2PNetAddress sets the listen address used for P2P networking, if hybrid mode is set. 
+ P2PNetAddress string `version[34]:""` + + // EnableDHT will turn on the hash table for use with capabilities advertisement + EnableDHTProviders bool `version[34]:"false"` + // P2PPersistPeerID will write the private key used for the node's PeerID to the P2PPrivateKeyLocation. // This is only used when P2PEnable is true. If P2PPrivateKey is not specified, it uses the default location. P2PPersistPeerID bool `version[29]:"false"` @@ -683,11 +694,16 @@ func (cfg Local) DNSSecurityRelayAddrEnforced() bool { return cfg.DNSSecurityFlags&dnssecRelayAddr != 0 } -// DNSSecurityTelemeryAddrEnforced returns true if relay name to ip addr resolution enforced -func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool { +// DNSSecurityTelemetryAddrEnforced returns true if relay name to ip addr resolution enforced +func (cfg Local) DNSSecurityTelemetryAddrEnforced() bool { return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0 } +// DNSSecurityTXTEnforced returns true if TXT response verification enforced +func (cfg Local) DNSSecurityTXTEnforced() bool { + return cfg.DNSSecurityFlags&dnssecTXT != 0 +} + // CatchupVerifyCertificate returns true if certificate verification is needed func (cfg Local) CatchupVerifyCertificate() bool { return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0 diff --git a/config/local_defaults.go b/config/local_defaults.go index f5f02082aa..ae2ed22ebf 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -51,7 +51,7 @@ var defaultLocal = Local{ ConnectionsRateLimitingWindowSeconds: 1, CrashDBDir: "", DNSBootstrapID: ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", - DNSSecurityFlags: 1, + DNSSecurityFlags: 9, DeadlockDetection: 0, DeadlockDetectionThreshold: 30, DisableAPIAuth: false, @@ -64,6 +64,7 @@ var defaultLocal = Local{ EnableAgreementTimeMetrics: false, EnableAssembleStats: false, EnableBlockService: false, + EnableDHTProviders: false, EnableDeveloperAPI: false, EnableExperimentalAPI: false, EnableFollowMode: false, @@ -74,6 +75,7 @@ var defaultLocal = Local{ EnableMetricReporting: false, EnableOutgoingNetworkMessageFiltering: true, EnableP2P: false, + EnableP2PHybridMode: false, EnablePingHandler: true, EnableProcessBlockStats: false, EnableProfiler: false, @@ -117,6 +119,7 @@ var defaultLocal = Local{ OptimizeAccountsDatabaseOnStartup: false, OutgoingMessageFilterBucketCount: 3, OutgoingMessageFilterBucketSize: 128, + P2PNetAddress: "", P2PPersistPeerID: false, P2PPrivateKeyLocation: "", ParticipationKeysRefreshInterval: 60000000000, diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 13432957c3..c43b0b0693 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -59,7 +59,7 @@ const maxHeaderBytes = 4096 type ServerNode interface { apiServer.APINodeInterface ListeningAddress() (string, bool) - Start() + Start() error Stop() } @@ -298,7 +298,13 @@ func getPortFromAddress(addr string) (string, error) { func (s *Server) Start() { s.log.Info("Trying to start an Algorand node") fmt.Print("Initializing the Algorand node... 
") - s.node.Start() + err := s.node.Start() + if err != nil { + msg := fmt.Sprintf("Failed to start an Algorand node: %v", err) + s.log.Error(msg) + fmt.Println(msg) + os.Exit(1) + } s.log.Info("Successfully started an Algorand node.") fmt.Println("Success!") @@ -317,7 +323,6 @@ func (s *Server) Start() { } var apiToken string - var err error fmt.Printf("API authentication disabled: %v\n", cfg.DisableAPIAuth) if !cfg.DisableAPIAuth { apiToken, err = tokens.GetAndValidateAPIToken(s.RootPath, tokens.AlgodTokenFilename) diff --git a/data/txHandler.go b/data/txHandler.go index 871e71cbc3..eae9586c47 100644 --- a/data/txHandler.go +++ b/data/txHandler.go @@ -240,9 +240,26 @@ func (handler *TxHandler) Start() { if handler.msgCache != nil { handler.msgCache.Start(handler.ctx, 60*time.Second) } + // wsNetwork handler handler.net.RegisterHandlers([]network.TaggedMessageHandler{ {Tag: protocol.TxnTag, MessageHandler: network.HandlerFunc(handler.processIncomingTxn)}, }) + + // libp2p pubsub validator and handler abstracted as TaggedMessageProcessor + handler.net.RegisterProcessors([]network.TaggedMessageProcessor{ + { + Tag: protocol.TxnTag, + // create anonymous struct to hold the two functions and satisfy the network.MessageProcessor interface + MessageHandler: struct { + network.ProcessorValidateFunc + network.ProcessorHandleFunc + }{ + network.ProcessorValidateFunc(handler.validateIncomingTxMessage), + network.ProcessorHandleFunc(handler.processIncomingTxMessage), + }, + }, + }) + handler.backlogWg.Add(2) go handler.backlogWorker() go handler.backlogGaugeThread() @@ -533,7 +550,7 @@ func (handler *TxHandler) deleteFromCaches(msgKey *crypto.Digest, canonicalKey * // dedupCanonical checks if the transaction group has been seen before after reencoding to canonical representation. // returns a key used for insertion if the group was not found. -func (handler *TxHandler) dedupCanonical(ntx int, unverifiedTxGroup []transactions.SignedTxn, consumed int) (key *crypto.Digest, isDup bool) { +func (handler *TxHandler) dedupCanonical(unverifiedTxGroup []transactions.SignedTxn, consumed int) (key *crypto.Digest, isDup bool) { // consider situations where someone want to censor transactions A // 1. Txn A is not part of a group => txn A with a valid signature is OK // Censorship attempts are: @@ -550,6 +567,7 @@ func (handler *TxHandler) dedupCanonical(ntx int, unverifiedTxGroup []transactio // - using individual txn from a group: {A, Z} could be poisoned by {A, B}, where B is invalid var d crypto.Digest + ntx := len(unverifiedTxGroup) if ntx == 1 { // a single transaction => cache/dedup canonical txn with its signature enc := unverifiedTxGroup[0].MarshalMsg(nil) @@ -577,61 +595,58 @@ func (handler *TxHandler) dedupCanonical(ntx int, unverifiedTxGroup []transactio return &d, false } -// processIncomingTxn decodes a transaction group from incoming message and enqueues into the back log for processing. -// The function also performs some input data pre-validation; -// - txn groups are cut to MaxTxGroupSize size -// - message are checked for duplicates -// - transactions are checked for duplicates - -func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) network.OutgoingMessage { +// incomingMsgDupCheck runs the duplicate check on a raw incoming message. 
+// Returns: +// - the key used for insertion if the message was not found in the cache +// - a boolean indicating if the message was a duplicate +func (handler *TxHandler) incomingMsgDupCheck(data []byte) (*crypto.Digest, bool) { var msgKey *crypto.Digest var isDup bool if handler.msgCache != nil { // check for duplicate messages // this helps against relaying duplicates - if msgKey, isDup = handler.msgCache.CheckAndPut(rawmsg.Data); isDup { + if msgKey, isDup = handler.msgCache.CheckAndPut(data); isDup { transactionMessagesDupRawMsg.Inc(nil) - return network.OutgoingMessage{Action: network.Ignore} + return msgKey, true } } + return msgKey, false +} - unverifiedTxGroup := make([]transactions.SignedTxn, 1) - dec := protocol.NewMsgpDecoderBytes(rawmsg.Data) - ntx := 0 - consumed := 0 - - var err error +// incomingMsgErlCheck runs the rate limiting check on a sender. +// Returns: +// - the capacity guard returned by the elastic rate limiter +// - a boolean indicating if the sender is rate limited +func (handler *TxHandler) incomingMsgErlCheck(sender network.DisconnectablePeer) (*util.ErlCapacityGuard, bool) { var capguard *util.ErlCapacityGuard - accepted := false - defer func() { - // if we failed to put the item onto the backlog, we should release the capacity if any - if !accepted { - if capguard != nil { - if capErr := capguard.Release(); capErr != nil { - logging.Base().Warnf("Failed to release capacity to ElasticRateLimiter: %v", capErr) - } - } - } - }() - + var err error if handler.erl != nil { congestedERL := float64(cap(handler.backlogQueue))*handler.backlogCongestionThreshold < float64(len(handler.backlogQueue)) // consume a capacity unit // if the elastic rate limiter cannot vend a capacity, the error it returns // is sufficient to indicate that we should enable Congestion Control, because // an issue in vending capacity indicates the underlying resource (TXBacklog) is full - capguard, err = handler.erl.ConsumeCapacity(rawmsg.Sender.(util.ErlClient)) + capguard, err = handler.erl.ConsumeCapacity(sender.(util.ErlClient)) if err != nil { handler.erl.EnableCongestionControl() // if there is no capacity, it is the same as if we failed to put the item onto the backlog, so report such transactionMessagesDroppedFromBacklog.Inc(nil) - return network.OutgoingMessage{Action: network.Ignore} + return capguard, true } // if the backlog Queue has 50% of its buffer back, turn congestion control off if !congestedERL { handler.erl.DisableCongestionControl() } } + return capguard, false +} + +// decodeMsg decodes TX message buffer into transactions.SignedTxn, +// and returns number of bytes consumed from the buffer and a boolean indicating if the message was invalid. 
+func decodeMsg(data []byte) (unverifiedTxGroup []transactions.SignedTxn, consumed int, invalid bool) { + unverifiedTxGroup = make([]transactions.SignedTxn, 1) + dec := protocol.NewMsgpDecoderBytes(data) + ntx := 0 for { if len(unverifiedTxGroup) == ntx { @@ -645,7 +660,7 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net break } logging.Base().Warnf("Received a non-decodable txn: %v", err) - return network.OutgoingMessage{Action: network.Disconnect} + return nil, 0, true } consumed = dec.Consumed() ntx++ @@ -654,13 +669,13 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net if dec.Remaining() > 0 { // if something else left in the buffer - this is an error, drop transactionMessageTxGroupExcessive.Inc(nil) - return network.OutgoingMessage{Action: network.Disconnect} + return nil, 0, true } } } if ntx == 0 { logging.Base().Warnf("Received empty tx group") - return network.OutgoingMessage{Action: network.Disconnect} + return nil, 0, true } unverifiedTxGroup = unverifiedTxGroup[:ntx] @@ -669,22 +684,72 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net transactionMessageTxGroupFull.Inc(nil) } + return unverifiedTxGroup, consumed, false +} + +// incomingTxGroupDupRateLimit checks +// - if the incoming transaction group has been seen before after reencoding to canonical representation, and +// - if the sender is rate limited by the per-application rate limiter. +func (handler *TxHandler) incomingTxGroupDupRateLimit(unverifiedTxGroup []transactions.SignedTxn, encodedExpectedSize int, sender network.DisconnectablePeer) (*crypto.Digest, bool) { var canonicalKey *crypto.Digest if handler.txCanonicalCache != nil { - if canonicalKey, isDup = handler.dedupCanonical(ntx, unverifiedTxGroup, consumed); isDup { + var isDup bool + if canonicalKey, isDup = handler.dedupCanonical(unverifiedTxGroup, encodedExpectedSize); isDup { transactionMessagesDupCanonical.Inc(nil) - return network.OutgoingMessage{Action: network.Ignore} + return canonicalKey, true } } // rate limit per application in a group. Limiting any app in a group drops the entire message. if handler.appLimiter != nil { congestedARL := len(handler.backlogQueue) > handler.appLimiterBacklogThreshold - if congestedARL && handler.appLimiter.shouldDrop(unverifiedTxGroup, rawmsg.Sender.(network.IPAddressable).RoutingAddr()) { + if congestedARL && handler.appLimiter.shouldDrop(unverifiedTxGroup, sender.(network.IPAddressable).RoutingAddr()) { transactionMessagesAppLimiterDrop.Inc(nil) - return network.OutgoingMessage{Action: network.Ignore} + return canonicalKey, true } } + return canonicalKey, false +} + +// processIncomingTxn decodes a transaction group from incoming message and enqueues into the back log for processing. 
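+// It is the message handler registered for the TxnTag with the wsNetwork via RegisterHandlers.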
+// The function also performs some input data pre-validation; +// - txn groups are cut to MaxTxGroupSize size +// - message are checked for duplicates +// - transactions are checked for duplicates +func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) network.OutgoingMessage { + msgKey, shouldDrop := handler.incomingMsgDupCheck(rawmsg.Data) + if shouldDrop { + return network.OutgoingMessage{Action: network.Ignore} + } + + capguard, shouldDrop := handler.incomingMsgErlCheck(rawmsg.Sender) + accepted := false + defer func() { + // if we failed to put the item onto the backlog, we should release the capacity if any + if !accepted && capguard != nil { + if capErr := capguard.Release(); capErr != nil { + logging.Base().Warnf("processIncomingTxn: failed to release capacity to ElasticRateLimiter: %v", capErr) + } + } + }() + + if shouldDrop { + // this TX message was rate-limited by ERL + return network.OutgoingMessage{Action: network.Ignore} + } + + unverifiedTxGroup, consumed, invalid := decodeMsg(rawmsg.Data) + if invalid { + // invalid encoding or exceeding txgroup, disconnect from this peer + return network.OutgoingMessage{Action: network.Disconnect} + } + + canonicalKey, drop := handler.incomingTxGroupDupRateLimit(unverifiedTxGroup, consumed, rawmsg.Sender) + if drop { + // this re-serialized txgroup was detected as a duplicate by the canonical message cache, + // or it was rate-limited by the per-app rate limiter + return network.OutgoingMessage{Action: network.Ignore} + } select { case handler.backlogQueue <- &txBacklogMsg{ @@ -701,14 +766,70 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net transactionMessagesDroppedFromBacklog.Inc(nil) // additionally, remove the txn from duplicate caches to ensure it can be re-submitted - if handler.txCanonicalCache != nil && canonicalKey != nil { - handler.txCanonicalCache.Delete(canonicalKey) - } - if handler.msgCache != nil && msgKey != nil { - handler.msgCache.DeleteByKey(msgKey) - } + handler.deleteFromCaches(msgKey, canonicalKey) + } + + return network.OutgoingMessage{Action: network.Ignore} +} + +type validatedIncomingTxMessage struct { + rawmsg network.IncomingMessage + unverifiedTxGroup []transactions.SignedTxn + msgKey *crypto.Digest + canonicalKey *crypto.Digest +} + +// validateIncomingTxMessage is the validator for the MessageProcessor implementation used by P2PNetwork. 
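+// It performs the raw-message dedup, decoding, canonical dedup and per-app rate-limit checks; enqueueing into the backlog is deferred to processIncomingTxMessage.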
+func (handler *TxHandler) validateIncomingTxMessage(rawmsg network.IncomingMessage) network.ValidatedMessage { + msgKey, isDup := handler.incomingMsgDupCheck(rawmsg.Data) + if isDup { + return network.ValidatedMessage{Action: network.Ignore, ValidatedMessage: nil} } + unverifiedTxGroup, consumed, invalid := decodeMsg(rawmsg.Data) + if invalid { + // invalid encoding or exceeding txgroup, disconnect from this peer + return network.ValidatedMessage{Action: network.Disconnect, ValidatedMessage: nil} + } + + canonicalKey, drop := handler.incomingTxGroupDupRateLimit(unverifiedTxGroup, consumed, rawmsg.Sender) + if drop { + // this re-serialized txgroup was detected as a duplicate by the canonical message cache, + // or it was rate-limited by the per-app rate limiter + return network.ValidatedMessage{Action: network.Ignore, ValidatedMessage: nil} + } + + return network.ValidatedMessage{ + Action: network.Accept, + Tag: rawmsg.Tag, + ValidatedMessage: &validatedIncomingTxMessage{ + rawmsg: rawmsg, + unverifiedTxGroup: unverifiedTxGroup, + msgKey: msgKey, + canonicalKey: canonicalKey, + }, + } +} + +// processIncomingTxMessage is the handler for the MessageProcessor implementation used by P2PNetwork. +func (handler *TxHandler) processIncomingTxMessage(validatedMessage network.ValidatedMessage) network.OutgoingMessage { + msg := validatedMessage.ValidatedMessage.(*validatedIncomingTxMessage) + select { + case handler.backlogQueue <- &txBacklogMsg{ + rawmsg: &msg.rawmsg, + unverifiedTxGroup: msg.unverifiedTxGroup, + rawmsgDataHash: msg.msgKey, + unverifiedTxGroupHash: msg.canonicalKey, + capguard: nil, + }: + default: + // if we failed here we want to increase the corresponding metric. It might suggest that we + // want to increase the queue size. + transactionMessagesDroppedFromBacklog.Inc(nil) + + // additionally, remove the txn from duplicate caches to ensure it can be re-submitted + handler.deleteFromCaches(msg.msgKey, msg.canonicalKey) + } return network.OutgoingMessage{Action: network.Ignore} } diff --git a/data/txHandler_test.go b/data/txHandler_test.go index d395779f33..9237865037 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -61,7 +61,8 @@ var txBacklogSize = config.GetDefaultLocal().TxBacklogSize // mock sender is used to implement OnClose, since TXHandlers expect to use Senders and ERL Clients type mockSender struct{} -func (m mockSender) OnClose(func()) {} +func (m mockSender) OnClose(func()) {} +func (m mockSender) GetNetwork() network.GossipNode { panic("not implemented") } func (m mockSender) IPAddr() []byte { return nil } func (m mockSender) RoutingAddr() []byte { return nil } diff --git a/go.mod b/go.mod index a32da9da8c..e4d8c5b8d5 100644 --- a/go.mod +++ b/go.mod @@ -28,10 +28,13 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/go-querystring v1.0.0 github.com/gorilla/mux v1.8.0 + github.com/ipfs/go-log v1.0.5 + github.com/ipfs/go-log/v2 v2.5.1 github.com/jmoiron/sqlx v1.2.0 github.com/karalabe/usb v0.0.2 github.com/labstack/echo/v4 v4.9.1 github.com/libp2p/go-libp2p v0.33.2 + github.com/libp2p/go-libp2p-kad-dht v0.24.3 github.com/libp2p/go-libp2p-pubsub v0.10.0 github.com/libp2p/go-yamux/v4 v4.0.1 github.com/mattn/go-sqlite3 v1.14.16 @@ -39,9 +42,13 @@ require ( github.com/multiformats/go-multiaddr v0.12.3 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/olivere/elastic v6.2.14+incompatible + github.com/prometheus/client_golang v1.18.0 + 
github.com/prometheus/client_model v0.6.0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/cobra v1.5.0 github.com/stretchr/testify v1.8.4 + go.opencensus.io v0.24.0 + go.uber.org/zap v1.27.0 golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a golang.org/x/sync v0.6.0 @@ -73,24 +80,33 @@ require ( github.com/flynn/noise v1.1.0 // indirect github.com/fortytw2/leaktest v1.3.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/swag v0.19.5 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/invopop/yaml v0.1.0 // indirect + github.com/ipfs/boxo v0.10.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/ipfs/go-datastore v0.6.0 // indirect + github.com/ipld/go-ipld-prime v0.20.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.6 // indirect @@ -100,8 +116,11 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect + github.com/libp2p/go-libp2p-record v0.2.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect @@ -126,12 +145,12 @@ require ( github.com/multiformats/go-varint v0.0.7 // indirect github.com/onsi/ginkgo/v2 v2.15.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect + github.com/polydawn/refmt v0.89.0 // indirect 
github.com/prometheus/common v0.47.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect @@ -145,16 +164,20 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.1 // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.18.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index cdae303d5b..1a52e04711 100644 --- a/go.sum +++ b/go.sum @@ -125,6 +125,7 @@ github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0 github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= @@ -140,6 +141,8 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= @@ -152,8 +155,11 @@ github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aev github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -164,6 +170,7 @@ github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -183,6 +190,9 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -195,7 +205,9 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -206,7 +218,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -218,12 +232,16 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -231,7 +249,14 @@ github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/ github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.2.0/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -244,18 +269,34 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/invopop/yaml v0.1.0 h1:YW3WGUoJEXYfzWBjn00zIlrw7brGVD0fUKRYDPAPhrc= github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= +github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY= +github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= +github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= 
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= @@ -265,6 +306,7 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= @@ -310,14 +352,22 @@ github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p v0.33.2 h1:vCdwnFxoGOXMKmaGHlDSnL4bM3fQeW8pgIa9DECnb40= github.com/libp2p/go-libp2p v0.33.2/go.mod h1:zTeppLuCvUIkT118pFVzA8xzP/p2dJYOMApCkFh0Yww= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.24.3 h1:VjxtDVWaaf4UFjGBf+yl2JCiGaHx7+ctAUa9oJCR3QE= +github.com/libp2p/go-libp2p-kad-dht v0.24.3/go.mod h1:BShPzRbK6+fN3hk8a0WGAYKpb8m4k+DtchkqouGTrSg= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -430,6 +480,8 @@ github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8P github.com/opencontainers/runtime-spec v1.0.2/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= @@ -444,6 +496,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= @@ -465,6 +519,7 @@ github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFD github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= @@ -503,7 +558,11 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod 
h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -531,12 +590,14 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= @@ -547,6 +608,10 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -560,6 +625,15 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= 
+go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -572,9 +646,12 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= @@ -584,6 +661,7 @@ golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -602,6 +680,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -628,6 +707,7 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -711,6 +791,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -723,6 +806,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -737,6 +822,7 @@ google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ 
-744,13 +830,18 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -758,6 +849,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= @@ -783,6 +875,7 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= pgregory.net/rapid v0.6.2 h1:ErW5sL+UKtfBfUTsWHDCoeB+eZKLKMxrSd1VJY6W4bw= diff --git a/installer/config.json.example b/installer/config.json.example index 4a9714115f..7f16155303 100644 --- a/installer/config.json.example +++ 
b/installer/config.json.example @@ -30,7 +30,7 @@ "ConnectionsRateLimitingWindowSeconds": 1, "CrashDBDir": "", "DNSBootstrapID": ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", - "DNSSecurityFlags": 1, + "DNSSecurityFlags": 9, "DeadlockDetection": 0, "DeadlockDetectionThreshold": 30, "DisableAPIAuth": false, @@ -43,6 +43,7 @@ "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, "EnableBlockService": false, + "EnableDHTProviders": false, "EnableDeveloperAPI": false, "EnableExperimentalAPI": false, "EnableFollowMode": false, @@ -53,6 +54,7 @@ "EnableMetricReporting": false, "EnableOutgoingNetworkMessageFiltering": true, "EnableP2P": false, + "EnableP2PHybridMode": false, "EnablePingHandler": true, "EnableProcessBlockStats": false, "EnableProfiler": false, @@ -96,6 +98,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "P2PNetAddress": "", "P2PPersistPeerID": false, "P2PPrivateKeyLocation": "", "ParticipationKeysRefreshInterval": 60000000000, diff --git a/logging/log.go b/logging/log.go index 48f83c1b9e..770bf08bb9 100644 --- a/logging/log.go +++ b/logging/log.go @@ -148,6 +148,9 @@ type Logger interface { // source adds file, line and function fields to the event source() *logrus.Entry + // Entry returns the logrus raw entry + Entry() *logrus.Entry + // Adds a hook to the logger AddHook(hook logrus.Hook) @@ -316,7 +319,11 @@ func (l logger) getOutput() io.Writer { } func (l logger) SetJSONFormatter() { - l.entry.Logger.Formatter = &logrus.JSONFormatter{TimestampFormat: "2006-01-02T15:04:05.000000Z07:00"} + l.entry.Logger.SetFormatter(&logrus.JSONFormatter{TimestampFormat: "2006-01-02T15:04:05.000000Z07:00"}) +} + +func (l logger) Entry() *logrus.Entry { + return l.entry } func (l logger) source() *logrus.Entry { diff --git a/netdeploy/remote/nodeConfig.go b/netdeploy/remote/nodeConfig.go index 2c6e0e423f..4880d76eb9 100644 --- a/netdeploy/remote/nodeConfig.go +++ b/netdeploy/remote/nodeConfig.go @@ -34,6 +34,7 @@ type NodeConfig struct { DashboardEndpoint string `json:",omitempty"` DeadlockOverride int `json:",omitempty"` // -1 = Disable deadlock detection, 0 = Use Default for build, 1 = Enable ConfigJSONOverride string `json:",omitempty"` // Raw json to merge into config.json after other modifications are complete + P2PBootstrap bool // True if this node should be a p2p bootstrap node and registered in DNS // NodeNameMatchRegex is tested against Name in generated configs and if matched the rest of the configs in this record are applied as a template NodeNameMatchRegex string `json:",omitempty"` diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go index 5ab43d5ff7..842570bfc8 100644 --- a/netdeploy/remote/nodecfg/nodeConfigurator.go +++ b/netdeploy/remote/nodecfg/nodeConfigurator.go @@ -42,6 +42,7 @@ type nodeConfigurator struct { bootstrappedTrackerDir string relayEndpoints []srvEntry metricsEndpoints []srvEntry + p2pBootstrapEndpoints []txtEntry } type srvEntry struct { @@ -49,6 +50,11 @@ type srvEntry struct { port string } +type txtEntry struct { + netAddress string + peerID string +} + // ApplyConfigurationToHost attempts to apply the provided configuration to the local host, // based on the configuration specified for the provided hostName, with node // directories being created / updated under the specified rootNodeDir @@ -248,6 +254,31 @@ func (nc *nodeConfigurator) registerDNSRecords() (err error) { return } } + + 
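// each p2p bootstrap endpoint is published as a "dnsaddr=<multiaddr>" TXT record under _dnsaddr.<network>.algodev.network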
dnsaddrsFrom := fmt.Sprintf("_dnsaddr.%s.algodev.network", nc.genesisData.Network) + for _, entry := range nc.p2pBootstrapEndpoints { + port, parseErr := strconv.ParseInt(strings.Split(entry.netAddress, ":")[1], 10, 64) + if parseErr != nil { + return parseErr + } + var addrType string + if isIP { + addrType = "ip4" + } else { + addrType = "dnsaddr" + } + addrInfoString := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", addrType, nc.dnsName, port, entry.peerID) + to := fmt.Sprintf("dnsaddr=%s", addrInfoString) + + fmt.Fprintf(os.Stdout, "...... Adding P2P TXT Record '%s' -> '%s' .\n", dnsaddrsFrom, to) + const priority = 1 + const proxied = false + dnsErr := cloudflareDNS.CreateDNSRecord(context.Background(), "TXT", dnsaddrsFrom, to, cloudflare.AutomaticTTL, priority, proxied) + if dnsErr != nil { + return dnsErr + } + } + return } @@ -281,3 +312,7 @@ func (nc *nodeConfigurator) addRelaySrv(srvRecord string, port string) { func (nc *nodeConfigurator) registerMetricsSrv(srvRecord string, port string) { nc.metricsEndpoints = append(nc.metricsEndpoints, srvEntry{srvRecord, port}) } + +func (nc *nodeConfigurator) addP2PBootstrap(netAddress string, peerID string) { + nc.p2pBootstrapEndpoints = append(nc.p2pBootstrapEndpoints, txtEntry{netAddress, peerID}) +} diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go index 9bd13343c1..bdfc037438 100644 --- a/netdeploy/remote/nodecfg/nodeDir.go +++ b/netdeploy/remote/nodecfg/nodeDir.go @@ -18,6 +18,7 @@ package nodecfg import ( "encoding/json" + "errors" "fmt" "net/url" "os" @@ -27,6 +28,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/netdeploy/remote" + "github.com/algorand/go-algorand/network/p2p" "github.com/algorand/go-algorand/shared/algoh" "github.com/algorand/go-algorand/util/tokens" ) @@ -101,6 +103,12 @@ func (nd *nodeDir) configure() (err error) { fmt.Fprintf(os.Stdout, "Error during configureNetAddress: %s\n", err) return } + + if err = nd.configureP2PDNSBootstrap(nd.P2PBootstrap); err != nil { + fmt.Fprintf(os.Stdout, "Error during configureP2PDNSBootstrap: %s\n", err) + return + } + fmt.Println("Done configuring node directory.") return } @@ -156,6 +164,45 @@ func (nd *nodeDir) configureNetAddress() (err error) { return } +func (nd *nodeDir) configureP2PDNSBootstrap(p2pBootstrap bool) error { + if !p2pBootstrap { + return nil + } + fmt.Fprintf(os.Stdout, " - Configuring P2P DNS Bootstrap: %s\n", nd.Name) + if err := nd.ensureConfig(); err != nil { + return err + } + // ensure p2p config params set are what is expected: + // - EnableP2P or EnableP2PHybridMode + // - NetAddress or P2PNetAddress is set + // - EnableGossipService + if !nd.config.EnableP2P && !nd.config.EnableP2PHybridMode { + return errors.New("p2p bootstrap requires EnableP2P or EnableP2PHybridMode to be set") + } + if nd.NetAddress == "" && nd.config.P2PNetAddress == "" { + return errors.New("p2p bootstrap requires NetAddress or P2PNetAddress to be set") + } + if !nd.config.EnableGossipService { + return errors.New("p2p bootstrap requires EnableGossipService to be set") + } + + netAddress := nd.NetAddress + if nd.config.P2PNetAddress != "" { + netAddress = nd.config.P2PNetAddress + } + + key, err := p2p.GetPrivKey(config.Local{P2PPersistPeerID: true}, nd.dataDir) + if err != nil { + return err + } + peerID, err := p2p.PeerIDFromPublicKey(key.GetPublic()) + if err != nil { + return err + } + nd.configurator.addP2PBootstrap(netAddress, 
peerID.String()) + return nil +} + func (nd *nodeDir) configureAPIEndpoint(address string) (err error) { if err = nd.ensureConfig(); err != nil { return diff --git a/network/addr.go b/network/addr.go index 1e2b04a447..00eb368881 100644 --- a/network/addr.go +++ b/network/addr.go @@ -17,73 +17,17 @@ package network import ( - "errors" - "net/url" "path" - "regexp" "strings" - "github.com/multiformats/go-multiaddr" + "github.com/algorand/go-algorand/network/addr" ) -var errURLNoHost = errors.New("could not parse a host from url") - -var errURLColonHost = errors.New("host name starts with a colon") - -// HostColonPortPattern matches "^[-a-zA-Z0-9.]+:\\d+$" e.g. "foo.com.:1234" -var HostColonPortPattern = regexp.MustCompile(`^[-a-zA-Z0-9.]+:\d+$`) - -// ParseHostOrURL handles "host:port" or a full URL. -// Standard library net/url.Parse chokes on "host:port". -func ParseHostOrURL(addr string) (*url.URL, error) { - // If the entire addr is "host:port" grab that right away. - // Don't try url.Parse() because that will grab "host:" as if it were "scheme:" - if HostColonPortPattern.MatchString(addr) { - return &url.URL{Scheme: "http", Host: addr}, nil - } - parsed, err := url.Parse(addr) - if err == nil { - if parsed.Host == "" { - return nil, errURLNoHost - } - return parsed, nil - } - if strings.HasPrefix(addr, "http:") || strings.HasPrefix(addr, "https:") || strings.HasPrefix(addr, "ws:") || strings.HasPrefix(addr, "wss:") || strings.HasPrefix(addr, "://") || strings.HasPrefix(addr, "//") { - return parsed, err - } - // This turns "[::]:4601" into "http://[::]:4601" which url.Parse can do - parsed, e2 := url.Parse("http://" + addr) - if e2 == nil { - // https://datatracker.ietf.org/doc/html/rfc1123#section-2 - // first character is relaxed to allow either a letter or a digit - if parsed.Host[0] == ':' && (len(parsed.Host) < 2 || parsed.Host[1] != ':') { - return nil, errURLColonHost - } - return parsed, nil - } - return parsed, err /* return original err, not our prefix altered try */ -} - -// ParseHostOrURLOrMultiaddr returns an error if it could not parse the provided -// string as a valid "host:port", full URL, or multiaddr. If no error, it returns -// a host:port address, or a multiaddr. -func ParseHostOrURLOrMultiaddr(addr string) (string, error) { - if strings.HasPrefix(addr, "/") && !strings.HasPrefix(addr, "//") { // multiaddr starts with '/' but not '//' which is possible for scheme relative URLS - _, err := multiaddr.NewMultiaddr(addr) - return addr, err - } - url, err := ParseHostOrURL(addr) - if err != nil { - return "", err - } - return url.Host, nil -} - // addrToGossipAddr parses host:port or a URL and returns the URL to the websocket interface at that address. -func (wn *WebsocketNetwork) addrToGossipAddr(addr string) (string, error) { - parsedURL, err := ParseHostOrURL(addr) +func (wn *WebsocketNetwork) addrToGossipAddr(a string) (string, error) { + parsedURL, err := addr.ParseHostOrURL(a) if err != nil { - wn.log.Warnf("could not parse addr %#v: %s", addr, err) + wn.log.Warnf("could not parse addr %#v: %s", a, err) return "", errBadAddr } parsedURL.Scheme = websocketsScheme[parsedURL.Scheme] diff --git a/network/addr/addr.go b/network/addr/addr.go new file mode 100644 index 0000000000..c8c0c0b6ab --- /dev/null +++ b/network/addr/addr.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019-2024 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package addr + +import ( + "errors" + "net/url" + "regexp" + "strings" + + "github.com/multiformats/go-multiaddr" +) + +var errURLNoHost = errors.New("could not parse a host from url") + +var errURLColonHost = errors.New("host name starts with a colon") + +// HostColonPortPattern matches "^[-a-zA-Z0-9.]+:\\d+$" e.g. "foo.com.:1234" +var HostColonPortPattern = regexp.MustCompile(`^[-a-zA-Z0-9.]+:\d+$`) + +// ParseHostOrURL handles "host:port" or a full URL. +// Standard library net/url.Parse chokes on "host:port". +func ParseHostOrURL(addr string) (*url.URL, error) { + // If the entire addr is "host:port" grab that right away. + // Don't try url.Parse() because that will grab "host:" as if it were "scheme:" + if HostColonPortPattern.MatchString(addr) { + return &url.URL{Scheme: "http", Host: addr}, nil + } + parsed, err := url.Parse(addr) + if err == nil { + if parsed.Host == "" { + return nil, errURLNoHost + } + return parsed, nil + } + if strings.HasPrefix(addr, "http:") || strings.HasPrefix(addr, "https:") || strings.HasPrefix(addr, "ws:") || strings.HasPrefix(addr, "wss:") || strings.HasPrefix(addr, "://") || strings.HasPrefix(addr, "//") { + return parsed, err + } + // This turns "[::]:4601" into "http://[::]:4601" which url.Parse can do + parsed, e2 := url.Parse("http://" + addr) + if e2 == nil { + // https://datatracker.ietf.org/doc/html/rfc1123#section-2 + // first character is relaxed to allow either a letter or a digit + if parsed.Host[0] == ':' && (len(parsed.Host) < 2 || parsed.Host[1] != ':') { + return nil, errURLColonHost + } + return parsed, nil + } + return parsed, err /* return original err, not our prefix altered try */ +} + +// IsMultiaddr returns true if the provided string is a valid multiaddr. +func IsMultiaddr(addr string) bool { + if strings.HasPrefix(addr, "/") && !strings.HasPrefix(addr, "//") { // multiaddr starts with '/' but not '//' which is possible for scheme relative URLS + _, err := multiaddr.NewMultiaddr(addr) + return err == nil + } + return false +} + +// ParseHostOrURLOrMultiaddr returns an error if it could not parse the provided +// string as a valid "host:port", full URL, or multiaddr. If no error, it returns +// a host:port address, or a multiaddr. 
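As a quick usage sketch of the relocated helpers (not part of the patch; the host names are made up and the peer ID is the sample value reused from the tests further down):

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/network/addr"
)

func main() {
	inputs := []string{
		"relay.example.com:4160",         // bare host:port
		"https://relay.example.com:8080", // full URL
		"/ip4/10.0.0.1/tcp/4160/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", // multiaddr
	}
	for _, a := range inputs {
		if addr.IsMultiaddr(a) {
			fmt.Println("multiaddr:", a)
			continue
		}
		host, err := addr.ParseHostOrURLOrMultiaddr(a)
		fmt.Println("host:", host, "err:", err)
	}
}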
+func ParseHostOrURLOrMultiaddr(addr string) (string, error) { + if strings.HasPrefix(addr, "/") && !strings.HasPrefix(addr, "//") { // multiaddr starts with '/' but not '//' which is possible for scheme relative URLS + _, err := multiaddr.NewMultiaddr(addr) + return addr, err + } + url, err := ParseHostOrURL(addr) + if err != nil { + return "", err + } + return url.Host, nil +} diff --git a/network/addr_test.go b/network/addr/addr_test.go similarity index 95% rename from network/addr_test.go rename to network/addr/addr_test.go index 377fe72a91..bceeb079f2 100644 --- a/network/addr_test.go +++ b/network/addr/addr_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package network +package addr import ( "net/url" @@ -31,6 +31,8 @@ type urlCase struct { func TestParseHostOrURL(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() + urlTestCases := []urlCase{ {"localhost:123", url.URL{Scheme: "http", Host: "localhost:123"}}, {"http://localhost:123", url.URL{Scheme: "http", Host: "localhost:123"}}, @@ -89,10 +91,12 @@ func TestParseHostOrURL(t *testing.T) { t.Run(addr, func(t *testing.T) { _, err := ParseHostOrURL(addr) require.Error(t, err, "url should fail", addr) + require.False(t, IsMultiaddr(addr)) }) t.Run(addr+"-multiaddr", func(t *testing.T) { _, err := ParseHostOrURLOrMultiaddr(addr) require.Error(t, err, "url should fail", addr) + require.False(t, IsMultiaddr(addr)) }) } @@ -100,6 +104,7 @@ func TestParseHostOrURL(t *testing.T) { func TestParseHostURLOrMultiaddr(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() validMultiAddrs := []string{ "/ip4/127.0.0.1/tcp/8080", @@ -124,6 +129,7 @@ func TestParseHostURLOrMultiaddr(t *testing.T) { v, err := ParseHostOrURLOrMultiaddr(addr) require.NoError(t, err) require.Equal(t, addr, v) + require.True(t, IsMultiaddr(addr)) }) } @@ -131,6 +137,7 @@ func TestParseHostURLOrMultiaddr(t *testing.T) { t.Run(addr, func(t *testing.T) { _, err := ParseHostOrURLOrMultiaddr(addr) require.Error(t, err) + require.False(t, IsMultiaddr(addr)) }) } diff --git a/network/connPerfMon_test.go b/network/connPerfMon_test.go index b2f24f1214..560be72a96 100644 --- a/network/connPerfMon_test.go +++ b/network/connPerfMon_test.go @@ -48,7 +48,7 @@ func makeMsgPool(N int, peers []Peer) (out []IncomingMessage) { addMsg := func(msgCount int) { for i := 0; i < msgCount; i++ { - msg.Sender = peers[(int(msgIndex)+i)%len(peers)] + msg.Sender = peers[(int(msgIndex)+i)%len(peers)].(DisconnectablePeer) timer += int64(7 * time.Nanosecond) msg.Received = timer out = append(out, msg) diff --git a/network/gossipNode.go b/network/gossipNode.go index 3ac5cc7df0..6a028ff193 100644 --- a/network/gossipNode.go +++ b/network/gossipNode.go @@ -18,8 +18,9 @@ package network import ( "context" - "net" "net/http" + "strings" + "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/protocol" @@ -28,6 +29,11 @@ import ( // Peer opaque interface for referring to a neighbor in the network type Peer interface{} +// DisconnectablePeer is a Peer with a long-living connection to a network that can be disconnected +type DisconnectablePeer interface { + GetNetwork() GossipNode +} + // PeerOption allows users to specify a subset of peers to query // //msgp:ignore PeerOption @@ -44,12 +50,19 @@ const ( PeersPhonebookArchivalNodes PeerOption = iota ) +// DeadlineSettableConn abstracts net.Conn and related types as deadline-settable +type DeadlineSettableConn 
interface { + SetDeadline(time.Time) error + SetReadDeadline(time.Time) error + SetWriteDeadline(time.Time) error +} + // GossipNode represents a node in the gossip network type GossipNode interface { Address() (string, bool) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error - Disconnect(badnode Peer) + Disconnect(badnode DisconnectablePeer) DisconnectPeers() // only used by testing // RegisterHTTPHandler path accepts gorilla/mux path annotations @@ -64,7 +77,7 @@ type GossipNode interface { GetPeers(options ...PeerOption) []Peer // Start threads, listen on sockets. - Start() + Start() error // Close sockets. Stop threads. Stop() @@ -75,8 +88,15 @@ type GossipNode interface { // ClearHandlers deregisters all the existing message handlers. ClearHandlers() - // GetRoundTripper returns a Transport that would limit the number of outgoing connections. - GetRoundTripper() http.RoundTripper + // RegisterProcessors adds to the set of given message processors. + RegisterProcessors(dispatch []TaggedMessageProcessor) + + // ClearProcessors deregisters all the existing message processors. + ClearProcessors() + + // GetHTTPClient returns a http.Client with a suitable for the network Transport + // that would also limit the number of outgoing connections. + GetHTTPClient(address string) (*http.Client, error) // OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress. // this is the only indication that we have that we haven't formed a clique, where all incoming messages @@ -86,10 +106,10 @@ type GossipNode interface { // GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same // request that was provided to the http handler ( or provide a fallback Context() to that ) - GetHTTPRequestConnection(request *http.Request) (conn net.Conn) + GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) - // SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. - SubstituteGenesisID(rawURL string) string + // GetGenesisID returns the network-specific genesisID. + GetGenesisID() string // called from wsPeer to report that it has closed peerRemoteClose(peer *wsPeer, reason disconnectReason) @@ -107,7 +127,7 @@ var outgoingMessagesBufferSize = int( // IncomingMessage represents a message arriving from some peer in our p2p network type IncomingMessage struct { - Sender Peer + Sender DisconnectablePeer Tag Tag Data []byte Err error @@ -148,6 +168,14 @@ type OutgoingMessage struct { OnRelease func() } +// ValidatedMessage is a message that has been validated and is ready to be processed. 
+// Think as an intermediate one between IncomingMessage and OutgoingMessage +type ValidatedMessage struct { + Action ForwardingPolicy + Tag Tag + ValidatedMessage interface{} +} + // ForwardingPolicy is an enum indicating to whom we should send a message // //msgp:ignore ForwardingPolicy @@ -165,6 +193,9 @@ const ( // Respond - reply to the sender Respond + + // Accept - accept for further processing after successful validation + Accept ) // MessageHandler takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything) @@ -175,20 +206,51 @@ type MessageHandler interface { Handle(message IncomingMessage) OutgoingMessage } -// HandlerFunc represents an implemenation of the MessageHandler interface +// HandlerFunc represents an implementation of the MessageHandler interface type HandlerFunc func(message IncomingMessage) OutgoingMessage -// Handle implements MessageHandler.Handle, calling the handler with the IncomingKessage and returning the OutgoingMessage +// Handle implements MessageHandler.Handle, calling the handler with the IncomingMessage and returning the OutgoingMessage func (f HandlerFunc) Handle(message IncomingMessage) OutgoingMessage { return f(message) } -// TaggedMessageHandler receives one type of broadcast messages -type TaggedMessageHandler struct { +// MessageProcessor takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything) +// to send to the network in response. +// This is an extension of the MessageHandler that works in two stages: validate ->[result]-> handle. +type MessageProcessor interface { + Validate(message IncomingMessage) ValidatedMessage + Handle(message ValidatedMessage) OutgoingMessage +} + +// ProcessorValidateFunc represents an implementation of the MessageProcessor interface +type ProcessorValidateFunc func(message IncomingMessage) ValidatedMessage + +// ProcessorHandleFunc represents an implementation of the MessageProcessor interface +type ProcessorHandleFunc func(message ValidatedMessage) OutgoingMessage + +// Validate implements MessageProcessor.Validate, calling the validator with the IncomingMessage and returning the action +// and validation extra data that can be use as the handler input. +func (f ProcessorValidateFunc) Validate(message IncomingMessage) ValidatedMessage { + return f(message) +} + +// Handle implements MessageProcessor.Handle calling the handler with the ValidatedMessage and returning the OutgoingMessage +func (f ProcessorHandleFunc) Handle(message ValidatedMessage) OutgoingMessage { + return f(message) +} + +type taggedMessageDispatcher[T any] struct { Tag - MessageHandler + MessageHandler T } +// TaggedMessageHandler receives one type of broadcast messages +type TaggedMessageHandler = taggedMessageDispatcher[MessageHandler] + +// TaggedMessageProcessor receives one type of broadcast messages +// and performs two stage processing: validating and handling +type TaggedMessageProcessor = taggedMessageDispatcher[MessageProcessor] + // Propagate is a convenience function to save typing in the common case of a message handler telling us to propagate an incoming message // "return network.Propagate(msg)" instead of "return network.OutgoingMsg{network.Broadcast, msg.Tag, msg.Data}" func Propagate(msg IncomingMessage) OutgoingMessage { @@ -205,3 +267,8 @@ func max(numbers ...uint64) (maxNum uint64) { } return } + +// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. 
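The Validate/Handle split lets a node reject malformed traffic cheaply before the heavier handling stage runs. A minimal sketch of a processor and its registration against a GossipNode; the tag choice (protocol.TxnTag), the empty-payload check, and the package name are purely illustrative, not taken from this patch:

package processorexample

import (
	"github.com/algorand/go-algorand/network"
	"github.com/algorand/go-algorand/protocol"
)

// exampleProcessor implements the two-stage MessageProcessor interface.
type exampleProcessor struct{}

func (exampleProcessor) Validate(msg network.IncomingMessage) network.ValidatedMessage {
	if len(msg.Data) == 0 {
		// cheap rejection before any expensive work is queued
		return network.ValidatedMessage{Action: network.Ignore, Tag: msg.Tag}
	}
	return network.ValidatedMessage{
		Action:           network.Accept,
		Tag:              msg.Tag,
		ValidatedMessage: msg.Data, // normally a decoded, verified object
	}
}

func (exampleProcessor) Handle(msg network.ValidatedMessage) network.OutgoingMessage {
	// act on msg.ValidatedMessage; the zero value means "nothing to send back"
	return network.OutgoingMessage{}
}

func registerExampleProcessor(n network.GossipNode) {
	n.RegisterProcessors([]network.TaggedMessageProcessor{
		{Tag: network.Tag(protocol.TxnTag), MessageHandler: exampleProcessor{}},
	})
}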
+func SubstituteGenesisID(net GossipNode, rawURL string) string { + return strings.Replace(rawURL, "{genesisID}", net.GetGenesisID(), -1) +} diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go new file mode 100644 index 0000000000..f324deb73f --- /dev/null +++ b/network/hybridNetwork.go @@ -0,0 +1,233 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package network + +import ( + "context" + "fmt" + "net/http" + "sync" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/addr" + "github.com/algorand/go-algorand/protocol" +) + +// HybridP2PNetwork runs both P2PNetwork and WebsocketNetwork to implement the GossipNode interface +type HybridP2PNetwork struct { + p2pNetwork *P2PNetwork + wsNetwork *WebsocketNetwork + genesisID string + + useP2PAddress bool +} + +// NewHybridP2PNetwork constructs a GossipNode that combines P2PNetwork and WebsocketNetwork +func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (*HybridP2PNetwork, error) { + // supply alternate NetAddress for P2P network + p2pcfg := cfg + p2pcfg.NetAddress = cfg.P2PNetAddress + p2pnet, err := NewP2PNetwork(log, p2pcfg, datadir, phonebookAddresses, genesisID, networkID, nodeInfo) + if err != nil { + return nil, err + } + wsnet, err := NewWebsocketNetwork(log, cfg, phonebookAddresses, genesisID, networkID, nodeInfo, p2pnet.PeerID(), p2pnet.PeerIDSigner()) + if err != nil { + return nil, err + } + return &HybridP2PNetwork{ + p2pNetwork: p2pnet, + wsNetwork: wsnet, + genesisID: genesisID, + }, nil +} + +// Address implements GossipNode +func (n *HybridP2PNetwork) Address() (string, bool) { + // TODO map from configuration? 
used for REST API, goal status, algod.net, etc + if n.useP2PAddress { + return n.p2pNetwork.Address() + } + return n.wsNetwork.Address() +} + +type hybridNetworkError struct{ p2pErr, wsErr error } + +func (e *hybridNetworkError) Error() string { + return fmt.Sprintf("p2pErr: %s, wsErr: %s", e.p2pErr, e.wsErr) +} +func (e *hybridNetworkError) Unwrap() []error { return []error{e.p2pErr, e.wsErr} } + +func (n *HybridP2PNetwork) runParallel(fn func(net GossipNode) error) error { + var wg sync.WaitGroup + var p2pErr, wsErr error + + wg.Add(2) + go func() { + defer wg.Done() + p2pErr = fn(n.p2pNetwork) + }() + go func() { + defer wg.Done() + wsErr = fn(n.wsNetwork) + }() + wg.Wait() + + if p2pErr != nil && wsErr != nil { + return &hybridNetworkError{p2pErr, wsErr} + } + if p2pErr != nil { + return p2pErr + } + if wsErr != nil { + return wsErr + } + return nil +} + +// Broadcast implements GossipNode +func (n *HybridP2PNetwork) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error { + return n.runParallel(func(net GossipNode) error { + return net.Broadcast(ctx, tag, data, wait, except) + }) +} + +// Relay implements GossipNode +func (n *HybridP2PNetwork) Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error { + return n.runParallel(func(net GossipNode) error { + return net.Relay(ctx, tag, data, wait, except) + }) +} + +// Disconnect implements GossipNode +func (n *HybridP2PNetwork) Disconnect(badnode DisconnectablePeer) { + net := badnode.GetNetwork() + if net == n.p2pNetwork { + n.p2pNetwork.Disconnect(badnode) + } else if net == n.wsNetwork { + n.wsNetwork.Disconnect(badnode) + } else { + panic("badnode.GetNetwork() returned a network that is not part of this HybridP2PNetwork") + } +} + +// DisconnectPeers implements GossipNode +func (n *HybridP2PNetwork) DisconnectPeers() { + _ = n.runParallel(func(net GossipNode) error { + net.DisconnectPeers() + return nil + }) +} + +// RegisterHTTPHandler implements GossipNode +func (n *HybridP2PNetwork) RegisterHTTPHandler(path string, handler http.Handler) { + n.p2pNetwork.RegisterHTTPHandler(path, handler) + n.wsNetwork.RegisterHTTPHandler(path, handler) +} + +// RequestConnectOutgoing implements GossipNode +func (n *HybridP2PNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) {} + +// GetPeers implements GossipNode +func (n *HybridP2PNetwork) GetPeers(options ...PeerOption) []Peer { + // TODO better way of combining data from peerstore and returning in GetPeers + var peers []Peer + peers = append(peers, n.p2pNetwork.GetPeers(options...)...) + peers = append(peers, n.wsNetwork.GetPeers(options...)...) + return peers +} + +// Start implements GossipNode +func (n *HybridP2PNetwork) Start() error { + err := n.runParallel(func(net GossipNode) error { + return net.Start() + }) + return err +} + +// Stop implements GossipNode +func (n *HybridP2PNetwork) Stop() { + _ = n.runParallel(func(net GossipNode) error { + net.Stop() + return nil + }) +} + +// RegisterHandlers adds to the set of given message handlers. +func (n *HybridP2PNetwork) RegisterHandlers(dispatch []TaggedMessageHandler) { + n.p2pNetwork.RegisterHandlers(dispatch) + n.wsNetwork.RegisterHandlers(dispatch) +} + +// ClearHandlers deregisters all the existing message handlers. +func (n *HybridP2PNetwork) ClearHandlers() { + n.p2pNetwork.ClearHandlers() + n.wsNetwork.ClearHandlers() +} + +// RegisterProcessors adds to the set of given message processors. 
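Because hybridNetworkError's Unwrap returns a slice, errors.Is and errors.As (Go 1.20+) can match either of the two wrapped failures even though the hybrid network reports them as one value. A self-contained sketch of the same pattern, using local stand-in types and sentinel errors:

package main

import (
	"errors"
	"fmt"
)

var errP2P = errors.New("p2p side failed")
var errWS = errors.New("ws side failed")

// pairError mirrors the aggregate-error shape: Unwrap returns both causes.
type pairError struct{ a, b error }

func (e *pairError) Error() string   { return fmt.Sprintf("a: %v, b: %v", e.a, e.b) }
func (e *pairError) Unwrap() []error { return []error{e.a, e.b} }

func main() {
	var err error = &pairError{errP2P, errWS}
	fmt.Println(errors.Is(err, errP2P)) // true: errors.Is walks every wrapped error
	fmt.Println(errors.Is(err, errWS))  // true
}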
+func (n *HybridP2PNetwork) RegisterProcessors(dispatch []TaggedMessageProcessor) { + n.p2pNetwork.RegisterProcessors(dispatch) + n.wsNetwork.RegisterProcessors(dispatch) +} + +// ClearProcessors deregisters all the existing message processors. +func (n *HybridP2PNetwork) ClearProcessors() { + n.p2pNetwork.ClearProcessors() + n.wsNetwork.ClearProcessors() +} + +// GetHTTPClient returns a http.Client with a suitable for the network Transport +// that would also limit the number of outgoing connections. +func (n *HybridP2PNetwork) GetHTTPClient(address string) (*http.Client, error) { + if addr.IsMultiaddr(address) { + return n.p2pNetwork.GetHTTPClient(address) + } + return n.wsNetwork.GetHTTPClient(address) +} + +// OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress. +// this is the only indication that we have that we haven't formed a clique, where all incoming messages +// arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar +// characteristics as with a watchdog timer. +func (n *HybridP2PNetwork) OnNetworkAdvance() { + _ = n.runParallel(func(net GossipNode) error { + net.OnNetworkAdvance() + return nil + }) +} + +// GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same +// request that was provided to the http handler ( or provide a fallback Context() to that ) +func (n *HybridP2PNetwork) GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) { + conn = n.wsNetwork.GetHTTPRequestConnection(request) + if conn != nil { + return conn + } + return n.p2pNetwork.GetHTTPRequestConnection(request) +} + +// GetGenesisID returns the network-specific genesisID. +func (n *HybridP2PNetwork) GetGenesisID() string { + return n.genesisID +} + +// called from wsPeer to report that it has closed +func (n *HybridP2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) { + panic("wsPeer should only call WebsocketNetwork.peerRemoteClose or P2PNetwork.peerRemoteClose") +} diff --git a/network/dialer.go b/network/limitcaller/dialer.go similarity index 91% rename from network/dialer.go rename to network/limitcaller/dialer.go index 3aa59f493d..ee9b2e364a 100644 --- a/network/dialer.go +++ b/network/limitcaller/dialer.go @@ -14,13 +14,14 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package network +package limitcaller import ( "context" "net" "time" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/tools/network/dnssec" "github.com/algorand/go-algorand/util" ) @@ -31,14 +32,13 @@ type netDialer interface { // Dialer establish tcp-level connection with the destination type Dialer struct { - phonebook Phonebook + phonebook phonebook.Phonebook innerDialer netDialer - resolver *net.Resolver } -// makeRateLimitingDialer creates a rate limiting dialer that would limit the connections +// MakeRateLimitingDialer creates a rate limiting dialer that would limit the connections // according to the entries in the phonebook. 
-func makeRateLimitingDialer(phonebook Phonebook, resolver dnssec.ResolverIf) Dialer { +func MakeRateLimitingDialer(phonebook phonebook.Phonebook, resolver dnssec.ResolverIf) Dialer { var innerDialer netDialer = &net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, diff --git a/network/rateLimitingTransport.go b/network/limitcaller/rateLimitingTransport.go similarity index 55% rename from network/rateLimitingTransport.go rename to network/limitcaller/rateLimitingTransport.go index 461a468da5..45bc0725ed 100644 --- a/network/rateLimitingTransport.go +++ b/network/limitcaller/rateLimitingTransport.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package network +package limitcaller import ( "errors" @@ -22,24 +22,35 @@ import ( "time" "github.com/algorand/go-algorand/util" + "github.com/libp2p/go-libp2p/core/peer" ) -// rateLimitingTransport is the transport for execute a single HTTP transaction, obtaining the Response for a given Request. -type rateLimitingTransport struct { - phonebook Phonebook - innerTransport *http.Transport +// ConnectionTimeStore is a subset of the phonebook that is used to store the connection times. +type ConnectionTimeStore interface { + GetConnectionWaitTime(addrOrPeerID string) (bool, time.Duration, time.Time) + UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool +} + +// RateLimitingTransport is the transport for execute a single HTTP transaction, obtaining the Response for a given Request. +type RateLimitingTransport struct { + phonebook ConnectionTimeStore + innerTransport http.RoundTripper queueingTimeout time.Duration + targetAddr interface{} // target address for the p2p http request } +// DefaultQueueingTimeout is the default timeout for queueing the request. +const DefaultQueueingTimeout = 10 * time.Second + // ErrConnectionQueueingTimeout indicates that we've exceeded the time allocated for // queueing the current request before the request attempt could be made. var ErrConnectionQueueingTimeout = errors.New("rateLimitingTransport: queueing timeout") -// makeRateLimitingTransport creates a rate limiting http transport that would limit the requests rate +// MakeRateLimitingTransport creates a rate limiting http transport that would limit the requests rate // according to the entries in the phonebook. -func makeRateLimitingTransport(phonebook Phonebook, queueingTimeout time.Duration, dialer *Dialer, maxIdleConnsPerHost int) rateLimitingTransport { +func MakeRateLimitingTransport(phonebook ConnectionTimeStore, queueingTimeout time.Duration, dialer *Dialer, maxIdleConnsPerHost int) RateLimitingTransport { defaultTransport := http.DefaultTransport.(*http.Transport) - return rateLimitingTransport{ + return RateLimitingTransport{ phonebook: phonebook, innerTransport: &http.Transport{ Proxy: defaultTransport.Proxy, @@ -54,14 +65,34 @@ func makeRateLimitingTransport(phonebook Phonebook, queueingTimeout time.Duratio } } +// MakeRateLimitingTransportWithRoundTripper creates a rate limiting http transport that would limit the requests rate +// according to the entries in the phonebook. 
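ConnectionTimeStore is deliberately the narrow slice of the phonebook that the transport needs, so any type that can answer "how long should this address wait before connecting" can drive the rate limiting. A toy in-memory implementation, assuming a fixed minimum spacing per address (illustrative only); such a store could then be paired with any http.RoundTripper via MakeRateLimitingTransportWithRoundTripper:

package limitexample

import (
	"sync"
	"time"
)

// fixedSpacingStore allows one connection per address every minInterval.
type fixedSpacingStore struct {
	mu          sync.Mutex
	minInterval time.Duration
	lastConn    map[string]time.Time
}

func newFixedSpacingStore(minInterval time.Duration) *fixedSpacingStore {
	return &fixedSpacingStore{minInterval: minInterval, lastConn: make(map[string]time.Time)}
}

// GetConnectionWaitTime reports whether the address is already known, how long the
// caller should wait before connecting, and the provisional connection time to
// hand back to UpdateConnectionTime once the attempt has been made.
func (s *fixedSpacingStore) GetConnectionWaitTime(addrOrPeerID string) (bool, time.Duration, time.Time) {
	s.mu.Lock()
	defer s.mu.Unlock()
	now := time.Now()
	last, ok := s.lastConn[addrOrPeerID]
	if !ok || now.Sub(last) >= s.minInterval {
		return ok, 0, now
	}
	return true, s.minInterval - now.Sub(last), now
}

// UpdateConnectionTime records the provisional connection time for the address.
func (s *fixedSpacingStore) UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.lastConn[addrOrPeerID] = provisionalTime
	return true
}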
+func MakeRateLimitingTransportWithRoundTripper(phonebook ConnectionTimeStore, queueingTimeout time.Duration, rt http.RoundTripper, target interface{}, maxIdleConnsPerHost int) RateLimitingTransport { + return RateLimitingTransport{ + phonebook: phonebook, + innerTransport: rt, + queueingTimeout: queueingTimeout, + targetAddr: target, + } +} + // RoundTrip connects to the address on the named network using the provided context. // It waits if needed not to exceed connectionsRateLimitingCount. -func (r *rateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response, err error) { +func (r *RateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response, err error) { var waitTime time.Duration var provisionalTime time.Time queueingDeadline := time.Now().Add(r.queueingTimeout) + addrOrPeerID := req.Host + // p2p/http clients have per-connection transport and address info so use that + if len(req.Host) == 0 && req.URL != nil && len(req.URL.Host) == 0 { + addrInfo, ok := r.targetAddr.(*peer.AddrInfo) + if !ok { + return nil, errors.New("rateLimitingTransport: request without Host/URL and targetAddr is not a peer.AddrInfo") + } + addrOrPeerID = string(addrInfo.ID) + } for { - _, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(req.Host) + _, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(addrOrPeerID) if waitTime == 0 { break // break out of the loop and proceed to the connection } @@ -73,6 +104,6 @@ func (r *rateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response return nil, ErrConnectionQueueingTimeout } res, err = r.innerTransport.RoundTrip(req) - r.phonebook.UpdateConnectionTime(req.Host, provisionalTime) + r.phonebook.UpdateConnectionTime(addrOrPeerID, provisionalTime) return } diff --git a/network/multiplexer.go b/network/multiplexer.go index 0e97d63f28..dc38fba277 100644 --- a/network/multiplexer.go +++ b/network/multiplexer.go @@ -24,49 +24,74 @@ import ( // Multiplexer is a message handler that sorts incoming messages by Tag and passes // them along to the relevant message handler for that type of message. type Multiplexer struct { - msgHandlers atomic.Value // stores map[Tag]MessageHandler, an immutable map. + msgHandlers atomic.Value // stores map[Tag]MessageHandler, an immutable map. + msgProcessors atomic.Value // stores map[Tag]MessageProcessor, an immutable map. } // MakeMultiplexer creates an empty Multiplexer func MakeMultiplexer() *Multiplexer { m := &Multiplexer{} - m.ClearHandlers([]Tag{}) // allocate the map + m.ClearHandlers(nil) // allocate the map + m.ClearProcessors(nil) // allocate the map return m } -// getHandlersMap retrieves the handlers map. -func (m *Multiplexer) getHandlersMap() map[Tag]MessageHandler { - handlersVal := m.msgHandlers.Load() - if handlers, valid := handlersVal.(map[Tag]MessageHandler); valid { +// getMap retrieves a typed map from an atomic.Value. +func getMap[T any](source *atomic.Value) map[Tag]T { + mp := source.Load() + if handlers, valid := mp.(map[Tag]T); valid { return handlers } return nil } -// Retrives the handler for the given message Tag from the handlers array while taking a read lock. -func (m *Multiplexer) getHandler(tag Tag) (MessageHandler, bool) { - if handlers := m.getHandlersMap(); handlers != nil { +// Retrieves the handler for the given message Tag from the given value while. 
+func getHandler[T any](source *atomic.Value, tag Tag) (T, bool) { + if handlers := getMap[T](source); handlers != nil { handler, ok := handlers[tag] return handler, ok } - return nil, false + var empty T + return empty, false +} + +// Retrieves the handler for the given message Tag from the handlers array. +func (m *Multiplexer) getHandler(tag Tag) (MessageHandler, bool) { + return getHandler[MessageHandler](&m.msgHandlers, tag) +} + +// Retrieves the processor for the given message Tag from the processors array. +func (m *Multiplexer) getProcessor(tag Tag) (MessageProcessor, bool) { + return getHandler[MessageProcessor](&m.msgProcessors, tag) } // Handle is the "input" side of the multiplexer. It dispatches the message to the previously defined handler. func (m *Multiplexer) Handle(msg IncomingMessage) OutgoingMessage { - handler, ok := m.getHandler(msg.Tag) + if handler, ok := m.getHandler(msg.Tag); ok { + return handler.Handle(msg) + } + return OutgoingMessage{} +} - if ok { - outmsg := handler.Handle(msg) - return outmsg +// Validate is an alternative "input" side of the multiplexer. It dispatches the message to the previously defined validator. +func (m *Multiplexer) Validate(msg IncomingMessage) ValidatedMessage { + if handler, ok := m.getProcessor(msg.Tag); ok { + return handler.Validate(msg) + } + return ValidatedMessage{} +} + +// Process is the second step of message handling after validation. It dispatches the message to the previously defined processor. +func (m *Multiplexer) Process(msg ValidatedMessage) OutgoingMessage { + if handler, ok := m.getProcessor(msg.Tag); ok { + return handler.Handle(msg) } return OutgoingMessage{} } -// RegisterHandlers registers the set of given message handlers. -func (m *Multiplexer) RegisterHandlers(dispatch []TaggedMessageHandler) { - mp := make(map[Tag]MessageHandler) - if existingMap := m.getHandlersMap(); existingMap != nil { +func registerMultiplexer[T any](target *atomic.Value, dispatch []taggedMessageDispatcher[T]) { + mp := make(map[Tag]T) + if existingMap := getMap[T](target); existingMap != nil { for k, v := range existingMap { mp[k] = v } @@ -77,13 +102,22 @@ func (m *Multiplexer) RegisterHandlers(dispatch []TaggedMessageHandler) { } mp[v.Tag] = v.MessageHandler } - m.msgHandlers.Store(mp) + target.Store(mp) } -// ClearHandlers deregisters all the existing message handlers other than the one provided in the excludeTags list -func (m *Multiplexer) ClearHandlers(excludeTags []Tag) { +// RegisterHandlers registers the set of given message handlers. +func (m *Multiplexer) RegisterHandlers(dispatch []TaggedMessageHandler) { + registerMultiplexer(&m.msgHandlers, dispatch) +} + +// RegisterProcessors registers the set of given message handlers. 
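With the generic registration above, one Multiplexer keeps two independent copy-on-write maps, and the two-stage path runs Validate first and Process only on Accept. A hedged sketch of driving that flow directly; the pass-through processor and the transaction tag are illustrative:

package muxexample

import (
	"github.com/algorand/go-algorand/network"
	"github.com/algorand/go-algorand/protocol"
)

type passThroughProcessor struct{}

func (passThroughProcessor) Validate(msg network.IncomingMessage) network.ValidatedMessage {
	return network.ValidatedMessage{Action: network.Accept, Tag: msg.Tag, ValidatedMessage: msg.Data}
}

func (passThroughProcessor) Handle(network.ValidatedMessage) network.OutgoingMessage {
	return network.OutgoingMessage{}
}

func dispatch(msg network.IncomingMessage) network.OutgoingMessage {
	mux := network.MakeMultiplexer()
	mux.RegisterProcessors([]network.TaggedMessageProcessor{
		{Tag: network.Tag(protocol.TxnTag), MessageHandler: passThroughProcessor{}},
	})
	// two-stage path: validation can veto the message before the handling step runs
	if vm := mux.Validate(msg); vm.Action == network.Accept {
		return mux.Process(vm)
	}
	return network.OutgoingMessage{}
}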
+func (m *Multiplexer) RegisterProcessors(dispatch []TaggedMessageProcessor) { + registerMultiplexer(&m.msgProcessors, dispatch) +} + +func clearMultiplexer[T any](target *atomic.Value, excludeTags []Tag) { if len(excludeTags) == 0 { - m.msgHandlers.Store(make(map[Tag]MessageHandler)) + target.Store(make(map[Tag]T)) return } @@ -93,13 +127,23 @@ func (m *Multiplexer) ClearHandlers(excludeTags []Tag) { excludeTagsMap[tag] = true } - currentHandlersMap := m.getHandlersMap() - newMap := make(map[Tag]MessageHandler, len(excludeTagsMap)) - for tag, handler := range currentHandlersMap { + currentMap := getMap[T](target) + newMap := make(map[Tag]T, len(excludeTagsMap)) + for tag, handler := range currentMap { if excludeTagsMap[tag] { newMap[tag] = handler } } - m.msgHandlers.Store(newMap) + target.Store(newMap) +} + +// ClearHandlers deregisters all the existing message handlers other than the one provided in the excludeTags list +func (m *Multiplexer) ClearHandlers(excludeTags []Tag) { + clearMultiplexer[MessageHandler](&m.msgHandlers, excludeTags) +} + +// ClearProcessors deregisters all the existing message handlers other than the one provided in the excludeTags list +func (m *Multiplexer) ClearProcessors(excludeTags []Tag) { + clearMultiplexer[MessageProcessor](&m.msgProcessors, excludeTags) } diff --git a/network/netidentity.go b/network/netidentity.go index 1474739bd1..4d797a1a5b 100644 --- a/network/netidentity.go +++ b/network/netidentity.go @@ -94,12 +94,34 @@ type identityChallengeScheme interface { VerifyResponse(h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) } +type identityChallengeSigner interface { + Sign(message crypto.Hashable) crypto.Signature + SignBytes(message []byte) crypto.Signature + PublicKey() crypto.PublicKey +} + +type identityChallengeLegacySigner struct { + keys *crypto.SignatureSecrets +} + +func (s *identityChallengeLegacySigner) Sign(message crypto.Hashable) crypto.Signature { + return s.keys.Sign(message) +} + +func (s *identityChallengeLegacySigner) SignBytes(message []byte) crypto.Signature { + return s.keys.SignBytes(message) +} + +func (s *identityChallengeLegacySigner) PublicKey() crypto.PublicKey { + return s.keys.SignatureVerifier +} + // identityChallengePublicKeyScheme implements IdentityChallengeScheme by // exchanging and verifying public key challenges and attaching them to headers, // or returning the message payload to be sent type identityChallengePublicKeyScheme struct { dedupName string - identityKeys *crypto.SignatureSecrets + identityKeys identityChallengeSigner } // NewIdentityChallengeScheme will create a default Identification Scheme @@ -108,15 +130,21 @@ func NewIdentityChallengeScheme(dn string) *identityChallengePublicKeyScheme { if dn == "" { return &identityChallengePublicKeyScheme{} } + var seed crypto.Seed crypto.RandBytes(seed[:]) return &identityChallengePublicKeyScheme{ dedupName: dn, - identityKeys: crypto.GenerateSignatureSecrets(seed), + identityKeys: &identityChallengeLegacySigner{keys: crypto.GenerateSignatureSecrets(seed)}, } } +// NewIdentityChallengeSchemeWithSigner will create an identification Scheme with a given signer +func NewIdentityChallengeSchemeWithSigner(dn string, signer identityChallengeSigner) *identityChallengePublicKeyScheme { + return &identityChallengePublicKeyScheme{dedupName: dn, identityKeys: signer} +} + // AttachChallenge will generate a new identity challenge and will encode and attach the challenge // as a header. 
It returns the identityChallengeValue used for this challenge, so the network can // confirm it later (by passing it to VerifyResponse), or returns an empty challenge if dedupName is @@ -126,7 +154,7 @@ func (i identityChallengePublicKeyScheme) AttachChallenge(attachTo http.Header, return identityChallengeValue{} } c := identityChallenge{ - Key: i.identityKeys.SignatureVerifier, + Key: i.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), PublicAddress: []byte(addr), } @@ -173,7 +201,7 @@ func (i identityChallengePublicKeyScheme) VerifyRequestAndAttachResponse(attachT } // make the response object, encode it and attach it to the header r := identityChallengeResponse{ - Key: i.identityKeys.SignatureVerifier, + Key: i.identityKeys.PublicKey(), Challenge: idChal.Msg.Challenge, ResponseChallenge: newIdentityChallengeValue(), } @@ -271,12 +299,12 @@ type identityVerificationMessageSigned struct { Signature crypto.Signature `codec:"sig"` } -func (i identityChallenge) signAndEncodeB64(s *crypto.SignatureSecrets) string { +func (i identityChallenge) signAndEncodeB64(s identityChallengeSigner) string { signedChal := i.Sign(s) return base64.StdEncoding.EncodeToString(protocol.Encode(&signedChal)) } -func (i identityChallenge) Sign(secrets *crypto.SignatureSecrets) identityChallengeSigned { +func (i identityChallenge) Sign(secrets identityChallengeSigner) identityChallengeSigned { return identityChallengeSigned{Msg: i, Signature: secrets.Sign(i)} } @@ -289,12 +317,12 @@ func (i identityChallengeSigned) Verify() bool { return i.Msg.Key.Verify(i.Msg, i.Signature) } -func (i identityChallengeResponse) signAndEncodeB64(s *crypto.SignatureSecrets) string { +func (i identityChallengeResponse) signAndEncodeB64(s identityChallengeSigner) string { signedChalResp := i.Sign(s) return base64.StdEncoding.EncodeToString(protocol.Encode(&signedChalResp)) } -func (i identityChallengeResponse) Sign(secrets *crypto.SignatureSecrets) identityChallengeResponseSigned { +func (i identityChallengeResponse) Sign(secrets identityChallengeSigner) identityChallengeResponseSigned { return identityChallengeResponseSigned{Msg: i, Signature: secrets.Sign(i)} } @@ -307,7 +335,7 @@ func (i identityChallengeResponseSigned) Verify() bool { return i.Msg.Key.Verify(i.Msg, i.Signature) } -func (i identityVerificationMessage) Sign(secrets *crypto.SignatureSecrets) identityVerificationMessageSigned { +func (i identityVerificationMessage) Sign(secrets identityChallengeSigner) identityVerificationMessageSigned { return identityVerificationMessageSigned{Msg: i, Signature: secrets.Sign(i)} } diff --git a/network/netidentity_test.go b/network/netidentity_test.go index 9650069224..f87480c1b1 100644 --- a/network/netidentity_test.go +++ b/network/netidentity_test.go @@ -180,7 +180,7 @@ func TestIdentityChallengeSchemeBadSignature(t *testing.T) { // Copy the logic of attaching the header and signing so we can sign it wrong c := identityChallengeSigned{ Msg: identityChallenge{ - Key: i.identityKeys.SignatureVerifier, + Key: i.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), PublicAddress: []byte("i1"), }} @@ -232,7 +232,7 @@ func TestIdentityChallengeSchemeBadResponseSignature(t *testing.T) { r := http.Header{} resp := identityChallengeResponseSigned{ Msg: identityChallengeResponse{ - Key: i.identityKeys.SignatureVerifier, + Key: i.identityKeys.PublicKey(), Challenge: origChal, ResponseChallenge: newIdentityChallengeValue(), }} diff --git a/network/netprio.go b/network/netprio.go index 5d91dad8d1..9c6c510608 100644 
--- a/network/netprio.go +++ b/network/netprio.go @@ -46,7 +46,7 @@ func prioResponseHandler(message IncomingMessage) OutgoingMessage { addr, err := wn.prioScheme.VerifyPrioResponse(challenge, message.Data) if err != nil { - wn.log.Warnf("prioScheme.VerifyPrioResponse from %s: %v", peer.rootURL, err) + wn.log.Warnf("prioScheme.VerifyPrioResponse from %s: %v", peer.GetAddress(), err) } else { weight := wn.prioScheme.GetPrioWeight(addr) diff --git a/network/p2p/capabilities.go b/network/p2p/capabilities.go new file mode 100644 index 0000000000..e5781aa389 --- /dev/null +++ b/network/p2p/capabilities.go @@ -0,0 +1,179 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package p2p + +import ( + "context" + "sync" + "time" + + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + libpeerstore "github.com/libp2p/go-libp2p/core/peerstore" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/logging" + algoDht "github.com/algorand/go-algorand/network/p2p/dht" + "github.com/algorand/go-algorand/protocol" +) + +// Capability represents functions that some nodes may provide and other nodes would want to know about +type Capability string + +const ( + // Archival nodes + Archival Capability = "archival" + // Catchpoints storing nodes + Catchpoints = "catchpointStoring" + // Gossip nodes are non permissioned relays + Gossip = "gossip" +) + +const operationTimeout = time.Second * 5 +const maxAdvertisementInterval = time.Hour * 22 + +// CapabilitiesDiscovery exposes Discovery interfaces and wraps underlying DHT methods to provide capabilities advertisement for the node +type CapabilitiesDiscovery struct { + disc discovery.Discovery + dht *dht.IpfsDHT + log logging.Logger + wg sync.WaitGroup +} + +// Advertise implements the discovery.Discovery/discovery.Advertiser interface +func (c *CapabilitiesDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) { + return c.disc.Advertise(ctx, ns, opts...) +} + +// FindPeers implements the discovery.Discovery/discovery.Discoverer interface +func (c *CapabilitiesDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) { + return c.disc.FindPeers(ctx, ns, opts...) 
+} + +// Close should be called when fully shutting down the node +func (c *CapabilitiesDiscovery) Close() error { + err := c.dht.Close() + c.wg.Wait() + return err +} + +// Host exposes the underlying libp2p host.Host object +func (c *CapabilitiesDiscovery) Host() host.Host { + return c.dht.Host() +} + +// AddPeer adds a given peer.AddrInfo to the Host's Peerstore, and the DHT's routing table +func (c *CapabilitiesDiscovery) AddPeer(p peer.AddrInfo) (bool, error) { + c.Host().Peerstore().AddAddrs(p.ID, p.Addrs, libpeerstore.AddressTTL) + return c.dht.RoutingTable().TryAddPeer(p.ID, true, true) +} + +// PeersForCapability returns a slice of peer.AddrInfo for a Capability +// Since CapabilitiesDiscovery uses a backoffcache, it will attempt to hit cache, then disk, then network +// in order to fetch n peers which are advertising the required capability. +func (c *CapabilitiesDiscovery) PeersForCapability(capability Capability, n int) ([]peer.AddrInfo, error) { + ctx, cancel := context.WithTimeout(context.Background(), operationTimeout) + defer cancel() + var peers []peer.AddrInfo + // +1 because it can include self but we exclude self from the returned list + // that might confuse the caller (and tests assertions) + peersChan, err := c.FindPeers(ctx, string(capability), discovery.Limit(n+1)) + if err != nil { + return nil, err + } + for p := range peersChan { + if p.ID.Size() > 0 && p.ID != c.Host().ID() { + peers = append(peers, p) + } + if len(peers) >= n { + break + } + } + return peers, nil +} + +// AdvertiseCapabilities periodically runs the Advertiser interface on the DHT +// If a capability fails to advertise we will retry every 10 seconds until full success +// This gets rerun every at the minimum ttl or the maxAdvertisementInterval. +func (c *CapabilitiesDiscovery) AdvertiseCapabilities(capabilities ...Capability) { + c.wg.Add(1) + go func() { + // Run the initial Advertisement immediately + nextExecution := time.After(time.Second / 10000) + defer func() { + c.wg.Done() + }() + + for { + select { + case <-c.dht.Context().Done(): + return + case <-nextExecution: + var err error + advertisementInterval := maxAdvertisementInterval + for _, capa := range capabilities { + ttl, err0 := c.Advertise(c.dht.Context(), string(capa)) + if err0 != nil { + err = err0 + c.log.Errorf("failed to advertise for capability %s: %v", capa, err0) + break + } + if ttl < advertisementInterval { + advertisementInterval = ttl + } + c.log.Infof("advertised capability %s", capa) + } + // If we failed to advertise, retry every 10 seconds until successful + if err != nil { + nextExecution = time.After(time.Second * 10) + } else { + // Otherwise, ensure we're at the correct interval + nextExecution = time.After(advertisementInterval) + } + } + } + }() +} + +// Sizer exposes the Size method +type Sizer interface { + Size() int +} + +// RoutingTable exposes some knowledge about the DHT routing table +func (c *CapabilitiesDiscovery) RoutingTable() Sizer { + return c.dht.RoutingTable() +} + +// MakeCapabilitiesDiscovery creates a new CapabilitiesDiscovery object which exposes peer discovery and capabilities advertisement +func MakeCapabilitiesDiscovery(ctx context.Context, cfg config.Local, h host.Host, networkID protocol.NetworkID, log logging.Logger, bootstrapFunc func() []peer.AddrInfo) (*CapabilitiesDiscovery, error) { + discDht, err := algoDht.MakeDHT(ctx, h, networkID, cfg, bootstrapFunc) + if err != nil { + return nil, err + } + discImpl, err := algoDht.MakeDiscovery(discDht) + if err != nil { + return nil, err 
+ } + return &CapabilitiesDiscovery{ + disc: discImpl, + dht: discDht, + log: log, + }, nil +} diff --git a/network/p2p/capabilities_test.go b/network/p2p/capabilities_test.go new file mode 100644 index 0000000000..881860f647 --- /dev/null +++ b/network/p2p/capabilities_test.go @@ -0,0 +1,354 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package p2p + +import ( + "context" + "math/rand" + "sync" + "testing" + "time" + + golog "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/logging" + algodht "github.com/algorand/go-algorand/network/p2p/dht" + "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestCapabilities_Discovery(t *testing.T) { + partitiontest.PartitionTest(t) + + golog.SetDebugLogging() + var caps []*CapabilitiesDiscovery + var addrs []peer.AddrInfo + testSize := 3 + for i := 0; i < testSize; i++ { + tempdir := t.TempDir() + ps, err := peerstore.NewPeerStore(nil, "") + require.NoError(t, err) + h, _, err := MakeHost(config.GetDefaultLocal(), tempdir, ps) + require.NoError(t, err) + capD, err := MakeCapabilitiesDiscovery(context.Background(), config.GetDefaultLocal(), h, "devtestnet", logging.Base(), func() []peer.AddrInfo { return nil }) + require.NoError(t, err) + caps = append(caps, capD) + addrs = append(addrs, peer.AddrInfo{ + ID: capD.Host().ID(), + Addrs: capD.Host().Addrs(), + }) + } + for _, capD := range caps { + peersAdded := 0 + for _, addr := range addrs { + added, err := capD.AddPeer(addr) + require.NoError(t, err) + require.True(t, added) + peersAdded++ + } + err := capD.dht.Bootstrap(context.Background()) + require.NoError(t, err) + capD.dht.ForceRefresh() + require.Equal(t, peersAdded, capD.dht.RoutingTable().Size()) + } +} + +func setupDHTHosts(t *testing.T, numHosts int) []*dht.IpfsDHT { + var hosts []host.Host + var bootstrapPeers []peer.AddrInfo + var dhts []*dht.IpfsDHT + cfg := config.GetDefaultLocal() + for i := 0; i < numHosts; i++ { + tmpdir := t.TempDir() + pk, err := GetPrivKey(cfg, tmpdir) + require.NoError(t, err) + ps, err := peerstore.NewPeerStore([]*peer.AddrInfo{}, "") + require.NoError(t, err) + h, err := libp2p.New( + libp2p.ListenAddrStrings("/dns4/localhost/tcp/0"), + libp2p.Identity(pk), + libp2p.Peerstore(ps)) + require.NoError(t, err) + hosts = append(hosts, h) + bootstrapPeers = append(bootstrapPeers, peer.AddrInfo{ID: h.ID(), Addrs: h.Addrs()}) + } + for _, h := range hosts { + ht, err := algodht.MakeDHT(context.Background(), h, "devtestnet", cfg, 
func() []peer.AddrInfo { return bootstrapPeers }) + require.NoError(t, err) + // this is a workaround for the following issue + // "failed to negotiate security protocol: error reading handshake message: noise: message is too short" + // it appears simultaneous connection attempts (dht.New() attempts to connect) causes this handshake error. + // https://github.com/libp2p/go-libp2p-noise/issues/70 + time.Sleep(200 * time.Millisecond) + + err = ht.Bootstrap(context.Background()) + require.NoError(t, err) + dhts = append(dhts, ht) + } + return dhts +} + +func waitForRouting(t *testing.T, disc *CapabilitiesDiscovery) { + refreshCtx, refCancel := context.WithTimeout(context.Background(), time.Second*5) + for { + select { + case <-refreshCtx.Done(): + refCancel() + require.Fail(t, "failed to populate routing table before timeout") + default: + if disc.dht.RoutingTable().Size() > 0 { + refCancel() + return + } + } + } +} + +func setupCapDiscovery(t *testing.T, numHosts int, numBootstrapPeers int) []*CapabilitiesDiscovery { + var hosts []host.Host + var bootstrapPeers []peer.AddrInfo + var capsDisc []*CapabilitiesDiscovery + cfg := config.GetDefaultLocal() + for i := 0; i < numHosts; i++ { + tmpdir := t.TempDir() + pk, err := GetPrivKey(cfg, tmpdir) + require.NoError(t, err) + ps, err := peerstore.NewPeerStore([]*peer.AddrInfo{}, "") + require.NoError(t, err) + h, err := libp2p.New( + libp2p.ListenAddrStrings("/dns4/localhost/tcp/0"), + libp2p.Identity(pk), + libp2p.Peerstore(ps)) + require.NoError(t, err) + hosts = append(hosts, h) + bootstrapPeers = append(bootstrapPeers, peer.AddrInfo{ID: h.ID(), Addrs: h.Addrs()}) + } + for _, h := range hosts { + bp := bootstrapPeers + if numBootstrapPeers != 0 && numBootstrapPeers != numHosts { + bp = make([]peer.AddrInfo, len(bootstrapPeers)) + copy(bp, bootstrapPeers) + rand.Shuffle(len(bootstrapPeers), func(i, j int) { + bp[i], bp[j] = bp[j], bp[i] + }) + bp = bp[:numBootstrapPeers] + } + ht, err := algodht.MakeDHT(context.Background(), h, "devtestnet", cfg, func() []peer.AddrInfo { return bp }) + require.NoError(t, err) + // this is a workaround for the following issue + // "failed to negotiate security protocol: error reading handshake message: noise: message is too short" + // it appears simultaneous connection attempts (dht.New() attempts to connect) causes this handshake error. 
+ // https://github.com/libp2p/go-libp2p-noise/issues/70 + time.Sleep(200 * time.Millisecond) + + disc, err := algodht.MakeDiscovery(ht) + require.NoError(t, err) + cd := &CapabilitiesDiscovery{ + disc: disc, + dht: ht, + log: logging.Base(), + } + capsDisc = append(capsDisc, cd) + } + return capsDisc +} + +func TestCapabilities_DHTTwoPeers(t *testing.T) { + partitiontest.PartitionTest(t) + + numAdvertisers := 2 + dhts := setupDHTHosts(t, numAdvertisers) + topic := "foobar" + for i, ht := range dhts { + disc, err := algodht.MakeDiscovery(ht) + require.NoError(t, err) + refreshCtx, refCancel := context.WithTimeout(context.Background(), time.Second*5) + peersPopulated: + for { + select { + case <-refreshCtx.Done(): + refCancel() + require.Fail(t, "failed to populate routing table before timeout") + default: + if ht.RoutingTable().Size() > 0 { + refCancel() + break peersPopulated + } + } + } + _, err = disc.Advertise(context.Background(), topic) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + var advertisers []peer.AddrInfo + peersChan, err := disc.FindPeers(ctx, topic, discovery.Limit(numAdvertisers)) + require.NoError(t, err) + pollingForPeers: + for { + select { + case p, open := <-peersChan: + if p.ID.Size() > 0 { + advertisers = append(advertisers, p) + } + if !open { + break pollingForPeers + } + } + } + cancel() + // Returned peers will include the querying node's ID since it advertises for the topic as well + require.Equal(t, i+1, len(advertisers)) + } +} + +func TestCapabilities_Varying(t *testing.T) { + partitiontest.PartitionTest(t) + + const numAdvertisers = 10 + + var tests = []struct { + name string + numBootstrap int + }{ + {"bootstrap=all", numAdvertisers}, + {"bootstrap=2", 2}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + capsDisc := setupCapDiscovery(t, numAdvertisers, test.numBootstrap) + noCap := capsDisc[:3] + archOnly := capsDisc[3:5] + catchOnly := capsDisc[5:7] + archCatch := capsDisc[7:] + + var wg sync.WaitGroup + wg.Add(len(archOnly) + len(catchOnly) + len(archCatch)) + for _, disc := range archOnly { + go func(disc *CapabilitiesDiscovery) { + defer wg.Done() + waitForRouting(t, disc) + disc.AdvertiseCapabilities(Archival) + }(disc) + } + for _, disc := range catchOnly { + go func(disc *CapabilitiesDiscovery) { + defer wg.Done() + waitForRouting(t, disc) + disc.AdvertiseCapabilities(Catchpoints) + }(disc) + } + for _, disc := range archCatch { + go func(disc *CapabilitiesDiscovery) { + defer wg.Done() + waitForRouting(t, disc) + disc.AdvertiseCapabilities(Archival, Catchpoints) + }(disc) + } + + wg.Wait() + + wg.Add(len(noCap) * 2) + for _, disc := range noCap { + go func(disc *CapabilitiesDiscovery) { + defer wg.Done() + require.Eventuallyf(t, + func() bool { + numArchPeers := len(archOnly) + len(archCatch) + peers, err := disc.PeersForCapability(Archival, numArchPeers) + if err == nil && len(peers) == numArchPeers { + return true + } + return false + }, + time.Minute, + time.Second, + "Not all expected archival peers were found", + ) + }(disc) + + go func(disc *CapabilitiesDiscovery) { + defer wg.Done() + require.Eventuallyf(t, + func() bool { + numCatchPeers := len(catchOnly) + len(archCatch) + peers, err := disc.PeersForCapability(Catchpoints, numCatchPeers) + if err == nil && len(peers) == numCatchPeers { + return true + } + return false + }, + time.Minute, + time.Second, + "Not all expected catchpoint peers were found", + ) + }(disc) + } + + wg.Wait() + + for _, disc 
:= range capsDisc[3:] { + err := disc.Close() + require.NoError(t, err) + // Make sure it actually closes + disc.wg.Wait() + } + }) + } +} + +func TestCapabilities_ExcludesSelf(t *testing.T) { + partitiontest.PartitionTest(t) + disc := setupCapDiscovery(t, 2, 2) + + testPeersFound := func(disc *CapabilitiesDiscovery, n int, cap Capability) bool { + peers, err := disc.PeersForCapability(cap, n) + if err == nil && len(peers) == n { + return true + } + return false + } + + waitForRouting(t, disc[0]) + disc[0].AdvertiseCapabilities(Archival) + // disc[1] finds Archival + require.Eventuallyf(t, + func() bool { return testPeersFound(disc[1], 1, Archival) }, + time.Minute, + time.Second, + "Could not find archival peer", + ) + + // disc[0] doesn't find itself + require.Neverf(t, + func() bool { return testPeersFound(disc[0], 1, Archival) }, + time.Second*5, + time.Second, + "Found self when searching for capability", + ) + + err := disc[0].Close() + require.NoError(t, err) + disc[0].wg.Wait() +} diff --git a/network/p2p/dht/dht.go b/network/p2p/dht/dht.go new file mode 100644 index 0000000000..1ea38bdd78 --- /dev/null +++ b/network/p2p/dht/dht.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
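Putting the capabilities API together, end to end: build a host, start advertising, then query for peers that advertise the same capability. A compressed sketch of the flow the tests above exercise; the package name, data directory, network ID, bootstrap list, and peer count are illustrative:

package capexample

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/logging"
	"github.com/algorand/go-algorand/network/p2p"
	"github.com/algorand/go-algorand/network/p2p/peerstore"
)

// advertiseArchival wires up a host, advertises the archival capability, and
// then looks up other archival peers via the DHT-backed discovery.
func advertiseArchival(ctx context.Context, dataDir string, bootstrap []peer.AddrInfo) ([]peer.AddrInfo, error) {
	cfg := config.GetDefaultLocal()
	ps, err := peerstore.NewPeerStore(nil, "")
	if err != nil {
		return nil, err
	}
	h, _, err := p2p.MakeHost(cfg, dataDir, ps)
	if err != nil {
		return nil, err
	}
	disc, err := p2p.MakeCapabilitiesDiscovery(ctx, cfg, h, "devtestnet", logging.Base(),
		func() []peer.AddrInfo { return bootstrap })
	if err != nil {
		return nil, err
	}
	disc.AdvertiseCapabilities(p2p.Archival) // retries internally every 10s until the DHT accepts it
	return disc.PeersForCapability(p2p.Archival, 4)
}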
+ +package dht + +import ( + "context" + "fmt" + "math/rand" + "time" + + dht "github.com/libp2p/go-libp2p-kad-dht" + dhtmetrics "github.com/libp2p/go-libp2p-kad-dht/metrics" + "github.com/libp2p/go-libp2p/core/discovery" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + crouting "github.com/libp2p/go-libp2p/core/routing" + "github.com/libp2p/go-libp2p/p2p/discovery/backoff" + "github.com/libp2p/go-libp2p/p2p/discovery/routing" + "go.opencensus.io/stats/view" + + "github.com/algorand/go-algorand/config" + algoproto "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/metrics" +) + +const minBackoff = time.Second * 5 +const maxBackoff = time.Second * 20 +const baseBackoff = float64(1.1) + +func dhtProtocolPrefix(networkID algoproto.NetworkID) protocol.ID { + return protocol.ID(fmt.Sprintf("/algorand/kad/%s", networkID)) +} + +// MakeDHT creates the dht.IpfsDHT object +func MakeDHT(ctx context.Context, h host.Host, networkID algoproto.NetworkID, cfg config.Local, bootstrapFunc func() []peer.AddrInfo) (*dht.IpfsDHT, error) { + dhtCfg := []dht.Option{ + // Automatically determine server or client mode + dht.Mode(dht.ModeAutoServer), + // We don't need the value store right now + dht.DisableValues(), + dht.ProtocolPrefix(dhtProtocolPrefix(networkID)), + dht.BootstrapPeersFunc(bootstrapFunc), + } + + if err := view.Register(dhtmetrics.DefaultViews...); err != nil { + return nil, err + } + metrics.DefaultRegistry().Register(&metrics.OpencensusDefaultMetrics) + + return dht.New(ctx, h, dhtCfg...) +} + +func backoffFactory() backoff.BackoffFactory { + return backoff.NewExponentialDecorrelatedJitter(minBackoff, maxBackoff, baseBackoff, rand.NewSource(rand.Int63())) +} + +// MakeDiscovery creates a discovery.Discovery object using backoff and cacching +func MakeDiscovery(r crouting.ContentRouting) (discovery.Discovery, error) { + return backoff.NewBackoffDiscovery(routing.NewRoutingDiscovery(r), backoffFactory(), backoff.WithBackoffDiscoveryReturnedChannelSize(0), backoff.WithBackoffDiscoverySimultaneousQueryBufferSize(0)) +} diff --git a/network/p2p/dht/dht_test.go b/network/p2p/dht/dht_test.go new file mode 100644 index 0000000000..51cb8978f7 --- /dev/null +++ b/network/p2p/dht/dht_test.go @@ -0,0 +1,64 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package dht + +import ( + "context" + "testing" + + logging "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/test/partitiontest" +) + +func TestDHTBasic(t *testing.T) { + partitiontest.PartitionTest(t) + + h, err := libp2p.New() + require.NoError(t, err) + dht, err := MakeDHT( + context.Background(), + h, + "devtestnet", + config.GetDefaultLocal(), + func() []peer.AddrInfo { return nil }) + require.NoError(t, err) + _, err = MakeDiscovery(dht) + require.NoError(t, err) + err = dht.Bootstrap(context.Background()) + require.NoError(t, err) +} + +func TestDHTBasicAlgodev(t *testing.T) { + partitiontest.PartitionTest(t) + + logging.SetDebugLogging() + h, err := libp2p.New() + require.NoError(t, err) + cfg := config.GetDefaultLocal() + cfg.DNSBootstrapID = ".algodev.network" + dht, err := MakeDHT(context.Background(), h, "betanet", cfg, func() []peer.AddrInfo { return nil }) + require.NoError(t, err) + _, err = MakeDiscovery(dht) + require.NoError(t, err) + err = dht.Bootstrap(context.Background()) + require.NoError(t, err) +} diff --git a/network/p2p/dnsaddr/resolve.go b/network/p2p/dnsaddr/resolve.go index 176d62f946..5e0e8007fc 100644 --- a/network/p2p/dnsaddr/resolve.go +++ b/network/p2p/dnsaddr/resolve.go @@ -30,13 +30,19 @@ func isDnsaddr(maddr multiaddr.Multiaddr) bool { } // Iterate runs through the resolvable dnsaddrs in the tree using the resolveController and invokes f for each dnsaddr node lookup -func Iterate(initial multiaddr.Multiaddr, controller *MultiaddrDNSResolveController, f func(dnsaddr multiaddr.Multiaddr, entries []multiaddr.Multiaddr) error) error { +func Iterate(initial multiaddr.Multiaddr, controller ResolveController, f func(dnsaddr multiaddr.Multiaddr, entries []multiaddr.Multiaddr) error) error { resolver := controller.Resolver() if resolver == nil { return errors.New("passed controller has no resolvers Iterate") } + const maxHops = 25 // any reasonable number to prevent infinite loop in case of circular dnsaddr + hops := 0 var toResolve = []multiaddr.Multiaddr{initial} for resolver != nil && len(toResolve) > 0 { + hops++ + if hops > maxHops { + return errors.New("max hops reached while resolving dnsaddr " + initial.String()) + } curr := toResolve[0] maddrs, resolveErr := resolver.Resolve(context.Background(), curr) if resolveErr != nil { @@ -64,7 +70,7 @@ func Iterate(initial multiaddr.Multiaddr, controller *MultiaddrDNSResolveControl // Any further dnsaddrs will be looked up until all TXT records have been fetched, // and the full list of resulting Multiaddrs is returned. // It uses the MultiaddrDNSResolveController to cycle through DNS resolvers on failure. 
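The production path for this is MultiaddrsFromResolver below; for clarity, here is a minimal sketch of driving Iterate directly with a ResolveController, collecting only the concrete (non-dnsaddr) entries while the hop cap above guards against circular records:

package dnsaddr

import "github.com/multiformats/go-multiaddr"

// resolveTree is a hypothetical helper: Iterate calls f once per resolved
// dnsaddr node and follows nested dnsaddrs itself, so f only needs to keep
// the leaf multiaddrs it cares about.
func resolveTree(domain string, controller ResolveController) ([]multiaddr.Multiaddr, error) {
	root, err := multiaddr.NewMultiaddr("/dnsaddr/" + domain)
	if err != nil {
		return nil, err
	}
	var leaves []multiaddr.Multiaddr
	err = Iterate(root, controller, func(node multiaddr.Multiaddr, entries []multiaddr.Multiaddr) error {
		for _, e := range entries {
			if !isDnsaddr(e) {
				leaves = append(leaves, e)
			}
		}
		return nil
	})
	return leaves, err
}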
-func MultiaddrsFromResolver(domain string, controller *MultiaddrDNSResolveController) ([]multiaddr.Multiaddr, error) { +func MultiaddrsFromResolver(domain string, controller ResolveController) ([]multiaddr.Multiaddr, error) { dnsaddr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/dnsaddr/%s", domain)) if err != nil { return nil, fmt.Errorf("unable to construct multiaddr for %s : %v", domain, err) diff --git a/network/p2p/dnsaddr/resolveController.go b/network/p2p/dnsaddr/resolveController.go index 73a46243d0..ff606b39a1 100644 --- a/network/p2p/dnsaddr/resolveController.go +++ b/network/p2p/dnsaddr/resolveController.go @@ -17,15 +17,29 @@ package dnsaddr import ( + "context" + + "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" log "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/tools/network" ) +// Resolver is an interface for resolving dnsaddrs +type Resolver interface { + Resolve(ctx context.Context, maddr multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) +} + +// ResolveController is an interface for cycling through resolvers +type ResolveController interface { + Resolver() Resolver + NextResolver() Resolver +} + // MultiaddrDNSResolveController returns a madns.Resolver, cycling through underlying net.Resolvers type MultiaddrDNSResolveController struct { - resolver *madns.Resolver + resolver Resolver nextResolvers []func() *madns.Resolver controller network.ResolveController } @@ -45,7 +59,7 @@ func NewMultiaddrDNSResolveController(secure bool, fallbackDNSResolverAddress st } // NextResolver applies the nextResolvers functions in order and returns the most recent result -func (c *MultiaddrDNSResolveController) NextResolver() *madns.Resolver { +func (c *MultiaddrDNSResolveController) NextResolver() Resolver { if len(c.nextResolvers) == 0 { c.resolver = nil } else { @@ -56,7 +70,7 @@ func (c *MultiaddrDNSResolveController) NextResolver() *madns.Resolver { } // Resolver returns the current resolver, invokes NextResolver if the resolver is nil -func (c *MultiaddrDNSResolveController) Resolver() *madns.Resolver { +func (c *MultiaddrDNSResolveController) Resolver() Resolver { if c.resolver == nil { c.resolver = c.NextResolver() } diff --git a/network/p2p/dnsaddr/resolve_test.go b/network/p2p/dnsaddr/resolve_test.go index 937e4db183..30acbd3e5f 100644 --- a/network/p2p/dnsaddr/resolve_test.go +++ b/network/p2p/dnsaddr/resolve_test.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "testing" + "time" "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" @@ -36,22 +37,22 @@ func TestIsDnsaddr(t *testing.T) { t.Parallel() testcases := []struct { - name string - addr string - expected bool + name string + addr string + isDnsaddr bool }{ - {name: "DnsAddr", addr: "/dnsaddr/foobar.com", expected: true}, - {name: "DnsAddrWithPeerId", addr: "/dnsaddr/foobar.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", expected: true}, - {name: "DnsAddrWithIPPeerId", addr: "/dnsaddr/foobar.com/ip4/127.0.0.1/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", expected: true}, - {name: "Dns4Addr", addr: "/dns4/foobar.com/", expected: false}, - {name: "Dns6Addr", addr: "/dns6/foobar.com/", expected: false}, - {name: "Dns4AddrWithPeerId", addr: "/dns4/foobar.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", expected: false}, + {name: "DnsAddr", addr: "/dnsaddr/foobar.com", isDnsaddr: true}, + {name: "DnsAddrWithPeerId", addr: 
"/dnsaddr/foobar.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", isDnsaddr: true}, + {name: "DnsAddrWithIPPeerId", addr: "/dnsaddr/foobar.com/ip4/127.0.0.1/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", isDnsaddr: true}, + {name: "Dns4Addr", addr: "/dns4/foobar.com/", isDnsaddr: false}, + {name: "Dns6Addr", addr: "/dns6/foobar.com/", isDnsaddr: false}, + {name: "Dns4AddrWithPeerId", addr: "/dns4/foobar.com/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", isDnsaddr: false}, } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { maddr, err := multiaddr.NewMultiaddr(testcase.addr) require.NoError(t, err) - require.Equal(t, testcase.expected, isDnsaddr(maddr)) + require.Equal(t, testcase.isDnsaddr, isDnsaddr(maddr)) }) } } @@ -109,3 +110,42 @@ func TestMultiaddrsFromResolverDnsFailure(t *testing.T) { assert.Empty(t, maddrs) assert.ErrorContains(t, err, "always errors") } + +type mockController struct { +} + +func (c mockController) Resolver() Resolver { + return selfResolver{} +} + +func (c mockController) NextResolver() Resolver { + return nil +} + +type selfResolver struct { +} + +func (r selfResolver) Resolve(ctx context.Context, maddr multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + return []multiaddr.Multiaddr{maddr}, nil +} + +// TestIterate ensures the Iterate() does not hang in infinite loop +// when resolver returns the same dnsaddr +func TestIterate(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + dnsAddr := "/dnsaddr/foobar.com" + require.True(t, isDnsaddr(multiaddr.StringCast(dnsAddr))) + ma, err := multiaddr.NewMultiaddr(dnsAddr) + require.NoError(t, err) + + require.Eventually(t, func() bool { + Iterate( + ma, + mockController{}, + func(dnsaddr multiaddr.Multiaddr, entries []multiaddr.Multiaddr) error { return nil }, + ) + return true + }, 100*time.Millisecond, 50*time.Millisecond) +} diff --git a/network/p2p/http.go b/network/p2p/http.go new file mode 100644 index 0000000000..9f2622d015 --- /dev/null +++ b/network/p2p/http.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package p2p + +import ( + "net/http" + "sync" + "time" + + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/limitcaller" + "github.com/gorilla/mux" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + libp2phttp "github.com/libp2p/go-libp2p/p2p/http" +) + +// algorandP2pHTTPProtocol defines a libp2p protocol name for algorand's http over p2p messages +const algorandP2pHTTPProtocol = "/algorand-http/1.0.0" + +// HTTPServer is a wrapper around libp2phttp.Host that allows registering http handlers with path parameters. 
+type HTTPServer struct { + libp2phttp.Host + p2phttpMux *mux.Router + p2phttpMuxRegistrarOnce sync.Once +} + +// MakeHTTPServer creates a new HTTPServer +func MakeHTTPServer(streamHost host.Host) *HTTPServer { + httpServer := HTTPServer{ + Host: libp2phttp.Host{StreamHost: streamHost}, + p2phttpMux: mux.NewRouter(), + } + return &httpServer +} + +// RegisterHTTPHandler registers a http handler with a given path. +func (s *HTTPServer) RegisterHTTPHandler(path string, handler http.Handler) { + s.p2phttpMux.Handle(path, handler) + s.p2phttpMuxRegistrarOnce.Do(func() { + s.Host.SetHTTPHandlerAtPath(algorandP2pHTTPProtocol, "/", s.p2phttpMux) + }) +} + +// MakeHTTPClient creates a http.Client that uses libp2p transport for a given protocol and peer address. +func MakeHTTPClient(addrInfo *peer.AddrInfo) (*http.Client, error) { + clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs) + if err != nil { + return nil, err + } + logging.Base().Debugf("MakeHTTPClient made a new P2P host %s for %s", clientStreamHost.ID(), addrInfo.String()) + + client := libp2phttp.Host{StreamHost: clientStreamHost} + + // Do not use client.NamespacedClient to prevent it making connection to a well-known handler + // to make a NamespaceRoundTripper that limits to specific URL paths. + // First, we do not want make requests when listing peers (the main MakeHTTPClient invoker). + // Secondly, this makes unit testing easier - no need to register fake handlers. + rt, err := client.NewConstrainedRoundTripper(*addrInfo) + if err != nil { + return nil, err + } + + return &http.Client{Transport: rt}, nil +} + +// MakeHTTPClientWithRateLimit creates a http.Client that uses libp2p transport for a given protocol and peer address. +func MakeHTTPClientWithRateLimit(addrInfo *peer.AddrInfo, pstore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration, maxIdleConnsPerHost int) (*http.Client, error) { + cl, err := MakeHTTPClient(addrInfo) + if err != nil { + return nil, err + } + rlrt := limitcaller.MakeRateLimitingTransportWithRoundTripper(pstore, queueingTimeout, cl.Transport, addrInfo, maxIdleConnsPerHost) + cl.Transport = &rlrt + return cl, nil + +} diff --git a/network/p2p/logger.go b/network/p2p/logger.go new file mode 100644 index 0000000000..26c738e1e1 --- /dev/null +++ b/network/p2p/logger.go @@ -0,0 +1,123 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// This package implement a zap.Core in order to wrap lip2p logger into algod's logger. 
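Stepping back to the HTTP helpers above, a minimal sketch of wiring both sides; the "/example/status" route and the handler body are illustrative only:

package p2p

import (
	"net/http"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
)

// exampleHTTPWiring registers a gorilla/mux route on the server side under the
// algorand-http protocol and returns a client whose transport dials the given
// peer over a libp2p stream. MakeHTTPClientWithRateLimit wraps the same
// transport with the limitcaller rate-limiting round tripper when throttling
// is needed.
func exampleHTTPWiring(serverHost host.Host, serverInfo *peer.AddrInfo) (*http.Client, error) {
	srv := MakeHTTPServer(serverHost)
	srv.RegisterHTTPHandler("/example/status", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}))
	return MakeHTTPClient(serverInfo)
}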
+ +package p2p + +import ( + "runtime" + "strings" + + p2plogging "github.com/ipfs/go-log/v2" + "github.com/sirupsen/logrus" + "go.uber.org/zap/zapcore" + + "github.com/algorand/go-algorand/logging" +) + +// var levelsMap = map[logging.Level]zapcore.Level{ +// logging.Debug: zapcore.DebugLevel, +// logging.Info: zapcore.InfoLevel, +// logging.Warn: zapcore.WarnLevel, +// logging.Error: zapcore.ErrorLevel, +// logging.Fatal: zapcore.FatalLevel, +// logging.Panic: zapcore.PanicLevel, +// } + +var levelsMap = map[zapcore.Level]logging.Level{ + zapcore.DebugLevel: logging.Debug, + zapcore.InfoLevel: logging.Info, + zapcore.WarnLevel: logging.Warn, + zapcore.ErrorLevel: logging.Error, + zapcore.FatalLevel: logging.Fatal, + zapcore.PanicLevel: logging.Panic, +} + +// loggingCore implements zapcore.Core +type loggingCore struct { + log logging.Logger + level logging.Level + fields []zapcore.Field + zapcore.Core +} + +// EnableP2PLogging enables libp2p logging into the provided logger with the provided level. +func EnableP2PLogging(log logging.Logger, l logging.Level) { + core := loggingCore{ + log: log, + level: l, + } + for p2pLevel, logLevel := range levelsMap { + if logLevel == l { + p2plogging.SetAllLoggers(p2plogging.LogLevel(p2pLevel)) + break + } + } + p2plogging.SetPrimaryCore(&core) +} + +func (c *loggingCore) Enabled(l zapcore.Level) bool { + level := levelsMap[l] + return c.log.IsLevelEnabled(level) +} + +func (c *loggingCore) With(fields []zapcore.Field) zapcore.Core { + return &loggingCore{ + log: c.log, + level: c.level, + fields: append(c.fields, fields...), + } +} + +func (c *loggingCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if c.Enabled(e.Level) { + return ce.AddCore(e, c) + } + return ce +} + +func (c *loggingCore) Write(e zapcore.Entry, fields []zapcore.Field) error { + allFields := append(c.fields, fields...) 
+ loggingFields := make(logging.Fields, len(allFields)) + + for _, f := range allFields { + if len(f.String) > 0 { + loggingFields[f.Key] = f.String + } else if f.Interface != nil { + loggingFields[f.Key] = f.Interface + } else { + loggingFields[f.Key] = f.Integer + } + } + event := c.log.WithFields(loggingFields).With("libp2p", e.LoggerName) + file := e.Caller.File + slash := strings.LastIndex(file, "/") + file = file[slash+1:] + event = event.WithFields(logrus.Fields{ + "file": file, + "line": e.Caller.Line, + }) + if function := runtime.FuncForPC(e.Caller.PC); function != nil { + event = event.With("function", function.Name()) + } + event.Entry().Log(logrus.Level(levelsMap[e.Level]), e.Message) + return nil +} + +func (c *loggingCore) Sync() error { + return nil +} diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index cd84f1c69c..f4ed670f3e 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -18,6 +18,7 @@ package p2p import ( "context" + "encoding/base32" "fmt" "runtime" "strings" @@ -25,22 +26,37 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" + pstore "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/network/phonebook" + "github.com/algorand/go-algorand/util/metrics" "github.com/algorand/go-deadlock" "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + libp2phttp "github.com/libp2p/go-libp2p/p2p/http" "github.com/libp2p/go-libp2p/p2p/muxer/yamux" + "github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/libp2p/go-libp2p/p2p/transport/tcp" + "github.com/multiformats/go-multiaddr" ) +// SubNextCancellable is an abstraction for pubsub.Subscription +type SubNextCancellable interface { + Next(ctx context.Context) (*pubsub.Message, error) + Cancel() +} + // Service defines the interface used by the network integrating with underlying p2p implementation type Service interface { + Start() error Close() error - ID() peer.ID // return peer.ID for self + ID() peer.ID // return peer.ID for self + IDSigner() *PeerIDChallengeSigner AddrInfo() peer.AddrInfo // return addrInfo for self DialNode(context.Context, *peer.AddrInfo) error @@ -49,17 +65,21 @@ type Service interface { Conns() []network.Conn ListPeersForTopic(topic string) []peer.ID - Subscribe(topic string, val pubsub.ValidatorEx) (*pubsub.Subscription, error) + Subscribe(topic string, val pubsub.ValidatorEx) (SubNextCancellable, error) Publish(ctx context.Context, topic string, data []byte) error + + GetStream(peer.ID) (network.Stream, bool) } // serviceImpl manages integration with libp2p and implements the Service interface type serviceImpl struct { - log logging.Logger - host host.Host - streams *streamManager - pubsub *pubsub.PubSub - pubsubCtx context.Context + log logging.Logger + listenAddr string + host host.Host + streams *streamManager + pubsub *pubsub.PubSub + pubsubCtx context.Context + privKey crypto.PrivKey topics map[string]*pubsub.Topic topicsMu deadlock.RWMutex @@ -68,14 +88,19 @@ type serviceImpl struct { // AlgorandWsProtocol defines a libp2p protocol name for algorand's websockets messages const AlgorandWsProtocol = "/algorand-ws/1.0.0" +// 
algorandGUIDProtocolPrefix defines a libp2p protocol name for algorand node telemetry GUID exchange +const algorandGUIDProtocolPrefix = "/algorand-telemetry/1.0.0/" +const algorandGUIDProtocolTemplate = algorandGUIDProtocolPrefix + "%s/%s" + const dialTimeout = 30 * time.Second -// MakeService creates a P2P service instance -func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, datadir string, pstore peerstore.Peerstore, wsStreamHandler StreamHandler) (*serviceImpl, error) { +// MakeHost creates a libp2p host but does not start listening. +// Use host.Network().Listen() on the returned address to start listening. +func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host.Host, string, error) { // load stored peer ID, or make ephemeral peer ID privKey, err := GetPrivKey(cfg, datadir) if err != nil { - return nil, err + return nil, "", err } // muxer supports tweaking fields from yamux.Config @@ -93,38 +118,110 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, data listenAddr = "/ip4/0.0.0.0/tcp/0" } - h, err := libp2p.New( + // the libp2p.NoListenAddrs builtin disables relays but this one does not + var noListenAddrs = func(cfg *libp2p.Config) error { + cfg.ListenAddrs = []multiaddr.Multiaddr{} + return nil + } + + var disableMetrics = func(cfg *libp2p.Config) error { return nil } + metrics.DefaultRegistry().Register(&metrics.PrometheusDefaultMetrics) + + host, err := libp2p.New( libp2p.Identity(privKey), libp2p.UserAgent(ua), libp2p.Transport(tcp.NewTCPTransport), libp2p.Muxer("/yamux/1.0.0", &ymx), libp2p.Peerstore(pstore), - libp2p.ListenAddrStrings(listenAddr), + noListenAddrs, + libp2p.Security(noise.ID, noise.New), + disableMetrics, ) - if err != nil { - return nil, err + return &StreamChainingHost{ + Host: host, + handlers: map[protocol.ID][]network.StreamHandler{}, + }, listenAddr, err +} + +// StreamChainingHost is a wrapper around host.Host that overrides SetStreamHandler +// to allow chaining multiple handlers for the same protocol. +// Note, there should be probably only single handler that writes/reads streams. +type StreamChainingHost struct { + host.Host + handlers map[protocol.ID][]network.StreamHandler + mutex deadlock.Mutex +} + +// SetStreamHandler overrides the host.Host.SetStreamHandler method for chaining multiple handlers. +// Function objects are not comparable so theoretically it could have duplicates. +// The main use case is to track HTTP streams for ProtocolIDForMultistreamSelect = "/http/1.1" +// so it could just filter for such protocol if there any issues with other protocols like kad or mesh. 
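As an aside on MakeHost above: it deliberately builds the host with no listen addresses, so nothing is accepted until the caller (normally serviceImpl.Start below) listens explicitly. A minimal sketch of that split:

package p2p

import (
	"github.com/multiformats/go-multiaddr"

	"github.com/algorand/go-algorand/config"
	pstore "github.com/algorand/go-algorand/network/p2p/peerstore"
)

// exampleDeferredListen is a hypothetical helper: MakeHost returns the
// non-listening host plus the listen address it computed from cfg, and the
// caller decides when to actually start accepting connections.
func exampleDeferredListen(cfg config.Local, dataDir string, ps *pstore.PeerStore) error {
	h, listenAddr, err := MakeHost(cfg, dataDir, ps)
	if err != nil {
		return err
	}
	ma, err := multiaddr.NewMultiaddr(listenAddr)
	if err != nil {
		return err
	}
	// no connections are accepted before this call
	return h.Network().Listen(ma)
}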
+func (h *StreamChainingHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) { + h.mutex.Lock() + defer h.mutex.Unlock() + + handlers := h.handlers[pid] + if len(handlers) == 0 { + // no other handlers, do not set a proxy handler + h.Host.SetStreamHandler(pid, handler) + h.handlers[pid] = append(handlers, handler) + return } - log.Infof("P2P service started: peer ID %s addrs %s", h.ID(), h.Addrs()) + // otherwise chain the handlers with a copy of the existing handlers + handlers = append(handlers, handler) + // copy to save it in the closure and call lock free + currentHandlers := make([]network.StreamHandler, len(handlers)) + copy(currentHandlers, handlers) + h.Host.SetStreamHandler(pid, func(s network.Stream) { + for _, h := range currentHandlers { + h(s) + } + }) + h.handlers[pid] = handlers +} + +// MakeService creates a P2P service instance +func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler, bootstrapPeers []*peer.AddrInfo) (*serviceImpl, error) { sm := makeStreamManager(ctx, log, h, wsStreamHandler) h.Network().Notify(sm) h.SetStreamHandler(AlgorandWsProtocol, sm.streamHandler) + h.SetStreamHandler(libp2phttp.ProtocolIDForMultistreamSelect, sm.streamHandlerHTTP) + + // set an empty handler for telemetryID/telemetryInstance protocol in order to allow other peers to know our telemetryID + telemetryID := log.GetTelemetryGUID() + telemetryInstance := log.GetInstanceName() + telemetryProtoInfo := formatPeerTelemetryInfoProtocolName(telemetryID, telemetryInstance) + h.SetStreamHandler(protocol.ID(telemetryProtoInfo), func(s network.Stream) { s.Close() }) ps, err := makePubSub(ctx, cfg, h) if err != nil { return nil, err } - return &serviceImpl{ - log: log, - host: h, - streams: sm, - pubsub: ps, - pubsubCtx: ctx, - topics: make(map[string]*pubsub.Topic), + + log: log, + listenAddr: listenAddr, + host: h, + streams: sm, + pubsub: ps, + pubsubCtx: ctx, + privKey: h.Peerstore().PrivKey(h.ID()), + topics: make(map[string]*pubsub.Topic), }, nil } +// Start starts the P2P service +func (s *serviceImpl) Start() error { + listenAddr, err := multiaddr.NewMultiaddr(s.listenAddr) + if err != nil { + s.log.Errorf("failed to create multiaddress: %s", err) + return err + } + + return s.host.Network().Listen(listenAddr) +} + // Close shuts down the P2P service func (s *serviceImpl) Close() error { return s.host.Close() @@ -135,22 +232,35 @@ func (s *serviceImpl) ID() peer.ID { return s.host.ID() } +// IDSigner returns a PeerIDChallengeSigner that implements the network identityChallengeSigner interface +func (s *serviceImpl) IDSigner() *PeerIDChallengeSigner { + return &PeerIDChallengeSigner{key: s.privKey} +} + // DialPeersUntilTargetCount attempts to establish connections to the provided phonebook addresses func (s *serviceImpl) DialPeersUntilTargetCount(targetConnCount int) { - peerIDs := s.host.Peerstore().Peers() - for _, peerID := range peerIDs { + ps := s.host.Peerstore().(*pstore.PeerStore) + peerIDs := ps.GetAddresses(targetConnCount, phonebook.PhoneBookEntryRelayRole) + conns := s.host.Network().Conns() + var numOutgoingConns int + for _, conn := range conns { + if conn.Stat().Direction == network.DirOutbound { + numOutgoingConns++ + } + } + for _, peerInfo := range peerIDs { + peerInfo := peerInfo.(*peer.AddrInfo) // if we are at our target count stop trying to connect - if len(s.host.Network().Conns()) == targetConnCount { + if numOutgoingConns >= targetConnCount { return } // if we are 
already connected to this peer, skip it - if len(s.host.Network().ConnsToPeer(peerID)) > 0 { + if len(s.host.Network().ConnsToPeer(peerInfo.ID)) > 0 { continue } - peerInfo := s.host.Peerstore().PeerInfo(peerID) - err := s.DialNode(context.Background(), &peerInfo) // leaving the calls as blocking for now, to not over-connect beyond fanout + err := s.DialNode(context.Background(), peerInfo) // leaving the calls as blocking for now, to not over-connect beyond fanout if err != nil { - s.log.Warnf("failed to connect to peer %s: %v", peerID, err) + s.log.Warnf("failed to connect to peer %s: %v", peerInfo.ID, err) } } } @@ -184,6 +294,10 @@ func (s *serviceImpl) ClosePeer(peer peer.ID) error { return s.host.Network().ClosePeer(peer) } +func (s *serviceImpl) GetStream(peerID peer.ID) (network.Stream, bool) { + return s.streams.getStream(peerID) +} + // netAddressToListenAddress converts a netAddress in "ip:port" format to a listen address // that can be passed in to libp2p.ListenAddrStrings func netAddressToListenAddress(netAddress string) (string, error) { @@ -203,3 +317,32 @@ func netAddressToListenAddress(netAddress string) (string, error) { return fmt.Sprintf("/ip4/%s/tcp/%s", ip, parts[1]), nil } + +// GetPeerTelemetryInfo returns the telemetry ID of a peer by looking at its protocols +func GetPeerTelemetryInfo(peerProtocols []protocol.ID) (telemetryID string, telemetryInstance string) { + for _, protocol := range peerProtocols { + if strings.HasPrefix(string(protocol), algorandGUIDProtocolPrefix) { + telemetryInfo := string(protocol[len(algorandGUIDProtocolPrefix):]) + telemetryInfoParts := strings.Split(telemetryInfo, "/") + if len(telemetryInfoParts) == 2 { + telemetryIDBytes, err := base32.StdEncoding.DecodeString(telemetryInfoParts[0]) + if err == nil { + telemetryID = string(telemetryIDBytes) + } + telemetryInstanceBytes, err := base32.StdEncoding.DecodeString(telemetryInfoParts[1]) + if err == nil { + telemetryInstance = string(telemetryInstanceBytes) + } + return telemetryID, telemetryInstance + } + } + } + return "", "" +} + +func formatPeerTelemetryInfoProtocolName(telemetryID string, telemetryInstance string) string { + return fmt.Sprintf(algorandGUIDProtocolTemplate, + base32.StdEncoding.EncodeToString([]byte(telemetryID)), + base32.StdEncoding.EncodeToString([]byte(telemetryInstance)), + ) +} diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 558131fe48..fb14193a55 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -17,11 +17,22 @@ package p2p import ( + "context" "fmt" + "sync/atomic" "testing" + "time" - "github.com/algorand/go-algorand/test/partitiontest" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/test/partitiontest" ) // Tests the helper function netAddressToListenAddress which converts @@ -74,3 +85,153 @@ func TestNetAddressToListenAddress(t *testing.T) { }) } } + +func TestP2PStreamingHost(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := config.GetDefaultLocal() + dir := t.TempDir() + pstore, err := peerstore.NewPeerStore(nil, "") + require.NoError(t, err) + h, la, err := MakeHost(cfg, dir, pstore) + require.NoError(t, err) + + var h1calls atomic.Int64 + 
h1 := func(network.Stream) { + h1calls.Add(1) + } + var h2calls atomic.Int64 + h2 := func(network.Stream) { + h2calls.Add(1) + } + + ma, err := multiaddr.NewMultiaddr(la) + require.NoError(t, err) + h.Network().Listen(ma) + defer h.Close() + + h.SetStreamHandler(AlgorandWsProtocol, h1) + h.SetStreamHandler(AlgorandWsProtocol, h2) + + addrInfo := peer.AddrInfo{ + ID: h.ID(), + Addrs: h.Addrs(), + } + cpstore, err := peerstore.NewPeerStore([]*peer.AddrInfo{&addrInfo}, "") + require.NoError(t, err) + c, _, err := MakeHost(cfg, dir, cpstore) + require.NoError(t, err) + defer c.Close() + + s1, err := c.NewStream(context.Background(), h.ID(), AlgorandWsProtocol) + require.NoError(t, err) + s1.Write([]byte("hello")) + defer s1.Close() + + require.Eventually(t, func() bool { + return h1calls.Load() == 1 && h2calls.Load() == 1 + }, 5*time.Second, 100*time.Millisecond) + + // ensure a single handler also works as expected + h1calls.Store(0) + h.SetStreamHandler(algorandP2pHTTPProtocol, h1) + + s2, err := c.NewStream(context.Background(), h.ID(), algorandP2pHTTPProtocol) + require.NoError(t, err) + s2.Write([]byte("hello")) + defer s2.Close() + + require.Eventually(t, func() bool { + return h1calls.Load() == 1 + }, 5*time.Second, 100*time.Millisecond) + +} + +// TestP2PGetPeerTelemetryInfo tests the GetPeerTelemetryInfo function +func TestP2PGetPeerTelemetryInfo(t *testing.T) { + partitiontest.PartitionTest(t) + + testCases := []struct { + name string + peerProtocols []protocol.ID + expectedTelemetryID string + expectedTelemetryInstance string + }{ + { + name: "Valid Telemetry Info", + peerProtocols: []protocol.ID{protocol.ID(formatPeerTelemetryInfoProtocolName("telemetryID", "telemetryInstance"))}, + expectedTelemetryID: "telemetryID", + expectedTelemetryInstance: "telemetryInstance", + }, + { + name: "Partial Telemetry Info 1", + peerProtocols: []protocol.ID{protocol.ID(formatPeerTelemetryInfoProtocolName("telemetryID", ""))}, + expectedTelemetryID: "telemetryID", + expectedTelemetryInstance: "", + }, + { + name: "Partial Telemetry Info 2", + peerProtocols: []protocol.ID{protocol.ID(formatPeerTelemetryInfoProtocolName("", "telemetryInstance"))}, + expectedTelemetryID: "", + expectedTelemetryInstance: "telemetryInstance", + }, + { + name: "No Telemetry Info", + peerProtocols: []protocol.ID{protocol.ID("/some-other-protocol/1.0.0/otherID/otherInstance")}, + expectedTelemetryID: "", + expectedTelemetryInstance: "", + }, + { + name: "Invalid Telemetry Info Format", + peerProtocols: []protocol.ID{protocol.ID("/algorand-telemetry/1.0.0/invalidFormat")}, + expectedTelemetryID: "", + expectedTelemetryInstance: "", + }, + { + name: "Special Characters Telemetry Info Format", + peerProtocols: []protocol.ID{protocol.ID(formatPeerTelemetryInfoProtocolName("telemetry/ID", "123-//11-33"))}, + expectedTelemetryID: "telemetry/ID", + expectedTelemetryInstance: "123-//11-33", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + telemetryID, telemetryInstance := GetPeerTelemetryInfo(tc.peerProtocols) + if telemetryID != tc.expectedTelemetryID || telemetryInstance != tc.expectedTelemetryInstance { + t.Errorf("Expected telemetry ID: %s, telemetry instance: %s, but got telemetry ID: %s, telemetry instance: %s", + tc.expectedTelemetryID, tc.expectedTelemetryInstance, telemetryID, telemetryInstance) + } + }) + } +} + +func TestP2PProtocolAsMeta(t *testing.T) { + partitiontest.PartitionTest(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h1, err := 
libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) + require.NoError(t, err) + defer h1.Close() + + h1TID := "telemetryID1" + h1Inst := "telemetryInstance2" + telemetryProtoInfo := formatPeerTelemetryInfoProtocolName(h1TID, h1Inst) + h1.SetStreamHandler(protocol.ID(telemetryProtoInfo), func(s network.Stream) { s.Close() }) + + h2, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) + require.NoError(t, err) + defer h2.Close() + + err = h2.Connect(ctx, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()}) + require.NoError(t, err) + + protos, err := h2.Peerstore().GetProtocols(h1.ID()) + require.NoError(t, err) + + tid, inst := GetPeerTelemetryInfo(protos) + require.Equal(t, h1TID, tid) + require.Equal(t, h1Inst, inst) +} diff --git a/network/p2p/peerID.go b/network/p2p/peerID.go index f31d29ffb7..ca7526977b 100644 --- a/network/p2p/peerID.go +++ b/network/p2p/peerID.go @@ -25,6 +25,7 @@ import ( "path" "github.com/algorand/go-algorand/config" + algocrypto "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/util" "github.com/libp2p/go-libp2p/core/crypto" @@ -38,6 +39,10 @@ const DefaultPrivKeyPath = "peerIDPrivKey.pem" // PeerID is a string representation of a peer's public key, primarily used to avoid importing libp2p into packages that shouldn't need it type PeerID string +func (id PeerID) String() string { + return peer.ID(id).String() +} + // GetPrivKey manages loading and creation of private keys for network PeerIDs // It prioritizes, in this order: // 1. user supplied path to privKey @@ -104,3 +109,39 @@ func generatePrivKey() (crypto.PrivKey, error) { priv, _, err := crypto.GenerateEd25519Key(rand.Reader) return priv, err } + +// PeerIDChallengeSigner implements the identityChallengeSigner interface in the network package. +type PeerIDChallengeSigner struct { + key crypto.PrivKey +} + +// Sign implements the identityChallengeSigner interface. +func (p *PeerIDChallengeSigner) Sign(message algocrypto.Hashable) algocrypto.Signature { + return p.SignBytes(algocrypto.HashRep(message)) +} + +// SignBytes implements the identityChallengeSigner interface. +func (p *PeerIDChallengeSigner) SignBytes(message []byte) algocrypto.Signature { + // libp2p Ed25519PrivateKey.Sign() returns a signature with a length of 64 bytes and no error + sig, err := p.key.Sign(message) + if len(sig) != len(algocrypto.Signature{}) { + panic(fmt.Sprintf("invalid signature length: %d", len(sig))) + } + if err != nil { + panic(err) + } + return algocrypto.Signature(sig) +} + +// PublicKey implements the identityChallengeSigner interface. 
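A minimal sketch of how the signer above is expected to be used by identity-challenge code; it assumes the algod crypto package's SignatureVerifier.VerifyBytes API on the verifying side, which is not part of this patch:

package p2p

import algocrypto "github.com/algorand/go-algorand/crypto"

// exampleIdentitySig is a hypothetical helper: sign raw bytes with the libp2p
// identity key and check them with the matching algod public key.
// Assumption: algocrypto.SignatureVerifier.VerifyBytes verifies a plain
// ed25519 signature over the raw message, matching SignBytes above.
func exampleIdentitySig(signer *PeerIDChallengeSigner, msg []byte) bool {
	sig := signer.SignBytes(msg)
	verifier := algocrypto.SignatureVerifier(signer.PublicKey())
	return verifier.VerifyBytes(msg, sig)
}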
+func (p *PeerIDChallengeSigner) PublicKey() algocrypto.PublicKey { + // libp2p Ed25519PublicKey.Raw() returns a 32-byte public key and no error + pub, err := p.key.GetPublic().Raw() + if len(pub) != len(algocrypto.PublicKey{}) { + panic(fmt.Sprintf("invalid public key length: %d", len(pub))) + } + if err != nil { + panic(err) + } + return algocrypto.PublicKey(pub) +} diff --git a/network/p2p/peerstore/peerstore.go b/network/p2p/peerstore/peerstore.go index fa572c5912..3eda0d3686 100644 --- a/network/p2p/peerstore/peerstore.go +++ b/network/p2p/peerstore/peerstore.go @@ -22,6 +22,8 @@ import ( "math/rand" "time" + "github.com/algorand/go-algorand/network/phonebook" + "github.com/algorand/go-deadlock" "github.com/libp2p/go-libp2p/core/peer" libp2p "github.com/libp2p/go-libp2p/core/peerstore" mempstore "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" @@ -32,12 +34,6 @@ import ( // of how many addresses the phonebook actually has. ( with the retry-after logic applied ) const getAllAddresses = math.MaxInt32 -// PhoneBookEntryRoles defines the roles that a single entry on the phonebook can take. -// currently, we have two roles : relay role and archiver role, which are mutually exclusive. -// -//msgp:ignore PhoneBookEntryRoles -type PhoneBookEntryRoles int - const addressDataKey string = "addressData" // PeerStore implements Peerstore and CertifiedAddrBook. @@ -58,9 +54,10 @@ type addressData struct { // networkNames: lists the networks to which the given address belongs. networkNames map[string]bool + mu *deadlock.RWMutex // role is the role that this address serves. - role PhoneBookEntryRoles + role phonebook.PhoneBookEntryRoles // persistent is set true for peers whose record should not be removed for the peer list persistent bool @@ -73,18 +70,20 @@ type peerStoreCAB interface { } // NewPeerStore creates a new peerstore backed by a datastore. -func NewPeerStore(addrInfo []*peer.AddrInfo) (*PeerStore, error) { +func NewPeerStore(addrInfo []*peer.AddrInfo, network string) (*PeerStore, error) { ps, err := mempstore.NewPeerstore() if err != nil { return nil, fmt.Errorf("cannot initialize a peerstore: %w", err) } // initialize peerstore with addresses + peers := make([]interface{}, len(addrInfo)) for i := 0; i < len(addrInfo); i++ { - info := addrInfo[i] - ps.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) + peers[i] = addrInfo[i] } + pstore := &PeerStore{peerStoreCAB: ps} + pstore.AddPersistentPeers(peers, network, phonebook.PhoneBookEntryRelayRole) return pstore, nil } @@ -103,13 +102,13 @@ func MakePhonebook(connectionsRateLimitingCount uint, } // GetAddresses returns up to N addresses, but may return fewer -func (ps *PeerStore) GetAddresses(n int, role PhoneBookEntryRoles) []string { +func (ps *PeerStore) GetAddresses(n int, role phonebook.PhoneBookEntryRoles) []interface{} { return shuffleSelect(ps.filterRetryTime(time.Now(), role), n) } // UpdateRetryAfter updates the retryAfter time for the given address. func (ps *PeerStore) UpdateRetryAfter(addr string, retryAfter time.Time) { - info, err := PeerInfoFromDomainPort(addr) + info, err := peerInfoFromDomainPort(addr) if err != nil { return } @@ -130,15 +129,12 @@ func (ps *PeerStore) UpdateRetryAfter(addr string, retryAfter time.Time) { // The connection should be established when the waitTime is 0. // It will register a provisional next connection time when the waitTime is 0. 
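A minimal sketch of the intended call sequence now that these methods key on the peer ID string rather than a domain:port address; the dial callback stands in for whatever actually opens the connection:

package peerstore

import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// exampleRateLimitedDial is a hypothetical helper: ask for the wait time,
// sleep if needed (which registers a fresh provisional slot on the retry),
// dial, then confirm the provisional slot with UpdateConnectionTime.
func exampleRateLimitedDial(ps *PeerStore, peerID peer.ID, dial func() error) error {
	known, wait, provisional := ps.GetConnectionWaitTime(string(peerID))
	if !known {
		return nil // peer is not tracked in the phonebook; nothing to throttle
	}
	if wait > 0 {
		time.Sleep(wait)
		// sketch: assumes the wait has elapsed and a slot is now available
		_, _, provisional = ps.GetConnectionWaitTime(string(peerID))
	}
	if err := dial(); err != nil {
		return err
	}
	ps.UpdateConnectionTime(string(peerID), provisional)
	return nil
}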
// The provisional time should be updated after the connection with UpdateConnectionTime -func (ps *PeerStore) GetConnectionWaitTime(addr string) (bool, time.Duration, time.Time) { +func (ps *PeerStore) GetConnectionWaitTime(addrOrPeerID string) (bool, time.Duration, time.Time) { curTime := time.Now() - info, err := PeerInfoFromDomainPort(addr) - if err != nil { - return false, 0 /* not used */, curTime /* not used */ - } var timeSince time.Duration var numElmtsToRemove int - metadata, err := ps.Get(info.ID, addressDataKey) + peerID := peer.ID(addrOrPeerID) + metadata, err := ps.Get(peerID, addressDataKey) if err != nil { return false, 0 /* not used */, curTime /* not used */ } @@ -157,9 +153,9 @@ func (ps *PeerStore) GetConnectionWaitTime(addr string) (bool, time.Duration, ti } // Remove the expired elements from e.data[addr].recentConnectionTimes - ps.popNElements(numElmtsToRemove, peer.ID(addr)) + ps.popNElements(numElmtsToRemove, peerID) // If there are max number of connections within the time window, wait - metadata, _ = ps.Get(info.ID, addressDataKey) + metadata, _ = ps.Get(peerID, addressDataKey) ad, ok = metadata.(addressData) if !ok { return false, 0 /* not used */, curTime /* not used */ @@ -175,17 +171,14 @@ func (ps *PeerStore) GetConnectionWaitTime(addr string) (bool, time.Duration, ti // Update curTime, since it may have significantly changed if waited provisionalTime := time.Now() // Append the provisional time for the next connection request - ps.appendTime(info.ID, provisionalTime) + ps.appendTime(peerID, provisionalTime) return true, 0 /* no wait. proceed */, provisionalTime } // UpdateConnectionTime updates the connection time for the given address. -func (ps *PeerStore) UpdateConnectionTime(addr string, provisionalTime time.Time) bool { - info, err := PeerInfoFromDomainPort(addr) - if err != nil { - return false - } - metadata, err := ps.Get(info.ID, addressDataKey) +func (ps *PeerStore) UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool { + peerID := peer.ID(addrOrPeerID) + metadata, err := ps.Get(peerID, addressDataKey) if err != nil { return false } @@ -194,7 +187,7 @@ func (ps *PeerStore) UpdateConnectionTime(addr string, provisionalTime time.Time return false } defer func() { - _ = ps.Put(info.ID, addressDataKey, ad) + _ = ps.Put(peerID, addressDataKey, ad) }() @@ -217,7 +210,7 @@ func (ps *PeerStore) UpdateConnectionTime(addr string, provisionalTime time.Time } // ReplacePeerList replaces the peer list for the given networkName and role. -func (ps *PeerStore) ReplacePeerList(addressesThey []string, networkName string, role PhoneBookEntryRoles) { +func (ps *PeerStore) ReplacePeerList(addressesThey []interface{}, networkName string, role phonebook.PhoneBookEntryRoles) { // prepare a map of items we'd like to remove. removeItems := make(map[peer.ID]bool, 0) peerIDs := ps.Peers() @@ -225,23 +218,24 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []string, networkName string, data, _ := ps.Get(pid, addressDataKey) if data != nil { ad := data.(addressData) + ad.mu.RLock() if ad.networkNames[networkName] && ad.role == role && !ad.persistent { removeItems[pid] = true } + ad.mu.RUnlock() } } for _, addr := range addressesThey { - info, err := PeerInfoFromDomainPort(addr) - if err != nil { - return - } + info := addr.(*peer.AddrInfo) data, _ := ps.Get(info.ID, addressDataKey) if data != nil { // we already have this. 
// Update the networkName ad := data.(addressData) + ad.mu.Lock() ad.networkNames[networkName] = true + ad.mu.Unlock() // do not remove this entry delete(removeItems, info.ID) @@ -261,13 +255,9 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []string, networkName string, // AddPersistentPeers stores addresses of peers which are persistent. // i.e. they won't be replaced by ReplacePeerList calls -func (ps *PeerStore) AddPersistentPeers(dnsAddresses []string, networkName string, role PhoneBookEntryRoles) { - +func (ps *PeerStore) AddPersistentPeers(dnsAddresses []interface{}, networkName string, role phonebook.PhoneBookEntryRoles) { for _, addr := range dnsAddresses { - info, err := PeerInfoFromDomainPort(addr) - if err != nil { - return - } + info := addr.(*peer.AddrInfo) data, _ := ps.Get(info.ID, addressDataKey) if data != nil { // we already have this. @@ -291,9 +281,10 @@ func (ps *PeerStore) Length() int { } // makePhonebookEntryData creates a new address entry for provided network name and role. -func makePhonebookEntryData(networkName string, role PhoneBookEntryRoles, persistent bool) addressData { +func makePhonebookEntryData(networkName string, role phonebook.PhoneBookEntryRoles, persistent bool) addressData { pbData := addressData{ networkNames: make(map[string]bool), + mu: &deadlock.RWMutex{}, recentConnectionTimes: make([]time.Time, 0), role: role, persistent: persistent, @@ -308,8 +299,11 @@ func (ps *PeerStore) deletePhonebookEntry(peerID peer.ID, networkName string) { return } ad := data.(addressData) + ad.mu.Lock() delete(ad.networkNames, networkName) - if 0 == len(ad.networkNames) { + isEmpty := len(ad.networkNames) == 0 + ad.mu.Unlock() + if isEmpty { ps.ClearAddrs(peerID) _ = ps.Put(peerID, addressDataKey, nil) } @@ -334,21 +328,23 @@ func (ps *PeerStore) popNElements(n int, peerID peer.ID) { _ = ps.Put(peerID, addressDataKey, ad) } -func (ps *PeerStore) filterRetryTime(t time.Time, role PhoneBookEntryRoles) []string { - o := make([]string, 0, len(ps.Peers())) +func (ps *PeerStore) filterRetryTime(t time.Time, role phonebook.PhoneBookEntryRoles) []interface{} { + o := make([]interface{}, 0, len(ps.Peers())) for _, peerID := range ps.Peers() { data, _ := ps.Get(peerID, addressDataKey) if data != nil { ad := data.(addressData) if t.After(ad.retryAfter) && role == ad.role { - o = append(o, string(peerID)) + mas := ps.Addrs(peerID) + info := peer.AddrInfo{ID: peerID, Addrs: mas} + o = append(o, &info) } } } return o } -func shuffleSelect(set []string, n int) []string { +func shuffleSelect(set []interface{}, n int) []interface{} { if n >= len(set) || n == getAllAddresses { // return shuffled copy of everything out := slices.Clone(set) @@ -365,13 +361,13 @@ func shuffleSelect(set []string, n int) []string { } } } - out := make([]string, n) + out := make([]interface{}, n) for i, index := range indexSample { out[i] = set[index] } return out } -func shuffleStrings(set []string) { +func shuffleStrings(set []interface{}) { rand.Shuffle(len(set), func(i, j int) { set[i], set[j] = set[j], set[i] }) } diff --git a/network/p2p/peerstore/peerstore_test.go b/network/p2p/peerstore/peerstore_test.go index 9bbf2b87c6..e855013d76 100644 --- a/network/p2p/peerstore/peerstore_test.go +++ b/network/p2p/peerstore/peerstore_test.go @@ -23,11 +23,13 @@ import ( "testing" "time" - "github.com/algorand/go-algorand/test/partitiontest" libp2p_crypto "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" libp2p "github.com/libp2p/go-libp2p/core/peerstore" 
"github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/network/phonebook" + "github.com/algorand/go-algorand/test/partitiontest" ) // PhoneBookEntryRelayRole used for all the relays that are provided either via the algobootstrap SRV record @@ -49,7 +51,7 @@ func TestPeerstore(t *testing.T) { } addrInfo, _ := PeerInfoFromAddrs(peerAddrs) - ps, err := NewPeerStore(addrInfo) + ps, err := NewPeerStore(addrInfo, "net-id") require.NoError(t, err) defer ps.Close() @@ -87,12 +89,13 @@ func TestPeerstore(t *testing.T) { } -func testPhonebookAll(t *testing.T, set []string, ph *PeerStore) { +func testPhonebookAll(t *testing.T, set []*peer.AddrInfo, ph *PeerStore) { actual := ph.GetAddresses(len(set), PhoneBookEntryRelayRole) for _, got := range actual { + info := got.(*peer.AddrInfo) ok := false for _, known := range set { - if got == known { + if info.ID == known.ID { ok = true break } @@ -104,7 +107,8 @@ func testPhonebookAll(t *testing.T, set []string, ph *PeerStore) { for _, known := range set { ok := false for _, got := range actual { - if got == known { + info := got.(*peer.AddrInfo) + if info.ID == known.ID { ok = true break } @@ -115,18 +119,19 @@ func testPhonebookAll(t *testing.T, set []string, ph *PeerStore) { } } -func testPhonebookUniform(t *testing.T, set []string, ph *PeerStore, getsize int) { +func testPhonebookUniform(t *testing.T, set []*peer.AddrInfo, ph *PeerStore, getsize int) { uniformityTestLength := 250000 / len(set) expected := (uniformityTestLength * getsize) / len(set) counts := make(map[string]int) for i := 0; i < len(set); i++ { - counts[set[i]] = 0 + counts[set[i].ID.String()] = 0 } for i := 0; i < uniformityTestLength; i++ { actual := ph.GetAddresses(getsize, PhoneBookEntryRelayRole) for _, xa := range actual { - if _, ok := counts[xa]; ok { - counts[xa]++ + info := xa.(*peer.AddrInfo) + if _, ok := counts[info.ID.String()]; ok { + counts[info.ID.String()]++ } } } @@ -149,57 +154,84 @@ func TestArrayPhonebookAll(t *testing.T) { partitiontest.PartitionTest(t) set := []string{"a:4041", "b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} + infoSet := make([]*peer.AddrInfo, 0) + for _, addr := range set { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoSet = append(infoSet, info) + } + ph, err := MakePhonebook(1, 1*time.Millisecond) require.NoError(t, err) for _, addr := range set { entry := makePhonebookEntryData("", PhoneBookEntryRelayRole, false) - info, _ := PeerInfoFromDomainPort(addr) + info, _ := peerInfoFromDomainPort(addr) ph.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) ph.Put(info.ID, addressDataKey, entry) } - testPhonebookAll(t, set, ph) + testPhonebookAll(t, infoSet, ph) } func TestArrayPhonebookUniform1(t *testing.T) { partitiontest.PartitionTest(t) set := []string{"a:4041", "b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} + infoSet := make([]*peer.AddrInfo, 0) + for _, addr := range set { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoSet = append(infoSet, info) + } + ph, err := MakePhonebook(1, 1*time.Millisecond) require.NoError(t, err) for _, addr := range set { entry := makePhonebookEntryData("", PhoneBookEntryRelayRole, false) - info, _ := PeerInfoFromDomainPort(addr) + info, _ := peerInfoFromDomainPort(addr) ph.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) ph.Put(info.ID, addressDataKey, entry) } - testPhonebookUniform(t, set, ph, 1) + testPhonebookUniform(t, infoSet, ph, 1) } func 
TestArrayPhonebookUniform3(t *testing.T) { partitiontest.PartitionTest(t) set := []string{"a:4041", "b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} + infoSet := make([]*peer.AddrInfo, 0) + for _, addr := range set { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoSet = append(infoSet, info) + } + ph, err := MakePhonebook(1, 1*time.Millisecond) require.NoError(t, err) for _, addr := range set { entry := makePhonebookEntryData("", PhoneBookEntryRelayRole, false) - info, _ := PeerInfoFromDomainPort(addr) + info, _ := peerInfoFromDomainPort(addr) ph.AddAddrs(info.ID, info.Addrs, libp2p.AddressTTL) ph.Put(info.ID, addressDataKey, entry) } - testPhonebookUniform(t, set, ph, 3) + testPhonebookUniform(t, infoSet, ph, 3) } func TestMultiPhonebook(t *testing.T) { partitiontest.PartitionTest(t) set := []string{"a:4041", "b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} - pha := make([]string, 0) - for _, e := range set[:5] { + infoSet := make([]*peer.AddrInfo, 0) + for _, addr := range set { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoSet = append(infoSet, info) + } + pha := make([]interface{}, 0) + for _, e := range infoSet[:5] { pha = append(pha, e) } - phb := make([]string, 0) - for _, e := range set[5:] { + phb := make([]interface{}, 0) + for _, e := range infoSet[5:] { phb = append(phb, e) } @@ -208,9 +240,9 @@ func TestMultiPhonebook(t *testing.T) { ph.ReplacePeerList(pha, "pha", PhoneBookEntryRelayRole) ph.ReplacePeerList(phb, "phb", PhoneBookEntryRelayRole) - testPhonebookAll(t, set, ph) - testPhonebookUniform(t, set, ph, 1) - testPhonebookUniform(t, set, ph, 3) + testPhonebookAll(t, infoSet, ph) + testPhonebookUniform(t, infoSet, ph, 1) + testPhonebookUniform(t, infoSet, ph, 3) } // TestMultiPhonebookPersistentPeers validates that the peers added via Phonebook.AddPersistentPeers @@ -218,14 +250,23 @@ func TestMultiPhonebook(t *testing.T) { func TestMultiPhonebookPersistentPeers(t *testing.T) { partitiontest.PartitionTest(t) - persistentPeers := []string{"a:4041"} + info, err := peerInfoFromDomainPort("a:4041") + require.NoError(t, err) + persistentPeers := []interface{}{info} set := []string{"b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} - pha := make([]string, 0) - for _, e := range set[:5] { + infoSet := make([]*peer.AddrInfo, 0) + for _, addr := range set { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoSet = append(infoSet, info) + } + + pha := make([]interface{}, 0) + for _, e := range infoSet[:5] { pha = append(pha, e) } - phb := make([]string, 0) - for _, e := range set[5:] { + phb := make([]interface{}, 0) + for _, e := range infoSet[5:] { phb = append(phb, e) } ph, err := MakePhonebook(1, 1*time.Millisecond) @@ -235,10 +276,19 @@ func TestMultiPhonebookPersistentPeers(t *testing.T) { ph.ReplacePeerList(pha, "pha", PhoneBookEntryRelayRole) ph.ReplacePeerList(phb, "phb", PhoneBookEntryRelayRole) - testPhonebookAll(t, append(set, persistentPeers...), ph) + testPhonebookAll(t, append(infoSet, info), ph) allAddresses := ph.GetAddresses(len(set)+len(persistentPeers), PhoneBookEntryRelayRole) for _, pp := range persistentPeers { - require.Contains(t, allAddresses, pp) + pp := pp.(*peer.AddrInfo) + found := false + for _, addr := range allAddresses { + addr := addr.(*peer.AddrInfo) + if addr.ID == pp.ID { + found = true + break + } + } + require.True(t, found, fmt.Sprintf("%s not 
found in %v", string(pp.ID), allAddresses)) } } @@ -246,12 +296,19 @@ func TestMultiPhonebookDuplicateFiltering(t *testing.T) { partitiontest.PartitionTest(t) set := []string{"b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} - pha := make([]string, 0) - for _, e := range set[:7] { + infoSet := make([]*peer.AddrInfo, 0) + for _, addr := range set { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoSet = append(infoSet, info) + } + + pha := make([]interface{}, 0) + for _, e := range infoSet[:7] { pha = append(pha, e) } - phb := make([]string, 0) - for _, e := range set[3:] { + phb := make([]interface{}, 0) + for _, e := range infoSet[3:] { phb = append(phb, e) } ph, err := MakePhonebook(1, 1*time.Millisecond) @@ -259,9 +316,9 @@ func TestMultiPhonebookDuplicateFiltering(t *testing.T) { ph.ReplacePeerList(pha, "pha", PhoneBookEntryRelayRole) ph.ReplacePeerList(phb, "phb", PhoneBookEntryRelayRole) - testPhonebookAll(t, set, ph) - testPhonebookUniform(t, set, ph, 1) - testPhonebookUniform(t, set, ph, 3) + testPhonebookAll(t, infoSet, ph) + testPhonebookUniform(t, infoSet, ph, 1) + testPhonebookUniform(t, infoSet, ph, 3) } func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { @@ -276,21 +333,21 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { require.NoError(t, err) addr1 := "addrABC:4040" addr2 := "addrXYZ:4041" - info1, _ := PeerInfoFromDomainPort(addr1) - info2, _ := PeerInfoFromDomainPort(addr2) + info1, _ := peerInfoFromDomainPort(addr1) + info2, _ := peerInfoFromDomainPort(addr2) // Address not in. Should return false - addrInPhonebook, _, provisionalTime := entries.GetConnectionWaitTime(addr1) + addrInPhonebook, _, provisionalTime := entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, false, addrInPhonebook) - require.Equal(t, false, entries.UpdateConnectionTime(addr1, provisionalTime)) + require.Equal(t, false, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) // Test the addresses are populated in the phonebook and a // time can be added to one of them - entries.ReplacePeerList([]string{addr1, addr2}, "default", PhoneBookEntryRelayRole) - addrInPhonebook, waitTime, provisionalTime := entries.GetConnectionWaitTime(addr1) + entries.ReplacePeerList([]interface{}{info1, info2}, "default", PhoneBookEntryRelayRole) + addrInPhonebook, waitTime, provisionalTime := entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, true, addrInPhonebook) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) data, _ := entries.Get(info1.ID, addressDataKey) require.NotNil(t, data) ad := data.(addressData) @@ -303,9 +360,9 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { } // add another value to addr - addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1) + addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) data, _ = entries.Get(info1.ID, addressDataKey) ad = data.(addressData) phBookData = ad.recentConnectionTimes @@ -318,9 +375,9 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { // the first time should be removed and a 
new one added // there should not be any wait - addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1) + addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info1.ID), provisionalTime)) data, _ = entries.Get(info1.ID, addressDataKey) ad = data.(addressData) phBookData2 := ad.recentConnectionTimes @@ -335,9 +392,9 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { // add 3 values to another address. should not wait // value 1 - _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2) + _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info2.ID)) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info2.ID), provisionalTime)) // introduce a gap between the two requests so that only the first will be removed later when waited // simulate passing a unit of time @@ -349,13 +406,13 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { } // value 2 - _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2) + _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info2.ID)) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info2.ID), provisionalTime)) // value 3 - _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2) + _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info2.ID)) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info2.ID), provisionalTime)) data2, _ = entries.Get(info2.ID, addressDataKey) ad2 = data2.(addressData) @@ -364,7 +421,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { require.Equal(t, 3, len(phBookData)) // add another element to trigger wait - _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2) + _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info2.ID)) require.Greater(t, int64(waitTime), int64(0)) // no element should be removed data2, _ = entries.Get(info2.ID, addressDataKey) @@ -379,9 +436,9 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { } // The wait should be sufficient - _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2) + _, waitTime, provisionalTime = entries.GetConnectionWaitTime(string(info2.ID)) require.Equal(t, time.Duration(0), waitTime) - require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime)) + require.Equal(t, true, entries.UpdateConnectionTime(string(info2.ID), provisionalTime)) // only one element should be removed, and one added data2, _ = entries.Get(info2.ID, addressDataKey) ad2 = data2.(addressData) @@ -401,24 +458,40 @@ func TestPhonebookRoles(t *testing.T) { relaysSet := []string{"relay1:4040", "relay2:4041", "relay3:4042"} archiverSet := []string{"archiver1:1111", "archiver2:1112", "archiver3:1113"} + infoRelaySet := make([]interface{}, 0) + for _, addr := range relaysSet { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoRelaySet = append(infoRelaySet, info) + } + 
+ infoArchiverSet := make([]interface{}, 0) + for _, addr := range archiverSet { + info, err := peerInfoFromDomainPort(addr) + require.NoError(t, err) + infoArchiverSet = append(infoArchiverSet, info) + } + ph, err := MakePhonebook(1, 1) require.NoError(t, err) - ph.ReplacePeerList(relaysSet, "default", PhoneBookEntryRelayRole) - ph.ReplacePeerList(archiverSet, "default", PhoneBookEntryArchiverRole) + ph.ReplacePeerList(infoRelaySet, "default", PhoneBookEntryRelayRole) + ph.ReplacePeerList(infoArchiverSet, "default", PhoneBookEntryArchiverRole) require.Equal(t, len(relaysSet)+len(archiverSet), len(ph.Peers())) require.Equal(t, len(relaysSet)+len(archiverSet), ph.Length()) - for _, role := range []PhoneBookEntryRoles{PhoneBookEntryRelayRole, PhoneBookEntryArchiverRole} { + for _, role := range []phonebook.PhoneBookEntryRoles{PhoneBookEntryRelayRole, PhoneBookEntryArchiverRole} { for k := 0; k < 100; k++ { for l := 0; l < 3; l++ { entries := ph.GetAddresses(l, role) if role == PhoneBookEntryRelayRole { for _, entry := range entries { - require.Contains(t, entry, "relay") + entry := entry.(*peer.AddrInfo) + require.Contains(t, string(entry.ID), "relay") } } else if role == PhoneBookEntryArchiverRole { for _, entry := range entries { - require.Contains(t, entry, "archiver") + entry := entry.(*peer.AddrInfo) + require.Contains(t, string(entry.ID), "archiver") } } } diff --git a/network/p2p/peerstore/utils.go b/network/p2p/peerstore/utils.go index 02c6b2d8e6..90b0af497c 100644 --- a/network/p2p/peerstore/utils.go +++ b/network/p2p/peerstore/utils.go @@ -53,8 +53,8 @@ func PeerInfoFromAddr(addr string) (*peer.AddrInfo, error) { return info, nil } -// PeerInfoFromDomainPort converts a string of the form domain:port to AddrInfo -func PeerInfoFromDomainPort(domainPort string) (*peer.AddrInfo, error) { +// peerInfoFromDomainPort converts a string of the form domain:port to AddrInfo +func peerInfoFromDomainPort(domainPort string) (*peer.AddrInfo, error) { parts := strings.Split(domainPort, ":") if len(parts) != 2 || parts[0] == "" || parts[1] == "" { return nil, fmt.Errorf("invalid domain port string %s, found %d colon-separated parts", domainPort, len(parts)) diff --git a/network/p2p/peerstore/utils_test.go b/network/p2p/peerstore/utils_test.go index 9f20b0f75b..d0d67f4fe0 100644 --- a/network/p2p/peerstore/utils_test.go +++ b/network/p2p/peerstore/utils_test.go @@ -20,8 +20,9 @@ import ( "fmt" "testing" - "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/test/partitiontest" ) func TestPeerInfoFromAddr(t *testing.T) { diff --git a/network/p2p/pubsub.go b/network/p2p/pubsub.go index 372c9249c8..a968bcb6a9 100644 --- a/network/p2p/pubsub.go +++ b/network/p2p/pubsub.go @@ -53,6 +53,8 @@ const ( // TXTopicName defines a pubsub topic for TX messages const TXTopicName = "/algo/tx/0.1.0" +const incomingThreads = 20 // matches to number wsNetwork workers + func makePubSub(ctx context.Context, cfg config.Local, host host.Host) (*pubsub.PubSub, error) { //defaultParams := pubsub.DefaultGossipSubParams() @@ -93,7 +95,9 @@ func makePubSub(ctx context.Context, cfg config.Local, host host.Host) (*pubsub. 
pubsub.WithSubscriptionFilter(pubsub.WrapLimitSubscriptionFilter(pubsub.NewAllowlistSubscriptionFilter(TXTopicName), 100)), // pubsub.WithEventTracer(jsonTracer), pubsub.WithValidateQueueSize(256), + pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign), // pubsub.WithValidateThrottle(cfg.TxBacklogSize), + pubsub.WithValidateWorkers(incomingThreads), } return pubsub.NewGossipSub(ctx, host, options...) @@ -133,7 +137,7 @@ func (s *serviceImpl) getOrCreateTopic(topicName string) (*pubsub.Topic, error) } // Subscribe returns a subscription to the given topic -func (s *serviceImpl) Subscribe(topic string, val pubsub.ValidatorEx) (*pubsub.Subscription, error) { +func (s *serviceImpl) Subscribe(topic string, val pubsub.ValidatorEx) (SubNextCancellable, error) { if err := s.pubsub.RegisterTopicValidator(topic, val); err != nil { return nil, err } diff --git a/network/p2p/streams.go b/network/p2p/streams.go index 4a7a2d8e01..d16633adfd 100644 --- a/network/p2p/streams.go +++ b/network/p2p/streams.go @@ -73,7 +73,17 @@ func (n *streamManager) streamHandler(stream network.Stream) { n.log.Infof("Failed to check old stream with %s: %v", remotePeer, err) } n.streams[stream.Conn().RemotePeer()] = stream - n.handler(n.ctx, remotePeer, stream, true) + + // streamHandler is supposed to be called for accepted streams, so we expect incoming here + incoming := stream.Conn().Stat().Direction == network.DirInbound + if !incoming { + if stream.Stat().Direction == network.DirUnknown { + n.log.Warnf("Unknown direction for a steam %s to/from %s", stream.ID(), remotePeer) + } else { + n.log.Warnf("Unexpected outgoing stream in streamHandler for connection %s (%s): %s vs %s stream", stream.Conn().ID(), remotePeer, stream.Conn().Stat().Direction, stream.Stat().Direction.String()) + } + } + n.handler(n.ctx, remotePeer, stream, incoming) return } // otherwise, the old stream is still open, so we can close the new one @@ -82,7 +92,30 @@ func (n *streamManager) streamHandler(stream network.Stream) { } // no old stream n.streams[stream.Conn().RemotePeer()] = stream - n.handler(n.ctx, remotePeer, stream, true) + // streamHandler is supposed to be called for accepted streams, so we expect incoming here + incoming := stream.Conn().Stat().Direction == network.DirInbound + if !incoming { + if stream.Stat().Direction == network.DirUnknown { + n.log.Warnf("streamHandler: unknown direction for a steam %s to/from %s", stream.ID(), remotePeer) + } else { + n.log.Warnf("Unexpected outgoing stream in streamHandler for connection %s (%s): %s vs %s stream", stream.Conn().ID(), remotePeer, stream.Conn().Stat().Direction, stream.Stat().Direction.String()) + } + } + n.handler(n.ctx, remotePeer, stream, incoming) +} + +// streamHandlerHTTP tracks the ProtocolIDForMultistreamSelect = "/http/1.1" streams +func (n *streamManager) streamHandlerHTTP(stream network.Stream) { + n.streamsLock.Lock() + defer n.streamsLock.Unlock() + n.streams[stream.Conn().LocalPeer()] = stream +} + +func (n *streamManager) getStream(peerID peer.ID) (network.Stream, bool) { + n.streamsLock.Lock() + defer n.streamsLock.Unlock() + stream, ok := n.streams[peerID] + return stream, ok } // Connected is called when a connection is opened @@ -95,8 +128,13 @@ func (n *streamManager) Connected(net network.Network, conn network.Conn) { return } + needUnlock := true n.streamsLock.Lock() - defer n.streamsLock.Unlock() + defer func() { + if needUnlock { + n.streamsLock.Unlock() + } + }() _, ok := n.streams[remotePeer] if ok { return // there's already an active stream with this 
peer for our protocol @@ -104,12 +142,26 @@ func (n *streamManager) Connected(net network.Network, conn network.Conn) { stream, err := n.host.NewStream(n.ctx, remotePeer, AlgorandWsProtocol) if err != nil { - n.log.Infof("Failed to open stream to %s: %v", remotePeer, err) + n.log.Infof("Failed to open stream to %s (%s): %v", remotePeer, conn.RemoteMultiaddr().String(), err) return } - n.streams[remotePeer] = stream - n.handler(n.ctx, remotePeer, stream, false) + + // release the lock to let handler do its thing + // otherwise reading/writing to the stream will deadlock + needUnlock = false + n.streamsLock.Unlock() + + // a new stream created above, expected direction is outbound + incoming := stream.Conn().Stat().Direction == network.DirInbound + if incoming { + n.log.Warnf("Unexpected incoming stream in streamHandler for connection %s (%s): %s vs %s stream", stream.Conn().ID(), remotePeer, stream.Conn().Stat().Direction, stream.Stat().Direction.String()) + } else { + if stream.Stat().Direction == network.DirUnknown { + n.log.Warnf("Connected: unknown direction for a steam %s to/from %s", stream.ID(), remotePeer) + } + } + n.handler(n.ctx, remotePeer, stream, incoming) } // Disconnected is called when a connection is closed @@ -122,6 +174,12 @@ func (n *streamManager) Disconnected(net network.Network, conn network.Conn) { stream.Close() delete(n.streams, conn.RemotePeer()) } + + stream, ok = n.streams[conn.LocalPeer()] + if ok { + stream.Close() + delete(n.streams, conn.LocalPeer()) + } } // Listen is called when network starts listening on an addr diff --git a/network/p2p/testing/httpNode.go b/network/p2p/testing/httpNode.go new file mode 100644 index 0000000000..523cdc5d4c --- /dev/null +++ b/network/p2p/testing/httpNode.go @@ -0,0 +1,122 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// This package wraps and re-exports the libp2p functions on order to keep +// all go-libp2p imports in one place. + +package p2p + +import ( + "net/http" + "testing" + + "github.com/algorand/go-algorand/components/mocks" + "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/network/p2p" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +// HTTPNode is a mock network node that uses libp2p and http. +type HTTPNode struct { + mocks.MockNetwork + host.Host + httpServer *p2p.HTTPServer + peers []network.Peer + tb testing.TB + genesisID string +} + +// MakeHTTPNode returns a new P2PHTTPNode node. 
+func MakeHTTPNode(tb testing.TB) *HTTPNode { + p2pHost, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) + require.NoError(tb, err) + + return &HTTPNode{ + Host: p2pHost, + httpServer: p2p.MakeHTTPServer(p2pHost), + tb: tb, + } +} + +// RegisterHTTPHandler registers a http handler with a given path. +func (p *HTTPNode) RegisterHTTPHandler(path string, handler http.Handler) { + p.httpServer.RegisterHTTPHandler(path, handler) +} + +// RegisterHandlers not implemented. +func (p *HTTPNode) RegisterHandlers(dispatch []network.TaggedMessageHandler) {} + +// Start starts http service +func (p *HTTPNode) Start() error { + go func() { + err := p.httpServer.Serve() + require.NoError(p.tb, err) + }() + return nil +} + +// Stop stops http service +func (p *HTTPNode) Stop() { + p.httpServer.Close() + p.Host.Close() +} + +// GetHTTPPeer returns the http peer for connecting to this node +func (p *HTTPNode) GetHTTPPeer() network.Peer { + addrInfo := peer.AddrInfo{ID: p.ID(), Addrs: p.Addrs()} + return httpPeer{addrInfo, p.tb} +} + +// GetGenesisID returns genesisID +func (p *HTTPNode) GetGenesisID() string { return p.genesisID } + +// SetGenesisID sets genesisID +func (p *HTTPNode) SetGenesisID(genesisID string) { p.genesisID = genesisID } + +type httpPeer struct { + addrInfo peer.AddrInfo + tb testing.TB +} + +// GetAddress implements HTTPPeer interface returns the address of the peer +func (p httpPeer) GetAddress() string { + mas, err := peer.AddrInfoToP2pAddrs(&p.addrInfo) + require.NoError(p.tb, err) + require.Len(p.tb, mas, 1) + return mas[0].String() +} + +// GetAddress implements HTTPPeer interface and returns the http client for a peer +func (p httpPeer) GetHTTPClient() *http.Client { + c, err := p2p.MakeHTTPClient(&p.addrInfo) + require.NoError(p.tb, err) + return c +} + +// SetPeers sets peers +func (p *HTTPNode) SetPeers(other *HTTPNode) { + addrInfo := peer.AddrInfo{ID: other.ID(), Addrs: other.Addrs()} + hpeer := httpPeer{addrInfo, p.tb} + p.peers = append(p.peers, hpeer) +} + +// GetPeers returns peers +func (p *HTTPNode) GetPeers(options ...network.PeerOption) []network.Peer { + return p.peers +} diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 6301b1b521..7ebbb5a665 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -18,6 +18,7 @@ package network import ( "context" + "math/rand" "net" "net/http" "strings" @@ -27,14 +28,20 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/logging/telemetryspec" + "github.com/algorand/go-algorand/network/limitcaller" "github.com/algorand/go-algorand/network/p2p" + "github.com/algorand/go-algorand/network/p2p/dnsaddr" "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-deadlock" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/discovery/backoff" + "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) @@ -59,17 +66,156 @@ type P2PNetwork struct { handler msgHandler broadcaster msgBroadcaster wsPeers map[peer.ID]*wsPeer + wsPeersToIDs map[*wsPeer]peer.ID wsPeersLock deadlock.RWMutex wsPeersChangeCounter atomic.Int32 wsPeersConnectivityCheckTicker *time.Ticker + peerStater peerConnectionStater + + 
relayMessages bool // True if we should relay messages from other nodes (nominally true for relays, false otherwise) + wantTXGossip atomic.Bool + + capabilitiesDiscovery *p2p.CapabilitiesDiscovery + + bootstrapperStart func() + bootstrapperStop func() + nodeInfo NodeInfo + pstore *peerstore.PeerStore + httpServer *p2p.HTTPServer +} + +type bootstrapper struct { + cfg config.Local + networkID protocol.NetworkID + phonebookPeers []*peer.AddrInfo + resolveController dnsaddr.ResolveController + started atomic.Bool + log logging.Logger +} + +func (b *bootstrapper) start() { + b.started.Store(true) +} + +func (b *bootstrapper) stop() { + b.started.Store(false) +} + +func (b *bootstrapper) BootstrapFunc() []peer.AddrInfo { + // not started yet, do not give it any peers + if !b.started.Load() { + return nil + } + + // have a list of peers, use them + if len(b.phonebookPeers) > 0 { + var addrs []peer.AddrInfo + for _, bPeer := range b.phonebookPeers { + if bPeer != nil { + addrs = append(addrs, *bPeer) + } + } + return addrs + } + + return dnsLookupBootstrapPeers(b.log, b.cfg, b.networkID, b.resolveController) +} + +// dnsLookupBootstrapPeers looks up a list of Multiaddrs strings from the dnsaddr records at the primary +// SRV record domain. +func dnsLookupBootstrapPeers(log logging.Logger, cfg config.Local, network protocol.NetworkID, controller dnsaddr.ResolveController) []peer.AddrInfo { + var addrs []peer.AddrInfo + bootstraps := cfg.DNSBootstrapArray(network) + for _, dnsBootstrap := range bootstraps { + var resolvedAddrs, resolvedAddrsBackup []multiaddr.Multiaddr + var errPrim, errBackup error + resolvedAddrs, errPrim = dnsaddr.MultiaddrsFromResolver(dnsBootstrap.PrimarySRVBootstrap, controller) + if errPrim != nil { + log.Infof("Failed to resolve bootstrap peers from %s: %v", dnsBootstrap.PrimarySRVBootstrap, errPrim) + } + if dnsBootstrap.BackupSRVBootstrap != "" { + resolvedAddrsBackup, errBackup = dnsaddr.MultiaddrsFromResolver(dnsBootstrap.BackupSRVBootstrap, controller) + if errBackup != nil { + log.Infof("Failed to resolve bootstrap peers from %s: %v", dnsBootstrap.BackupSRVBootstrap, errBackup) + } + } + + if len(resolvedAddrs) > 0 || len(resolvedAddrsBackup) > 0 { + resolvedAddrInfos := mergeP2PMultiaddrResolvedAddresses(resolvedAddrs, resolvedAddrsBackup) + addrs = append(addrs, resolvedAddrInfos...) + } + } + return addrs +} + +func mergeP2PMultiaddrResolvedAddresses(primary, backup []multiaddr.Multiaddr) []peer.AddrInfo { + // deduplicate addresses by PeerID + unique := make(map[peer.ID]*peer.AddrInfo) + for _, addr := range primary { + info, err0 := peer.AddrInfoFromP2pAddr(addr) + if err0 != nil { + continue + } + unique[info.ID] = info + } + for _, addr := range backup { + info, err0 := peer.AddrInfoFromP2pAddr(addr) + if err0 != nil { + continue + } + unique[info.ID] = info + } + var result []peer.AddrInfo + for _, addr := range unique { + result = append(result, *addr) + } + return result +} + +func mergeP2PAddrInfoResolvedAddresses(primary, backup []peer.AddrInfo) []peer.AddrInfo { + // deduplicate addresses by PeerID + unique := make(map[peer.ID]peer.AddrInfo) + for _, addr := range primary { + unique[addr.ID] = addr + } + for _, addr := range backup { + unique[addr.ID] = addr + } + var result []peer.AddrInfo + for _, addr := range unique { + result = append(result, addr) + } + return result } type p2pPeerStats struct { txReceived atomic.Uint64 } +// gossipSubPeer implements the DeadlineSettableConn, IPAddressable, and ErlClient interfaces. 
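The merge helpers above collapse primary and backup bootstrap results into a single list keyed by peer ID. The snippet below is a small self-contained illustration of that dedup step using go-libp2p's peer and multiaddr packages; the two addresses are invented and deliberately share one peer ID.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Two resolved bootstrap addresses that happen to point at the same peer.
	addrs := []string{
		"/ip4/192.168.1.1/tcp/4001/p2p/Qmewz5ZHN1AAGTarRbMupNPbZRfg3p5jUGoJ3JYEatJVVk",
		"/dns4/relay.example.com/tcp/4001/p2p/Qmewz5ZHN1AAGTarRbMupNPbZRfg3p5jUGoJ3JYEatJVVk",
	}

	// Deduplicate by peer ID; the last occurrence wins, as in the merge helpers above.
	unique := make(map[peer.ID]*peer.AddrInfo)
	for _, s := range addrs {
		m, err := ma.NewMultiaddr(s)
		if err != nil {
			panic(err)
		}
		info, err := peer.AddrInfoFromP2pAddr(m)
		if err != nil {
			panic(err)
		}
		unique[info.ID] = info
	}

	fmt.Println("unique peers:", len(unique)) // prints 1
}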
+type gossipSubPeer struct { + peerID peer.ID + net GossipNode + routingAddr [8]byte +} + +func (p gossipSubPeer) GetNetwork() GossipNode { return p.net } + +func (p gossipSubPeer) OnClose(f func()) { + net := p.GetNetwork().(*P2PNetwork) + net.wsPeersLock.Lock() + defer net.wsPeersLock.Unlock() + if wsp, ok := net.wsPeers[p.peerID]; ok { + wsp.OnClose(f) + } +} + +func (p gossipSubPeer) RoutingAddr() []byte { + return p.routingAddr[:] +} + // NewP2PNetwork returns an instance of GossipNode that uses the p2p.Service -func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (*P2PNetwork, error) { +func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, node NodeInfo) (*P2PNetwork, error) { const readBufferLen = 2048 // create Peerstore and add phonebook addresses @@ -77,20 +223,31 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo for malAddr, malErr := range malformedAddrs { log.Infof("Ignoring malformed phonebook address %s: %s", malAddr, malErr) } - pstore, err := peerstore.NewPeerStore(addrInfo) + pstore, err := peerstore.NewPeerStore(addrInfo, string(networkID)) if err != nil { return nil, err } + relayMessages := cfg.IsGossipServer() || cfg.ForceRelayMessages net := &P2PNetwork{ - log: log, - config: cfg, - genesisID: genesisID, - networkID: networkID, - topicTags: map[protocol.Tag]string{"TX": p2p.TXTopicName}, - wsPeers: make(map[peer.ID]*wsPeer), - peerStats: make(map[peer.ID]*p2pPeerStats), + log: log, + config: cfg, + genesisID: genesisID, + networkID: networkID, + topicTags: map[protocol.Tag]string{protocol.TxnTag: p2p.TXTopicName}, + wsPeers: make(map[peer.ID]*wsPeer), + wsPeersToIDs: make(map[*wsPeer]peer.ID), + peerStats: make(map[peer.ID]*p2pPeerStats), + nodeInfo: node, + pstore: pstore, + relayMessages: relayMessages, + peerStater: peerConnectionStater{ + log: log, + peerConnectionsUpdateInterval: time.Duration(cfg.PeerConnectionsUpdateInterval) * time.Second, + lastPeerConnectionsSent: time.Now(), + }, } + net.ctx, net.ctxCancel = context.WithCancel(context.Background()) net.handler = msgHandler{ ctx: net.ctx, @@ -106,10 +263,39 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo broadcastQueueBulk: make(chan broadcastRequest, 100), } - net.service, err = p2p.MakeService(net.ctx, log, cfg, datadir, pstore, net.wsStreamHandler) + p2p.EnableP2PLogging(log, logging.Level(cfg.BaseLoggerDebugLevel)) + + h, la, err := p2p.MakeHost(cfg, datadir, pstore) if err != nil { return nil, err } + log.Infof("P2P host created: peer ID %s addrs %s", h.ID(), h.Addrs()) + + net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, net.wsStreamHandler, addrInfo) + if err != nil { + return nil, err + } + + bootstrapper := &bootstrapper{ + cfg: cfg, + networkID: networkID, + phonebookPeers: addrInfo, + resolveController: dnsaddr.NewMultiaddrDNSResolveController(cfg.DNSSecurityTXTEnforced(), ""), + log: net.log, + } + net.bootstrapperStart = bootstrapper.start + net.bootstrapperStop = bootstrapper.stop + + if cfg.EnableDHTProviders { + disc, err0 := p2p.MakeCapabilitiesDiscovery(net.ctx, cfg, h, networkID, net.log, bootstrapper.BootstrapFunc) + if err0 != nil { + log.Errorf("Failed to create dht node capabilities discovery: %v", err) + return nil, err + } + net.capabilitiesDiscovery = disc + } + + net.httpServer = p2p.MakeHTTPServer(h) err = 
net.setup() if err != nil { @@ -126,10 +312,30 @@ func (n *P2PNetwork) setup() error { return nil } +// PeerID returns this node's peer ID. +func (n *P2PNetwork) PeerID() p2p.PeerID { + return p2p.PeerID(n.service.ID()) +} + +// PeerIDSigner returns an identityChallengeSigner that uses the libp2p peer ID's private key. +func (n *P2PNetwork) PeerIDSigner() identityChallengeSigner { + return n.service.IDSigner() +} + // Start threads, listen on sockets. -func (n *P2PNetwork) Start() { - n.wg.Add(1) - go n.txTopicHandleLoop() +func (n *P2PNetwork) Start() error { + n.bootstrapperStart() + err := n.service.Start() + if err != nil { + return err + } + + wantTXGossip := n.relayMessages || n.config.ForceFetchTransactions || n.nodeInfo.IsParticipating() + if wantTXGossip { + n.wantTXGossip.Store(true) + n.wg.Add(1) + go n.txTopicHandleLoop() + } if n.wsPeersConnectivityCheckTicker != nil { n.wsPeersConnectivityCheckTicker.Stop() @@ -141,16 +347,31 @@ func (n *P2PNetwork) Start() { go n.handler.messageHandlerThread(&n.wg, n.wsPeersConnectivityCheckTicker.C, n, "network", "P2PNetwork") } + n.wg.Add(1) + go n.httpdThread() + n.wg.Add(1) go n.broadcaster.broadcastThread(&n.wg, n, "network", "P2PNetwork") - n.service.DialPeersUntilTargetCount(n.config.GossipFanout) n.wg.Add(1) go n.meshThread() + + if n.capabilitiesDiscovery != nil { + n.capabilitiesDiscovery.AdvertiseCapabilities(n.nodeInfo.Capabilities()...) + } + + return nil } // Stop closes sockets and stop threads. func (n *P2PNetwork) Stop() { + if n.capabilitiesDiscovery != nil { + err := n.capabilitiesDiscovery.Close() + if err != nil { + n.log.Warnf("Error closing capabilities discovery: %v", err) + } + } + n.handler.ClearHandlers([]Tag{}) if n.wsPeersConnectivityCheckTicker != nil { n.wsPeersConnectivityCheckTicker.Stop() @@ -159,6 +380,8 @@ func (n *P2PNetwork) Stop() { n.innerStop() n.ctxCancel() n.service.Close() + n.bootstrapperStop() + n.httpServer.Close() n.wg.Wait() } @@ -176,22 +399,83 @@ func (n *P2PNetwork) innerStop() { n.log.Warnf("Error closing peer %s: %v", peerID, err) } delete(n.wsPeers, peerID) + delete(n.wsPeersToIDs, peer) } n.wsPeersLock.Unlock() closeGroup.Wait() } +// meshThreadInner fetches nodes from DHT and attempts to connect to them +func (n *P2PNetwork) meshThreadInner() int { + defer n.service.DialPeersUntilTargetCount(n.config.GossipFanout) + + // fetch peers from DNS + var dnsPeers, dhtPeers []peer.AddrInfo + dnsPeers = dnsLookupBootstrapPeers(n.log, n.config, n.networkID, dnsaddr.NewMultiaddrDNSResolveController(n.config.DNSSecurityTXTEnforced(), "")) + + // discover peers from DHT + if n.capabilitiesDiscovery != nil { + var err error + dhtPeers, err = n.capabilitiesDiscovery.PeersForCapability(p2p.Gossip, n.config.GossipFanout) + if err != nil { + n.log.Warnf("Error getting relay nodes from capabilities discovery: %v", err) + } + n.log.Debugf("Discovered %d gossip peers from DHT", len(dhtPeers)) + } + + peers := mergeP2PAddrInfoResolvedAddresses(dnsPeers, dhtPeers) + replace := make([]interface{}, 0, len(peers)) + for i := range peers { + replace = append(replace, &peers[i]) + } + if len(peers) > 0 { + n.pstore.ReplacePeerList(replace, string(n.networkID), phonebook.PhoneBookEntryRelayRole) + } + return len(peers) +} + func (n *P2PNetwork) meshThread() { defer n.wg.Done() - timer := time.NewTicker(meshThreadInterval) + + timer := time.NewTicker(1) // start immediately and reset after + + // Add exponential backoff with jitter to the mesh thread to handle new networks startup + // when no DNS or DHT peers are 
available. + // The parameters produce approximate the following delays (although they are random but the sequence give the idea): + // 2 2.4 4.6 9 20 19.5 28 24 14 14 35 60 60 + ebf := backoff.NewExponentialDecorrelatedJitter(2*time.Second, meshThreadInterval, 3.0, rand.NewSource(rand.Int63())) + eb := ebf() + defer timer.Stop() for { select { case <-timer.C: - n.service.DialPeersUntilTargetCount(n.config.GossipFanout) + numPeers := n.meshThreadInner() + if numPeers > 0 { + // found something, reset timer to the default value + timer.Reset(meshThreadInterval) + eb.Reset() + } else { + // no peers found, backoff + timer.Reset(eb.Delay()) + } case <-n.ctx.Done(): return } + + // send the currently connected peers information to the + // telemetry server; that would allow the telemetry server + // to construct a cross-node map of all the nodes interconnections. + n.peerStater.sendPeerConnectionsTelemetryStatus(n) + } +} + +func (n *P2PNetwork) httpdThread() { + defer n.wg.Done() + err := n.httpServer.Serve() + if err != nil { + n.log.Errorf("Error serving libp2phttp: %v", err) + return } } @@ -240,31 +524,42 @@ func (n *P2PNetwork) Broadcast(ctx context.Context, tag protocol.Tag, data []byt // Relay message func (n *P2PNetwork) Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error { - return n.Broadcast(ctx, tag, data, wait, except) + if n.relayMessages { + return n.Broadcast(ctx, tag, data, wait, except) + } + return nil } // Disconnect from a peer, probably due to protocol errors. -func (n *P2PNetwork) Disconnect(badnode Peer) { - node, ok := badnode.(peer.ID) - if !ok { - n.log.Warnf("Unknown peer type %T", badnode) - return - } +func (n *P2PNetwork) Disconnect(badpeer DisconnectablePeer) { + var peerID peer.ID + var wsp *wsPeer + n.wsPeersLock.Lock() defer n.wsPeersLock.Unlock() - if wsPeer, ok := n.wsPeers[node]; ok { - wsPeer.CloseAndWait(time.Now().Add(peerDisconnectionAckDuration)) - delete(n.wsPeers, node) + switch p := badpeer.(type) { + case gossipSubPeer: // Disconnect came from a message received via GossipSub + peerID, wsp = p.peerID, n.wsPeers[p.peerID] + case *wsPeer: // Disconnect came from a message received via wsPeer + peerID, wsp = n.wsPeersToIDs[p], p + default: + n.log.Warnf("Unknown peer type %T", badpeer) + return + } + if wsp != nil { + wsp.CloseAndWait(time.Now().Add(peerDisconnectionAckDuration)) + delete(n.wsPeers, peerID) + delete(n.wsPeersToIDs, wsp) } else { - n.log.Warnf("Could not find wsPeer reference for peer %s", node) + n.log.Warnf("Could not find wsPeer reference for peer %s", peerID) } - err := n.service.ClosePeer(node) + err := n.service.ClosePeer(peerID) if err != nil { - n.log.Warnf("Error disconnecting from peer %s: %v", node, err) + n.log.Warnf("Error disconnecting from peer %s: %v", peerID, err) } } -func (n *P2PNetwork) disconnectThread(badnode Peer, reason disconnectReason) { +func (n *P2PNetwork) disconnectThread(badnode DisconnectablePeer, reason disconnectReason) { defer n.wg.Done() n.Disconnect(badnode) // ignores reason } @@ -278,23 +573,106 @@ func (n *P2PNetwork) DisconnectPeers() { // RegisterHTTPHandler path accepts gorilla/mux path annotations func (n *P2PNetwork) RegisterHTTPHandler(path string, handler http.Handler) { + n.httpServer.RegisterHTTPHandler(path, handler) } // RequestConnectOutgoing asks the system to actually connect to peers. // `replace` optionally drops existing connections before making new ones. // `quit` chan allows cancellation. 
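The mesh thread above resets its ticker with a decorrelated-jitter backoff when neither DNS nor DHT returns peers. The following is a standalone sketch of that backoff shape (each delay drawn between the base and three times the previous delay, capped at a maximum); it is not the go-libp2p backoff package, and the 2s/60s values are only the ones mentioned in the comment above.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// decorrelatedJitter mimics the delay sequence described above: each delay is a random
// value between the base delay and three times the previous delay, capped at maxDelay.
type decorrelatedJitter struct {
	base, maxDelay, prev time.Duration
	rng                  *rand.Rand
}

func (d *decorrelatedJitter) delay() time.Duration {
	if d.prev < d.base {
		d.prev = d.base
	}
	next := d.base + time.Duration(d.rng.Int63n(int64(3*d.prev-d.base)+1))
	if next > d.maxDelay {
		next = d.maxDelay
	}
	d.prev = next
	return next
}

func main() {
	d := &decorrelatedJitter{base: 2 * time.Second, maxDelay: 60 * time.Second, rng: rand.New(rand.NewSource(1))}
	for i := 0; i < 10; i++ {
		fmt.Println(d.delay().Round(100 * time.Millisecond))
	}
}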
func (n *P2PNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) { + n.meshThreadInner() +} + +func addrInfoToWsPeerCore(n *P2PNetwork, addrInfo *peer.AddrInfo) (wsPeerCore, bool) { + mas, err := peer.AddrInfoToP2pAddrs(addrInfo) + if err != nil { + n.log.Warnf("Archival AddrInfo conversion error: %v", err) + return wsPeerCore{}, false + } + if len(mas) == 0 { + n.log.Warnf("Archival AddrInfo: empty multiaddr for : %v", addrInfo) + return wsPeerCore{}, false + } + addr := mas[0].String() + + maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) + client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) + if err != nil { + n.log.Warnf("MakeHTTPClient failed: %v", err) + return wsPeerCore{}, false + } + + peerCore := makePeerCore( + n.ctx, n, n.log, n.handler.readBuffer, + addr, client, "", /*origin address*/ + ) + return peerCore, true } // GetPeers returns a list of Peers we could potentially send a direct message to. func (n *P2PNetwork) GetPeers(options ...PeerOption) []Peer { - // currently returns same list of peers for all PeerOption filters. peers := make([]Peer, 0) - n.wsPeersLock.RLock() - for _, peer := range n.wsPeers { - peers = append(peers, Peer(peer)) + for _, option := range options { + switch option { + case PeersConnectedOut: + n.wsPeersLock.RLock() + for _, peer := range n.wsPeers { + if peer.outgoing { + peers = append(peers, Peer(peer)) + } + } + n.wsPeersLock.RUnlock() + case PeersPhonebookRelays: + const maxNodes = 100 + peerIDs := n.pstore.GetAddresses(maxNodes, phonebook.PhoneBookEntryRelayRole) + for _, peerInfo := range peerIDs { + peerInfo := peerInfo.(*peer.AddrInfo) + if peerCore, ok := addrInfoToWsPeerCore(n, peerInfo); ok { + peers = append(peers, &peerCore) + } + } + if n.log.GetLevel() >= logging.Debug && len(peers) > 0 { + addrs := make([]string, 0, len(peers)) + for _, peer := range peers { + addrs = append(addrs, peer.(*wsPeerCore).GetAddress()) + } + n.log.Debugf("Relay node(s) from peerstore: %v", addrs) + } + case PeersPhonebookArchivalNodes: + // query known archival nodes from DHT if enabled + if n.config.EnableDHTProviders { + const nodesToFind = 5 + infos, err := n.capabilitiesDiscovery.PeersForCapability(p2p.Archival, nodesToFind) + if err != nil { + n.log.Warnf("Error getting archival nodes from capabilities discovery: %v", err) + return peers + } + n.log.Debugf("Got %d archival node(s) from DHT", len(infos)) + for _, addrInfo := range infos { + // TODO: remove after go1.22 + info := addrInfo + if peerCore, ok := addrInfoToWsPeerCore(n, &info); ok { + peers = append(peers, &peerCore) + } + } + if n.log.GetLevel() >= logging.Debug && len(peers) > 0 { + addrs := make([]string, 0, len(peers)) + for _, peer := range peers { + addrs = append(addrs, peer.(*wsPeerCore).GetAddress()) + } + n.log.Debugf("Archival node(s) from DHT: %v", addrs) + } + } + case PeersConnectedIn: + n.wsPeersLock.RLock() + for _, peer := range n.wsPeers { + if !peer.outgoing { + peers = append(peers, Peer(peer)) + } + } + n.wsPeersLock.RUnlock() + } } - n.wsPeersLock.RUnlock() return peers } @@ -308,31 +686,67 @@ func (n *P2PNetwork) ClearHandlers() { n.handler.ClearHandlers([]Tag{}) } -// GetRoundTripper returns a Transport that would limit the number of outgoing connections. -func (n *P2PNetwork) GetRoundTripper() http.RoundTripper { - return http.DefaultTransport +// RegisterProcessors adds to the set of given message handlers. 
+func (n *P2PNetwork) RegisterProcessors(dispatch []TaggedMessageProcessor) { + n.handler.RegisterProcessors(dispatch) +} + +// ClearProcessors deregisters all the existing message handlers. +func (n *P2PNetwork) ClearProcessors() { + n.handler.ClearProcessors([]Tag{}) +} + +// GetHTTPClient returns a http.Client with a suitable for the network Transport +// that would also limit the number of outgoing connections. +func (n *P2PNetwork) GetHTTPClient(address string) (*http.Client, error) { + addrInfo, err := peer.AddrInfoFromString(address) + if err != nil { + return nil, err + } + maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) + return p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) } // OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress. // this is the only indication that we have that we haven't formed a clique, where all incoming messages // arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar // characteristics as with a watchdog timer. -func (n *P2PNetwork) OnNetworkAdvance() {} +func (n *P2PNetwork) OnNetworkAdvance() { + if n.nodeInfo != nil { + old := n.wantTXGossip.Load() + new := n.relayMessages || n.config.ForceFetchTransactions || n.nodeInfo.IsParticipating() + if old != new { + n.wantTXGossip.Store(new) + if new { + n.wg.Add(1) + go n.txTopicHandleLoop() + } + } + } +} // GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same // request that was provided to the http handler ( or provide a fallback Context() to that ) -func (n *P2PNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) { return nil } - -// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. -func (n *P2PNetwork) SubstituteGenesisID(rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", n.genesisID, -1) +func (n *P2PNetwork) GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) { + addr := request.Context().Value(http.LocalAddrContextKey).(net.Addr) + peerID, err := peer.Decode(addr.String()) + if err != nil { + n.log.Infof("GetHTTPRequestConnection failed to decode %s", addr.String()) + return nil + } + conn, ok := n.service.GetStream(peerID) + if !ok { + n.log.Warnf("GetHTTPRequestConnection no such stream for peer %s", peerID.String()) + return nil + } + return conn } // wsStreamHandler is a callback that the p2p package calls when a new peer connects and establishes a // stream for the websocket protocol. 
-func (n *P2PNetwork) wsStreamHandler(ctx context.Context, peer peer.ID, stream network.Stream, incoming bool) { +func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, stream network.Stream, incoming bool) { if stream.Protocol() != p2p.AlgorandWsProtocol { - n.log.Warnf("unknown protocol %s", stream.Protocol()) + n.log.Warnf("unknown protocol %s from peer%s", stream.Protocol(), p2pPeer) return } @@ -340,7 +754,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, peer peer.ID, stream n var initMsg [1]byte rn, err := stream.Read(initMsg[:]) if rn == 0 || err != nil { - n.log.Warnf("wsStreamHandler: error reading initial message: %s", err) + n.log.Warnf("wsStreamHandler: error reading initial message: %s, peer %s (%s)", err, p2pPeer, stream.Conn().RemoteMultiaddr().String()) return } } else { @@ -352,21 +766,63 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, peer peer.ID, stream n } // get address for peer ID - addr := stream.Conn().RemoteMultiaddr().String() + ma := stream.Conn().RemoteMultiaddr() + addr := ma.String() if addr == "" { - n.log.Warnf("Could not get address for peer %s", peer) + n.log.Warnf("Could not get address for peer %s", p2pPeer) } // create a wsPeer for this stream and added it to the peers map. + + addrInfo := &peer.AddrInfo{ID: p2pPeer, Addrs: []multiaddr.Multiaddr{ma}} + maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) + client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) + if err != nil { + client = nil + } + peerCore := makePeerCore(ctx, n, n.log, n.handler.readBuffer, addr, client, addr) wsp := &wsPeer{ - wsPeerCore: makePeerCore(ctx, n, n.log, n.handler.readBuffer, addr, n.GetRoundTripper(), addr), + wsPeerCore: peerCore, conn: &wsPeerConnP2PImpl{stream: stream}, outgoing: !incoming, } + protos, err := n.pstore.GetProtocols(p2pPeer) + if err != nil { + n.log.Warnf("Error getting protocols for peer %s: %v", p2pPeer, err) + } + wsp.TelemetryGUID, wsp.InstanceName = p2p.GetPeerTelemetryInfo(protos) + wsp.init(n.config, outgoingMessagesBufferSize) n.wsPeersLock.Lock() - n.wsPeers[peer] = wsp + n.wsPeers[p2pPeer] = wsp + n.wsPeersToIDs[wsp] = p2pPeer n.wsPeersLock.Unlock() n.wsPeersChangeCounter.Add(1) + + event := "ConnectedOut" + msg := "Made outgoing connection to peer %s" + if incoming { + event = "ConnectedIn" + msg = "Accepted incoming connection from peer %s" + } + localAddr, has := n.Address() + if !has { + n.log.Warn("Could not get local address") + } + n.log.With("event", event).With("remote", addr).With("local", localAddr).Infof(msg, p2pPeer.String()) + + if n.log.GetLevel() >= logging.Debug { + n.log.Debugf("streams for %s conn %s ", stream.Conn().Stat().Direction.String(), stream.Conn().ID()) + for _, s := range stream.Conn().GetStreams() { + n.log.Debugf("%s stream %s protocol %s", s.Stat().Direction.String(), s.ID(), s.Protocol()) + } + } + n.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent, + telemetryspec.PeerEventDetails{ + Address: addr, + TelemetryGUID: wsp.TelemetryGUID, + Incoming: incoming, + InstanceName: wsp.InstanceName, + }) } // peerRemoteClose called from wsPeer to report that it has closed @@ -374,8 +830,30 @@ func (n *P2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) { remotePeerID := peer.conn.(*wsPeerConnP2PImpl).stream.Conn().RemotePeer() n.wsPeersLock.Lock() delete(n.wsPeers, remotePeerID) + delete(n.wsPeersToIDs, peer) n.wsPeersLock.Unlock() 
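peerRemoteClose above, together with the Disconnect path earlier in this file, now has to keep two maps in sync: wsPeers (peer ID to wsPeer) and wsPeersToIDs (wsPeer back to peer ID). Below is a tiny standalone sketch of keeping such a forward/reverse pair consistent under one lock; peerIndex and conn are invented names, not go-algorand types.

package main

import (
	"fmt"
	"sync"
)

type conn struct{ name string }

// peerIndex holds a forward map from peer ID to connection and a reverse map from
// connection to peer ID, updated together under a single lock.
type peerIndex struct {
	mu     sync.RWMutex
	byID   map[string]*conn
	byConn map[*conn]string
}

func newPeerIndex() *peerIndex {
	return &peerIndex{byID: make(map[string]*conn), byConn: make(map[*conn]string)}
}

func (p *peerIndex) add(id string, c *conn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.byID[id] = c
	p.byConn[c] = id
}

// removeByEither deletes the pair from whichever side the caller knows about.
func (p *peerIndex) removeByEither(id string, c *conn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c == nil {
		c = p.byID[id]
	}
	if c != nil {
		if knownID, ok := p.byConn[c]; ok {
			id = knownID
		}
		delete(p.byConn, c)
	}
	delete(p.byID, id)
}

func main() {
	idx := newPeerIndex()
	c := &conn{name: "netB"}
	idx.add("peer-1", c)
	idx.removeByEither("", c) // works from the reverse direction too
	fmt.Println(len(idx.byID), len(idx.byConn)) // prints 0 0
}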
 	n.wsPeersChangeCounter.Add(1)
+
+	eventDetails := telemetryspec.PeerEventDetails{
+		Address:       peer.GetAddress(), // p2p peers store p2p addresses
+		TelemetryGUID: peer.TelemetryGUID,
+		InstanceName:  peer.InstanceName,
+		Incoming:      !peer.outgoing,
+	}
+	if peer.outgoing {
+		eventDetails.Endpoint = peer.GetAddress()
+		eventDetails.MessageDelay = peer.peerMessageDelay
+	}
+
+	n.log.EventWithDetails(telemetryspec.Network, telemetryspec.DisconnectPeerEvent,
+		telemetryspec.DisconnectPeerEventDetails{
+			PeerEventDetails: eventDetails,
+			Reason:           string(reason),
+			TXCount:          peer.txMessageCount.Load(),
+			MICount:          peer.miMessageCount.Load(),
+			AVCount:          peer.avMessageCount.Load(),
+			PPCount:          peer.ppMessageCount.Load(),
+		})
 }
 
 func (n *P2PNetwork) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
@@ -417,6 +895,7 @@ func (n *P2PNetwork) txTopicHandleLoop() {
 		n.log.Errorf("Failed to subscribe to topic %s: %v", p2p.TXTopicName, err)
 		return
 	}
+	n.log.Debugf("Subscribed to topic %s", p2p.TXTopicName)
 
 	for {
 		msg, err := sub.Next(n.ctx)
@@ -424,21 +903,40 @@ func (n *P2PNetwork) txTopicHandleLoop() {
 			if err != pubsub.ErrSubscriptionCancelled && err != context.Canceled {
 				n.log.Errorf("Error reading from subscription %v, peerId %s", err, n.service.ID())
 			}
+			n.log.Debugf("Cancelling subscription to topic %s due to Subscription.Next error: %v", p2p.TXTopicName, err)
 			sub.Cancel()
 			return
 		}
+		// if the message is self-sent, there is no need to process it.
+		if msg.ReceivedFrom == n.service.ID() {
+			continue
+		}
+
+		_ = n.handler.Process(msg.ValidatorData.(ValidatedMessage))
 
-		// discard TX message.
-		// from gossipsub's point of view, it's just waiting to hear back from the validator,
-		// and txHandler does all its work in the validator, so we don't need to do anything here
-		_ = msg
+		// participation or configuration change, cancel subscription and quit
+		if !n.wantTXGossip.Load() {
+			n.log.Debugf("Cancelling subscription to topic %s due to participation change", p2p.TXTopicName)
+			sub.Cancel()
+			return
+		}
 	}
 }
 
 // txTopicValidator calls txHandler to validate and process incoming transactions. 
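txTopicHandleLoop above and txTopicValidator below split work the way go-libp2p-pubsub expects: the validator decides accept/ignore/reject and stashes its parsed result in msg.ValidatorData, while the subscription loop only consumes already-validated messages and skips self-published ones. The following is a self-contained sketch of that wiring with an invented topic name and payload; it is not the go-algorand handler plumbing.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
	if err != nil {
		panic(err)
	}
	defer h.Close()

	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}

	const topicName = "/example/tx/0.1.0"

	// The validator decides the fate of the message and stores the decoded form in
	// ValidatorData so the consumer does not have to parse it a second time.
	validator := func(ctx context.Context, from peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
		if len(msg.Data) == 0 {
			return pubsub.ValidationIgnore
		}
		msg.ValidatorData = string(msg.Data) // stand-in for a real parsed payload
		return pubsub.ValidationAccept
	}
	if err := ps.RegisterTopicValidator(topicName, validator); err != nil {
		panic(err)
	}

	topic, err := ps.Join(topicName)
	if err != nil {
		panic(err)
	}
	sub, err := topic.Subscribe()
	if err != nil {
		panic(err)
	}
	defer sub.Cancel()

	go func() {
		for {
			msg, err := sub.Next(ctx)
			if err != nil {
				return // subscription cancelled or context done
			}
			if msg.ReceivedFrom == h.ID() {
				continue // self-published, nothing to do
			}
			fmt.Println("handling validated message:", msg.ValidatorData)
		}
	}()

	// With no other peers connected this only demonstrates the wiring.
	if err := topic.Publish(ctx, []byte("hello")); err != nil {
		panic(err)
	}
	time.Sleep(200 * time.Millisecond)
}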
func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg *pubsub.Message) pubsub.ValidationResult { + var routingAddr [8]byte + n.wsPeersLock.Lock() + if wsp, ok := n.wsPeers[peerID]; ok { + copy(routingAddr[:], wsp.RoutingAddr()) + } else { + // well, otherwise use last 8 bytes of peerID + copy(routingAddr[:], peerID[len(peerID)-8:]) + } + n.wsPeersLock.Unlock() + inmsg := IncomingMessage{ - Sender: msg.ReceivedFrom, + Sender: gossipSubPeer{peerID: msg.ReceivedFrom, net: n, routingAddr: routingAddr}, Tag: protocol.TxnTag, Data: msg.Data, Net: n, @@ -446,7 +944,7 @@ func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg * } // if we sent the message, don't validate it - if inmsg.Sender == n.service.ID() { + if msg.ReceivedFrom == n.service.ID() { return pubsub.ValidationAccept } @@ -459,14 +957,15 @@ func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg * peerStats.txReceived.Add(1) n.peerStatsMu.Unlock() - outmsg := n.handler.Handle(inmsg) + outmsg := n.handler.Validate(inmsg) // there was a decision made in the handler about this message switch outmsg.Action { case Ignore: return pubsub.ValidationIgnore case Disconnect: return pubsub.ValidationReject - case Broadcast: // TxHandler.processIncomingTxn does not currently return this Action + case Accept: + msg.ValidatorData = outmsg return pubsub.ValidationAccept default: n.log.Warnf("handler returned invalid action %d", outmsg.Action) diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index c67ef5b1fc..5bd582ead0 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -18,49 +18,64 @@ package network import ( "context" + "errors" "fmt" + "io" + "net/http" + "sync" "sync/atomic" "testing" "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/limitcaller" "github.com/algorand/go-algorand/network/p2p" + "github.com/algorand/go-algorand/network/p2p/dnsaddr" + "github.com/algorand/go-algorand/network/p2p/peerstore" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" + "github.com/algorand/go-algorand/util" pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" ) +func (n *P2PNetwork) hasPeers() bool { + n.wsPeersLock.RLock() + defer n.wsPeersLock.RUnlock() + return len(n.wsPeers) > 0 +} + func TestP2PSubmitTX(t *testing.T) { partitiontest.PartitionTest(t) cfg := config.GetDefaultLocal() + cfg.ForceFetchTransactions = true log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) - peerInfoA := netA.service.AddrInfo() + netA.Start() + defer netA.Stop() - addrsA, err := peerstore.AddrInfoToP2pAddrs(&peerInfoA) + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) require.NoError(t, err) require.NotZero(t, addrsA[0]) - netA.Start() - defer netA.Stop() multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := 
NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) netB.Start() defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet) - + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) netC.Start() defer netC.Stop() @@ -75,19 +90,36 @@ func TestP2PSubmitTX(t *testing.T) { 2*time.Second, 50*time.Millisecond, ) + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() + }, 2*time.Second, 50*time.Millisecond) + + // for some reason the above check is not enough in race builds on CI time.Sleep(time.Second) // give time for peers to connect. + // now we should be connected in a line: B <-> A <-> C where both B and C are connected to A but not each other // Since we aren't using the transaction handler in this test, we need to register a pass-through handler - passThroughHandler := []TaggedMessageHandler{ - {Tag: protocol.TxnTag, MessageHandler: HandlerFunc(func(msg IncomingMessage) OutgoingMessage { - return OutgoingMessage{Action: Broadcast} - })}, + passThroughHandler := []TaggedMessageProcessor{ + { + Tag: protocol.TxnTag, + MessageHandler: struct { + ProcessorValidateFunc + ProcessorHandleFunc + }{ + ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { + return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: nil} + }), + ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { + return OutgoingMessage{Action: Ignore} + }), + }, + }, } - netA.RegisterHandlers(passThroughHandler) - netB.RegisterHandlers(passThroughHandler) - netC.RegisterHandlers(passThroughHandler) + netA.RegisterProcessors(passThroughHandler) + netB.RegisterProcessors(passThroughHandler) + netC.RegisterProcessors(passThroughHandler) // send messages from B and confirm that they get received by C (via A) for i := 0; i < 10; i++ { @@ -111,45 +143,139 @@ func TestP2PSubmitTX(t *testing.T) { ) } -func TestP2PSubmitWS(t *testing.T) { +// TestP2PSubmitTXNoGossip tests nodes without gossip enabled cannot receive transactions +func TestP2PSubmitTXNoGossip(t *testing.T) { partitiontest.PartitionTest(t) cfg := config.GetDefaultLocal() + cfg.ForceFetchTransactions = true log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) + netA.Start() + defer netA.Stop() peerInfoA := netA.service.AddrInfo() - addrsA, err := peerstore.AddrInfoToP2pAddrs(&peerInfoA) + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) require.NoError(t, err) require.NotZero(t, addrsA[0]) - netA.Start() - defer netA.Stop() multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) netB.Start() defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet) + require.Eventually( + t, + func() bool { + return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) == 1 && + len(netB.service.ListPeersForTopic(p2p.TXTopicName)) == 1 + }, + 2*time.Second, 
+ 50*time.Millisecond, + ) + // run netC in NPN mode (no relay => no gossip sup => no TX receiving) + cfg.ForceFetchTransactions = false + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) netC.Start() defer netC.Stop() + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() + }, 2*time.Second, 50*time.Millisecond) + + time.Sleep(time.Second) // give time for peers to connect. + + // ensure netC cannot receive messages + + passThroughHandler := []TaggedMessageProcessor{ + { + Tag: protocol.TxnTag, + MessageHandler: struct { + ProcessorValidateFunc + ProcessorHandleFunc + }{ + ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { + return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: nil} + }), + ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { + return OutgoingMessage{Action: Ignore} + }), + }, + }, + } + + netB.RegisterProcessors(passThroughHandler) + netC.RegisterProcessors(passThroughHandler) + for i := 0; i < 10; i++ { + err = netA.Broadcast(context.Background(), protocol.TxnTag, []byte(fmt.Sprintf("test %d", i)), false, nil) + require.NoError(t, err) + } + + // check netB received the messages require.Eventually( t, func() bool { - return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) == 2 && - len(netB.service.ListPeersForTopic(p2p.TXTopicName)) == 1 && - len(netC.service.ListPeersForTopic(p2p.TXTopicName)) == 1 + netB.peerStatsMu.Lock() + netBpeerStatsA, ok := netB.peerStats[netA.service.ID()] + netB.peerStatsMu.Unlock() + if !ok { + return false + } + return netBpeerStatsA.txReceived.Load() == 10 }, - 2*time.Second, + 1*time.Second, 50*time.Millisecond, ) - time.Sleep(time.Second) // XX give time for peers to connect. Knowing about them being subscribed to topics is clearly not enough + + // check netC did not receive the messages + netC.peerStatsMu.Lock() + _, ok := netC.peerStats[netA.service.ID()] + netC.peerStatsMu.Unlock() + require.False(t, ok) +} + +func TestP2PSubmitWS(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := config.GetDefaultLocal() + log := logging.TestingLog(t) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + + err = netA.Start() + require.NoError(t, err) + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + + multiAddrStr := addrsA[0].String() + phoneBookAddresses := []string{multiAddrStr} + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + err = netB.Start() + require.NoError(t, err) + defer netB.Stop() + + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + err = netC.Start() + require.NoError(t, err) + defer netC.Stop() + + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() + }, 2*time.Second, 50*time.Millisecond) + + time.Sleep(time.Second) // give time for peers to connect. 
+ // now we should be connected in a line: B <-> A <-> C where both B and C are connected to A but not each other testTag := protocol.AgreementVoteTag @@ -189,6 +315,10 @@ type mockService struct { peers map[peer.ID]peer.AddrInfo } +func (s *mockService) Start() error { + return nil +} + func (s *mockService) Close() error { return nil } @@ -197,6 +327,10 @@ func (s *mockService) ID() peer.ID { return s.id } +func (s *mockService) IDSigner() *p2p.PeerIDChallengeSigner { + panic("not implemented") +} + func (s *mockService) AddrInfo() peer.AddrInfo { return peer.AddrInfo{ ID: s.id, @@ -213,9 +347,7 @@ func (s *mockService) DialPeersUntilTargetCount(targetConnCount int) { } func (s *mockService) ClosePeer(peer peer.ID) error { - if _, ok := s.peers[peer]; ok { - delete(s.peers, peer) - } + delete(s.peers, peer) return nil } @@ -227,15 +359,15 @@ func (s *mockService) ListPeersForTopic(topic string) []peer.ID { return nil } -func (s *mockService) Subscribe(topic string, val pubsub.ValidatorEx) (*pubsub.Subscription, error) { +func (s *mockService) Subscribe(topic string, val pubsub.ValidatorEx) (p2p.SubNextCancellable, error) { return nil, nil } func (s *mockService) Publish(ctx context.Context, topic string, data []byte) error { return nil } -func (s *mockService) setAddrs(addrs []ma.Multiaddr) { - s.addrs = addrs +func (s *mockService) GetStream(peer.ID) (network.Stream, bool) { + return nil, false } func makeMockService(id peer.ID, addrs []ma.Multiaddr) *mockService { @@ -250,7 +382,7 @@ func TestP2PNetworkAddress(t *testing.T) { cfg := config.GetDefaultLocal() log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) defer netA.Stop() require.NoError(t, err) addrInfo := netA.service.AddrInfo() @@ -304,3 +436,692 @@ func TestP2PNetworkAddress(t *testing.T) { require.False(t, ok) require.Empty(t, retAddr) } + +type nilResolveController struct{} + +func (c *nilResolveController) Resolver() dnsaddr.Resolver { + return nil +} + +func (c *nilResolveController) NextResolver() dnsaddr.Resolver { + return nil +} + +type mockResolveController struct { + nilResolveController +} + +func (c *mockResolveController) Resolver() dnsaddr.Resolver { + return &mockResolver{} +} + +type mockResolver struct{} + +func (r *mockResolver) Resolve(ctx context.Context, _ ma.Multiaddr) ([]ma.Multiaddr, error) { + // return random stuff each time + _, publicKey, err := crypto.GenerateKeyPair(crypto.RSA, 2048) + if err != nil { + panic(err) + } + peerID, err := peer.IDFromPublicKey(publicKey) + if err != nil { + panic(err) + } + maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/p2p/" + peerID.String()) + return []ma.Multiaddr{maddr}, err +} + +func TestP2PBootstrapFunc(t *testing.T) { + t.Parallel() + partitiontest.PartitionTest(t) + + b := bootstrapper{} + require.Nil(t, b.BootstrapFunc()) + + b.started.Store(true) + p := peer.AddrInfo{ID: "test"} + b.phonebookPeers = []*peer.AddrInfo{&p} + require.Equal(t, []peer.AddrInfo{p}, b.BootstrapFunc()) + + b.phonebookPeers = nil + + b.cfg = config.GetDefaultLocal() + b.cfg.DNSBootstrapID = ".algodev.network" + b.cfg.DNSSecurityFlags = 0 + b.networkID = "devnet" + b.resolveController = &mockResolveController{} + + addrs := b.BootstrapFunc() + + require.GreaterOrEqual(t, len(addrs), 1) + addr := addrs[0] + require.Equal(t, len(addr.Addrs), 1) + require.GreaterOrEqual(t, len(addr.Addrs), 1) +} + +func TestP2PdnsLookupBootstrapPeersFailure(t 
*testing.T) {
+	t.Parallel()
+	partitiontest.PartitionTest(t)
+
+	cfg := config.GetDefaultLocal()
+	cfg.DNSSecurityFlags = 0
+	cfg.DNSBootstrapID = "non-existent.algodev.network"
+
+	controller := nilResolveController{}
+	addrs := dnsLookupBootstrapPeers(logging.TestingLog(t), cfg, "test", &controller)
+
+	require.Equal(t, 0, len(addrs))
+}
+
+func TestP2PdnsLookupBootstrapPeersInvalidAddr(t *testing.T) {
+	t.Parallel()
+	partitiontest.PartitionTest(t)
+
+	cfg := config.GetDefaultLocal()
+	cfg.DNSSecurityFlags = 0
+	cfg.DNSBootstrapID = ".algodev.network"
+
+	controller := nilResolveController{}
+	addrs := dnsLookupBootstrapPeers(logging.TestingLog(t), cfg, "testInvalidAddr", &controller)
+
+	require.Equal(t, 0, len(addrs))
+}
+
+func TestP2PdnsLookupBootstrapPeersWithBackup(t *testing.T) {
+	t.Parallel()
+	partitiontest.PartitionTest(t)
+
+	cfg := config.GetDefaultLocal()
+	cfg.DNSSecurityFlags = 0
+	cfg.DNSBootstrapID = ".algodev.network"
+
+	controller := &mockResolveController{}
+	addrs := dnsLookupBootstrapPeers(logging.TestingLog(t), cfg, "test", controller)
+	require.GreaterOrEqual(t, len(addrs), 1)
+
+	cfg.DNSBootstrapID = ".algodev.network?backup=.backup.algodev.network"
+	addrs = dnsLookupBootstrapPeers(logging.TestingLog(t), cfg, "test", controller)
+	require.GreaterOrEqual(t, len(addrs), 2)
+
+}
+
+type capNodeInfo struct {
+	nopeNodeInfo
+	cap p2p.Capability
+}
+
+func (ni *capNodeInfo) Capabilities() []p2p.Capability {
+	return []p2p.Capability{ni.cap}
+}
+
+func waitForRouting(t *testing.T, disc *p2p.CapabilitiesDiscovery) {
+	refreshCtx, refCancel := context.WithTimeout(context.Background(), time.Second*5)
+	for {
+		select {
+		case <-refreshCtx.Done():
+			refCancel()
+			require.Fail(t, "failed to populate routing table before timeout")
+		default:
+			if disc.RoutingTable().Size() > 0 {
+				refCancel()
+				return
+			}
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// TestP2PNetworkDHTCapabilities runs nodes with capabilities and ensures that connected nodes
+// can discover them. The other nodes receive the first node in the bootstrap list before starting.
+// There are two variations of the test: only netA advertises capabilities, and all nodes advertise. 
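The DHT capabilities test below waits until the routing table is populated and the expected number of capable peers is discoverable. The helper sketched here shows the same waiting pattern outside of a test; it relies only on the CapabilitiesDiscovery methods that appear in this patch (RoutingTable, PeersForCapability), and constructing the discovery itself via p2p.MakeCapabilitiesDiscovery is deliberately left out.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/algorand/go-algorand/network/p2p"
	"github.com/libp2p/go-libp2p/core/peer"
)

// waitForArchivalPeers polls the DHT-backed capabilities discovery until it can return
// the requested number of archival peers, or until the context expires.
func waitForArchivalPeers(ctx context.Context, disc *p2p.CapabilitiesDiscovery, want int) ([]peer.AddrInfo, error) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		// The routing table fills up as bootstrap/DHT peers are contacted.
		if disc.RoutingTable().Size() > 0 {
			peers, err := disc.PeersForCapability(p2p.Archival, want)
			if err == nil && len(peers) >= want {
				return peers, nil
			}
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-ticker.C:
		}
	}
}

func main() {
	fmt.Println("see waitForArchivalPeers; wiring up CapabilitiesDiscovery is environment-specific")
}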
+func TestP2PNetworkDHTCapabilities(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := config.GetDefaultLocal() + cfg.EnableDHTProviders = true + log := logging.TestingLog(t) + + cap := p2p.Archival + tests := []struct { + name string + nis []NodeInfo + numCapPeers int + }{ + {"cap=all", []NodeInfo{&capNodeInfo{cap: cap}, &capNodeInfo{cap: cap}, &capNodeInfo{cap: cap}}, 2}, // each has 2 peers with capabilities + {"cap=netA", []NodeInfo{&capNodeInfo{cap: cap}, &nopeNodeInfo{}, &nopeNodeInfo{}}, 1}, // each has 1 peer with capabilities + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, test.nis[0]) + require.NoError(t, err) + + err = netA.Start() + require.NoError(t, err) + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + + multiAddrStr := addrsA[0].String() + phoneBookAddresses := []string{multiAddrStr} + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[1]) + require.NoError(t, err) + err = netB.Start() + require.NoError(t, err) + defer netB.Stop() + + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[2]) + require.NoError(t, err) + err = netC.Start() + require.NoError(t, err) + defer netC.Stop() + + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() + }, 2*time.Second, 50*time.Millisecond) + + t.Logf("peers connected") + + nets := []*P2PNetwork{netA, netB, netC} + discs := []*p2p.CapabilitiesDiscovery{netA.capabilitiesDiscovery, netB.capabilitiesDiscovery, netC.capabilitiesDiscovery} + + var wg sync.WaitGroup + wg.Add(len(discs)) + for _, disc := range discs { + if disc == nil { + wg.Done() + continue + } + go func(disc *p2p.CapabilitiesDiscovery) { + defer wg.Done() + waitForRouting(t, disc) + }(disc) + } + wg.Wait() + + t.Logf("DHT is ready") + + // ensure all peers are connected - wait for connectivity as needed + for _, disc := range discs { + go func(disc *p2p.CapabilitiesDiscovery) { + require.Eventuallyf(t, func() bool { + return len(disc.Host().Network().Peers()) == 2 + }, time.Minute, time.Second, "Not all peers were found") + }(disc) + } + + wg.Add(len(discs)) + for i := range discs { + go func(idx int) { + disc := discs[idx] + defer wg.Done() + // skip netA since it is special for the test cap=netA + if test.name == "cap=netA" && disc == netA.capabilitiesDiscovery { + return + } + require.Eventuallyf(t, + func() bool { + peers, err := disc.PeersForCapability(cap, test.numCapPeers) + if err == nil && len(peers) == test.numCapPeers { + return true + } + return false + }, + time.Minute, + time.Second, + fmt.Sprintf("Not all expected %s cap peers were found", cap), + ) + // ensure GetPeers gets PeersPhonebookArchivalNodes peers + // it appears there are artifical peers because of listening on localhost and on a real network interface + // so filter out and save only unique peers by their IDs + net := nets[idx] + peers := net.GetPeers(PeersPhonebookArchivalNodes) + uniquePeerIDs := make(map[peer.ID]struct{}) + for _, p := range peers { + wsPeer := p.(*wsPeerCore) + pi, err := peer.AddrInfoFromString(wsPeer.GetAddress()) + require.NoError(t, err) + uniquePeerIDs[pi.ID] = struct{}{} + } + require.Equal(t, test.numCapPeers, len(uniquePeerIDs)) + }(i) + } + wg.Wait() + }) + } +} + +// 
TestMultiaddrConversionToFrom ensures Multiaddr can be serialized back to an address without losing information +func TestP2PMultiaddrConversionToFrom(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + const a = "/ip4/192.168.1.1/tcp/8180/p2p/Qmewz5ZHN1AAGTarRbMupNPbZRfg3p5jUGoJ3JYEatJVVk" + ma, err := ma.NewMultiaddr(a) + require.NoError(t, err) + require.Equal(t, a, ma.String()) + + // this conversion drops the p2p proto part + pi, err := peer.AddrInfoFromP2pAddr(ma) + require.NoError(t, err) + require.NotEqual(t, a, pi.Addrs[0].String()) + require.Len(t, pi.Addrs, 1) + + mas, err := peer.AddrInfoToP2pAddrs(pi) + require.NoError(t, err) + require.Len(t, mas, 1) + require.Equal(t, a, mas[0].String()) +} + +type p2phttpHandler struct { + tb testing.TB + retData string + net GossipNode +} + +func (h *p2phttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(h.retData)) + if r.URL.Path == "/check-conn" { + c := h.net.GetHTTPRequestConnection(r) + require.NotNil(h.tb, c) + } +} + +func TestP2PHTTPHandler(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + cfg.EnableDHTProviders = true + cfg.GossipFanout = 1 + log := logging.TestingLog(t) + + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + + h := &p2phttpHandler{t, "hello", nil} + netA.RegisterHTTPHandler("/test", h) + + h2 := &p2phttpHandler{t, "world", netA} + netA.RegisterHTTPHandler("/check-conn", h2) + + netA.Start() + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + + httpClient, err := p2p.MakeHTTPClient(&peerInfoA) + require.NoError(t, err) + resp, err := httpClient.Get("/test") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, "hello", string(body)) + + // check another endpoint that also access the underlying connection/stream + httpClient, err = p2p.MakeHTTPClient(&peerInfoA) + require.NoError(t, err) + resp, err = httpClient.Get("/check-conn") + require.NoError(t, err) + defer resp.Body.Close() + + body, err = io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, "world", string(body)) + + // check rate limiting client: + // zero clients allowed, rate limiting window (10s) is greater than queue deadline (1s) + pstore, err := peerstore.MakePhonebook(0, 10*time.Second) + require.NoError(t, err) + pstore.AddPersistentPeers([]interface{}{&peerInfoA}, "net", phonebook.PhoneBookEntryRelayRole) + httpClient, err = p2p.MakeHTTPClientWithRateLimit(&peerInfoA, pstore, 1*time.Second, 1) + require.NoError(t, err) + _, err = httpClient.Get("/test") + require.ErrorIs(t, err, limitcaller.ErrConnectionQueueingTimeout) +} + +func TestP2PRelay(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := config.GetDefaultLocal() + cfg.ForceFetchTransactions = true + log := logging.TestingLog(t) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + + err = netA.Start() + require.NoError(t, err) + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + + multiAddrStr := addrsA[0].String() + phoneBookAddresses := []string{multiAddrStr} + + netB, err := NewP2PNetwork(log, cfg, "", 
phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + err = netB.Start() + require.NoError(t, err) + defer netB.Stop() + + require.Eventually( + t, + func() bool { + return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) > 0 && + len(netB.service.ListPeersForTopic(p2p.TXTopicName)) > 0 + }, + 2*time.Second, + 50*time.Millisecond, + ) + + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() + }, 2*time.Second, 50*time.Millisecond) + + makeCounterHandler := func(numExpected int) ([]TaggedMessageProcessor, *atomic.Uint32, chan struct{}) { + var numActual atomic.Uint32 + counterDone := make(chan struct{}) + counterHandler := []TaggedMessageProcessor{ + { + Tag: protocol.TxnTag, + MessageHandler: struct { + ProcessorValidateFunc + ProcessorHandleFunc + }{ + ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { + return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: nil} + }), + ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { + if count := numActual.Add(1); int(count) >= numExpected { + close(counterDone) + } + return OutgoingMessage{Action: Ignore} + }), + }, + }, + } + return counterHandler, &numActual, counterDone + } + counterHandler, _, counterDone := makeCounterHandler(1) + netA.RegisterProcessors(counterHandler) + + // send 5 messages from both netB to netA + // since there is no node with listening address set => no messages should be received + for i := 0; i < 5; i++ { + err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3, byte(i)}, true, nil) + require.NoError(t, err) + } + + select { + case <-counterDone: + require.Fail(t, "No messages should have been received") + case <-time.After(1 * time.Second): + } + + // add netC with listening address set, and enable relaying on netB + // ensure all messages are received by netA + cfg.NetAddress = "127.0.0.1:0" + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + require.NoError(t, err) + err = netC.Start() + require.NoError(t, err) + defer netC.Stop() + + netB.relayMessages = true + + require.Eventually( + t, + func() bool { + return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) > 0 && + len(netB.service.ListPeersForTopic(p2p.TXTopicName)) > 0 && + len(netC.service.ListPeersForTopic(p2p.TXTopicName)) > 0 + }, + 2*time.Second, + 50*time.Millisecond, + ) + + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() + }, 2*time.Second, 50*time.Millisecond) + + const expectedMsgs = 10 + counterHandler, count, counterDone := makeCounterHandler(expectedMsgs) + netA.ClearProcessors() + netA.RegisterProcessors(counterHandler) + + for i := 0; i < expectedMsgs/2; i++ { + err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3, byte(i)}, true, nil) + require.NoError(t, err) + err = netC.Relay(context.Background(), protocol.TxnTag, []byte{11, 12, 10 + byte(i), 14}, true, nil) + require.NoError(t, err) + } + // send some duplicate messages, they should be dropped + for i := 0; i < expectedMsgs/2; i++ { + err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3, byte(i)}, true, nil) + require.NoError(t, err) + } + + select { + case <-counterDone: + case <-time.After(2 * time.Second): + if c := count.Load(); c < expectedMsgs { + require.Failf(t, "One or more messages failed to reach destination network", "%d > %d", expectedMsgs, c) + } else if c > expectedMsgs { + require.Failf(t, 
"One or more messages that were expected to be dropped, reached destination network", "%d < %d", expectedMsgs, c) + } + } +} + +type mockSubPService struct { + mockService + count atomic.Int64 + otherPeerID peer.ID + shouldNextFail bool +} + +type mockSubscription struct { + peerID peer.ID + shouldNextFail bool +} + +func (m *mockSubscription) Next(ctx context.Context) (*pubsub.Message, error) { + if m.shouldNextFail { + return nil, errors.New("mockSubscription error") + } + return &pubsub.Message{ReceivedFrom: m.peerID}, nil +} +func (m *mockSubscription) Cancel() {} + +func (m *mockSubPService) Subscribe(topic string, val pubsub.ValidatorEx) (p2p.SubNextCancellable, error) { + m.count.Add(1) + otherPeerID := m.otherPeerID + if otherPeerID == "" { + otherPeerID = "mockSubPServicePeerID" + } + return &mockSubscription{peerID: otherPeerID, shouldNextFail: m.shouldNextFail}, nil +} + +// TestP2PWantTXGossip checks txTopicHandleLoop runs as expected on wantTXGossip changes +func TestP2PWantTXGossip(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // cancelled context to trigger subscription.Next to return + ctx, cancel := context.WithCancel(context.Background()) + cancel() + peerID := peer.ID("myPeerID") + mockService := &mockSubPService{mockService: mockService{id: peerID}, shouldNextFail: true} + net := &P2PNetwork{ + service: mockService, + log: logging.TestingLog(t), + ctx: ctx, + nodeInfo: &nopeNodeInfo{}, + } + + // ensure wantTXGossip from false to false is noop + net.wantTXGossip.Store(false) + net.OnNetworkAdvance() + require.Eventually(t, func() bool { net.wg.Wait(); return true }, 1*time.Second, 50*time.Millisecond) + require.Equal(t, int64(0), mockService.count.Load()) + require.False(t, net.wantTXGossip.Load()) + + // ensure wantTXGossip from true (wantTXGossip) to false (nopeNodeInfo) is noop + net.wantTXGossip.Store(true) + net.OnNetworkAdvance() + require.Eventually(t, func() bool { net.wg.Wait(); return true }, 1*time.Second, 50*time.Millisecond) + require.Equal(t, int64(0), mockService.count.Load()) + require.False(t, net.wantTXGossip.Load()) + + // check false to true change triggers subscription + net.wantTXGossip.Store(false) + net.nodeInfo = &participatingNodeInfo{} + net.OnNetworkAdvance() + require.Eventually(t, func() bool { return mockService.count.Load() == 1 }, 1*time.Second, 50*time.Millisecond) + require.True(t, net.wantTXGossip.Load()) + + // check IsParticipating changes wantTXGossip + net.wantTXGossip.Store(true) + net.nodeInfo = &nopeNodeInfo{} + net.config.ForceFetchTransactions = false + net.relayMessages = false + net.OnNetworkAdvance() + require.Eventually(t, func() bool { net.wg.Wait(); return true }, 1*time.Second, 50*time.Millisecond) + require.False(t, net.wantTXGossip.Load()) + + // check ForceFetchTransactions and relayMessages also take effect + net.wantTXGossip.Store(false) + net.nodeInfo = &nopeNodeInfo{} + net.config.ForceFetchTransactions = true + net.relayMessages = false + net.OnNetworkAdvance() + require.Eventually(t, func() bool { return mockService.count.Load() == 2 }, 1*time.Second, 50*time.Millisecond) + require.True(t, net.wantTXGossip.Load()) + + net.wantTXGossip.Store(false) + net.nodeInfo = &nopeNodeInfo{} + net.config.ForceFetchTransactions = false + net.relayMessages = true + net.OnNetworkAdvance() + require.Eventually(t, func() bool { return mockService.count.Load() == 3 }, 1*time.Second, 50*time.Millisecond) + require.True(t, net.wantTXGossip.Load()) + + // ensure empty nodeInfo prevents changing the value + 
net.wantTXGossip.Store(false) + net.nodeInfo = nil + net.config.ForceFetchTransactions = true + net.relayMessages = true + net.OnNetworkAdvance() + require.Eventually(t, func() bool { net.wg.Wait(); return true }, 1*time.Second, 50*time.Millisecond) + require.False(t, net.wantTXGossip.Load()) + + // check true to true change is noop + net.wantTXGossip.Store(true) + net.nodeInfo = &participatingNodeInfo{} + net.OnNetworkAdvance() + require.Eventually(t, func() bool { return mockService.count.Load() == 3 }, 1*time.Second, 50*time.Millisecond) + require.True(t, net.wantTXGossip.Load()) +} + +func TestMergeP2PAddrInfoResolvedAddresses(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN") + require.NoError(t, err) + m2, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb") + require.NoError(t, err) + m3, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC") + require.NoError(t, err) + m4, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001") + require.NoError(t, err) + + var tests = []struct { + name string + primary []ma.Multiaddr + backup []ma.Multiaddr + expected int + hasInvalid bool + }{ + {"no overlap", []ma.Multiaddr{m1}, []ma.Multiaddr{m2}, 2, false}, + {"complete overlap", []ma.Multiaddr{m1}, []ma.Multiaddr{m1}, 1, false}, + {"partial overlap", []ma.Multiaddr{m1, m2}, []ma.Multiaddr{m1, m3}, 3, false}, + {"empty slices", []ma.Multiaddr{}, []ma.Multiaddr{}, 0, false}, + {"nil slices", nil, nil, 0, false}, + {"invalid p2p", []ma.Multiaddr{m1, m4}, []ma.Multiaddr{m2, m4}, 2, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r1 := mergeP2PMultiaddrResolvedAddresses(tt.primary, tt.backup) + if len(r1) != tt.expected { + t.Errorf("Expected %d addresses, got %d", tt.expected, len(r1)) + } + + var info1 []peer.AddrInfo + var info2 []peer.AddrInfo + for _, addr := range tt.primary { + info, err0 := peer.AddrInfoFromP2pAddr(addr) + if tt.hasInvalid { + if err0 == nil { + info1 = append(info1, *info) + } + } else { + require.NoError(t, err0) + info1 = append(info1, *info) + } + } + for _, addr := range tt.backup { + info, err0 := peer.AddrInfoFromP2pAddr(addr) + if tt.hasInvalid { + if err0 == nil { + info2 = append(info2, *info) + } + } else { + require.NoError(t, err0) + info2 = append(info2, *info) + } + } + if info1 == nil && tt.primary != nil { + info1 = []peer.AddrInfo{} + } + if info2 == nil && tt.backup != nil { + info2 = []peer.AddrInfo{} + } + + r2 := mergeP2PAddrInfoResolvedAddresses(info1, info2) + if len(r2) != tt.expected { + t.Errorf("Expected %d addresses, got %d", tt.expected, len(r2)) + } + }) + } +} + +// TestP2PGossipSubPeerCasts checks that gossipSubPeer implements the ErlClient and IPAddressable interfaces +// needed by TxHandler +func TestP2PGossipSubPeerCasts(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var g interface{} = gossipSubPeer{} + _, ok := g.(util.ErlClient) + require.True(t, ok) + + _, ok = g.(IPAddressable) + require.True(t, ok) + + // check that gossipSubPeer is hashable as ERL wants + var m map[util.ErlClient]struct{} + require.Equal(t, m[gossipSubPeer{}], struct{}{}) + require.Equal(t, m[g.(util.ErlClient)], struct{}{}) +} diff --git a/network/phonebook.go b/network/phonebook/phonebook.go similarity index 96% rename from network/phonebook.go rename to network/phonebook/phonebook.go index 
0c431fd2f3..634ca9c16c 100644 --- a/network/phonebook.go +++ b/network/phonebook/phonebook.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . -package network +package phonebook import ( "math" @@ -55,12 +55,12 @@ type Phonebook interface { // The connection should be established when the waitTime is 0. // It will register a provisional next connection time when the waitTime is 0. // The provisional time should be updated after the connection with UpdateConnectionTime - GetConnectionWaitTime(addr string) (addrInPhonebook bool, + GetConnectionWaitTime(addrOrPeerID string) (addrInPhonebook bool, waitTime time.Duration, provisionalTime time.Time) // UpdateConnectionTime will update the provisional connection time. // Returns true of the addr was in the phonebook - UpdateConnectionTime(addr string, provisionalTime time.Time) bool + UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool // ReplacePeerList merges a set of addresses with that passed in for networkName // new entries in dnsAddresses are being added @@ -231,8 +231,10 @@ func (e *phonebookImpl) UpdateRetryAfter(addr string, retryAfter time.Time) { // The connection should be established when the waitTime is 0. // It will register a provisional next connection time when the waitTime is 0. // The provisional time should be updated after the connection with UpdateConnectionTime -func (e *phonebookImpl) GetConnectionWaitTime(addr string) (addrInPhonebook bool, +func (e *phonebookImpl) GetConnectionWaitTime(addrOrPeerID string) (addrInPhonebook bool, waitTime time.Duration, provisionalTime time.Time) { + + addr := addrOrPeerID e.lock.Lock() defer e.lock.Unlock() @@ -276,7 +278,8 @@ func (e *phonebookImpl) GetConnectionWaitTime(addr string) (addrInPhonebook bool // UpdateConnectionTime will update the provisional connection time. // Returns true of the addr was in the phonebook -func (e *phonebookImpl) UpdateConnectionTime(addr string, provisionalTime time.Time) bool { +func (e *phonebookImpl) UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool { + addr := addrOrPeerID e.lock.Lock() defer e.lock.Unlock() diff --git a/network/phonebook_test.go b/network/phonebook/phonebook_test.go similarity index 99% rename from network/phonebook_test.go rename to network/phonebook/phonebook_test.go index 2643e722ea..d603a51a1a 100644 --- a/network/phonebook_test.go +++ b/network/phonebook/phonebook_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . 
-package network +package phonebook import ( "testing" diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go index cb1d7b963d..0de6a41c73 100644 --- a/network/requestLogger_test.go +++ b/network/requestLogger_test.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -49,11 +50,12 @@ func TestRequestLogger(t *testing.T) { dl := eventsDetailsLogger{Logger: log, eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.HTTPRequestEvent} log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) netA := &WebsocketNetwork{ - log: dl, - config: defaultConfig, - phonebook: MakePhonebook(1, 1*time.Millisecond), - GenesisID: "go-test-network-genesis", - NetworkID: config.Devtestnet, + log: dl, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: "go-test-network-genesis", + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, } netA.config.EnableRequestLogger = true netA.setup() @@ -67,8 +69,8 @@ func TestRequestLogger(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook = MakePhonebook(1, 1*time.Millisecond) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook = phonebook.MakePhonebook(1, 1*time.Millisecond) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() diff --git a/network/requestTracker.go b/network/requestTracker.go index c38cc9d2ed..47eba90c7e 100644 --- a/network/requestTracker.go +++ b/network/requestTracker.go @@ -31,6 +31,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" + "github.com/algorand/go-algorand/network/addr" ) const ( @@ -94,12 +95,12 @@ func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime ti // - remoteAddr is used otherwise. func (tr *TrackerRequest) remoteAddress() string { if len(tr.otherPublicAddr) != 0 { - url, err := ParseHostOrURL(tr.otherPublicAddr) + url, err := addr.ParseHostOrURL(tr.otherPublicAddr) if err == nil && len(tr.remoteHost) > 0 && url.Hostname() == tr.remoteHost { return tr.otherPublicAddr } } - url, err := ParseHostOrURL(tr.remoteAddr) + url, err := addr.ParseHostOrURL(tr.remoteAddr) if err != nil { // tr.remoteAddr can't be parsed so try to use tr.remoteHost // there is a chance it came from a proxy and has a meaningful value diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 0a8c934c53..158cf45336 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -27,6 +27,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -86,11 +87,12 @@ func TestRateLimiting(t *testing.T) { // This test is conducted locally, so we want to treat all hosts the same for counting incoming requests. 
testConfig.DisableLocalhostConnectionRateLimit = false wn := &WebsocketNetwork{ - log: log, - config: testConfig, - phonebook: MakePhonebook(1, 1), - GenesisID: "go-test-network-genesis", - NetworkID: config.Devtestnet, + log: log, + config: testConfig, + phonebook: phonebook.MakePhonebook(1, 1), + GenesisID: "go-test-network-genesis", + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, } // increase the IncomingConnectionsLimit/MaxConnectionsPerIP limits, since we don't want to test these. @@ -115,15 +117,15 @@ func TestRateLimiting(t *testing.T) { clientsCount := int(testConfig.ConnectionsRateLimitingCount + 5) networks := make([]*WebsocketNetwork, clientsCount) - phonebooks := make([]Phonebook, clientsCount) + phonebooks := make([]phonebook.Phonebook, clientsCount) for i := 0; i < clientsCount; i++ { networks[i] = makeTestWebsocketNodeWithConfig(t, noAddressConfig) networks[i].config.GossipFanout = 1 - phonebooks[i] = MakePhonebook(networks[i].config.ConnectionsRateLimitingCount, + phonebooks[i] = phonebook.MakePhonebook(networks[i].config.ConnectionsRateLimitingCount, time.Duration(networks[i].config.ConnectionsRateLimitingWindowSeconds)*time.Second) - phonebooks[i].ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) - networks[i].phonebook = MakePhonebook(1, 1*time.Millisecond) - networks[i].phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + phonebooks[i].ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) + networks[i].phonebook = phonebook.MakePhonebook(1, 1*time.Millisecond) + networks[i].phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) defer func(net *WebsocketNetwork, i int) { t.Logf("stopping network %d", i) net.Stop() @@ -153,7 +155,7 @@ func TestRateLimiting(t *testing.T) { case <-readyCh: // it's closed, so this client got connected. connectedClients++ - phonebookLen := len(phonebooks[i].GetAddresses(1, PhoneBookEntryRelayRole)) + phonebookLen := len(phonebooks[i].GetAddresses(1, phonebook.PhoneBookEntryRelayRole)) // if this channel is ready, than we should have an address, since it didn't get blocked. 
require.Equal(t, 1, phonebookLen) default: diff --git a/network/websocketProxy_test.go b/network/websocketProxy_test.go index 73298ccd64..96628acb69 100644 --- a/network/websocketProxy_test.go +++ b/network/websocketProxy_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/algorand/go-algorand/network/addr" "github.com/algorand/go-algorand/test/partitiontest" "github.com/algorand/websocket" "github.com/stretchr/testify/require" @@ -71,7 +72,7 @@ func (w *websocketProxy) ServeHTTP(response http.ResponseWriter, request *http.R } // set X-Forwarded-For - url, err := ParseHostOrURL(request.RemoteAddr) + url, err := addr.ParseHostOrURL(request.RemoteAddr) if err != nil { http.Error(response, err.Error(), http.StatusInternalServerError) return @@ -254,7 +255,7 @@ func TestWebsocketProxyWsNet(t *testing.T) { gossipA, err := netA.addrToGossipAddr(addrA) require.NoError(t, err) - parsedA, err := ParseHostOrURL(gossipA) + parsedA, err := addr.ParseHostOrURL(gossipA) require.NoError(t, err) // setup the proxy @@ -316,7 +317,7 @@ func TestWebsocketProxyWsNet(t *testing.T) { peerB := netA.peers[0] require.NotEmpty(t, peerB.originAddress) require.Equal(t, fakeXForwardedFor, peerB.originAddress) - require.NotEqual(t, peerB.RoutingAddr(), peerB.IPAddr()) + require.NotEqual(t, peerB.RoutingAddr(), peerB.ipAddr()) fakeXForwardedForParsed := net.ParseIP(fakeXForwardedFor) require.NotEqual(t, fakeXForwardedForParsed, peerB.RoutingAddr()) } diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 4a491f4f9f..8a43ad5234 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -27,6 +27,7 @@ import ( "net/http" "net/textproto" "net/url" + "path" "regexp" "runtime" "strconv" @@ -43,7 +44,11 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" + "github.com/algorand/go-algorand/network/addr" + "github.com/algorand/go-algorand/network/limitcaller" "github.com/algorand/go-algorand/network/limitlistener" + "github.com/algorand/go-algorand/network/p2p" + "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/protocol" tools_network "github.com/algorand/go-algorand/tools/network" "github.com/algorand/go-algorand/tools/network/dnssec" @@ -83,7 +88,7 @@ const httpServerMaxHeaderBytes = 4096 const connectionActivityMonitorInterval = 3 * time.Minute // maxPeerInactivityDuration is the maximum allowed duration for a -// peer to remain completly idle (i.e. no inbound or outbound communication), before +// peer to remain completely idle (i.e. no inbound or outbound communication), before // we discard the connection. const maxPeerInactivityDuration = 5 * time.Minute @@ -163,6 +168,8 @@ const HealthServiceStatusPath = "/status" type NodeInfo interface { // IsParticipating returns true if this node has stake and may vote on blocks or propose blocks. IsParticipating() bool + // Capabilities returns a list of capabilities this node has. 
+ Capabilities() []p2p.Capability } type nopeNodeInfo struct { @@ -172,6 +179,10 @@ func (nnni *nopeNodeInfo) IsParticipating() bool { return false } +func (nnni *nopeNodeInfo) Capabilities() []p2p.Capability { + return nil +} + // WebsocketNetwork implements GossipNode type WebsocketNetwork struct { listener net.Listener @@ -197,12 +208,15 @@ type WebsocketNetwork struct { broadcaster msgBroadcaster handler msgHandler - phonebook Phonebook + phonebook phonebook.Phonebook GenesisID string NetworkID protocol.NetworkID RandomID string + peerID p2p.PeerID + peerIDSigner identityChallengeSigner + ready atomic.Int32 readyChan chan struct{} @@ -238,8 +252,8 @@ type WebsocketNetwork struct { requestsTracker *RequestTracker requestsLogger *RequestLogger - // lastPeerConnectionsSent is the last time the peer connections were sent ( or attempted to be sent ) to the telemetry server. - lastPeerConnectionsSent time.Time + // peerStater collects and reports peer connectivity telemetry + peerStater peerConnectionStater // connPerfMonitor is used on outgoing connections to measure their relative message timing connPerfMonitor *connectionPerformanceMonitor @@ -256,8 +270,8 @@ type WebsocketNetwork struct { // transport and dialer are customized to limit the number of // connection in compliance with connectionsRateLimitingCount. - transport rateLimitingTransport - dialer Dialer + transport limitcaller.RateLimitingTransport + dialer limitcaller.Dialer // messagesOfInterest specifies the message types that this node // wants to receive. nil means default. non-nil causes this @@ -348,7 +362,7 @@ type networkPeerManager interface { // used by msgHandler Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error - disconnectThread(badnode Peer, reason disconnectReason) + disconnectThread(badnode DisconnectablePeer, reason disconnectReason) checkPeersConnectivity() } @@ -463,13 +477,13 @@ func (wn *WebsocketNetwork) RelayArray(ctx context.Context, tags []protocol.Tag, return nil } -func (wn *WebsocketNetwork) disconnectThread(badnode Peer, reason disconnectReason) { +func (wn *WebsocketNetwork) disconnectThread(badnode DisconnectablePeer, reason disconnectReason) { defer wn.wg.Done() wn.disconnect(badnode, reason) } // Disconnect from a peer, probably due to protocol errors. 
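With Capabilities added to NodeInfo, any implementation can advertise what it serves; the new p2pNetwork_test.go earlier in this patch does so by embedding nopeNodeInfo. A sketch mirroring that capNodeInfo pattern for a node that announces the archival capability (illustrative only, not part of the change):

// archivalNodeInfo inherits IsParticipating from nopeNodeInfo and only
// overrides Capabilities; p2p.Archival is the capability used in the tests above.
type archivalNodeInfo struct{ nopeNodeInfo }

func (archivalNodeInfo) Capabilities() []p2p.Capability {
	return []p2p.Capability{p2p.Archival}
}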
-func (wn *WebsocketNetwork) Disconnect(node Peer) { +func (wn *WebsocketNetwork) Disconnect(node DisconnectablePeer) { wn.disconnect(node, disconnectBadData) } @@ -549,16 +563,18 @@ func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer { case PeersPhonebookRelays: // return copy of phonebook, which probably also contains peers we're connected to, but if it doesn't maybe we shouldn't be making new connections to those peers (because they disappeared from the directory) var addrs []string - addrs = wn.phonebook.GetAddresses(1000, PhoneBookEntryRelayRole) + addrs = wn.phonebook.GetAddresses(1000, phonebook.PhoneBookEntryRelayRole) for _, addr := range addrs { - peerCore := makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, addr, wn.GetRoundTripper(), "" /*origin address*/) + client, _ := wn.GetHTTPClient(addr) + peerCore := makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, addr, client, "" /*origin address*/) outPeers = append(outPeers, &peerCore) } case PeersPhonebookArchivalNodes: var addrs []string - addrs = wn.phonebook.GetAddresses(1000, PhoneBookEntryArchivalRole) + addrs = wn.phonebook.GetAddresses(1000, phonebook.PhoneBookEntryArchivalRole) for _, addr := range addrs { - peerCore := makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, addr, wn.GetRoundTripper(), "" /*origin address*/) + client, _ := wn.GetHTTPClient(addr) + peerCore := makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, addr, client, "" /*origin address*/) outPeers = append(outPeers, &peerCore) } case PeersConnectedIn: @@ -583,13 +599,12 @@ func (wn *WebsocketNetwork) setup() { wn.nodeInfo = &nopeNodeInfo{} } maxIdleConnsPerHost := int(wn.config.ConnectionsRateLimitingCount) - wn.dialer = makeRateLimitingDialer(wn.phonebook, preferredResolver) - wn.transport = makeRateLimitingTransport(wn.phonebook, 10*time.Second, &wn.dialer, maxIdleConnsPerHost) + wn.dialer = limitcaller.MakeRateLimitingDialer(wn.phonebook, preferredResolver) + wn.transport = limitcaller.MakeRateLimitingTransport(wn.phonebook, limitcaller.DefaultQueueingTimeout, &wn.dialer, maxIdleConnsPerHost) wn.upgrader.ReadBufferSize = 4096 wn.upgrader.WriteBufferSize = 4096 wn.upgrader.EnableCompression = false - wn.lastPeerConnectionsSent = time.Now() wn.router = mux.NewRouter() if wn.config.EnableGossipService { wn.router.Handle(GossipNetworkPath, wn) @@ -676,7 +691,7 @@ func (wn *WebsocketNetwork) setup() { } // Start makes network connections and threads -func (wn *WebsocketNetwork) Start() { +func (wn *WebsocketNetwork) Start() error { wn.messagesOfInterestMu.Lock() defer wn.messagesOfInterestMu.Unlock() wn.messagesOfInterestEncoded = true @@ -688,7 +703,7 @@ func (wn *WebsocketNetwork) Start() { listener, err := net.Listen("tcp", wn.config.NetAddress) if err != nil { wn.log.Errorf("network could not listen %v: %s", wn.config.NetAddress, err) - return + return err } // wrap the original listener with a limited connection listener listener = limitlistener.RejectingLimitListener( @@ -720,12 +735,17 @@ func (wn *WebsocketNetwork) Start() { } } } - // if the network has a public address, use that as the name for connection deduplication - if wn.config.PublicAddress != "" { + // if the network has a public address or a libp2p peer ID, use that as the name for connection deduplication + if wn.config.PublicAddress != "" || (wn.peerID != "" && wn.peerIDSigner != nil) { wn.RegisterHandlers(identityHandlers) } - if wn.identityScheme == nil && wn.config.PublicAddress != "" { - wn.identityScheme = 
NewIdentityChallengeScheme(wn.config.PublicAddress) + if wn.identityScheme == nil { + if wn.peerID != "" && wn.peerIDSigner != nil { + wn.identityScheme = NewIdentityChallengeSchemeWithSigner(string(wn.peerID), wn.peerIDSigner) + } + if wn.config.PublicAddress != "" { + wn.identityScheme = NewIdentityChallengeScheme(wn.config.PublicAddress) + } } wn.meshUpdateRequests <- meshRequest{false, nil} @@ -759,6 +779,8 @@ func (wn *WebsocketNetwork) Start() { go wn.postMessagesOfInterestThread() wn.log.Infof("serving genesisID=%s on %#v with RandomID=%s", wn.GenesisID, wn.PublicAddress(), wn.RandomID) + + return nil } func (wn *WebsocketNetwork) httpdThread() { @@ -842,6 +864,14 @@ func (wn *WebsocketNetwork) ClearHandlers() { wn.handler.ClearHandlers([]Tag{protocol.PingTag, protocol.PingReplyTag, protocol.NetPrioResponseTag}) } +// RegisterProcessors registers the set of given message handlers. +func (wn *WebsocketNetwork) RegisterProcessors(dispatch []TaggedMessageProcessor) { +} + +// ClearProcessors deregisters all the existing message handlers. +func (wn *WebsocketNetwork) ClearProcessors() { +} + func (wn *WebsocketNetwork) setHeaders(header http.Header) { localTelemetryGUID := wn.log.GetTelemetryGUID() localInstanceName := wn.log.GetInstanceName() @@ -1005,7 +1035,7 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo // request that was provided to the http handler ( or provide a fallback Context() to that ) // if the provided request has no associated connection, it returns nil. ( this should not happen for any http request that was registered // by WebsocketNetwork ) -func (wn *WebsocketNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) { +func (wn *WebsocketNetwork) GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) { if wn.requestsTracker != nil { conn = wn.requestsTracker.GetRequestConnection(request) } @@ -1080,8 +1110,9 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt wn.requestsLogger.SetStatusCode(response, http.StatusSwitchingProtocols) } + client, _ := wn.GetHTTPClient(trackedRequest.remoteAddress()) peer := &wsPeer{ - wsPeerCore: makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, trackedRequest.remoteAddress(), wn.GetRoundTripper(), trackedRequest.remoteHost), + wsPeerCore: makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, trackedRequest.remoteAddress(), client, trackedRequest.remoteHost), conn: wsPeerWebsocketConnImpl{conn}, outgoing: false, InstanceName: trackedRequest.otherInstanceName, @@ -1528,7 +1559,7 @@ func (wn *WebsocketNetwork) isConnectedTo(addr string) bool { wn.peersLock.RLock() defer wn.peersLock.RUnlock() for _, peer := range wn.peers { - if addr == peer.rootURL { + if addr == peer.GetAddress() { return true } } @@ -1605,7 +1636,7 @@ func (wn *WebsocketNetwork) meshThread() { // send the currently connected peers information to the // telemetry server; that would allow the telemetry server // to construct a cross-node map of all the nodes interconnections. 
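Because Start now reports a failed listen instead of only logging it, call sites are expected to check the returned error. A hypothetical call site, shown only to illustrate the new signature:

// startGossip is a hypothetical helper; the error propagation is the point here.
func startGossip(wn *WebsocketNetwork) error {
	if err := wn.Start(); err != nil {
		wn.log.Errorf("websocket network failed to start: %v", err)
		return err
	}
	return nil
}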
- wn.sendPeerConnectionsTelemetryStatus() + wn.peerStater.sendPeerConnectionsTelemetryStatus(wn) } } @@ -1632,12 +1663,12 @@ func (wn *WebsocketNetwork) refreshRelayArchivePhonebookAddresses() { func (wn *WebsocketNetwork) updatePhonebookAddresses(relayAddrs []string, archiveAddrs []string) { if len(relayAddrs) > 0 { wn.log.Debugf("got %d relay dns addrs, %#v", len(relayAddrs), relayAddrs[:imin(5, len(relayAddrs))]) - wn.phonebook.ReplacePeerList(relayAddrs, string(wn.NetworkID), PhoneBookEntryRelayRole) + wn.phonebook.ReplacePeerList(relayAddrs, string(wn.NetworkID), phonebook.PhoneBookEntryRelayRole) } else { wn.log.Infof("got no relay DNS addrs for network %s", wn.NetworkID) } if len(archiveAddrs) > 0 { - wn.phonebook.ReplacePeerList(archiveAddrs, string(wn.NetworkID), PhoneBookEntryArchivalRole) + wn.phonebook.ReplacePeerList(archiveAddrs, string(wn.NetworkID), phonebook.PhoneBookEntryArchivalRole) } else { wn.log.Infof("got no archive DNS addrs for network %s", wn.NetworkID) } @@ -1656,7 +1687,7 @@ func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool { return false } // get more than we need so that we can ignore duplicates - newAddrs := wn.phonebook.GetAddresses(desired+numOutgoingTotal, PhoneBookEntryRelayRole) + newAddrs := wn.phonebook.GetAddresses(desired+numOutgoingTotal, phonebook.PhoneBookEntryRelayRole) for _, na := range newAddrs { if na == wn.config.PublicAddress { // filter out self-public address, so we won't try to connect to ourselves. @@ -1767,27 +1798,38 @@ func (wn *WebsocketNetwork) OnNetworkAdvance() { } } +type peerConnectionStater struct { + log logging.Logger + + peerConnectionsUpdateInterval time.Duration + lastPeerConnectionsSent time.Time +} + +type peerSnapshotter interface { + peerSnapshot(peers []*wsPeer) ([]*wsPeer, int32) +} + // sendPeerConnectionsTelemetryStatus sends a snapshot of the currently connected peers // to the telemetry server. Internally, it's using a timer to ensure that it would only // send the information once every hour ( configurable via PeerConnectionsUpdateInterval ) -func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() { - if !wn.log.GetTelemetryEnabled() { +func (pcs *peerConnectionStater) sendPeerConnectionsTelemetryStatus(snapshotter peerSnapshotter) { + if !pcs.log.GetTelemetryEnabled() { return } now := time.Now() - if wn.lastPeerConnectionsSent.Add(time.Duration(wn.config.PeerConnectionsUpdateInterval)*time.Second).After(now) || wn.config.PeerConnectionsUpdateInterval <= 0 { + if pcs.lastPeerConnectionsSent.Add(pcs.peerConnectionsUpdateInterval).After(now) || pcs.peerConnectionsUpdateInterval <= 0 { // it's not yet time to send the update. 
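Hanging sendPeerConnectionsTelemetryStatus off the small peerSnapshotter interface, rather than off WebsocketNetwork directly, also makes the telemetry path easy to exercise in isolation. A hypothetical stub that satisfies the interface (not part of this patch):

// stubSnapshotter returns a fixed peer set; the int32 counter it returns is
// discarded by sendPeerConnectionsTelemetryStatus, so zero is fine here.
type stubSnapshotter struct{ peers []*wsPeer }

func (s *stubSnapshotter) peerSnapshot(dest []*wsPeer) ([]*wsPeer, int32) {
	return s.peers, 0
}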
return } - wn.lastPeerConnectionsSent = now + pcs.lastPeerConnectionsSent = now var peers []*wsPeer - peers, _ = wn.peerSnapshot(peers) - connectionDetails := wn.getPeerConnectionTelemetryDetails(now, peers) - wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.PeerConnectionsEvent, connectionDetails) + peers, _ = snapshotter.peerSnapshot(peers) + connectionDetails := getPeerConnectionTelemetryDetails(now, peers) + pcs.log.EventWithDetails(telemetryspec.Network, telemetryspec.PeerConnectionsEvent, connectionDetails) } -func (wn *WebsocketNetwork) getPeerConnectionTelemetryDetails(now time.Time, peers []*wsPeer) telemetryspec.PeersConnectionDetails { +func getPeerConnectionTelemetryDetails(now time.Time, peers []*wsPeer) telemetryspec.PeersConnectionDetails { var connectionDetails telemetryspec.PeersConnectionDetails for _, peer := range peers { connDetail := telemetryspec.PeerConnectionDetails{ @@ -2021,10 +2063,31 @@ func (wn *WebsocketNetwork) numOutgoingPending() int { return len(wn.tryConnectAddrs) } -// GetRoundTripper returns an http.Transport that limits the number of connection -// to comply with connectionsRateLimitingCount. -func (wn *WebsocketNetwork) GetRoundTripper() http.RoundTripper { - return &wn.transport +// GetHTTPClient returns an http.Client with a Transport suitable for the network +// that also limits the number of outgoing connections. +func (wn *WebsocketNetwork) GetHTTPClient(address string) (*http.Client, error) { + return &http.Client{ + Transport: &HTTPPAddressBoundTransport{address, &wn.transport}, + }, nil +} + +// HTTPPAddressBoundTransport is a http.RoundTripper that sets the scheme and host of the request URL to the given address +type HTTPPAddressBoundTransport struct { + Addr string + InnerTransport http.RoundTripper +} + +// RoundTrip implements http.RoundTripper by adding the scheme, host, port, path prefix from the +// parsed address to the request URL and then calling the inner transport. +func (t *HTTPPAddressBoundTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url, err := addr.ParseHostOrURL(t.Addr) + if err != nil { + return nil, err + } + req.URL.Scheme = url.Scheme + req.URL.Host = url.Host + req.URL.Path = path.Join(url.Path, req.URL.Path) + return t.InnerTransport.RoundTrip(req) } // filterASCII filter out the non-ascii printable characters out of the given input string and @@ -2045,9 +2108,9 @@ func filterASCII(unfilteredString string) (filteredString string) { } // tryConnect opens websocket connection and checks initial connection parameters. -// addr should be 'host:port' or a URL, gossipAddr is the websocket endpoint URL -func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { - defer wn.tryConnectReleaseAddr(addr, gossipAddr) +// netAddr should be 'host:port' or a URL, gossipAddr is the websocket endpoint URL +func (wn *WebsocketNetwork) tryConnect(netAddr, gossipAddr string) { + defer wn.tryConnectReleaseAddr(netAddr, gossipAddr) defer func() { if xpanic := recover(); xpanic != nil { wn.log.Errorf("panic in tryConnect: %v", xpanic) @@ -2063,7 +2126,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { var idChallenge identityChallengeValue if wn.identityScheme != nil { - idChallenge = wn.identityScheme.AttachChallenge(requestHeader, addr) + idChallenge = wn.identityScheme.AttachChallenge(requestHeader, netAddr) } // for backward compatibility, include the ProtocolVersion header as well. 
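The address-bound transport above is what lets peers issue requests with relative paths such as "/test" or "/status". The same idea in a standalone, standard-library-only sketch; the names here are illustrative and this is not the go-algorand type:

package addrbound // hypothetical package name

import (
	"net/http"
	"net/url"
	"path"
)

// boundTransport rewrites every outgoing request to target a fixed base URL,
// mirroring the role HTTPPAddressBoundTransport plays in the patch above.
type boundTransport struct {
	base  *url.URL
	inner http.RoundTripper
}

func (t boundTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	r := req.Clone(req.Context()) // leave the caller's request untouched
	r.URL.Scheme = t.base.Scheme
	r.URL.Host = t.base.Host
	r.URL.Path = path.Join(t.base.Path, r.URL.Path)
	return t.inner.RoundTrip(r)
}

// NewBoundClient returns an *http.Client whose requests may use relative paths;
// baseAddr must be an absolute URL such as "http://127.0.0.1:8080".
func NewBoundClient(baseAddr string) (*http.Client, error) {
	u, err := url.Parse(baseAddr)
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: boundTransport{base: u, inner: http.DefaultTransport}}, nil
}

With a client built this way, client.Get("/status") is rewritten to the bound host, which mirrors how the clients returned by GetHTTPClient are handed to makePeerCore elsewhere in this patch.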
@@ -2108,7 +2171,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { // we've got a retry-after header. // convert it to a timestamp so that we could use it. retryAfterTime := time.Now().Add(time.Duration(retryAfter) * time.Second) - wn.phonebook.UpdateRetryAfter(addr, retryAfterTime) + wn.phonebook.UpdateRetryAfter(netAddr, retryAfterTime) } default: wn.log.Warnf("ws connect(%s) fail - bad handshake, Status code = %d, Headers = %#v, Body = %s", gossipAddr, response.StatusCode, response.Header, errString) @@ -2149,7 +2212,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { peerID, idVerificationMessage, err = wn.identityScheme.VerifyResponse(response.Header, idChallenge) if err != nil { networkPeerIdentityError.Inc(nil) - wn.log.With("err", err).With("remote", addr).With("local", localAddr).Warn("peer supplied an invalid identity response, abandoning peering") + wn.log.With("err", err).With("remote", netAddr).With("local", localAddr).Warn("peer supplied an invalid identity response, abandoning peering") closeEarly("Invalid identity response") return } @@ -2162,8 +2225,9 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { wn.throttledOutgoingConnections.Add(int32(1)) } + client, _ := wn.GetHTTPClient(netAddr) peer := &wsPeer{ - wsPeerCore: makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, addr, wn.GetRoundTripper(), "" /* origin */), + wsPeerCore: makePeerCore(wn.ctx, wn, wn.log, wn.handler.readBuffer, netAddr, client, "" /* origin */), conn: wsPeerWebsocketConnImpl{conn}, outgoing: true, incomingMsgFilter: wn.incomingMsgFilter, @@ -2185,7 +2249,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { wn.peersLock.Unlock() if !ok { networkPeerIdentityDisconnect.Inc(nil) - wn.log.With("remote", addr).With("local", localAddr).Warn("peer deduplicated before adding because the identity is already known") + wn.log.With("remote", netAddr).With("local", localAddr).Warn("peer deduplicated before adding because the identity is already known") closeEarly("Duplicate connection") return } @@ -2193,7 +2257,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { peer.init(wn.config, wn.outgoingMessagesBufferSize) wn.addPeer(peer) - wn.log.With("event", "ConnectedOut").With("remote", addr).With("local", localAddr).Infof("Made outgoing connection to peer %v", addr) + wn.log.With("event", "ConnectedOut").With("remote", netAddr).With("local", localAddr).Infof("Made outgoing connection to peer %v", netAddr) wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent, telemetryspec.PeerEventDetails{ Address: justHost(conn.RemoteAddr().String()), @@ -2209,7 +2273,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { if len(idVerificationMessage) > 0 { sent := peer.writeNonBlock(context.Background(), idVerificationMessage, true, crypto.Digest{}, time.Now()) if !sent { - wn.log.With("remote", addr).With("local", localAddr).Warn("could not send identity challenge verification") + wn.log.With("remote", netAddr).With("local", localAddr).Warn("could not send identity challenge verification") } } @@ -2224,7 +2288,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { mbytes := append([]byte(protocol.NetPrioResponseTag), resp...) 
sent := peer.writeNonBlock(context.Background(), mbytes, true, crypto.Digest{}, time.Now()) if !sent { - wn.log.With("remote", addr).With("local", localAddr).Warnf("could not send priority response to %v", addr) + wn.log.With("remote", netAddr).With("local", localAddr).Warnf("could not send priority response to %v", netAddr) } } } @@ -2252,18 +2316,33 @@ func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{} } // NewWebsocketNetwork constructor for websockets based gossip network -func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (wn *WebsocketNetwork, err error) { - phonebook := MakePhonebook(config.ConnectionsRateLimitingCount, +func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo, peerID p2p.PeerID, idSigner identityChallengeSigner) (wn *WebsocketNetwork, err error) { + pb := phonebook.MakePhonebook(config.ConnectionsRateLimitingCount, time.Duration(config.ConnectionsRateLimitingWindowSeconds)*time.Second) - phonebook.AddPersistentPeers(phonebookAddresses, string(networkID), PhoneBookEntryRelayRole) + + addresses := make([]string, 0, len(phonebookAddresses)) + for _, a := range phonebookAddresses { + _, err := addr.ParseHostOrURL(a) + if err == nil { + addresses = append(addresses, a) + } + } + pb.AddPersistentPeers(addresses, string(networkID), phonebook.PhoneBookEntryRelayRole) wn = &WebsocketNetwork{ log: log, config: config, - phonebook: phonebook, + phonebook: pb, GenesisID: genesisID, NetworkID: networkID, nodeInfo: nodeInfo, + peerID: peerID, + peerIDSigner: idSigner, resolveSRVRecords: tools_network.ReadFromSRV, + peerStater: peerConnectionStater{ + log: log, + peerConnectionsUpdateInterval: time.Duration(config.PeerConnectionsUpdateInterval) * time.Second, + lastPeerConnectionsSent: time.Now(), + }, } wn.setup() @@ -2272,7 +2351,7 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddre // NewWebsocketGossipNode constructs a websocket network node and returns it as a GossipNode interface implementation func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) { - return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID, nil) + return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID, nil, "", nil) } // SetPrioScheme specifies the network priority scheme for a network node @@ -2289,11 +2368,11 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) { // first logging, then take the lock and do the actual accounting. // definitely don't change this to do the logging while holding the lock. 
localAddr, _ := wn.Address() - logEntry := wn.log.With("event", "Disconnected").With("remote", peer.rootURL).With("local", localAddr) + logEntry := wn.log.With("event", "Disconnected").With("remote", peer.GetAddress()).With("local", localAddr) if peer.outgoing && peer.peerMessageDelay > 0 { logEntry = logEntry.With("messageDelay", peer.peerMessageDelay) } - logEntry.Infof("Peer %s disconnected: %s", peer.rootURL, reason) + logEntry.Infof("Peer %s disconnected: %s", peer.GetAddress(), reason) peerAddr := peer.OriginAddress() // we might be able to get addr out of conn, or it might be closed if peerAddr == "" && peer.conn != nil { @@ -2304,12 +2383,12 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) { } if peerAddr == "" { // didn't get addr from peer, try from url - url, err := url.Parse(peer.rootURL) + url, err := url.Parse(peer.GetAddress()) if err == nil { peerAddr = justHost(url.Host) } else { // use whatever it is - peerAddr = justHost(peer.rootURL) + peerAddr = justHost(peer.GetAddress()) } } eventDetails := telemetryspec.PeerEventDetails{ @@ -2493,7 +2572,5 @@ func (wn *WebsocketNetwork) postMessagesOfInterestThread() { } } -// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. -func (wn *WebsocketNetwork) SubstituteGenesisID(rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", wn.GenesisID, -1) -} +// GetGenesisID returns the network-specific genesisID. +func (wn *WebsocketNetwork) GetGenesisID() string { return wn.GenesisID } diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 8daf4d196f..038a9d6e2d 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -40,6 +40,7 @@ import ( "time" "github.com/algorand/go-algorand/internal/rapidgen" + "github.com/algorand/go-algorand/network/phonebook" "pgregory.net/rapid" "github.com/stretchr/testify/assert" @@ -127,11 +128,12 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local, opts ...te log := logging.TestingLog(t) log.SetLevel(logging.Warn) wn := &WebsocketNetwork{ - log: log, - config: conf, - phonebook: MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: conf, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, } // apply options to newly-created WebsocketNetwork, if provided for _, opt := range opts { @@ -322,7 +324,7 @@ func setupWebsocketNetworkABwithLogger(t *testing.T, countTarget int, log loggin addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer func() { if !success { @@ -458,7 +460,7 @@ func TestWebsocketProposalPayloadCompression(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") messages := [][]byte{ @@ -637,7 +639,7 @@ func TestWebsocketNetworkNoAddress(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", 
PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -702,7 +704,7 @@ func lineNetwork(t *testing.T, numNodes int) (nodes []*WebsocketNetwork, counter if i > 0 { addrPrev, postListen := nodes[i-1].Address() require.True(t, postListen) - nodes[i].phonebook.ReplacePeerList([]string{addrPrev}, "default", PhoneBookEntryRelayRole) + nodes[i].phonebook.ReplacePeerList([]string{addrPrev}, "default", phonebook.PhoneBookEntryRelayRole) nodes[i].RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: &counters[i]}}) } nodes[i].Start() @@ -1053,11 +1055,12 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor dc.OutgoingMessageFilterBucketCount = 3 dc.OutgoingMessageFilterBucketSize = 128 wn := &WebsocketNetwork{ - log: logging.TestingLog(t).With("node", nodename), - config: dc, - phonebook: MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, + log: logging.TestingLog(t).With("node", nodename), + config: dc, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: logging.TestingLog(t).With("node", nodename)}, } require.True(t, wn.config.EnableIncomingMessageFilter) wn.setup() @@ -1078,7 +1081,7 @@ func TestDupFilter(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") counter := &messageCounterHandler{t: t, limit: 1, done: make(chan struct{})} @@ -1091,7 +1094,7 @@ func TestDupFilter(t *testing.T) { require.True(t, postListen) netC := makeTestFilterWebsocketNode(t, "c") netC.config.GossipFanout = 1 - netC.phonebook.ReplacePeerList([]string{addrB}, "default", PhoneBookEntryRelayRole) + netC.phonebook.ReplacePeerList([]string{addrB}, "default", phonebook.PhoneBookEntryRelayRole) netC.Start() defer netC.Stop() @@ -1169,8 +1172,8 @@ func TestGetPeers(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - phbMulti := MakePhonebook(1, 1*time.Millisecond) - phbMulti.ReplacePeerList([]string{addrA}, "phba", PhoneBookEntryRelayRole) + phbMulti := phonebook.MakePhonebook(1, 1*time.Millisecond) + phbMulti.ReplacePeerList([]string{addrA}, "phba", phonebook.PhoneBookEntryRelayRole) netB.phonebook = phbMulti netB.Start() defer netB.Stop() @@ -1181,10 +1184,10 @@ func TestGetPeers(t *testing.T) { waitReady(t, netB, readyTimeout.C) t.Log("b ready") - phbMulti.ReplacePeerList([]string{"a", "b", "c"}, "ph", PhoneBookEntryRelayRole) + phbMulti.ReplacePeerList([]string{"a", "b", "c"}, "ph", phonebook.PhoneBookEntryRelayRole) // A few for archival node roles - phbMulti.ReplacePeerList([]string{"d", "e", "f"}, "ph", PhoneBookEntryArchivalRole) + phbMulti.ReplacePeerList([]string{"d", "e", "f"}, "ph", phonebook.PhoneBookEntryArchivalRole) //addrB, _ := netB.Address() @@ -1767,7 +1770,7 @@ func TestPeeringWithBadIdentityChallenge(t *testing.T) { attachChallenge: func(attach http.Header, addr string) identityChallengeValue { s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys c := identityChallenge{ - Key: s.identityKeys.SignatureVerifier, + Key: s.identityKeys.PublicKey(), Challenge: 
newIdentityChallengeValue(), PublicAddress: []byte("incorrect address!"), } @@ -1785,7 +1788,7 @@ func TestPeeringWithBadIdentityChallenge(t *testing.T) { attachChallenge: func(attach http.Header, addr string) identityChallengeValue { s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys c := identityChallenge{ - Key: s.identityKeys.SignatureVerifier, + Key: s.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), PublicAddress: []byte("incorrect address!"), }.Sign(s.identityKeys) @@ -1905,7 +1908,7 @@ func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) { protocol.Decode(msg, &idChal) // make the response object, with an incorrect challenge encode it and attach it to the header r := identityChallengeResponse{ - Key: s.identityKeys.SignatureVerifier, + Key: s.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), ResponseChallenge: newIdentityChallengeValue(), } @@ -1928,7 +1931,7 @@ func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) { protocol.Decode(msg, &idChal) // make the response object, then change the signature and encode and attach r := identityChallengeResponse{ - Key: s.identityKeys.SignatureVerifier, + Key: s.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), ResponseChallenge: newIdentityChallengeValue(), }.Sign(s.identityKeys) @@ -2180,7 +2183,7 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") returns := make(chan uint64, 100) @@ -2262,7 +2265,7 @@ func TestWebsocketNetworkPrio(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -2309,7 +2312,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) { netB.SetPrioScheme(&prioB) netB.config.GossipFanout = 1 netB.config.NetAddress = "" - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counterB}}) netB.Start() defer netStop(t, netB, "B") @@ -2323,7 +2326,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) { netC.SetPrioScheme(&prioC) netC.config.GossipFanout = 1 netC.config.NetAddress = "" - netC.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netC.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netC.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counterC}}) netC.Start() defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }() @@ -2366,8 +2369,8 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) { } if failed { - t.Errorf("NetA had the following two peers priorities : [0]:%s=%d [1]:%s=%d", netA.peers[0].rootURL, netA.peers[0].prioWeight, netA.peers[1].rootURL, netA.peers[1].prioWeight) - t.Errorf("first peer before broadcasting was %s", firstPeer.rootURL) + t.Errorf("NetA had the following two peers priorities : [0]:%s=%d [1]:%s=%d", 
netA.peers[0].GetAddress(), netA.peers[0].prioWeight, netA.peers[1].GetAddress(), netA.peers[1].prioWeight) + t.Errorf("first peer before broadcasting was %s", firstPeer.GetAddress()) } } @@ -2408,7 +2411,7 @@ func TestWebsocketNetworkManyIdle(t *testing.T) { for i := 0; i < numClients; i++ { client := makeTestWebsocketNodeWithConfig(t, clientConf) client.config.GossipFanout = 1 - client.phonebook.ReplacePeerList([]string{relayAddr}, "default", PhoneBookEntryRelayRole) + client.phonebook.ReplacePeerList([]string{relayAddr}, "default", phonebook.PhoneBookEntryRelayRole) client.Start() defer client.Stop() @@ -2474,7 +2477,7 @@ func TestWebsocketNetwork_checkServerResponseVariables(t *testing.T) { noVersionHeader := http.Header{} noVersionHeader.Set(NodeRandomHeader, wn.RandomID+"tag") noVersionHeader.Set(GenesisHeader, wn.GenesisID) - responseVariableOk, matchingVersion = wn.checkServerResponseVariables(noVersionHeader, "addressX") + responseVariableOk, _ = wn.checkServerResponseVariables(noVersionHeader, "addressX") require.Equal(t, false, responseVariableOk) noRandomHeader := http.Header{} @@ -2533,7 +2536,7 @@ func TestDelayedMessageDrop(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") counter := newMessageCounter(t, 5) @@ -2563,11 +2566,12 @@ func TestSlowPeerDisconnection(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Info) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, } wn.setup() wn.broadcaster.slowWritingPeerMonitorInterval = time.Millisecond * 50 @@ -2586,7 +2590,7 @@ func TestSlowPeerDisconnection(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -2638,11 +2642,12 @@ func TestForceMessageRelaying(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, } wn.setup() wn.eventualReadyDelay = time.Second @@ -2663,14 +2668,14 @@ func TestForceMessageRelaying(t *testing.T) { noAddressConfig.NetAddress = "" netB := makeTestWebsocketNodeWithConfig(t, noAddressConfig) netB.config.GossipFanout = 1 - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") noAddressConfig.ForceRelayMessages = true netC := makeTestWebsocketNodeWithConfig(t, noAddressConfig) 
netC.config.GossipFanout = 1 - netC.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netC.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netC.Start() defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }() @@ -2732,11 +2737,12 @@ func TestCheckProtocolVersionMatch(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, } wn.setup() wn.supportedProtocolVersions = []string{"2", "1"} @@ -2814,7 +2820,7 @@ func TestWebsocketNetworkTopicRoundtrip(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -2914,7 +2920,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Logf("netA %s", addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) // have netB asking netA to send it ft2, deregister ping handler to make sure that we aren't exceeding the maximum MOI messagesize // Max MOI size is calculated by encoding all of the valid tags, since we are using a custom tag here we must deregister one in the default set. 
@@ -3040,7 +3046,7 @@ func TestWebsocketNetworkTXMessageOfInterestRelay(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -3124,7 +3130,7 @@ func TestWebsocketNetworkTXMessageOfInterestForceTx(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -3206,7 +3212,7 @@ func TestWebsocketNetworkTXMessageOfInterestNPN(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") require.False(t, netB.relayMessages) @@ -3286,6 +3292,7 @@ func TestWebsocketNetworkTXMessageOfInterestNPN(t *testing.T) { } type participatingNodeInfo struct { + nopeNodeInfo } func (nnni *participatingNodeInfo) IsParticipating() bool { @@ -3311,7 +3318,7 @@ func TestWebsocketNetworkTXMessageOfInterestPN(t *testing.T) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") require.False(t, netB.relayMessages) @@ -3433,7 +3440,7 @@ func testWebsocketDisconnection(t *testing.T, disconnectFunc func(wn *WebsocketN addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer netStop(t, netB, "B") @@ -3628,7 +3635,7 @@ func BenchmarkVariableTransactionMessageBlockSizes(t *testing.B) { addrA, postListen := netA.Address() require.True(t, postListen) t.Log(addrA) - netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", phonebook.PhoneBookEntryRelayRole) netB.Start() defer func() { netB.Stop() }() @@ -3773,9 +3780,9 @@ func TestWebsocketNetworkTelemetryTCP(t *testing.T) { // get RTT from both ends and assert nonzero var peersA, peersB []*wsPeer peersA, _ = netA.peerSnapshot(peersA) - detailsA := netA.getPeerConnectionTelemetryDetails(time.Now(), peersA) + detailsA := getPeerConnectionTelemetryDetails(time.Now(), peersA) peersB, _ = netB.peerSnapshot(peersB) - detailsB := netB.getPeerConnectionTelemetryDetails(time.Now(), peersB) + detailsB := getPeerConnectionTelemetryDetails(time.Now(), peersB) require.Len(t, detailsA.IncomingPeers, 1) assert.NotZero(t, detailsA.IncomingPeers[0].TCP.RTT) require.Len(t, detailsB.OutgoingPeers, 1) @@ -3796,8 +3803,8 @@ func TestWebsocketNetworkTelemetryTCP(t *testing.T) { defer closeFunc2() // use stale peers snapshot from closed networks to get telemetry // *net.OpError "use of closed network connection" err results in 0 rtt values - detailsA = 
netA.getPeerConnectionTelemetryDetails(time.Now(), peersA) - detailsB = netB.getPeerConnectionTelemetryDetails(time.Now(), peersB) + detailsA = getPeerConnectionTelemetryDetails(time.Now(), peersA) + detailsB = getPeerConnectionTelemetryDetails(time.Now(), peersB) require.Len(t, detailsA.IncomingPeers, 1) assert.Zero(t, detailsA.IncomingPeers[0].TCP.RTT) require.Len(t, detailsB.OutgoingPeers, 1) @@ -4540,3 +4547,75 @@ func TestSendMessageCallbackDrain(t *testing.T) { 50*time.Millisecond, ) } + +// TestWsNetworkPhonebookMix ensures p2p addresses are not added into wsNetwork via phonebook +func TestWsNetworkPhonebookMix(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + net, err := NewWebsocketNetwork( + logging.TestingLog(t), + config.GetDefaultLocal(), + []string{"127.0.0.1:1234", "/ip4/127.0.0.1/tcp/1234", "/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"}, + "test", + "net", + nil, + "", + nil, + ) + require.NoError(t, err) + addrs := net.phonebook.GetAddresses(10, phonebook.PhoneBookEntryRelayRole) + require.Len(t, addrs, 1) +} + +type testRecordingTransport struct { + resultURL string +} + +func (rt *testRecordingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt.resultURL = req.URL.String() + return &http.Response{StatusCode: 200}, nil +} + +func TestHTTPPAddressBoundTransport(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + // first ensure url.URL.String() on path-only URLs works as expected + var url = &url.URL{} + url.Path = "/test" + require.Equal(t, "/test", url.String()) + + // now test some combinations of address and path + const path = "/test/path" + const expErr = "ERR" + tests := []struct { + addr string + expected string + }{ + {"", expErr}, + {":", expErr}, + {"host:1234/lbr", expErr}, + {"host:1234", "http://host:1234" + path}, + {"http://host:1234", "http://host:1234" + path}, + {"http://host:1234/lbr", "http://host:1234/lbr" + path}, + } + + for _, test := range tests { + recorder := testRecordingTransport{} + tr := HTTPPAddressBoundTransport{ + Addr: test.addr, + InnerTransport: &recorder, + } + req, err := http.NewRequest("GET", path, nil) + require.NoError(t, err) + resp, err := tr.RoundTrip(req) + if test.expected == expErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + require.Equal(t, test.expected, recorder.resultURL) + } + } +} diff --git a/network/wsPeer.go b/network/wsPeer.go index c4b64bdec4..2b302f071f 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -172,7 +172,7 @@ type wsPeerCore struct { readBuffer chan<- IncomingMessage rootURL string originAddress string // incoming connection remote host - client http.Client + client *http.Client } type disconnectReason string @@ -329,7 +329,6 @@ type HTTPPeer interface { // IPAddressable is addressable with either IPv4 or IPv6 address type IPAddressable interface { - IPAddr() []byte RoutingAddr() []byte } @@ -352,21 +351,20 @@ type TCPInfoUnicastPeer interface { } // Create a wsPeerCore object -func makePeerCore(ctx context.Context, net GossipNode, log logging.Logger, readBuffer chan<- IncomingMessage, rootURL string, roundTripper http.RoundTripper, originAddress string) wsPeerCore { +func makePeerCore(ctx context.Context, net GossipNode, log logging.Logger, readBuffer chan<- IncomingMessage, addr string, client *http.Client, originAddress string) wsPeerCore { return wsPeerCore{ net: net, netCtx: ctx, log: log, readBuffer: readBuffer, - rootURL: rootURL, + rootURL: addr, 
originAddress: originAddress, - client: http.Client{Transport: roundTripper}, + client: client, } } -// GetAddress returns the root url to use to connect to this peer. -// This implements HTTPPeer interface and used by external services to determine where to connect to. -// TODO: should GetAddress be added to Peer interface? +// GetAddress returns the root url to use to identify or connect to this peer. +// This implements HTTPPeer interface and used to distinguish between peers. func (wp *wsPeerCore) GetAddress() string { return wp.rootURL } @@ -374,7 +372,11 @@ func (wp *wsPeerCore) GetAddress() string { // GetHTTPClient returns a client for this peer. // http.Client will maintain a cache of connections with some keepalive. func (wp *wsPeerCore) GetHTTPClient() *http.Client { - return &wp.client + return wp.client +} + +func (wp *wsPeerCore) GetNetwork() GossipNode { + return wp.net } // Version returns the matching version from network.SupportedProtocolVersions @@ -382,7 +384,7 @@ func (wp *wsPeer) Version() string { return wp.version } -func (wp *wsPeer) IPAddr() []byte { +func (wp *wsPeer) ipAddr() []byte { remote := wp.conn.RemoteAddr() if remote == nil { return nil @@ -417,7 +419,7 @@ func (wp *wsPeer) RoutingAddr() []byte { if wp.wsPeerCore.originAddress != "" { ip = net.ParseIP(wp.wsPeerCore.originAddress) } else { - ip = wp.IPAddr() + ip = wp.ipAddr() } if len(ip) != net.IPv6len { @@ -511,7 +513,7 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, outMsg Ou // setup values not trivially assigned func (wp *wsPeer) init(config config.Local, sendBufferLength int) { - wp.log.Debugf("wsPeer init outgoing=%v %#v", wp.outgoing, wp.rootURL) + wp.log.Debugf("wsPeer init outgoing=%v %#v", wp.outgoing, wp.GetAddress()) wp.closing = make(chan struct{}) wp.sendBufferHighPrio = make(chan sendMessages, sendBufferLength) wp.sendBufferBulk = make(chan sendMessages, sendBufferLength) diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go index d1f32302a0..973c027b16 100644 --- a/network/wsPeer_test.go +++ b/network/wsPeer_test.go @@ -288,32 +288,32 @@ func TestWsPeerIPAddr(t *testing.T) { } // some raw IPv4 address conn.addr.IP = []byte{127, 0, 0, 1} - require.Equal(t, []byte{127, 0, 0, 1}, peer.IPAddr()) + require.Equal(t, []byte{127, 0, 0, 1}, peer.ipAddr()) require.Equal(t, []byte{127, 0, 0, 1}, peer.RoutingAddr()) // IPv4 constructed from net.IPv4 conn.addr.IP = net.IPv4(127, 0, 0, 2) - require.Equal(t, []byte{127, 0, 0, 2}, peer.IPAddr()) + require.Equal(t, []byte{127, 0, 0, 2}, peer.ipAddr()) require.Equal(t, []byte{127, 0, 0, 2}, peer.RoutingAddr()) // some IPv6 address conn.addr.IP = net.IPv6linklocalallrouters - require.Equal(t, []byte(net.IPv6linklocalallrouters), peer.IPAddr()) + require.Equal(t, []byte(net.IPv6linklocalallrouters), peer.ipAddr()) require.Equal(t, []byte(net.IPv6linklocalallrouters[0:8]), peer.RoutingAddr()) // embedded IPv4 into IPv6 conn.addr.IP = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 3} require.Equal(t, 16, len(conn.addr.IP)) - require.Equal(t, []byte{127, 0, 0, 3}, peer.IPAddr()) + require.Equal(t, []byte{127, 0, 0, 3}, peer.ipAddr()) require.Equal(t, []byte{127, 0, 0, 3}, peer.RoutingAddr()) conn.addr.IP = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 4} require.Equal(t, 16, len(conn.addr.IP)) - require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 4}, peer.IPAddr()) + require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 4}, peer.ipAddr()) require.Equal(t, []byte{127, 0, 0, 
4}, peer.RoutingAddr()) // check incoming peer with originAddress set conn.addr.IP = []byte{127, 0, 0, 1} peer.wsPeerCore.originAddress = "127.0.0.2" - require.Equal(t, []byte{127, 0, 0, 1}, peer.IPAddr()) + require.Equal(t, []byte{127, 0, 0, 1}, peer.ipAddr()) require.Equal(t, []byte{127, 0, 0, 2}, peer.RoutingAddr()) } diff --git a/node/follower_node.go b/node/follower_node.go index e475b25481..117cc56e86 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -94,7 +94,7 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo node.config = cfg // tie network, block fetcher, and agreement services together - p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, nil) + p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, nil, "", nil) if err != nil { log.Errorf("could not create websocket node: %v", err) return nil, err @@ -163,7 +163,7 @@ func (node *AlgorandFollowerNode) Config() config.Local { } // Start the node: connect to peers while obtaining a lock. Doesn't wait for initial sync. -func (node *AlgorandFollowerNode) Start() { +func (node *AlgorandFollowerNode) Start() error { node.mu.Lock() defer node.mu.Unlock() @@ -173,22 +173,30 @@ func (node *AlgorandFollowerNode) Start() { // The start network is being called only after the various services start up. // We want to do so in order to let the services register their callbacks with the // network package before any connections are being made. - startNetwork := func() { + startNetwork := func() error { if !node.config.DisableNetworking { // start accepting connections - node.net.Start() + err := node.net.Start() + if err != nil { + return err + } node.config.NetAddress, _ = node.net.Address() } + return nil } + var err error if node.catchpointCatchupService != nil { - startNetwork() - _ = node.catchpointCatchupService.Start(node.ctx) + err = startNetwork() + if err == nil { + err = node.catchpointCatchupService.Start(node.ctx) + } } else { node.catchupService.Start() node.blockService.Start() - startNetwork() + err = startNetwork() } + return err } // ListeningAddress retrieves the node's current listening address, if any. diff --git a/node/node.go b/node/node.go index 384bd258f6..5f1baa56be 100644 --- a/node/node.go +++ b/node/node.go @@ -28,6 +28,8 @@ import ( "sync" "time" + "github.com/algorand/go-deadlock" + "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/agreement/gossip" "github.com/algorand/go-algorand/catchup" @@ -47,6 +49,7 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/network/messagetracer" + "github.com/algorand/go-algorand/network/p2p" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/rpcs" "github.com/algorand/go-algorand/stateproof" @@ -54,7 +57,6 @@ import ( "github.com/algorand/go-algorand/util/execpool" "github.com/algorand/go-algorand/util/metrics" "github.com/algorand/go-algorand/util/timers" - "github.com/algorand/go-deadlock" ) const ( @@ -197,16 +199,21 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd // tie network, block fetcher, and agreement services together var p2pNode network.GossipNode - if cfg.EnableP2P { - // TODO: pass more appropriate genesisDir (hot/cold). 
Presently this is just used to store a peerID key. - p2pNode, err = network.NewP2PNetwork(node.log, node.config, node.genesisDirs.RootGenesisDir, phonebookAddresses, genesis.ID(), genesis.Network) + if cfg.EnableP2PHybridMode { + p2pNode, err = network.NewHybridP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesis.ID(), genesis.Network, node) + if err != nil { + log.Errorf("could not create hybrid p2p node: %v", err) + return nil, err + } + } else if cfg.EnableP2P { + p2pNode, err = network.NewP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesis.ID(), genesis.Network, node) if err != nil { log.Errorf("could not create p2p node: %v", err) return nil, err } } else { var wsNode *network.WebsocketNetwork - wsNode, err = network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, node) + wsNode, err = network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, node, "", nil) if err != nil { log.Errorf("could not create websocket node: %v", err) return nil, err @@ -340,7 +347,7 @@ func (node *AlgorandFullNode) Config() config.Local { } // Start the node: connect to peers and run the agreement service while obtaining a lock. Doesn't wait for initial sync. -func (node *AlgorandFullNode) Start() { +func (node *AlgorandFullNode) Start() error { node.mu.Lock() defer node.mu.Unlock() @@ -350,12 +357,16 @@ func (node *AlgorandFullNode) Start() { // The start network is being called only after the various services start up. // We want to do so in order to let the services register their callbacks with the // network package before any connections are being made. - startNetwork := func() { + startNetwork := func() error { if !node.config.DisableNetworking { // start accepting connections - node.net.Start() + err := node.net.Start() + if err != nil { + return err + } node.config.NetAddress, _ = node.net.Address() } + return nil } if node.catchpointCatchupService != nil { @@ -369,11 +380,29 @@ func (node *AlgorandFullNode) Start() { node.ledgerService.Start() node.txHandler.Start() node.stateProofWorker.Start() - startNetwork() + err := startNetwork() + if err != nil { + return err + } node.startMonitoringRoutines() } + return nil +} +// Capabilities returns the node's capabilities for advertising to other nodes. +func (node *AlgorandFullNode) Capabilities() []p2p.Capability { + var caps []p2p.Capability + if node.config.Archival { + caps = append(caps, p2p.Archival) + } + if node.config.StoresCatchpoints() { + caps = append(caps, p2p.Catchpoints) + } + if node.config.EnableGossipService && node.config.IsGossipServer() { + caps = append(caps, p2p.Gossip) + } + return caps } // startMonitoringRoutines starts the internal monitoring routines used by the node. 
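For readers following the node changes above: Start() on both AlgorandFullNode and AlgorandFollowerNode now returns an error instead of silently swallowing network startup failures. A minimal, illustrative sketch of how a caller might propagate it, assuming the MakeFull constructor as used elsewhere in this patch (the helper name and argument values are placeholders, not part of the change):

// startFullNode is a hypothetical helper showing the new error handling;
// it creates a full node and propagates any construction or startup failure.
func startFullNode(log logging.Logger, rootDir string, cfg config.Local,
	phonebook []string, genesis bookkeeping.Genesis) (*node.AlgorandFullNode, error) {
	n, err := node.MakeFull(log, rootDir, cfg, phonebook, genesis)
	if err != nil {
		return nil, fmt.Errorf("could not create node: %w", err)
	}
	// Start() now reports failures (for example, the network listener not coming up).
	if err := n.Start(); err != nil {
		return nil, fmt.Errorf("could not start node: %w", err)
	}
	return n, nil
}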
diff --git a/node/node_test.go b/node/node_test.go index 54f2e1e6cc..3ea6d4a33d 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -33,19 +33,18 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" - "github.com/algorand/go-algorand/data" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/network/p2p" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/stateproof" "github.com/algorand/go-algorand/test/partitiontest" "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/db" - "github.com/algorand/go-algorand/util/execpool" ) var expectedAgreementTime = 2*config.Protocol.BigLambda + config.Protocol.SmallLambda + config.Consensus[protocol.ConsensusCurrentVersion].AgreementFilterTimeout + 2*time.Second @@ -61,31 +60,78 @@ var defaultConfig = config.Local{ IncomingConnectionsLimit: -1, } -func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationPool execpool.BacklogPool, customConsensus config.ConsensusProtocols) ([]*AlgorandFullNode, []string) { +type nodeInfo struct { + idx int + host string + wsPort int + p2pPort int + p2pID p2p.PeerID + rootDir string + genesis bookkeeping.Genesis +} + +func (ni nodeInfo) wsNetAddr() string { + return fmt.Sprintf("%s:%d", ni.host, ni.wsPort) +} + +func (ni nodeInfo) p2pNetAddr() string { + return fmt.Sprintf("%s:%d", ni.host, ni.p2pPort) +} + +func (ni nodeInfo) p2pMultiAddr() string { + return fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", ni.host, ni.p2pPort, ni.p2pID.String()) +} + +type configHook func(ni nodeInfo, cfg config.Local) (nodeInfo, config.Local) +type phonebookHook func([]nodeInfo, int) []string + +func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, customConsensus config.ConsensusProtocols) ([]*AlgorandFullNode, []string) { + minMoneyAtStart := 10000 + maxMoneyAtStart := 100000 + gen := rand.New(rand.NewSource(2)) + + const numAccounts = 10 + acctStake := make([]basics.MicroAlgos, numAccounts) + for i := range acctStake { + acctStake[i] = basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))} + } + + configHook := func(ni nodeInfo, cfg config.Local) (nodeInfo, config.Local) { + cfg.NetAddress = ni.wsNetAddr() + return ni, cfg + } + + phonebookHook := func(nodes []nodeInfo, nodeIdx int) []string { + phonebook := make([]string, 0, len(nodes)-1) + for i := range nodes { + if i != nodeIdx { + phonebook = append(phonebook, nodes[i].wsNetAddr()) + } + } + return phonebook + } + nodes, wallets := setupFullNodesEx(t, proto, customConsensus, acctStake, configHook, phonebookHook) + require.Len(t, nodes, numAccounts) + require.Len(t, wallets, numAccounts) + return nodes, wallets +} + +func setupFullNodesEx( + t *testing.T, proto protocol.ConsensusVersion, customConsensus config.ConsensusProtocols, + acctStake []basics.MicroAlgos, configHook configHook, phonebookHook phonebookHook, +) ([]*AlgorandFullNode, []string) { + util.SetFdSoftLimit(1000) + f, _ := os.Create(t.Name() + ".log") logging.Base().SetJSONFormatter() logging.Base().SetOutput(f) logging.Base().SetLevel(logging.Debug) - - numAccounts := 10 - 
minMoneyAtStart := 10000 - maxMoneyAtStart := 100000 + t.Logf("Logging to %s\n", t.Name()+".log") firstRound := basics.Round(0) lastRound := basics.Round(200) - genesis := make(map[basics.Address]basics.AccountData) - gen := rand.New(rand.NewSource(2)) - neighbors := make([]string, numAccounts) - for i := range neighbors { - neighbors[i] = "127.0.0.1:" + strconv.Itoa(10000+i) - } - - wallets := make([]string, numAccounts) - nodes := make([]*AlgorandFullNode, numAccounts) - rootDirs := make([]string, 0) - // The genesis configuration is missing allocations, but that's OK // because we explicitly generated the sqlite database above (in // installFullNode). @@ -97,16 +143,27 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP RewardsPool: poolAddr.String(), } + genesis := make(map[basics.Address]basics.AccountData) + numAccounts := len(acctStake) + wallets := make([]string, numAccounts) + nodeInfos := make([]nodeInfo, numAccounts) + for i := range wallets { rootDirectory := t.TempDir() - rootDirs = append(rootDirs, rootDirectory) + nodeInfos[i] = nodeInfo{ + idx: i, + host: "127.0.0.1", + wsPort: 10000 + 100*i, + p2pPort: 10000 + 100*i + 1, + rootDir: rootDirectory, + genesis: g, + } - defaultConfig.NetAddress = neighbors[i] - defaultConfig.SaveToDisk(rootDirectory) + ni, cfg := configHook(nodeInfos[i], defaultConfig) + nodeInfos[i] = ni + cfg.SaveToDisk(rootDirectory) - // Save empty phonebook - we'll add peers after they've been assigned listening ports - err := config.SavePhonebookToDisk(make([]string, 0), rootDirectory) - require.NoError(t, err) + t.Logf("Root directory of node %d (%s): %s\n", i, ni.wsNetAddr(), rootDirectory) genesisDir := filepath.Join(rootDirectory, g.ID()) os.Mkdir(genesisDir, 0700) @@ -140,7 +197,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP data := basics.AccountData{ Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))}, + MicroAlgos: acctStake[i], SelectionID: part.VRFSecrets().PK, VoteID: part.VotingSecrets().OneTimeSignatureVerifier, } @@ -152,34 +209,37 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP MicroAlgos: basics.MicroAlgos{Raw: uint64(100000)}, } - bootstrap := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr) + for addr, data := range genesis { + g.Allocation = append(g.Allocation, bookkeeping.GenesisAllocation{ + Address: addr.String(), + State: bookkeeping.GenesisAccountData{ + Status: data.Status, + MicroAlgos: data.MicroAlgos, + VoteID: data.VoteID, + StateProofID: data.StateProofID, + SelectionID: data.SelectionID, + VoteFirstValid: data.VoteFirstValid, + VoteLastValid: data.VoteLastValid, + VoteKeyDilution: data.VoteKeyDilution, + }, + }) + } - for i, rootDirectory := range rootDirs { + nodes := make([]*AlgorandFullNode, numAccounts) + for i := range nodes { + rootDirectory := nodeInfos[i].rootDir genesisDir := filepath.Join(rootDirectory, g.ID()) - ledgerFilenamePrefix := filepath.Join(genesisDir, config.LedgerFilenamePrefix) if customConsensus != nil { - err := config.SaveConfigurableConsensus(genesisDir, customConsensus) - require.Nil(t, err) + err0 := config.SaveConfigurableConsensus(genesisDir, customConsensus) + require.Nil(t, err0) + err0 = config.LoadConfigurableConsensusProtocols(genesisDir) + require.Nil(t, err0) } - err1 := config.LoadConfigurableConsensusProtocols(genesisDir) - require.Nil(t, err1) - nodeID := fmt.Sprintf("Node%d", i) - 
const inMem = false - cfg, err := config.LoadConfigFromDisk(rootDirectory) - require.NoError(t, err) - cfg.Archival = true - _, err = data.LoadLedger(logging.Base().With("name", nodeID), ledgerFilenamePrefix, inMem, g.Proto, bootstrap, g.ID(), g.Hash(), cfg) - require.NoError(t, err) - } - for i := range nodes { - var nodeNeighbors []string - nodeNeighbors = append(nodeNeighbors, neighbors[:i]...) - nodeNeighbors = append(nodeNeighbors, neighbors[i+1:]...) - rootDirectory := rootDirs[i] cfg, err := config.LoadConfigFromDisk(rootDirectory) + phonebook := phonebookHook(nodeInfos, i) require.NoError(t, err) - node, err := MakeFull(logging.Base().With("source", t.Name()+strconv.Itoa(i)), rootDirectory, cfg, nodeNeighbors, g) + node, err := MakeFull(logging.Base(), rootDirectory, cfg, phonebook, g) nodes[i] = node require.NoError(t, err) } @@ -190,12 +250,16 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP func TestSyncingFullNode(t *testing.T) { partitiontest.PartitionTest(t) - t.Skip("Flaky in nightly test environment") + if testing.Short() { + t.Skip("Test takes ~50 seconds.") + } - backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil) - defer backlogPool.Shutdown() + if (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" && runtime.GOOS != "darwin") && + strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" { + t.Skip("Test is too heavy for amd64 builder running in parallel with other packages") + } - nodes, wallets := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil) + nodes, wallets := setupFullNodes(t, protocol.ConsensusCurrentVersion, nil) for i := 0; i < len(nodes); i++ { defer os.Remove(wallets[i]) defer nodes[i].Stop() @@ -203,7 +267,7 @@ func TestSyncingFullNode(t *testing.T) { initialRound := nodes[0].ledger.NextRound() - startAndConnectNodes(nodes, true) + startAndConnectNodes(nodes, defaultFirstNodeStartDelay) counter := 0 for tests := uint64(0); tests < 16; tests++ { @@ -252,22 +316,19 @@ func TestInitialSync(t *testing.T) { t.Skip("Test takes ~25 seconds.") } - if (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") && + if (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" && runtime.GOOS != "darwin") && strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" { t.Skip("Test is too heavy for amd64 builder running in parallel with other packages") } - backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil) - defer backlogPool.Shutdown() - - nodes, wallets := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil) + nodes, wallets := setupFullNodes(t, protocol.ConsensusCurrentVersion, nil) for i := 0; i < len(nodes); i++ { defer os.Remove(wallets[i]) defer nodes[i].Stop() } initialRound := nodes[0].ledger.NextRound() - startAndConnectNodes(nodes, true) + startAndConnectNodes(nodes, defaultFirstNodeStartDelay) select { case <-nodes[0].ledger.Wait(initialRound): @@ -289,10 +350,14 @@ func TestInitialSync(t *testing.T) { func TestSimpleUpgrade(t *testing.T) { partitiontest.PartitionTest(t) - t.Skip("Flaky in nightly test environment.") + if testing.Short() { + t.Skip("Test takes ~50 seconds.") + } - backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil) - defer backlogPool.Shutdown() + if (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" && runtime.GOOS != "darwin") && + strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" { + t.Skip("Test is too heavy for amd64 builder running in parallel with other packages") + } // ConsensusTest0 is a version of ConsensusV0 used for 
testing // (it has different approved upgrade paths). @@ -330,7 +395,7 @@ func TestSimpleUpgrade(t *testing.T) { testParams1.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} configurableConsensus[consensusTest1] = testParams1 - nodes, wallets := setupFullNodes(t, consensusTest0, backlogPool, configurableConsensus) + nodes, wallets := setupFullNodes(t, consensusTest0, configurableConsensus) for i := 0; i < len(nodes); i++ { defer os.Remove(wallets[i]) defer nodes[i].Stop() @@ -338,13 +403,13 @@ func TestSimpleUpgrade(t *testing.T) { initialRound := nodes[0].ledger.NextRound() - startAndConnectNodes(nodes, false) + startAndConnectNodes(nodes, nodelayFirstNodeStartDelay) maxRounds := basics.Round(16) roundsCheckedForUpgrade := 0 for tests := basics.Round(0); tests < maxRounds; tests++ { - blocks := make([]bookkeeping.Block, len(wallets), len(wallets)) + blocks := make([]bookkeeping.Block, len(wallets)) for i := range wallets { select { case <-nodes[i].ledger.Wait(initialRound + tests): @@ -387,10 +452,13 @@ func TestSimpleUpgrade(t *testing.T) { require.Equal(t, 2, roundsCheckedForUpgrade) } -func startAndConnectNodes(nodes []*AlgorandFullNode, delayStartFirstNode bool) { +const defaultFirstNodeStartDelay = 20 * time.Second +const nodelayFirstNodeStartDelay = 0 + +func startAndConnectNodes(nodes []*AlgorandFullNode, delayStartFirstNode time.Duration) { var wg sync.WaitGroup for i := range nodes { - if delayStartFirstNode && i == 0 { + if delayStartFirstNode != nodelayFirstNodeStartDelay && i == 0 { continue } wg.Add(1) @@ -401,9 +469,9 @@ func startAndConnectNodes(nodes []*AlgorandFullNode, delayStartFirstNode bool) { } wg.Wait() - if delayStartFirstNode { + if delayStartFirstNode != nodelayFirstNodeStartDelay { connectPeers(nodes[1:]) - delayStartNode(nodes[0], nodes[1:], 20*time.Second) + delayStartNode(nodes[0], nodes[1:], delayStartFirstNode) } else { connectPeers(nodes) } @@ -754,3 +822,208 @@ func TestMaxSizesCorrect(t *testing.T) { tsSize := uint64(network.MaxMessageLength) require.Equal(t, tsSize, protocol.TopicMsgRespTag.MaxMessageSize()) } + +// TestNodeHybridTopology set ups 3 nodes network with the following topology: +// N -- R -- A and ensures N can discover A and download blocks from it. +// +// N is a non-part node that joins the network later +// R is a non-archival relay node with block service disabled. It MUST NOT service blocks to force N to discover A. +// A is a archival node that can only provide blocks. +// Nodes N and A have only R in their initial phonebook, and all nodes are in hybrid mode. 
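To make the hybrid setup described above concrete, this is roughly the per-node configuration the test's configHook produces; a sketch using only fields that appear in the test, with placeholder addresses:

// Sketch of a hybrid-mode node configuration (values are illustrative).
cfg := config.GetDefaultLocal()
cfg.NetAddress = "127.0.0.1:10000"    // websocket listening address
cfg.P2PNetAddress = "127.0.0.1:10001" // p2p listening address
cfg.EnableP2PHybridMode = true        // run the ws and p2p networks side by side
cfg.EnableDHTProviders = true         // advertise and discover peers via DHT
cfg.P2PPersistPeerID = true           // keep a stable p2p identity across restarts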
+func TestNodeHybridTopology(t *testing.T) { + partitiontest.PartitionTest(t) + + const consensusTest0 = protocol.ConsensusVersion("test0") + + configurableConsensus := make(config.ConsensusProtocols) + + testParams0 := config.Consensus[protocol.ConsensusCurrentVersion] + testParams0.AgreementFilterTimeoutPeriod0 = 500 * time.Millisecond + configurableConsensus[consensusTest0] = testParams0 + + minMoneyAtStart := 1_000_000 + maxMoneyAtStart := 100_000_000_000 + gen := rand.New(rand.NewSource(2)) + + const numAccounts = 3 + acctStake := make([]basics.MicroAlgos, numAccounts) + for i := range acctStake { + acctStake[i] = basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))} + } + acctStake[0] = basics.MicroAlgos{} // no stake at node 0 + + configHook := func(ni nodeInfo, cfg config.Local) (nodeInfo, config.Local) { + cfg = config.GetDefaultLocal() + if ni.idx != 2 { + cfg.EnableBlockService = false + cfg.EnableGossipBlockService = false + cfg.EnableLedgerService = false + cfg.CatchpointInterval = 0 + cfg.Archival = false + } else { + // node 2 is archival + cfg.EnableBlockService = true + cfg.EnableGossipBlockService = true + cfg.EnableLedgerService = true + cfg.CatchpointInterval = 200 + cfg.Archival = true + } + if ni.idx == 0 { + // do not allow node 0 (N) to make any outgoing connections + cfg.GossipFanout = 0 + } + + cfg.NetAddress = ni.wsNetAddr() + cfg.EnableP2PHybridMode = true + cfg.EnableDHTProviders = true + cfg.P2PPersistPeerID = true + privKey, err := p2p.GetPrivKey(cfg, ni.rootDir) + require.NoError(t, err) + ni.p2pID, err = p2p.PeerIDFromPublicKey(privKey.GetPublic()) + require.NoError(t, err) + + cfg.P2PNetAddress = ni.p2pNetAddr() + return ni, cfg + } + + phonebookHook := func(ni []nodeInfo, i int) []string { + switch i { + case 0: + // node 0 (N) only accept connections at the beginning to learn about archival node from DHT + t.Logf("Node%d phonebook: empty", i) + return []string{} + case 1: + // node 1 (R) connects to all + t.Logf("Node%d phonebook: %s, %s, %s, %s", i, ni[0].wsNetAddr(), ni[2].wsNetAddr(), ni[0].p2pMultiAddr(), ni[2].p2pMultiAddr()) + return []string{ni[0].wsNetAddr(), ni[2].wsNetAddr(), ni[0].p2pMultiAddr(), ni[2].p2pMultiAddr()} + case 2: + // node 2 (A) connects to R + t.Logf("Node%d phonebook: %s, %s", i, ni[1].wsNetAddr(), ni[1].p2pMultiAddr()) + return []string{ni[1].wsNetAddr(), ni[1].p2pMultiAddr()} + default: + t.Errorf("not expected number of nodes: %d", i) + t.FailNow() + } + return nil + } + + nodes, wallets := setupFullNodesEx(t, consensusTest0, configurableConsensus, acctStake, configHook, phonebookHook) + require.Len(t, nodes, 3) + require.Len(t, wallets, 3) + for i := 0; i < len(nodes); i++ { + defer os.Remove(wallets[i]) + defer nodes[i].Stop() + } + + startAndConnectNodes(nodes, 10*time.Second) + + initialRound := nodes[0].ledger.NextRound() + targetRound := initialRound + 10 + + select { + case <-nodes[0].ledger.Wait(targetRound): + e0, err := nodes[0].ledger.Block(targetRound) + require.NoError(t, err) + e1, err := nodes[1].ledger.Block(targetRound) + require.NoError(t, err) + require.Equal(t, e1.Hash(), e0.Hash()) + case <-time.After(120 * time.Second): + require.Fail(t, fmt.Sprintf("no block notification for wallet: %v.", wallets[0])) + } +} + +// TestNodeP2PRelays creates a network of 3 nodes with the following topology: +// R1 (relay, DHT) -> R2 (relay, phonebook) <- N (part node) +// Expect N to discover R1 via DHT and connect to it. 
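Both tests seed their phonebooks with p2p multiaddrs that embed the node's peer ID, derived from the key persisted under P2PPersistPeerID. A hedged sketch of that derivation, using the same p2p helpers the tests call (the wrapper function itself is hypothetical):

// p2pBootstrapAddr builds a phonebook entry of the form /ip4/<host>/tcp/<port>/p2p/<peerID>.
func p2pBootstrapAddr(cfg config.Local, rootDir, host string, port int) (string, error) {
	privKey, err := p2p.GetPrivKey(cfg, rootDir) // loads or creates the persisted identity key
	if err != nil {
		return "", err
	}
	peerID, err := p2p.PeerIDFromPublicKey(privKey.GetPublic())
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", host, port, peerID.String()), nil
}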
+func TestNodeP2PRelays(t *testing.T) { + partitiontest.PartitionTest(t) + + const consensusTest0 = protocol.ConsensusVersion("test0") + + configurableConsensus := make(config.ConsensusProtocols) + + testParams0 := config.Consensus[protocol.ConsensusCurrentVersion] + testParams0.AgreementFilterTimeoutPeriod0 = 500 * time.Millisecond + configurableConsensus[consensusTest0] = testParams0 + + minMoneyAtStart := 1_000_000 + maxMoneyAtStart := 100_000_000_000 + gen := rand.New(rand.NewSource(2)) + + const numAccounts = 3 + acctStake := make([]basics.MicroAlgos, numAccounts) + // only node N has stake + acctStake[2] = basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))} + + configHook := func(ni nodeInfo, cfg config.Local) (nodeInfo, config.Local) { + cfg = config.GetDefaultLocal() + cfg.BaseLoggerDebugLevel = uint32(logging.Debug) + cfg.EnableP2P = true + cfg.NetAddress = "" + cfg.EnableDHTProviders = true + + cfg.P2PPersistPeerID = true + privKey, err := p2p.GetPrivKey(cfg, ni.rootDir) + require.NoError(t, err) + ni.p2pID, err = p2p.PeerIDFromPublicKey(privKey.GetPublic()) + require.NoError(t, err) + + switch ni.idx { + case 2: + // N is not a relay + default: + cfg.NetAddress = ni.p2pNetAddr() + } + return ni, cfg + } + + phonebookHook := func(ni []nodeInfo, i int) []string { + switch i { + case 0: + // node R1 connects to R2 + t.Logf("Node%d phonebook: %s", i, ni[1].p2pMultiAddr()) + return []string{ni[1].p2pMultiAddr()} + case 1: + // node R2 connects to none one + t.Logf("Node%d phonebook: empty", i) + return []string{} + case 2: + // node N only connects to R1 + t.Logf("Node%d phonebook: %s", i, ni[1].p2pMultiAddr()) + return []string{ni[1].p2pMultiAddr()} + default: + t.Errorf("not expected number of nodes: %d", i) + t.FailNow() + } + return nil + } + + nodes, wallets := setupFullNodesEx(t, consensusTest0, configurableConsensus, acctStake, configHook, phonebookHook) + require.Len(t, nodes, 3) + require.Len(t, wallets, 3) + for i := 0; i < len(nodes); i++ { + defer os.Remove(wallets[i]) + defer nodes[i].Stop() + } + + startAndConnectNodes(nodes, nodelayFirstNodeStartDelay) + + require.Eventually(t, func() bool { + connectPeers(nodes) + + // since p2p open streams based on peer ID, there is no way to judge + // connectivity based on exact In/Out so count both + return len(nodes[0].net.GetPeers(network.PeersConnectedIn, network.PeersConnectedOut)) >= 1 && + len(nodes[1].net.GetPeers(network.PeersConnectedIn, network.PeersConnectedOut)) >= 2 && + len(nodes[2].net.GetPeers(network.PeersConnectedIn, network.PeersConnectedOut)) >= 1 + }, 60*time.Second, 1*time.Second) + + t.Log("Nodes connected to R2") + + // wait until N gets R1 in its phonebook + require.Eventually(t, func() bool { + // refresh N's peers in order to learn DHT data faster + nodes[2].net.RequestConnectOutgoing(false, nil) + return len(nodes[2].net.GetPeers(network.PeersPhonebookRelays)) == 2 + }, 80*time.Second, 1*time.Second) +} diff --git a/rpcs/blockService.go b/rpcs/blockService.go index d1ef82dfd4..1a9893b70a 100644 --- a/rpcs/blockService.go +++ b/rpcs/blockService.go @@ -41,6 +41,7 @@ import ( "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/network/addr" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/metrics" ) @@ -381,7 +382,6 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, 
reqMsg network.Inc return } respTopics, n = topicBlockBytes(bs.log, bs.ledger, basics.Round(round), string(requestType)) - return } // redirectRequest redirects the request to the next round robin fallback endpoint if available @@ -392,18 +392,24 @@ func (bs *BlockService) redirectRequest(round uint64, response http.ResponseWrit return false } - parsedURL, err := network.ParseHostOrURL(peerAddress) - if err != nil { - bs.log.Debugf("redirectRequest: %s", err.Error()) - return false + var redirectURL string + if addr.IsMultiaddr(peerAddress) { + redirectURL = strings.Replace(FormatBlockQuery(round, "", bs.net), "{genesisID}", bs.genesisID, 1) + } else { + parsedURL, err := addr.ParseHostOrURL(peerAddress) + if err != nil { + bs.log.Debugf("redirectRequest: %s", err.Error()) + return false + } + parsedURL.Path = strings.Replace(FormatBlockQuery(round, parsedURL.Path, bs.net), "{genesisID}", bs.genesisID, 1) + redirectURL = parsedURL.String() } - parsedURL.Path = strings.Replace(FormatBlockQuery(round, parsedURL.Path, bs.net), "{genesisID}", bs.genesisID, 1) - http.Redirect(response, request, parsedURL.String(), http.StatusTemporaryRedirect) - bs.log.Debugf("redirectRequest: redirected block request to %s", parsedURL.String()) + http.Redirect(response, request, redirectURL, http.StatusTemporaryRedirect) + bs.log.Debugf("redirectRequest: redirected block request to %s", redirectURL) return true } -// getNextCustomFallbackEndpoint returns the next custorm fallback endpoint in RR ordering +// getNextCustomFallbackEndpoint returns the next custom fallback endpoint in RR ordering func (bs *BlockService) getNextCustomFallbackEndpoint() (endpointAddress string) { if len(bs.fallbackEndpoints.endpoints) == 0 { return @@ -487,7 +493,7 @@ func RawBlockBytes(l LedgerForBlockService, round basics.Round) ([]byte, error) // FormatBlockQuery formats a block request query for the given network and round number func FormatBlockQuery(round uint64, parsedURL string, net network.GossipNode) string { - return net.SubstituteGenesisID(path.Join(parsedURL, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(round), 36))) + return network.SubstituteGenesisID(net, path.Join(parsedURL, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(round), 36))) } func makeFallbackEndpoints(log logging.Logger, customFallbackEndpoints string) (fe fallbackEndpoints) { @@ -496,12 +502,16 @@ func makeFallbackEndpoints(log logging.Logger, customFallbackEndpoints string) ( } endpoints := strings.Split(customFallbackEndpoints, ",") for _, ep := range endpoints { - parsed, err := network.ParseHostOrURL(ep) - if err != nil { - log.Warnf("makeFallbackEndpoints: error parsing %s %s", ep, err.Error()) - continue + if addr.IsMultiaddr(ep) { + fe.endpoints = append(fe.endpoints, ep) + } else { + parsed, err := addr.ParseHostOrURL(ep) + if err != nil { + log.Warnf("makeFallbackEndpoints: error parsing %s %s", ep, err.Error()) + continue + } + fe.endpoints = append(fe.endpoints, parsed.String()) } - fe.endpoints = append(fe.endpoints, parsed.String()) } return } diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go index 7b1f756f08..e637796adf 100644 --- a/rpcs/blockService_test.go +++ b/rpcs/blockService_test.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "net/http" + "net/http/httptest" "strings" "sync" "testing" @@ -38,6 +39,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/network/addr" 
"github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -70,6 +72,10 @@ func (mup *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.Incoming return nil } +func (mup *mockUnicastPeer) GetNetwork() network.GossipNode { + panic("not implemented") +} + // TestHandleCatchupReqNegative covers the error reporting in handleCatchupReq func TestHandleCatchupReqNegative(t *testing.T) { partitiontest.PartitionTest(t) @@ -142,6 +148,8 @@ func TestRedirectFallbackEndpoints(t *testing.T) { net1 := &httpTestPeerSource{} net2 := &httpTestPeerSource{} + net1.GenesisID = "test-genesis-ID" + net2.GenesisID = "test-genesis-ID" nodeA := &basicRPCNode{} nodeB := &basicRPCNode{} @@ -161,7 +169,7 @@ func TestRedirectFallbackEndpoints(t *testing.T) { nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) - parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) client := http.Client{} @@ -206,7 +214,7 @@ func TestBlockServiceShutdown(t *testing.T) { nodeA.start() defer nodeA.stop() - parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) client := http.Client{} @@ -259,6 +267,8 @@ func TestRedirectOnFullCapacity(t *testing.T) { net1 := &httpTestPeerSource{} net2 := &httpTestPeerSource{} + net1.GenesisID = "test-genesis-ID" + net2.GenesisID = "test-genesis-ID" nodeA := &basicRPCNode{} nodeB := &basicRPCNode{} @@ -286,7 +296,7 @@ func TestRedirectOnFullCapacity(t *testing.T) { nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) - parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) client := http.Client{} @@ -445,6 +455,8 @@ func TestRedirectExceptions(t *testing.T) { net1 := &httpTestPeerSource{} net2 := &httpTestPeerSource{} + net1.GenesisID = "{genesisID}" + net2.GenesisID = "{genesisID}" nodeA := &basicRPCNode{} nodeB := &basicRPCNode{} @@ -465,7 +477,7 @@ func TestRedirectExceptions(t *testing.T) { nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) - parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) client := http.Client{} @@ -484,12 +496,13 @@ func TestRedirectExceptions(t *testing.T) { require.NoError(t, err) require.Equal(t, response.StatusCode, http.StatusNotFound) - parsedURLNodeB, err := network.ParseHostOrURL(nodeB.rootURL()) + parsedURLNodeB, err := addr.ParseHostOrURL(nodeB.rootURL()) require.NoError(t, err) parsedURLNodeB.Path = FormatBlockQuery(uint64(4), parsedURLNodeB.Path, net2) blockURLNodeB := parsedURLNodeB.String() requestNodeB, err := http.NewRequest("GET", blockURLNodeB, nil) + require.NoError(t, err) _, err = client.Do(requestNodeB) require.Error(t, err) @@ -548,8 +561,45 @@ func addBlock(t *testing.T, ledger *data.Ledger) (timestamp int64) { func TestErrMemoryAtCapacity(t *testing.T) { partitiontest.PartitionTest(t) + t.Parallel() macError := errMemoryAtCapacity{capacity: uint64(100), used: uint64(110)} errStr := macError.Error() require.Equal(t, "block service memory over capacity: 110 / 100", errStr) } + +func TestBlockServiceRedirect(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + log := logging.TestingLog(t) + + ep1 := "http://localhost:1234" + ep2 := 
"/ip4/127.0.0.1/tcp/2345/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" + endpoints := strings.Join([]string{ep1, ep2}, ",") + fb := makeFallbackEndpoints(log, endpoints) + require.Len(t, fb.endpoints, 2) + require.Equal(t, ep1, fb.endpoints[0]) + require.Equal(t, ep2, fb.endpoints[1]) + + bs := BlockService{ + net: &httpTestPeerSource{}, + fallbackEndpoints: fb, + log: log, + } + + r := httptest.NewRequest("GET", "/", strings.NewReader("")) + w := httptest.NewRecorder() + ok := bs.redirectRequest(10, w, r) + require.True(t, ok) + expectedPath := ep1 + FormatBlockQuery(10, "/", bs.net) + require.Equal(t, expectedPath, w.Result().Header.Get("Location")) + + r = httptest.NewRequest("GET", "/", strings.NewReader("")) + w = httptest.NewRecorder() + ok = bs.redirectRequest(11, w, r) + require.True(t, ok) + // for p2p nodes the url is actually a peer address in p2p network and not part of HTTP path + expectedPath = FormatBlockQuery(11, "", bs.net) + require.Equal(t, expectedPath, w.Result().Header.Get("Location")) +} diff --git a/rpcs/healthService_test.go b/rpcs/healthService_test.go index f2846c322d..c1153d98ba 100644 --- a/rpcs/healthService_test.go +++ b/rpcs/healthService_test.go @@ -23,6 +23,7 @@ import ( "testing" "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/network/addr" "github.com/algorand/go-algorand/test/partitiontest" "github.com/stretchr/testify/require" ) @@ -36,7 +37,7 @@ func TestHealthService_ServeHTTP(t *testing.T) { _ = MakeHealthService(nodeA) - parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) client := http.Client{} diff --git a/rpcs/httpTxSync.go b/rpcs/httpTxSync.go index 43258337cb..311a87cf7b 100644 --- a/rpcs/httpTxSync.go +++ b/rpcs/httpTxSync.go @@ -24,7 +24,6 @@ import ( "math/rand" "net/http" "net/url" - "path" "strings" "github.com/algorand/go-algorand/data/bookkeeping" @@ -103,19 +102,18 @@ func (hts *HTTPTxSync) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups if !ok { return nil, fmt.Errorf("cannot HTTPTxSync non http peer %T %#v", peer, peer) } + var syncURL string hts.rootURL = hpeer.GetAddress() + client := hpeer.GetHTTPClient() if client == nil { - client = &http.Client{} - client.Transport = hts.peers.GetRoundTripper() - } - parsedURL, err := network.ParseHostOrURL(hts.rootURL) - if err != nil { - hts.log.Warnf("txSync bad url %v: %s", hts.rootURL, err) - return nil, err + client, err = hts.peers.GetHTTPClient(hts.rootURL) + if err != nil { + return nil, fmt.Errorf("HTTPTxSync cannot create a HTTP client for a peer %T %#v: %s", peer, peer, err.Error()) + } } - parsedURL.Path = hts.peers.SubstituteGenesisID(path.Join(parsedURL.Path, TxServiceHTTPPath)) - syncURL := parsedURL.String() + syncURL = network.SubstituteGenesisID(hts.peers, TxServiceHTTPPath) + hts.log.Infof("http sync from %s", syncURL) params := url.Values{} params.Set("bf", bloomParam) diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go index 5f75a36b0c..823895a417 100644 --- a/rpcs/ledgerService.go +++ b/rpcs/ledgerService.go @@ -60,19 +60,25 @@ type LedgerForService interface { GetCatchpointStream(round basics.Round) (ledger.ReadCloseSizer, error) } +// httpGossipNode is a reduced interface for the gossipNode that only includes the methods needed by the LedgerService +type httpGossipNode interface { + RegisterHTTPHandler(path string, handler http.Handler) + GetHTTPRequestConnection(request *http.Request) (conn 
network.DeadlineSettableConn) +} + // LedgerService represents the Ledger RPC API type LedgerService struct { // running is non-zero once the service is running, and zero when it's not running. it needs to be at a 32-bit aligned address for RasPI support. running atomic.Int32 ledger LedgerForService genesisID string - net network.GossipNode + net httpGossipNode enableService bool stopping sync.WaitGroup } // MakeLedgerService creates a LedgerService around the provider Ledger and registers it with the HTTP router -func MakeLedgerService(config config.Local, ledger LedgerForService, net network.GossipNode, genesisID string) *LedgerService { +func MakeLedgerService(config config.Local, ledger LedgerForService, net httpGossipNode, genesisID string) *LedgerService { service := &LedgerService{ ledger: ledger, genesisID: genesisID, diff --git a/rpcs/ledgerService_test.go b/rpcs/ledgerService_test.go index 1cc52fc9c0..a100f03c2b 100644 --- a/rpcs/ledgerService_test.go +++ b/rpcs/ledgerService_test.go @@ -17,6 +17,9 @@ package rpcs import ( + "archive/tar" + "bytes" + "compress/gzip" "fmt" "io" "net/http" @@ -172,3 +175,59 @@ func TestLedgerService(t *testing.T) { ledgerService.Stop() require.Equal(t, int32(0), ledgerService.running.Load()) } + +type mockSizedStream struct { + *bytes.Buffer +} + +func (mss mockSizedStream) Size() (int64, error) { + return int64(mss.Len()), nil +} + +func (mss mockSizedStream) Close() error { + return nil +} + +type mockLedgerForService struct { +} + +func (l *mockLedgerForService) GetCatchpointStream(round basics.Round) (ledger.ReadCloseSizer, error) { + buf := bytes.NewBuffer(nil) + gz := gzip.NewWriter(buf) + wtar := tar.NewWriter(gz) + wtar.Close() + gz.Close() + + buf2 := bytes.NewBuffer(buf.Bytes()) + return mockSizedStream{buf2}, nil +} + +// TestLedgerServiceP2P creates a ledger service on a node, and a p2p client tries to download +// an empty catchpoint file from the ledger service. 
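The reason the p2p test node below can stand in for a full gossip network is the reduced httpGossipNode interface introduced above: MakeLedgerService only needs RegisterHTTPHandler and GetHTTPRequestConnection. If desired, that relationship could be pinned with compile-time assertions; a sketch, assuming both types continue to provide those two methods (the assertions themselves are not part of this patch):

// Hypothetical compile-time checks: the full gossip network and the p2p test
// node both have to satisfy the reduced interface consumed by LedgerService.
var _ httpGossipNode = network.GossipNode(nil)
var _ httpGossipNode = (*p2ptesting.HTTPNode)(nil)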
+func TestLedgerServiceP2P(t *testing.T) { + partitiontest.PartitionTest(t) + + nodeA, nodeB := nodePairP2p(t) + defer nodeA.Stop() + defer nodeB.Stop() + + genesisID := "test GenesisID" + cfg := config.GetDefaultLocal() + cfg.EnableLedgerService = true + l := mockLedgerForService{} + ledgerService := MakeLedgerService(cfg, &l, nodeA, genesisID) + ledgerService.Start() + defer ledgerService.Stop() + + nodeA.RegisterHTTPHandler(LedgerServiceLedgerPath, ledgerService) + + httpPeer := nodeA.GetHTTPPeer().(network.HTTPPeer) + + req, err := http.NewRequest("GET", fmt.Sprintf("/v1/%s/ledger/0", genesisID), nil) + require.NoError(t, err) + resp, err := httpPeer.GetHTTPClient().Do(req) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + + require.Equal(t, http.StatusOK, resp.StatusCode) +} diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go index 1e7a5036e4..fcfae1044d 100644 --- a/rpcs/txService_test.go +++ b/rpcs/txService_test.go @@ -22,7 +22,6 @@ import ( "net/http" "net/url" "os" - "strings" "sync" "testing" "time" @@ -34,6 +33,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" + p2ptesting "github.com/algorand/go-algorand/network/p2p/testing" "github.com/algorand/go-algorand/test/partitiontest" "github.com/algorand/go-algorand/util/bloom" ) @@ -64,7 +64,11 @@ func (p testHTTPPeer) GetAddress() string { return string(p) } func (p *testHTTPPeer) GetHTTPClient() *http.Client { - return &http.Client{} + return &http.Client{ + Transport: &network.HTTPPAddressBoundTransport{ + Addr: p.GetAddress(), + InnerTransport: http.DefaultTransport}, + } } func (p *testHTTPPeer) GetHTTPPeer() network.HTTPPeer { return p @@ -116,9 +120,7 @@ func (b *basicRPCNode) GetPeers(options ...network.PeerOption) []network.Peer { return b.peers } -func (b *basicRPCNode) SubstituteGenesisID(rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", "test genesisID", -1) -} +func (b *basicRPCNode) GetGenesisID() string { return "test genesisID" } func nodePair() (*basicRPCNode, *basicRPCNode) { nodeA := &basicRPCNode{} @@ -132,27 +134,84 @@ func nodePair() (*basicRPCNode, *basicRPCNode) { return nodeA, nodeB } +func nodePairP2p(tb testing.TB) (*p2ptesting.HTTPNode, *p2ptesting.HTTPNode) { + nodeA := p2ptesting.MakeHTTPNode(tb) + addrsA := nodeA.Addrs() + require.Greater(tb, len(addrsA), 0) + + nodeB := p2ptesting.MakeHTTPNode(tb) + addrsB := nodeA.Addrs() + require.Greater(tb, len(addrsB), 0) + + nodeA.SetPeers(nodeB) + nodeB.SetPeers(nodeA) + nodeA.SetGenesisID("test genesisID") + nodeB.SetGenesisID("test genesisID") + + nodeA.Start() + nodeB.Start() + + return nodeA, nodeB +} + +// TestTxSync checks txsync on a network with two nodes, A and B func TestTxSync(t *testing.T) { partitiontest.PartitionTest(t) - // A network with two nodes, A and B - nodeA, nodeB := nodePair() - defer nodeA.stop() - defer nodeB.stop() + type txSyncNode interface { + Registrar + network.GossipNode + } - pool := makeMockPendingTxAggregate(3) - RegisterTxService(pool, nodeA, "test genesisID", config.GetDefaultLocal().TxPoolSize, config.GetDefaultLocal().TxSyncServeResponseSize) + tests := []struct { + name string + setup func(t *testing.T) (txSyncNode, txSyncNode, func()) + }{ + { + name: "tcp", + setup: func(t *testing.T) (txSyncNode, txSyncNode, func()) { + nodeA, nodeB := nodePair() + cleanup := func() { + nodeA.stop() + nodeB.stop() + } + return nodeA, nodeB, cleanup + }, + }, + { + name: 
"p2p", + setup: func(t *testing.T) (txSyncNode, txSyncNode, func()) { + nodeA, nodeB := nodePairP2p(t) + cleanup := func() { + nodeA.Stop() + nodeB.Stop() + } + return nodeA, nodeB, cleanup + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A network with two nodes, A and B + nodeA, nodeB, cleanupFn := test.setup(t) + defer cleanupFn() - // B tries to fetch block - handler := mockHandler{} - syncInterval := time.Second - syncTimeout := time.Second - syncerPool := makeMockPendingTxAggregate(0) - syncer := MakeTxSyncer(syncerPool, nodeB, &handler, syncInterval, syncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize) - // Since syncer is not Started, set the context here - syncer.ctx, syncer.cancel = context.WithCancel(context.Background()) - require.NoError(t, syncer.sync()) - require.Equal(t, int32(3), handler.messageCounter.Load()) + pool := makeMockPendingTxAggregate(3) + RegisterTxService(pool, nodeA, "test genesisID", config.GetDefaultLocal().TxPoolSize, config.GetDefaultLocal().TxSyncServeResponseSize) + + // B tries to fetch block + handler := mockHandler{} + syncInterval := time.Second + syncTimeout := time.Second + syncerPool := makeMockPendingTxAggregate(0) + syncer := MakeTxSyncer(syncerPool, nodeB, &handler, syncInterval, syncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize) + // Since syncer is not Started, set the context here + syncer.ctx, syncer.cancel = context.WithCancel(context.Background()) + require.NoError(t, syncer.sync()) + require.Equal(t, int32(3), handler.messageCounter.Load()) + }) + } } func BenchmarkTxSync(b *testing.B) { diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go index 1c3f071879..eb3e4eab60 100644 --- a/rpcs/txSyncer_test.go +++ b/rpcs/txSyncer_test.go @@ -22,7 +22,6 @@ import ( "math/rand" "net/http" "net/rpc" - "strings" "sync/atomic" "testing" "time" @@ -158,6 +157,7 @@ func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txg func (client *mockRPCClient) GetAddress() string { return client.rootURL } + func (client *mockRPCClient) GetHTTPClient() *http.Client { return nil } @@ -170,20 +170,13 @@ type mockClientAggregator struct { func (mca *mockClientAggregator) GetPeers(options ...network.PeerOption) []network.Peer { return mca.peers } -func (mca *mockClientAggregator) SubstituteGenesisID(rawURL string) string { - return strings.Replace(rawURL, "{genesisID}", "test genesisID", -1) -} - -const numberOfPeers = 10 -func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool) *mockClientAggregator { - clients := make([]network.Peer, 0) - for i := 0; i < numberOfPeers; i++ { - runner := mockRunner{failWithNil: failWithNil, failWithError: failWithError, done: make(chan *rpc.Call)} - clients = append(clients, &mockRPCClient{client: &runner, log: logging.TestingLog(t)}) - } - t.Logf("len(mca.clients) = %d", len(clients)) - return &mockClientAggregator{peers: clients} +func (mca *mockClientAggregator) GetHTTPClient(address string) (*http.Client, error) { + return &http.Client{ + Transport: &network.HTTPPAddressBoundTransport{ + Addr: address, + InnerTransport: http.DefaultTransport}, + }, nil } func TestSyncFromClient(t *testing.T) { @@ -283,7 +276,7 @@ func TestSync(t *testing.T) { runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)} - clientAgg := 
mockClientAggregator{peers: []network.Peer{&client}} + clientAgg := mockClientAggregator{peers: []network.Peer{&client}, MockNetwork: mocks.MockNetwork{GenesisID: "test genesisID"}} handler := mockHandler{} syncerPool := makeMockPendingTxAggregate(3) syncer := MakeTxSyncer(syncerPool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize) @@ -322,7 +315,7 @@ func TestStartAndStop(t *testing.T) { runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)} - clientAgg := mockClientAggregator{peers: []network.Peer{&client}} + clientAgg := mockClientAggregator{peers: []network.Peer{&client}, MockNetwork: mocks.MockNetwork{GenesisID: "test genesisID"}} handler := mockHandler{} syncerPool := makeMockPendingTxAggregate(0) diff --git a/test/heapwatch/agreement-log.py b/test/heapwatch/agreement-log.py new file mode 100644 index 0000000000..4109b37a71 --- /dev/null +++ b/test/heapwatch/agreement-log.py @@ -0,0 +1,187 @@ +""" +Agreement logs parser, takes either separate node.log files from a directory and guessing names from the file names, +or parses the e2e test failure log file watching for node names as " libgoalFixture.go:376: Relay0/node.log:" strings. + +This tool similar a bit to carpenter but takes multiple log files at once. +To force colors when outputting to a file, set FORCE_COLOR=1 in the environment. +""" + +import argparse +from datetime import datetime, timedelta +import glob +import json +import logging +import os +import time + +from termcolor import COLORS, colored + +logger = logging.getLogger(__name__) + +filtered_events = frozenset(['Persisted']) + +def process_json_line(line: str, node_name: str, by_node: dict, events: list): + """Handles a single line of json log file, returns parsed event or None if it's not an agreement event. + + line is a single line of json log file. + node_name is a name of the node that produced this line. + by_node is dict with unique nodes meta information. + events is a list of all parsed events. It is appended in this function to keep the caller code clean. 
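+    The returned dict carries time, type, round/period/step, object round/period/step,
+    hash, sender, weight, weight_total and the node name. RoundConcluded events additionally
+    get a round_time_ms value, computed from the per-node timestamp that is refreshed on each
+    RoundStart.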
+ """ + try: + evt = json.loads(line) + except json.JSONDecodeError: + logger.error('failed to parse json: %s', line) + return None + if evt.get('Context') == 'Agreement' and evt.get('Type'): + if evt['Type'] in filtered_events: + return None + dt = datetime.strptime(evt['time'], '%Y-%m-%dT%H:%M:%S.%f%z') + sender = evt.get('Sender') + sender = sender[:12] if sender else '' + h = evt.get('Hash') + h = h[:8] if h else '' + w = evt.get('Weight', '-') if not evt['Type'].startswith('Proposal') else ' ' + wt = evt.get('WeightTotal', '-') if not evt['Type'].startswith('Proposal') else ' ' + if evt['Type'] in ('StepTimeout', 'VoteAttest', 'BlockAssembled', 'BlockPipelined'): + w, wt = ' ', ' ' + result = { + 'time': dt, + 'type': evt.get('Type'), + 'round': evt.get('Round', '-'), + 'period': evt.get('Period', '-'), + 'step': evt.get('Step', '-'), + 'object_round': evt.get('ObjectRound', '-'), + 'object_period': evt.get('ObjectPeriod', '-'), + 'object_step': evt.get('ObjectStep', '-'), + 'hash': h, + 'sender': sender, + 'weight': w, + 'weight_total': wt, + 'node': node_name, + } + events.append(result) + metadata = by_node.get(node_name) + if not metadata: + metadata = { + 'type': evt.get('Type'), + 'time': dt + } + by_node[node_name] = metadata + else: + if evt.get('Type') == 'RoundConcluded': + rt = dt - metadata['time'] + result['round_time_ms'] = rt / timedelta(milliseconds=1) + elif evt.get('Type') == 'RoundStart': + metadata['time'] = dt + metadata['type'] = 'RoundStart' + by_node[node_name] = metadata + + return result + return None + +def main(): + os.environ['TZ'] = 'UTC' + time.tzset() + + ap = argparse.ArgumentParser() + ap.add_argument('test_log_or_dir', help='Dir with log files or a single log file from e2e tests') + ap.add_argument('-e', '--end-round', type=int, help=f'Round to end at') + args = ap.parse_args() + + by_node = {} + events = [] + if os.path.isdir(args.test_log_or_dir): + logger.info('processing directory %s', args.test_log_or_dir) + log_files = sorted(glob.glob(os.path.join(args.test_log_or_dir, '*-node.log'))) + if not log_files: + logger.error('no log files found in %s', args.test_log_or_dir) + return 1 + for filename in os.listdir(args.test_log_or_dir): + if filename.endswith("-node.log"): + with open(os.path.join(args.test_log_or_dir, filename), 'r') as file: + node_name = filename[:len(filename) - len('-node.log')] + node_name = node_name.replace('relay', 'R') + node_name = node_name.replace('nonParticipatingNode', 'NPN') + node_name = node_name.replace('node', 'N') + for line in file: + event = process_json_line(line, node_name, by_node, events) + if event and args.end_round and \ + isinstance(event['round'], int) and event['round'] >= args.end_round: + break + + else: + logger.info('processing file %s', args.test_log_or_dir) + with open(args.test_log_or_dir, 'r') as file: + line0 = None + while not line0: + line0 = file.readline() + line0 = line0.strip() + + if line0[0] == '{': + # regular json line + node_name = 'node' + process_json_line(line, node_name, by_node, events) + for line in file: + line = line.strip() + event = process_json_line(line, node_name, by_node, events) + if event and args.end_round and \ + isinstance(event['round'], int) and event['round'] >= args.end_round: + break + else: + # looks like e2e test output with lines line this: + """ + libgoalFixture.go:374: ===================... + libgoalFixture.go:376: Relay0/node.log: + libgoalFixture.go:379: {"file":"server.go"... 
+ """ + node_name = None + if line0.endswith('node.log:'): + node_name = line0.split(' ')[1].split('/')[0] + logger.info('found node name: %s', node_name) + for line in file: + line = line.strip() + if line.endswith('node.log:'): + node_name = line.split(' ')[1].split('/')[0] + logger.info('found node name: %s', node_name) + if node_name: + for line in file: + json_start = line.find('{') + if json_start == -1: + # end of continuous json block + node_name = None + break + line = line[json_start:] + event = process_json_line(line, node_name, by_node, events) + if event and args.end_round and \ + isinstance(event['round'], int) and event['round'] >= args.end_round: + break + + log = sorted(events, key=lambda x: x['time']) + + # num_nodes = len(by_node) + colors = list(COLORS) + colors = colors[colors.index('light_grey'):] + if len(colors) < len(by_node): + colors = colors * (len(by_node) // len(colors) + 1) + node_color = {k: v for k, v in zip(by_node.keys(), colors)} + + fmt = '%15s (%s,%s,%s) (%s,%s,%s) %4s|%-4s %-8s %-18s %8s %12s %5s' + print(fmt % ('TS', 'R', 'P', 'S', 'r', 'p', 's', 'W', 'WT', 'NODE', 'EVENT TYPE', 'HASH', 'SENDER', 'RT ms')) + for e in log: + color = node_color[e['node']] + text = colored(fmt % ( + e['time'].strftime('%H:%M:%S.%f'), + e['round'], e['period'], e['step'], + e['object_round'], e['object_period'], e['object_step'], + e['weight'], e['weight_total'], + e['node'][:8], + e['type'], e['hash'], e['sender'], + int(e['round_time_ms']) if 'round_time_ms' in e else ''), + color, + ) + print(text) + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/test/heapwatch/block_history_plot.py b/test/heapwatch/block_history_plot.py index 7de45e21b0..d8c86b454f 100644 --- a/test/heapwatch/block_history_plot.py +++ b/test/heapwatch/block_history_plot.py @@ -138,19 +138,21 @@ def process(path, args): min(tpsv[start:end]), max(tpsv[start:end]), )) print('long round times: {}'.format(' '.join(list(map(str,filter(lambda x: x >= 9,dtv[start:end])))))) - fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2) - ax1.set_title('round time (seconds)') + fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2,3, figsize=(10, 5)) + ax1.set_title('round time histogram (sec)') ax1.hist(list(filter(lambda x: x < 9,dtv[start:end])),bins=20) - if args.rtime: - ax2.set_title('round time') - ax2.plot(dtv) - else: - ax2.set_title('TPS') - ax2.hist(tpsv[start:end],bins=20) + ax4.set_title('round time') + ax4.plot(dtv[start:end]) + + ax2.set_title('txn/block histogram') + ax2.hist(txnv[start:end],bins=20) + + ax5.set_title('txn/block') + ax5.plot(txnv[start:end]) - ax3.set_title('txn/block') - ax3.hist(txnv[start:end],bins=20) + ax3.set_title('TPS') + ax3.hist(tpsv[start:end],bins=20) # 10 round moving average TPS tpsv10 = [] @@ -165,12 +167,12 @@ def process(path, args): dtxn = tca-tc0 tpsv10.append(dtxn/dt) if args.tps1: - ax4.set_title('TPS') - ax4.plot(tpsv[start:end]) + ax6.set_title('TPS') + ax6.plot(tpsv[start:end]) print('fullish block sizes: {}'.format(list(filter(lambda x: x > 100, txnv)))) else: - ax4.set_title('TPS(10 round window)') - ax4.plot(tpsv10) + ax6.set_title('TPS(10 round window)') + ax6.plot(tpsv10) fig.tight_layout() plt.savefig(path + '_hist.svg', format='svg') plt.savefig(path + '_hist.png', format='png') diff --git a/test/heapwatch/client_ram_report.py b/test/heapwatch/client_ram_report.py index 97a1171630..f16fbeaa3f 100644 --- a/test/heapwatch/client_ram_report.py +++ b/test/heapwatch/client_ram_report.py @@ -202,6 +202,10 @@ def 
main(): heap_totals = get_heap_inuse_totals(args.dir) heap_details = get_heap_metrics(args.dir) + if not heap_totals and not heap_details: + print('no data found', file=sys.stderr) + return 0 + if args.csv: if args.csv == '-': csvf = sys.stdout diff --git a/test/heapwatch/metrics_aggs.py b/test/heapwatch/metrics_aggs.py new file mode 100644 index 0000000000..0189634be5 --- /dev/null +++ b/test/heapwatch/metrics_aggs.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +# Copyright (C) 2019-2024 Algorand, Inc. +# This file is part of go-algorand +# +# go-algorand is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# go-algorand is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with go-algorand. If not, see . +# +### +# +# Process and aggregate /metrics data captured by heapWatch.py +# Useful for metrics with labels and bandwidth analysis. +# +import argparse +import glob +import logging +import os +import time +import sys + +import dash +from dash import dcc, html +import plotly.graph_objs as go +from plotly.subplots import make_subplots + + +from metrics_lib import MetricType, parse_metrics, gather_metrics_files_by_nick + +logger = logging.getLogger(__name__) + + +def main(): + os.environ['TZ'] = 'UTC' + time.tzset() + default_img_filename = 'metrics_aggs.png' + default_html_filename = 'metrics_aggs.html' + + ap = argparse.ArgumentParser() + ap.add_argument('metrics_names', nargs='+', default=None, help='metric name(s) to track') + ap.add_argument('-d', '--dir', type=str, default=None, help='dir path to find /*.metrics in') + ap.add_argument('-l', '--list-nodes', default=False, action='store_true', help='list available node names with metrics') + ap.add_argument('-t', '--tags', action='append', default=[], help='tag/label pairs in a=b format to aggregate by, may be repeated. 
Empty means aggregation by metric name') + ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated') + ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated') + ap.add_argument('-s', '--save', type=str, choices=['png', 'html'], help=f'save plot to \'{default_img_filename}\' or \'{default_html_filename}\' file instead of showing it') + ap.add_argument('--verbose', default=False, action='store_true') + + args = ap.parse_args() + if args.verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + tags = {} + if args.tags: + for tag in args.tags: + if '=' not in tag: + raise (f'Invalid tag: {tag}') + k, v = tag.split('=', 1) + tags[k] = v + tag_keys = set(tags.keys()) + + metrics_files = sorted(glob.glob(os.path.join(args.dir, '*.metrics'))) + metrics_files.extend(glob.glob(os.path.join(args.dir, 'terraform-inventory.host'))) + filesByNick = gather_metrics_files_by_nick(metrics_files, args.nick_re, args.nick_lre) + + if args.list_nodes: + print('Available nodes:', ', '.join(sorted(filesByNick.keys()))) + return 0 + + app = dash.Dash(__name__) + app.layout = html.Div( + html.Div([ + html.H4('Algod Metrics'), + html.Div(id='text'), + dcc.Graph(id='graph'), + ]) + ) + metrics_names = set(args.metrics_names) + nrows = len(metrics_names) + + fig = make_subplots( + rows=nrows, cols=1, + vertical_spacing=0.03, shared_xaxes=True, + subplot_titles=[f'{name}' for name in sorted(metrics_names)], + ) + + fig['layout']['margin'] = { + 'l': 30, 'r': 10, 'b': 10, 't': 20 + } + fig['layout']['height'] = 500 * nrows + + + for nick, files_by_date in filesByNick.items(): + active_metrics = {} + data = {'time': []} + raw_series = {} + raw_times = {} + idx = 0 + for dt, metrics_file in files_by_date.items(): + data['time'].append(dt) + with open(metrics_file, 'rt') as f: + metrics = parse_metrics(f, nick, metrics_names) + for metric_name, metrics_seq in metrics.items(): + active_metric_names = [] + raw_value = 0 + for metric in metrics_seq: + if metric.type != MetricType.COUNTER: + raise RuntimeError('Only COUNT metrics are supported') + if tags is None or tags is not None and metric.has_tags(tag_keys, tags): + raw_value += metric.value + full_name = metric.string(set(tag_keys).union({'n'})) + + if full_name is None: + continue + + if full_name not in data: + # handle gaps in data, sometimes metric file might miss a value + # but the chart requires matching x and y series (time and metric value) + # data is what does into the chart, and raw_series is used to calculate + data[full_name] = [0] * len(files_by_date) + raw_series[full_name] = [] + raw_times[full_name] = [] + + metric_value = raw_value + if len(raw_series[full_name]) > 0 and len(raw_times[full_name]) > 0: + metric_value = (metric_value - raw_series[full_name][-1]) / (dt - raw_times[full_name][-1]).total_seconds() + else: + metric_value = 0 + + data[full_name][idx] = metric_value + raw_series[full_name].append(raw_value) + raw_times[full_name].append(dt) + + active_metric_names.append(full_name) + + active_metric_names.sort() + active_metrics[full_name] = active_metric_names + idx += 1 + + for i, metric_pair in enumerate(sorted(active_metrics.items())): + metric_name, metric_fullnames = metric_pair + for metric_fullname in metric_fullnames: + fig.append_trace(go.Scatter( + x=data['time'], + y=data[metric_fullname], + name=metric_fullname, + mode='lines+markers', + line=dict(width=1), + ), i+1, 1) 
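+
+    # Each trace appended above is the aggregated per-second rate of a COUNTER metric:
+    # tag-matching label sets are summed into raw_value per sample, and the plotted value is
+    # rate = (raw[i] - raw[i-1]) / elapsed seconds between samples, with the first sample pinned to 0.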
+ + if args.save: + if args.save == 'html': + target_path = os.path.join(args.dir, default_html_filename) + fig.write_html(target_path) + else: + target_path = os.path.join(args.dir, default_img_filename) + fig.write_image(target_path) + print(f'Saved plot to {target_path}') + else: + fig.show() + + return 0 + +if __name__ == '__main__': + sys.exit(main()) \ No newline at end of file diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py index 50b1e9e2e3..2d64ee097a 100644 --- a/test/heapwatch/metrics_delta.py +++ b/test/heapwatch/metrics_delta.py @@ -22,7 +22,6 @@ # Generate text report on bandwidth in and out of relays/PN/NPN import argparse -import configparser import contextlib import csv import glob @@ -36,42 +35,10 @@ import sys import time -logger = logging.getLogger(__name__) +from metrics_lib import num, hunum, terraform_inventory_ip_not_names, \ + metric_line_re, test_metric_line_re -def num(x): - if '.' in x: - return float(x) - return int(x) - -def hunum(x): - if x >= 10000000000: - return '{:.1f}G'.format(x / 1000000000.0) - if x >= 1000000000: - return '{:.2f}G'.format(x / 1000000000.0) - if x >= 10000000: - return '{:.1f}M'.format(x / 1000000.0) - if x >= 1000000: - return '{:.2f}M'.format(x / 1000000.0) - if x >= 10000: - return '{:.1f}k'.format(x / 1000.0) - if x >= 1000: - return '{:.2f}k'.format(x / 1000.0) - return '{:.2f}x'.format(x) - -metric_line_re = re.compile(r'(\S+\{[^}]*\})\s+(.*)') - -def test_metric_line_re(): - testlines = ( - ('algod_network_connections_dropped_total{reason="write err"} 1', 1), - #('algod_network_sent_bytes_MS 274992', 274992), # handled by split - ) - for line, n in testlines: - try: - m = metric_line_re.match(line) - assert int(m.group(2)) == n - except: - logger.error('failed on line %r', line, exc_info=True) - raise +logger = logging.getLogger(__name__) def parse_metrics(fin): out = dict() @@ -86,10 +53,15 @@ def parse_metrics(fin): continue m = metric_line_re.match(line) if m: - out[m.group(1)] = num(m.group(2)) + key = m.group(1) + val = m.group(2) else: ab = line.split() - out[ab[0]] = num(ab[1]) + key = ab[0] + val = ab[1] + if key.endswith('{}'): + key = key[:-2] + out[key] = num(val) except: print(f'An exception occurred in parse_metrics: {sys.exc_info()}') pass @@ -371,21 +343,6 @@ def process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum): 'npn': (.7,.7,0), } -def terraform_inventory_ip_not_names(tf_inventory_path): - """return ip to nickname mapping""" - tf_inventory = configparser.ConfigParser(allow_no_value=True) - tf_inventory.read(tf_inventory_path) - ip_to_name = {} - for k, sub in tf_inventory.items(): - if k.startswith('name_'): - for ip in sub: - if ip in ip_to_name: - logger.warning('ip %r already named %r, also got %r', ip, ip_to_name[ip], k) - ip_to_name[ip] = k - #logger.debug('names: %r', sorted(ip_to_name.values())) - #logger.debug('ip to name %r', ip_to_name) - return ip_to_name - def main(): os.environ['TZ'] = 'UTC' time.tzset() @@ -541,7 +498,7 @@ def __init__(self): self.txPLists = {} self.txPSums = {} self.times = [] - # algod_tx_pool_count{} + # algod_tx_pool_count self.txPool = [] # objectBytes = [(curtime, algod_go_memory_classes_heap_objects_bytes), ...] 
self.objectBytes = [] @@ -601,13 +558,13 @@ def process_files(self, args, nick=None, metrics_files=None, bisource=None): bi = bisource.get(curtime) if bi is None: logger.warning('%s no blockinfo', path) - self.txPool.append(cur.get('algod_tx_pool_count{}')) + self.txPool.append(cur.get('algod_tx_pool_count')) objectBytes = cur.get('algod_go_memory_classes_heap_objects_bytes') if objectBytes: self.objectBytes.append((curtime, objectBytes)) #logger.debug('%s: %r', path, cur) - verifyGood = cur.get('algod_agreement_proposal_verify_good{}') - verifyMs = cur.get('algod_agreement_proposal_verify_ms{}') + verifyGood = cur.get('algod_agreement_proposal_verify_good') + verifyMs = cur.get('algod_agreement_proposal_verify_ms') if verifyGood and verifyMs: # last writer wins self.verifyMillis = verifyMs / verifyGood @@ -626,8 +583,8 @@ def process_files(self, args, nick=None, metrics_files=None, bisource=None): rounds = (bi.get('block',{}).get('rnd', 0) - prevbi.get('block',{}).get('rnd', 0)) if rounds != 0: blocktime = dt/rounds - txBytes = d.get('algod_network_sent_bytes_total{}',0) - rxBytes = d.get('algod_network_received_bytes_total{}',0) + txBytes = d.get('algod_network_sent_bytes_total',0) + rxBytes = d.get('algod_network_received_bytes_total',0) txBytesPerSec = txBytes / dt rxBytesPerSec = rxBytes / dt # TODO: gather algod_network_sent_bytes_* and algod_network_received_bytes_* diff --git a/test/heapwatch/metrics_lib.py b/test/heapwatch/metrics_lib.py new file mode 100644 index 0000000000..fbda555b90 --- /dev/null +++ b/test/heapwatch/metrics_lib.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python3 +# Copyright (C) 2019-2024 Algorand, Inc. +# This file is part of go-algorand +# +# go-algorand is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# go-algorand is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with go-algorand. If not, see . +# +### +# +# Common functions for processing /metrics data captured by heapWatch.py +# +import configparser +from datetime import datetime +from enum import Enum +import logging +import os +import re +import sys +from typing import Dict, Iterable, List, Optional, Tuple, Union +from urllib.parse import urlparse + + +from client_ram_report import dapp + + +logger = logging.getLogger(__name__) +metric_line_re = re.compile(r'(\S+\{[^}]*\})\s+(.*)') + +def num(x): + if '.' 
in x: + return float(x) + return int(x) + +def hunum(x): + if x >= 10000000000: + return '{:.1f}G'.format(x / 1000000000.0) + if x >= 1000000000: + return '{:.2f}G'.format(x / 1000000000.0) + if x >= 10000000: + return '{:.1f}M'.format(x / 1000000.0) + if x >= 1000000: + return '{:.2f}M'.format(x / 1000000.0) + if x >= 10000: + return '{:.1f}k'.format(x / 1000.0) + if x >= 1000: + return '{:.2f}k'.format(x / 1000.0) + return '{:.2f}x'.format(x) + + +def test_metric_line_re(): + testlines = ( + ('algod_network_connections_dropped_total{reason="write err"} 1', 1), + #('algod_network_sent_bytes_MS 274992', 274992), # handled by split + ) + for line, n in testlines: + try: + m = metric_line_re.match(line) + assert int(m.group(2)) == n + except: + print('failed on line %r', line) + raise + +def terraform_inventory_ip_not_names(tf_inventory_path): + """return ip to nickname mapping""" + tf_inventory = configparser.ConfigParser(allow_no_value=True) + tf_inventory.read(tf_inventory_path) + ip_to_name = {} + for k, sub in tf_inventory.items(): + if k.startswith('name_'): + for ip in sub: + if ip in ip_to_name: + logger.warning('ip %r already named %r, also got %r', ip, ip_to_name[ip], k) + ip_to_name[ip] = k + #logger.debug('names: %r', sorted(ip_to_name.values())) + #logger.debug('ip to name %r', ip_to_name) + return ip_to_name + +metrics_fname_re = re.compile(r'(.*?)\.(\d+_\d+)\.metrics') + +def gather_metrics_files_by_nick( + metrics_files: Iterable[str], nick_res: List[str], nick_lres: List[str] +) -> Dict[str, Dict[datetime, str]]: + """return {"node nickname": {datetime: path, ...}, ...}} + after resolving ip addresses into nodes nick names and applying nick_re and nick_lre filters. + """ + filesByNick = {} + tf_inventory_path = None + for path in metrics_files: + fname = os.path.basename(path) + if fname == 'terraform-inventory.host': + tf_inventory_path = path + continue + m = metrics_fname_re.match(fname) + if not m: + continue + nick = m.group(1) + timestamp = m.group(2) + timestamp = datetime.strptime(timestamp, '%Y%m%d_%H%M%S') + dapp(filesByNick, nick, timestamp, path) + + if tf_inventory_path: + # remap ip addresses to node names + ip_to_name = terraform_inventory_ip_not_names(tf_inventory_path) + filesByNick2 = {} + for nick in filesByNick.keys(): + parsed = urlparse('//' + nick) + name: str = ip_to_name.get(parsed.hostname) + val = filesByNick[nick] + filesByNick2[name] = val + + filesByNick = filesByNick2 + filesByNick2 = {} + + for nick in filesByNick.keys(): + if nick_res or not nick_res and not nick_lres: + # filter by regexp or apply default renaming + for nick_re in nick_res: + if re.match(nick_re, nick): + break + else: + if nick_res: + # regex is given but not matched, continue to the next node + continue + + # apply default renaming + name = nick + idx = name.find('_') + if idx != -1: + name = name[idx+1:] + val = filesByNick[nick] + filesByNick2[name] = val + + elif nick_lres: + # filter by label:regexp + label = None + for nick_lre in nick_lres: + label, nick_re = nick_lre.split(':') + if re.match(nick_re, nick): + break + else: + if nick_lres: + # regex is given but not matched, continue to the next node + continue + + val = filesByNick[nick] + filesByNick2[label] = val + else: + raise RuntimeError('unexpected options combination') + + if filesByNick2: + filesByNick = filesByNick2 + + return filesByNick + +class MetricType(Enum): + GAUGE = 0 + COUNTER = 1 + +class Metric: + """Metric with tags""" + def __init__(self, metric_name: str, type: MetricType, value: Union[int, 
float]): + full_name = metric_name.strip() + self.name = full_name + self.value = value + self.type = type + self.tags: Dict[str, str] = {} + self.tag_keys: set = set() + + det_idx = self.name.find('{') + if det_idx != -1: + self.name = self.name[:det_idx] + # ensure that the last character is '}' + idx = full_name.index('}') + if idx != len(full_name) - 1: + raise ValueError(f'Invalid metric name: {full_name}') + raw_tags = full_name[full_name.find('{')+1:full_name.find('}')] + tags = raw_tags.split(',') + for tag in tags: + key, value = tag.split('=') + if value[0] == '"' and value[-1] == '"': + value = value[1:-1] + self.tags[key] = value + self.tag_keys.add(key) + + def short_name(self): + return self.name + + def __str__(self): + return self.string() + + def string(self, tags: Optional[set[str]]=None): + result = self.name + if self.tags: + if not tags: + tags = self.tags + result += '{' + ','.join([f'{k}={v}' for k, v in sorted(self.tags.items()) if k in tags]) + '}' + return result + + def add_tag(self, key: str, value: str): + self.tags[key] = value + self.tag_keys.add(key) + + def has_tags(self, tag_keys: set, tags: Dict[str, str]): + """return True if all tags are present in the metric tags + tag_keys are not strictly needed but used as an optimization + """ + if self.tag_keys.intersection(tag_keys) != tag_keys: + return False + for k, v in tags.items(): + if self.tags.get(k) != v: + return False + return True + +def parse_metrics( + fin: Iterable[str], nick: str, metrics_names: set=None, diff: bool=None +) -> Dict[str, List[Metric]]: + """Parse metrics file and return dicts of metric names (no tags) and list of Metric objects + each containing the metric name, value and tags. + """ + out = {} + try: + last_type = None + for line in fin: + if not line: + continue + line = line.strip() + if not line: + continue + if line[0] == '#': + if line.startswith('# TYPE'): + tpe = line.split()[-1] + if tpe == 'gauge': + last_type = MetricType.GAUGE + elif tpe == 'counter': + last_type = MetricType.COUNTER + continue + m = metric_line_re.match(line) + if m: + name = m.group(1) + value = num(m.group(2)) + else: + ab = line.split() + name = ab[0] + value = num(ab[1]) + + metric = Metric(name, last_type, value) + metric.add_tag('n', nick) + if not metrics_names or metric.name in metrics_names: + if metric.name not in out: + out[metric.name] = [metric] + else: + out[metric.name].append(metric) + except: + print(f'An exception occurred in parse_metrics: {sys.exc_info()}') + pass + if diff and metrics_names and len(metrics_names) == 2 and len(out) == 2: + m = list(out.keys()) + name = f'{m[0]}_-_{m[1]}' + metric = Metric(name, MetricType.GAUGE, out[m[0]].value - out[m[1]].value) + out = [{name: metric}] + + return out diff --git a/test/heapwatch/metrics_viz.py b/test/heapwatch/metrics_viz.py index 584fc0ae59..741aa2dd73 100644 --- a/test/heapwatch/metrics_viz.py +++ b/test/heapwatch/metrics_viz.py @@ -11,13 +11,11 @@ """ import argparse -from datetime import datetime import glob import logging import os import re import time -from typing import Dict, Iterable, Tuple import sys import dash @@ -25,95 +23,24 @@ import plotly.graph_objs as go from plotly.subplots import make_subplots -from metrics_delta import metric_line_re, num, terraform_inventory_ip_not_names -from client_ram_report import dapp +from metrics_lib import MetricType, parse_metrics, gather_metrics_files_by_nick logger = logging.getLogger(__name__) -metrics_fname_re = re.compile(r'(.*?)\.(\d+_\d+)\.metrics') - -def 
gather_metrics_files_by_nick(metrics_files: Iterable[str]) -> Dict[str, Dict[datetime, str]]: - """return {"node nickname": {datetime: path, ...}, ...}}""" - filesByNick = {} - tf_inventory_path = None - for path in metrics_files: - fname = os.path.basename(path) - if fname == 'terraform-inventory.host': - tf_inventory_path = path - continue - m = metrics_fname_re.match(fname) - if not m: - continue - nick = m.group(1) - timestamp = m.group(2) - timestamp = datetime.strptime(timestamp, '%Y%m%d_%H%M%S') - dapp(filesByNick, nick, timestamp, path) - return tf_inventory_path, filesByNick - - -TYPE_GAUGE = 0 -TYPE_COUNTER = 1 - -def parse_metrics(fin: Iterable[str], nick: str, metrics_names: set=None, diff: bool=None) -> Tuple[Dict[str, float], Dict[str, int]]: - """Parse metrics file and return dicts of values and types""" - out = {} - types = {} - try: - last_type = None - for line in fin: - if not line: - continue - line = line.strip() - if not line: - continue - if line[0] == '#': - if line.startswith('# TYPE'): - tpe = line.split()[-1] - if tpe == 'gauge': - last_type = TYPE_GAUGE - elif tpe == 'counter': - last_type = TYPE_COUNTER - continue - m = metric_line_re.match(line) - if m: - name = m.group(1) - value = num(m.group(2)) - else: - ab = line.split() - name = ab[0] - value = num(ab[1]) - - det_idx = name.find('{') - if det_idx != -1: - name = name[:det_idx] - fullname = f'{name}{{n={nick}}}' - if not metrics_names or name in metrics_names: - out[fullname] = value - types[fullname] = last_type - except: - print(f'An exception occurred in parse_metrics: {sys.exc_info()}') - pass - if diff and metrics_names and len(metrics_names) == 2 and len(out) == 2: - m = list(out.keys()) - name = f'{m[0]}_-_{m[1]}' - new_out = {name: out[m[0]] - out[m[1]]} - new_types = {name: TYPE_GAUGE} - out = new_out - types = new_types - - return out, types - def main(): os.environ['TZ'] = 'UTC' time.tzset() - default_output_file = 'metrics_viz.png' + default_img_filename = 'metrics_viz.png' + default_html_filename = 'metrics_viz.html' ap = argparse.ArgumentParser() ap.add_argument('metrics_names', nargs='+', default=None, help='metric name(s) to track') ap.add_argument('-d', '--dir', type=str, default=None, help='dir path to find /*.metrics in') ap.add_argument('-l', '--list-nodes', default=False, action='store_true', help='list available node names with metrics') - ap.add_argument('-s', '--save', action='store_true', default=None, help=f'save plot to \'{default_output_file}\' file instead of showing it') + ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated') + ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated') + ap.add_argument('-s', '--save', type=str, choices=['png', 'html'], help=f'save plot to \'{default_img_filename}\' or \'{default_html_filename}\' file instead of showing it') ap.add_argument('--diff', action='store_true', default=None, help='diff two gauge metrics instead of plotting their values. 
Requires two metrics names to be set') ap.add_argument('--verbose', default=False, action='store_true') @@ -128,16 +55,8 @@ def main(): return 1 metrics_files = sorted(glob.glob(os.path.join(args.dir, '*.metrics'))) - tf_inventory_path, filesByNick = gather_metrics_files_by_nick(metrics_files) - if tf_inventory_path: - # remap ip addresses to node names - ip_to_name = terraform_inventory_ip_not_names(tf_inventory_path) - for nick in filesByNick.keys(): - name = ip_to_name.get(nick) - if name: - val = filesByNick[nick] - filesByNick[name] = val - del filesByNick[nick] + metrics_files.extend(glob.glob(os.path.join(args.dir, 'terraform-inventory.host'))) + filesByNick = gather_metrics_files_by_nick(metrics_files, args.nick_re, args.nick_lre) if args.list_nodes: print('Available nodes:', ', '.join(sorted(filesByNick.keys()))) @@ -156,50 +75,76 @@ def main(): fig = make_subplots( rows=nrows, cols=1, - vertical_spacing=0.03, shared_xaxes=True) + vertical_spacing=0.03, shared_xaxes=True, + subplot_titles=[f'{name}' for name in sorted(metrics_names)], + ) fig['layout']['margin'] = { - 'l': 30, 'r': 10, 'b': 10, 't': 10 + 'l': 30, 'r': 10, 'b': 10, 't': 20 } fig['layout']['height'] = 500 * nrows # fig.update_layout(template="plotly_dark") - data = { - 'time': [], - } - raw_series = {} - for nick, items in filesByNick.items(): - active_metrics = set() - for dt, metrics_file in items.items(): + for nick, files_by_date in filesByNick.items(): + active_metrics = {} + data = {'time': []} + raw_series = {} + raw_times = {} + idx = 0 + for dt, metrics_file in files_by_date.items(): data['time'].append(dt) with open(metrics_file, 'rt') as f: - metrics, types = parse_metrics(f, nick, metrics_names, args.diff) - for metric_name, metric_value in metrics.items(): - raw_value = metric_value - if metric_name not in data: - data[metric_name] = [] - raw_series[metric_name] = [] - if types[metric_name] == TYPE_COUNTER: - if len(raw_series[metric_name]) > 0: - metric_value = (metric_value - raw_series[metric_name][-1]) / (dt - data['time'][-2]).total_seconds() - else: - metric_value = 0 - data[metric_name].append(metric_value) - raw_series[metric_name].append(raw_value) - - active_metrics.add(metric_name) - - for i, metric in enumerate(sorted(active_metrics)): - fig.append_trace(go.Scatter( - x=data['time'], - y=data[metric], - name=metric, - mode='lines+markers', - line=dict(width=1), - ), i+1, 1) + metrics = parse_metrics(f, nick, metrics_names, args.diff) + for metric_name, metrics_seq in metrics.items(): + active_metric_names = [] + for metric in metrics_seq: + raw_value = metric.value + + full_name = metric.string() + if full_name not in data: + # handle gaps in data, sometimes metric file might miss a value + # but the chart requires matching x and y series (time and metric value) + # data is what does into the chart, and raw_series is used to calculate + data[full_name] = [0] * len(files_by_date) + raw_series[full_name] = [] + raw_times[full_name] = [] + + metric_value = metric.value + if metric.type == MetricType.COUNTER: + if len(raw_series[full_name]) > 0 and len(raw_times[full_name]) > 0: + metric_value = (metric_value - raw_series[full_name][-1]) / (dt - raw_times[full_name][-1]).total_seconds() + else: + metric_value = 0 + + data[full_name][idx] = metric_value + raw_series[full_name].append(raw_value) + raw_times[full_name].append(dt) + + active_metric_names.append(full_name) + + active_metric_names.sort() + active_metrics[metric_name] = active_metric_names + idx += 1 + + for i, metric_pair in 
enumerate(sorted(active_metrics.items())): + metric_name, metric_fullnames = metric_pair + for metric_fullname in metric_fullnames: + fig.append_trace(go.Scatter( + x=data['time'], + y=data[metric_fullname], + name=metric_fullname, + mode='lines+markers', + line=dict(width=1), + ), i+1, 1) if args.save: - fig.write_image(os.path.join(args.dir, default_output_file)) + if args.save == 'html': + target_path = os.path.join(args.dir, default_html_filename) + fig.write_html(target_path) + else: + target_path = os.path.join(args.dir, default_img_filename) + fig.write_image(target_path) + print(f'Saved plot to {target_path}') else: fig.show() diff --git a/test/heapwatch/requirements.txt b/test/heapwatch/requirements.txt index d4d68874dd..db92372c6d 100644 --- a/test/heapwatch/requirements.txt +++ b/test/heapwatch/requirements.txt @@ -5,3 +5,6 @@ matplotlib==3.7.2 plotly==5.16.0 py-algorand-sdk==2.3.0 kaleido==0.2.1 +networkx==3.3 +gravis=0.1.0 +termcolor=2.4.0 diff --git a/test/heapwatch/topology-extract-p2p.py b/test/heapwatch/topology-extract-p2p.py new file mode 100644 index 0000000000..41f2be9ffc --- /dev/null +++ b/test/heapwatch/topology-extract-p2p.py @@ -0,0 +1,104 @@ +""" +P2P network topology extraction script from node.log files. + +1. Run P2P scenario like scenario1s-p2p +2. Fetch logs with `algonet play fetch_node_logs` +3. Extract logs +``` +cd nodelog +find . -name 'nodelog.tar.gz' -print | xargs -I{} tar -zxf {} +``` +4. Run this script `python3 topology-extract-p2p.py -o top.json nodelog` +5. Run the visualizer `topology-viz.py top.json` +""" +import argparse +from datetime import datetime +import json +import re +import os +import sys + + +def main(): + # Regex patterns to find node IDs and connections + node_pattern = r"P2P host created: peer ID (\w{52})" + edge_pattern = r"Made outgoing connection to peer (\w{52})" + + ap = argparse.ArgumentParser() + ap.add_argument('log_dir_path', help='logs directory path') + ap.add_argument('-o', '--output', type=argparse.FileType('wt', encoding='utf-8'), help=f'save topology to the file specified instead of showing it') + ap.add_argument('-t', '--timestamp', action='store_true', help=f'store connection timestamp for each edge') + + args = ap.parse_args() + + # Directory containing log files + log_dir_path = args.log_dir_path + + nodes = [] + edges = [] + mapping = {} + + # Iterate through all files in the specified directory + for filename in os.listdir(log_dir_path): + if filename.endswith("-node.log"): + with open(os.path.join(log_dir_path, filename), 'r') as file: + mapped = filename[:len(filename) - len('-node.log')] + mapped = mapped.replace('relay', 'R') + mapped = mapped.replace('nonParticipatingNode', 'NPN') + mapped = mapped.replace('node', 'N') + node_id = None + for line in file: + # Check if line contains relevant substrings before parsing as JSON + if "P2P host created" in line or "Made outgoing connection to peer" in line: + data = json.loads(line.strip()) + + # Check for node creation + if "P2P host created" in data.get("msg", ""): + match = re.search(node_pattern, data["msg"]) + if match: + node_id = match.group(1) + nodes.append(node_id) + mapping[node_id] = mapped + + # Check for connections + elif "Made outgoing connection to peer" in data.get("msg", ""): + match = re.search(edge_pattern, data["msg"]) + if match: + target_node_id = match.group(1) + match = re.findall(r"/p2p/(\w{52})", data["local"]) + if match: + source_node_id = match[0] + else: + print('WARN: no local addr set', data, file=sys.stderr) + source_node_id 
= node_id + + if args.timestamp: + # datetime is not serializable, so we store it as string for now + edge = (source_node_id, target_node_id, {'dt': data["time"]}) + else: + edge = (source_node_id, target_node_id) + + edges.append(edge) + + result = { + "mapping": mapping, + "nodes": nodes, + "edges": edges + } + + if args.timestamp and not args.output: + edges = sorted(edges, key=lambda x: x[2]['dt']) + for edge in edges: + ts = datetime.strptime(edge[2]['dt'], "%Y-%m-%dT%H:%M:%S.%f%z") + print('%15s %5s -> %-5s' % (ts.strftime('%H:%M:%S.%f'), mapping[edge[0]], mapping[edge[1]])) + return + + if args.output: + json.dump(result, args.output, indent=2) + else: + json.dump(result, sys.stdout, indent=2) + print(file=sys.stdout) + + +if __name__ == '__main__': + main() diff --git a/test/heapwatch/topology-extract-ws.py b/test/heapwatch/topology-extract-ws.py new file mode 100644 index 0000000000..75f1d99f57 --- /dev/null +++ b/test/heapwatch/topology-extract-ws.py @@ -0,0 +1,115 @@ +""" +WSNet network topology extraction script from node.log files. + +1. Run cluster scenario like scenario1s +2. Fetch logs with `algonet play fetch_node_logs` +3. Extract logs +``` +cd nodelog +find . -name 'nodelog.tar.gz' -print | xargs -I{} tar -zxf {} +``` +4. Run this script `python3 topology-extract-ws.py -o top.json -i ../terraform-inventory.json nodelog` +5. Run the visualizer `topology-viz.py top.json` +""" +import argparse +from datetime import datetime +import json +import os +import sys + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('log_dir_path', help='logs directory path') + ap.add_argument('-i', '--inventory-file', type=argparse.FileType('rt', encoding='utf-8'), required=True, help='terraform inventory file path') + ap.add_argument('-o', '--output', type=argparse.FileType('wt', encoding='utf-8'), help=f'save topology to the file specified instead of showing it') + ap.add_argument('-t', '--timestamp', action='store_true', help=f'store connection timestamp for each edge') + + args = ap.parse_args() + + # Directory containing log files + log_dir_path = args.log_dir_path + inventory_file = args.inventory_file + + nodes = [] + edges = [] + mapping = {} + + inventory = json.load(inventory_file) + + ip_to_name = {} + for k, v in inventory.items(): + if k.startswith('name_'): + name = k.split('_')[1].upper() + if not isinstance(v, list) or len(v) != 1: + raise RuntimeError(f"Invalid inventory entry, expected a single item list: {k}={v}") + ip = v[0] + ip_to_name[ip] = name + # no need for mapping but keep the data compatible with the topology-viz script + mapping[name] = name + + # Iterate through all files in the specified directory + for filename in os.listdir(log_dir_path): + if filename.endswith('-node.log'): + with open(os.path.join(log_dir_path, filename), 'r') as file: + mapped = filename[:len(filename) - len('-node.log')] + mapped = mapped.replace('relay', 'R') + mapped = mapped.replace('nonParticipatingNode', 'NPN') + mapped = mapped.replace('node', 'N') + nodes.append(mapped) + for line in file: + # Check if line contains relevant substrings before parsing as JSON + if "Accepted incoming connection from peer" in line or "Made outgoing connection to peer" in line: + data = json.loads(line.strip()) + + # Check for incoming connections + if "Accepted incoming connection from peer" in data.get("msg", ""): + remote = data['remote'] + remote_ip = remote.split(':')[0] + remote_name = ip_to_name[remote_ip] + source = remote_name + target = mapped + edges.append((source, target)) + + # 
Check for outgoing connections + elif "Made outgoing connection to peer" in data.get('msg', ""): + remote = data['remote'] + name: str = remote.split('.')[0] + # check ip or name + if name.isdigit(): + remote_ip = remote.split(':')[0] + remote_name = ip_to_name[remote_ip] + target = remote_name + source = mapped + else: + target = name.upper() + source = mapped + + if args.timestamp: + # datetime is not serializable, so we store it as string for now + edge = (source, target, {'dt': data["time"]}) + else: + edge = (source, target) + + edges.append(edge) + + result = { + "mapping": mapping, + "nodes": nodes, + "edges": edges + } + + if args.timestamp and not args.output: + edges = sorted(edges, key=lambda x: x[2]['dt']) + for edge in edges: + ts = datetime.strptime(edge[2]['dt'], "%Y-%m-%dT%H:%M:%S.%f%z") + print('%15s %5s -> %-5s' % (ts.strftime('%H:%M:%S.%f'), edge[0], edge[1])) + return + + if args.output: + json.dump(result, args.output, indent=2) + else: + json.dump(result, sys.stdout, indent=2) + print(file=sys.stdout) + +if __name__ == '__main__': + main() diff --git a/test/heapwatch/topology-viz.py b/test/heapwatch/topology-viz.py new file mode 100644 index 0000000000..1393421696 --- /dev/null +++ b/test/heapwatch/topology-viz.py @@ -0,0 +1,75 @@ +""" +P2P network topology visualization script. +See topology-extract-p2p[-ws].py for details. +""" +import argparse +import json +import sys + +import gravis as gv +import networkx as nx + +ap = argparse.ArgumentParser() +ap.add_argument('topology_filename', help='topology json file') +ap.add_argument('-o', '--output', type=argparse.FileType('wt', encoding='utf-8'), help=f'save plot to the file specified instead of showing it') + +args = ap.parse_args() + +with open(args.topology_filename, 'rt') as f: + topology = json.load(f) + +# Create a new directed graph +G = nx.DiGraph() + +G.add_edges_from(topology['edges']) +nx.relabel_nodes(G, topology['mapping'], copy=False) + +# Set node colors +for node in G: + if node.startswith('R'): + G.nodes[node]['color'] = 'red' + elif node.startswith('NPN'): + G.nodes[node]['color'] = 'blue' + elif node.startswith('N'): + G.nodes[node]['color'] = 'green' + else: + raise RuntimeError(f"Unknown node type: {node}") + +# Calculate in-degrees +in_degrees = dict(G.in_degree()) +out_degrees = dict(G.out_degree()) +degree_centrality = nx.degree_centrality(G) +load_centrality = nx.algorithms.load_centrality(G) + +for node in G: + size = max(2, in_degrees[node]) + G.nodes[node]['size'] = size + G.nodes[node]['in_degree'] = in_degrees[node] + G.nodes[node]['out_degree'] = out_degrees[node] + hover = f'In: {in_degrees[node]}, Out: {out_degrees[node]}' + hover += f'\nDegree centrality: {degree_centrality[node]:.2f}' + hover += f'\nLoad centrality: {load_centrality[node]:.2f}' + G.nodes[node]['hover'] = hover + +print('Transitivity:', nx.transitivity(G)) +print('Clustering coefficient:', nx.average_clustering(G)) +print('Avg shortest path length:', nx.average_shortest_path_length(G.to_undirected(as_view=True))) + +res = gv.d3( + G, + node_hover_tooltip=True, + node_size_data_source='size', + node_label_size_factor=0.5, + use_node_size_normalization=True, + node_size_normalization_max=20, + use_edge_size_normalization=True, + edge_curvature=0.1 + ) + +if not args.output: + res.display() + sys.exit(0) + +# Save to file +data = res.to_html() +args.output.write(data) diff --git a/test/testdata/configs/config-v34.json b/test/testdata/configs/config-v34.json index 4a9714115f..7f16155303 100644 --- 
a/test/testdata/configs/config-v34.json +++ b/test/testdata/configs/config-v34.json @@ -30,7 +30,7 @@ "ConnectionsRateLimitingWindowSeconds": 1, "CrashDBDir": "", "DNSBootstrapID": ".algorand.network?backup=.algorand.net&dedup=.algorand-.(network|net)", - "DNSSecurityFlags": 1, + "DNSSecurityFlags": 9, "DeadlockDetection": 0, "DeadlockDetectionThreshold": 30, "DisableAPIAuth": false, @@ -43,6 +43,7 @@ "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, "EnableBlockService": false, + "EnableDHTProviders": false, "EnableDeveloperAPI": false, "EnableExperimentalAPI": false, "EnableFollowMode": false, @@ -53,6 +54,7 @@ "EnableMetricReporting": false, "EnableOutgoingNetworkMessageFiltering": true, "EnableP2P": false, + "EnableP2PHybridMode": false, "EnablePingHandler": true, "EnableProcessBlockStats": false, "EnableProfiler": false, @@ -96,6 +98,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "P2PNetAddress": "", "P2PPersistPeerID": false, "P2PPrivateKeyLocation": "", "ParticipationKeysRefreshInterval": 60000000000, diff --git a/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/genesis.json b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/genesis.json new file mode 100644 index 0000000000..7ae67edf88 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/genesis.json @@ -0,0 +1,30 @@ +{ + "NetworkName": "hello-p2p", + "VersionModifier": "", + "ConsensusProtocol": "future", + "FirstPartKeyRound": 0, + "LastPartKeyRound": 5000, + "PartKeyDilution": 0, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 25, + "Online": false + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/net.json b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/net.json new file mode 100644 index 0000000000..423d31c1a4 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/net.json @@ -0,0 +1,107 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Nodes": [ + { + "Name": "relay1", + "IsRelay": true, + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + } + ], + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "AdminAPIToken": "{{AdminAPIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "P2PBootstrap": true, + "ConfigJSONOverride": "{ \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableP2P\": true }" + } + ] + }, + { + "Name": "R2", + "Nodes": [ + { + "Name": "relay2", + "IsRelay": true, + "Wallets": [ + { + "Name": "Wallet2", + "ParticipationOnly": false + } + ], + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "AdminAPIToken": "{{AdminAPIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + 
"MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "P2PBootstrap": true, + "ConfigJSONOverride": "{ \"DNSBootstrapID\": \".algodev.network\",\"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableP2P\": true }" + } + ] + }, + { + "Name": "N1", + "Group": "", + "Nodes": [ + { + "Name": "node1", + "Wallets": [ + { + "Name": "Wallet3", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "AdminAPIToken": "{{AdminAPIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableP2P\": true }" + } + ] + }, + { + "Name": "NPN1", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode1", + "Wallets": [ + { + "Name": "Wallet4", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "AdminAPIToken": "{{AdminAPIToken}}", + "EnableTelemetry": false, + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableP2P\": true}" + } + ] + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/recipe.json b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/recipe.json new file mode 100644 index 0000000000..a2f88f63b4 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"genesis.json", + "NetworkFile":"net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "topology.json" +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/topology.json b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/topology.json new file mode 100644 index 0000000000..acc7cca9ec --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-small-p2p/topology.json @@ -0,0 +1,20 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Template": "AWS-US-EAST-1-Small" + }, + { + "Name": "R2", + "Template": "AWS-US-EAST-1-Small" + }, + { + "Name": "N1", + "Template": "AWS-US-EAST-1-Small" + }, + { + "Name": "NPN1", + "Template": "AWS-US-EAST-1-Small" + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/genesis.json b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/genesis.json new file mode 100644 index 
0000000000..7ae67edf88 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/genesis.json @@ -0,0 +1,30 @@ +{ + "NetworkName": "hello-p2p", + "VersionModifier": "", + "ConsensusProtocol": "future", + "FirstPartKeyRound": 0, + "LastPartKeyRound": 5000, + "PartKeyDilution": 0, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 25, + "Online": false + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world/hosttemplates.json b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/hosttemplates.json similarity index 100% rename from test/testdata/deployednettemplates/recipes/hello-world/hosttemplates.json rename to test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/hosttemplates.json diff --git a/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/net.json b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/net.json new file mode 100644 index 0000000000..8ea8328c62 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/net.json @@ -0,0 +1,101 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Nodes": [ + { + "Name": "relay1", + "IsRelay": true, + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + } + ], + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "P2PBootstrap": true, + "ConfigJSONOverride": "{ \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"EnableP2P\": true }" + } + ] + }, + { + "Name": "R2", + "Nodes": [ + { + "Name": "relay2", + "IsRelay": true, + "Wallets": [ + { + "Name": "Wallet2", + "ParticipationOnly": false + } + ], + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"DNSBootstrapID\": \".algodev.network\",\"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"EnableP2P\": true }" + } + ] + }, + { + "Name": "N1", + "Group": "", + "Nodes": [ + { + "Name": "node1", + "Wallets": [ + { + "Name": "Wallet3", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableP2P\": true }" + } + ] + }, + { + "Name": "NPN1", + "Group": "", + "Nodes": [ + { + 
"Name": "nonParticipatingNode1", + "Wallets": [ + { + "Name": "Wallet4", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableP2P\": true }" + } + ] + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/recipe.json b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/recipe.json new file mode 100644 index 0000000000..be6b71ec55 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"genesis.json", + "NetworkFile":"net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/t2micro-useast1.json", + "TopologyFile": "topology.json" +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/topology.json b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/topology.json new file mode 100644 index 0000000000..acc7cca9ec --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/hello-world-tiny-p2p/topology.json @@ -0,0 +1,20 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Template": "AWS-US-EAST-1-Small" + }, + { + "Name": "R2", + "Template": "AWS-US-EAST-1-Small" + }, + { + "Name": "N1", + "Template": "AWS-US-EAST-1-Small" + }, + { + "Name": "NPN1", + "Template": "AWS-US-EAST-1-Small" + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/hello-world/genesis.json b/test/testdata/deployednettemplates/recipes/hello-world/genesis.json index 218b694d5f..b7fdd9502b 100644 --- a/test/testdata/deployednettemplates/recipes/hello-world/genesis.json +++ b/test/testdata/deployednettemplates/recipes/hello-world/genesis.json @@ -3,7 +3,7 @@ "VersionModifier": "", "ConsensusProtocol": "future", "FirstPartKeyRound": 0, - "LastPartKeyRound": 1000300, + "LastPartKeyRound": 5000, "PartKeyDilution": 0, "Wallets": [ { diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile new file mode 100644 index 0000000000..f4ec4b3c1f --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile @@ -0,0 +1,23 @@ +# scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance +PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json + +.PHONY: clean all + +all: net.json genesis.json topology.json + +node.json nonPartNode.json relay.json: + python3 copy-node-configs.py + +net.json: node.json nonPartNode.json relay.json ${GOPATH}/bin/netgoal Makefile + netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS} + +genesis.json: ${GOPATH}/bin/netgoal Makefile + netgoal generate -t genesis -r /tmp/wat -o genesis.l.json ${PARAMS} + jq '.LastPartKeyRound=5000|.NetworkName="s1s-p2p"|.ConsensusProtocol="future"' < genesis.l.json > genesis.json + rm genesis.l.json + +topology.json: ../scenario1s/gen_topology.py + python3 ../scenario1s/gen_topology.py + +clean: + rm -f net.json genesis.json node.json nonPartNode.json relay.json topology.json diff --git 
a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md new file mode 100644 index 0000000000..1cad95bc2d --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md @@ -0,0 +1,16 @@ +# Scenario1s for P2P testing + +This is a copy of scenario1s with the following changes in nodes configuration: +1. All nodes get `"EnableP2P": true` into their config. +1. All relays additionally get `"P2PBootstrap": true` to their netgoal config. + +## Build + +```sh +export GOPATH=~/go +make +``` + +## Run + +Run as usual cluster test scenario with algonet. diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py new file mode 100644 index 0000000000..6ffbc01d8d --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py @@ -0,0 +1,55 @@ +""" +Copies node.json, relay.json and nonPartNode.json from scenario1s: +1. Append \"EnableP2P\": true to all configs +2. Set P2PBootstrap: true to relay.json +3. Set DNSSecurityFlags: 0 to all configs +""" + +import json +import os + +CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) +SCENARIO1S_DIR = os.path.join(CURRENT_DIR, "..", "scenario1s") + +def main(): + """main""" + with open(os.path.join(SCENARIO1S_DIR, "node.json"), "r") as f: + node = json.load(f) + with open(os.path.join(SCENARIO1S_DIR, "relay.json"), "r") as f: + relay = json.load(f) + with open(os.path.join(SCENARIO1S_DIR, "nonPartNode.json"), "r") as f: + non_part_node = json.load(f) + + # make all relays P2PBootstrap'able + relay["P2PBootstrap"] = True + + # enable P2P for all configs + for config in (node, relay, non_part_node): + override = config.get("ConfigJSONOverride") + if override: + override_json = json.loads(override) + override_json["EnableP2P"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + config["ConfigJSONOverride"] = json.dumps(override_json) + altconfigs = config.get("AltConfigs", []) + if altconfigs: + for i, altconfig in enumerate(altconfigs): + override = altconfig.get("ConfigJSONOverride") + if override: + override_json = json.loads(override) + override_json["EnableP2P"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + altconfigs[i]["ConfigJSONOverride"] = json.dumps(override_json) + config["AltConfigs"] = altconfigs + + with open("node.json", "w") as f: + json.dump(node, f, indent=4) + with open("relay.json", "w") as f: + json.dump(relay, f, indent=4) + with open("nonPartNode.json", "w") as f: + json.dump(non_part_node, f, indent=4) + + print("Done!") + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/recipe.json b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/recipe.json new file mode 100644 index 0000000000..a2f88f63b4 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"genesis.json", + "NetworkFile":"net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "topology.json" +} diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile 
b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile index ed8a70132e..8b83c38b6c 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s/Makefile +++ b/test/testdata/deployednettemplates/recipes/scenario1s/Makefile @@ -1,14 +1,14 @@ # scenario1s is scenario1 but smaller, (100 nodes, 100 wallets) -> (20 nodes, 20 wallets), each algod gets single tenancy on a smaller ec2 instance PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json -all: net.json genesis.json topology.json bootstrappedFile.json +all: net.json genesis.json topology.json net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS} genesis.json: ${GOPATH}/bin/netgoal Makefile netgoal generate -t genesis -r /tmp/wat -o genesis.l.json ${PARAMS} - jq '.LastPartKeyRound=22000|.NetworkName="s1s"|.ConsensusProtocol="future"' < genesis.l.json > genesis.json + jq '.LastPartKeyRound=5000|.NetworkName="s1s"|.ConsensusProtocol="future"' < genesis.l.json > genesis.json rm genesis.l.json topology.json: gen_topology.py diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod index cfeb442668..9baf9fafef 100644 --- a/tools/block-generator/go.mod +++ b/tools/block-generator/go.mod @@ -48,22 +48,33 @@ require ( github.com/elastic/gosigar v0.14.2 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ipfs/boxo v0.10.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-datastore v0.6.0 // indirect + github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/ipld/go-ipld-prime v0.20.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.6 // indirect @@ -72,10 +83,14 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p v0.33.2 // indirect github.com/libp2p/go-libp2p-asn-util 
v0.4.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.24.3 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-libp2p-pubsub v0.10.0 // indirect + github.com/libp2p/go-libp2p-record v0.2.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect @@ -104,10 +119,12 @@ require ( github.com/olivere/elastic v6.2.14+incompatible // indirect github.com/onsi/ginkgo/v2 v2.15.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.47.0 // indirect @@ -120,6 +137,11 @@ require ( github.com/sirupsen/logrus v1.8.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect @@ -133,6 +155,7 @@ require ( golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum index dbd7cdeb72..5ed96a265b 100644 --- a/tools/block-generator/go.sum +++ b/tools/block-generator/go.sum @@ -118,6 +118,7 @@ github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0 github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= @@ -131,6 +132,8 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.4 
h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= @@ -141,12 +144,16 @@ github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aev github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -162,6 +169,9 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -174,7 +184,9 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -185,7 +197,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -196,12 +210,16 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -209,7 +227,14 @@ github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/ github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -221,18 +246,34 @@ github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/C github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY= +github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= +github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= 
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= @@ -240,6 +281,7 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= @@ -278,14 +320,22 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p v0.33.2 h1:vCdwnFxoGOXMKmaGHlDSnL4bM3fQeW8pgIa9DECnb40= github.com/libp2p/go-libp2p v0.33.2/go.mod h1:zTeppLuCvUIkT118pFVzA8xzP/p2dJYOMApCkFh0Yww= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.24.3 h1:VjxtDVWaaf4UFjGBf+yl2JCiGaHx7+ctAUa9oJCR3QE= +github.com/libp2p/go-libp2p-kad-dht v0.24.3/go.mod h1:BShPzRbK6+fN3hk8a0WGAYKpb8m4k+DtchkqouGTrSg= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-pubsub v0.10.0 
h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -388,6 +438,8 @@ github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8P github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= @@ -402,6 +454,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= @@ -423,6 +477,7 @@ github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFD github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= @@ -460,7 +515,11 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions 
v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -476,6 +535,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -484,12 +544,16 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -497,6 +561,10 @@ github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPU github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= 
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -510,6 +578,15 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -522,9 +599,12 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= @@ -534,6 +614,7 @@ golang.org/x/crypto 
v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -552,6 +633,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -578,6 +660,7 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -655,6 +738,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -667,6 +753,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -681,6 +769,7 @@ google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -688,13 +777,18 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -702,6 +796,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= @@ -726,6 +821,7 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= pgregory.net/rapid v0.6.2 h1:ErW5sL+UKtfBfUTsWHDCoeB+eZKLKMxrSd1VJY6W4bw= diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go index 4899fb0c73..525ddba705 100644 --- a/tools/debug/algodump/main.go +++ b/tools/debug/algodump/main.go @@ -179,7 +179,11 @@ func main() { *genesisID, protocol.NetworkID(*networkID)) setDumpHandlers(n) - n.Start() + err := n.Start() + if err != nil { + log.Errorf("Failed to start network: %v", err) + return + } for { time.Sleep(time.Second) diff --git a/tools/debug/transplanter/main.go b/tools/debug/transplanter/main.go index 1a41504c99..e6f88d007e 100644 --- a/tools/debug/transplanter/main.go +++ b/tools/debug/transplanter/main.go @@ -393,7 +393,11 @@ func main() { os.Exit(1) } - followerNode.Start() + err = followerNode.Start() + if err != nil { + fmt.Fprintf(os.Stderr, "Cannot start follower node: %v", err) + os.Exit(1) + } for followerNode.Ledger().Latest() < basics.Round(*roundStart) { fmt.Printf("At round %d, waiting for %d\n", followerNode.Ledger().Latest(), *roundStart) diff --git a/util/metrics/counter.go b/util/metrics/counter.go index 38852386d8..e9b437a4a2 100644 --- a/util/metrics/counter.go +++ b/util/metrics/counter.go @@ -28,6 +28,14 @@ type Counter struct { // MakeCounter create a new counter with the provided name and description. func MakeCounter(metric MetricName) *Counter { + c := makeCounter(metric) + c.Register(nil) + return c +} + +// makeCounter create a new counter with the provided name and description +// but does not register it with the default registry. 
+func makeCounter(metric MetricName) *Counter { c := &Counter{c: couge{ values: make([]*cougeValues, 0), description: metric.Description, @@ -35,7 +43,6 @@ func MakeCounter(metric MetricName) *Counter { labels: make(map[string]int), valuesIndices: make(map[int]int), }} - c.Register(nil) return c } diff --git a/util/metrics/gauge.go b/util/metrics/gauge.go index bbc143a14f..edf144e48f 100644 --- a/util/metrics/gauge.go +++ b/util/metrics/gauge.go @@ -27,6 +27,14 @@ type Gauge struct { // MakeGauge create a new gauge with the provided name and description. func MakeGauge(metric MetricName) *Gauge { + c := makeGauge(metric) + c.Register(nil) + return c +} + +// makeGauge create a new gauge with the provided name and description +// but does not register it with the default registry. +func makeGauge(metric MetricName) *Gauge { c := &Gauge{g: couge{ values: make([]*cougeValues, 0), description: metric.Description, @@ -34,7 +42,6 @@ func MakeGauge(metric MetricName) *Gauge { labels: make(map[string]int), valuesIndices: make(map[int]int), }} - c.Register(nil) return c } diff --git a/util/metrics/opencensus.go b/util/metrics/opencensus.go new file mode 100644 index 0000000000..fefb1d054b --- /dev/null +++ b/util/metrics/opencensus.go @@ -0,0 +1,172 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +// Functions for opencensus stats aggs conversion to our internal data type +// suitable for further reporting + +package metrics + +import ( + "context" + "strings" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "golang.org/x/exp/slices" +) + +type defaultOpencensusGatherer struct { + names []string +} + +// WriteMetric return opencensus data converted to algorand format +func (og *defaultOpencensusGatherer) WriteMetric(buf *strings.Builder, parentLabels string) { + metrics := collectOpenCensusMetrics(og.names) + for _, metric := range metrics { + metric.WriteMetric(buf, parentLabels) + } +} + +// AddMetric return opencensus data converted to algorand format +func (og *defaultOpencensusGatherer) AddMetric(values map[string]float64) { + metrics := collectOpenCensusMetrics(og.names) + for _, metric := range metrics { + metric.AddMetric(values) + } +} + +type statExporter struct { + names map[string]struct{} + metrics []Metric +} + +func collectOpenCensusMetrics(names []string) []Metric { + exporter := &statExporter{} + if len(names) > 0 { + exporter.names = make(map[string]struct{}, len(names)) + for _, name := range names { + exporter.names[name] = struct{}{} + } + } + reader := metricexport.NewReader() + reader.ReadAndExport(exporter) + + return exporter.metrics +} + +// statCounter stores single int64 value per stat with labels +type statCounter struct { + name string + description string + labels []map[string]string + values []int64 +} + +// WriteMetric outputs Prometheus metrics for all labels/values in statCounter +func (st *statCounter) WriteMetric(buf *strings.Builder, parentLabels string) { + name := sanitizePrometheusName(st.name) + counter := makeCounter(MetricName{name, st.description}) + for i := 0; i < len(st.labels); i++ { + counter.AddUint64(uint64(st.values[i]), st.labels[i]) + } + counter.WriteMetric(buf, parentLabels) +} + +// AddMetric outputs all statCounter's labels/values into a map +func (st *statCounter) AddMetric(values map[string]float64) { + counter := makeCounter(MetricName{st.name, st.description}) + for i := 0; i < len(st.labels); i++ { + counter.AddUint64(uint64(st.values[i]), st.labels[i]) + } + counter.AddMetric(values) +} + +// statCounter stores single float64 sun value per stat with labels +type statDistribution struct { + name string + description string + labels []map[string]string + values []float64 +} + +// WriteMetric outputs Prometheus metrics for all labels/values in statCounter +func (st *statDistribution) WriteMetric(buf *strings.Builder, parentLabels string) { + name := sanitizePrometheusName(st.name) + gauge := makeGauge(MetricName{name, st.description}) + for i := 0; i < len(st.labels); i++ { + gauge.SetLabels(uint64(st.values[i]), st.labels[i]) + } + gauge.WriteMetric(buf, parentLabels) +} + +// AddMetric outputs all statCounter's labels/values into a map +func (st *statDistribution) AddMetric(values map[string]float64) { + gauge := makeGauge(MetricName{st.name, st.description}) + for i := 0; i < len(st.labels); i++ { + gauge.SetLabels(uint64(st.values[i]), st.labels[i]) + } + gauge.AddMetric(values) +} + +func (s *statExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { + labeler := func(lk []metricdata.LabelKey, lv []metricdata.LabelValue, ignores ...string) map[string]string { + // default labeler concatenates labels + labels := make(map[string]string, len(lk)) + for i := range lk { + if lv[i].Present && (len(ignores) == 0 || len(ignores) > 0 && !slices.Contains(ignores, 
lk[i].Key)) { + labels[lk[i].Key] = lv[i].Value + } + } + return labels + } + + for _, m := range data { + if _, ok := s.names[m.Descriptor.Name]; len(s.names) > 0 && !ok { + continue + } + if m.Descriptor.Type == metricdata.TypeCumulativeInt64 { + counter := statCounter{ + name: m.Descriptor.Name, + description: m.Descriptor.Description, + } + for _, d := range m.TimeSeries { + // ignore a known useless instance_id label + labels := labeler(m.Descriptor.LabelKeys, d.LabelValues, "instance_id") + counter.labels = append(counter.labels, labels) + counter.values = append(counter.values, d.Points[0].Value.(int64)) + } + + s.metrics = append(s.metrics, &counter) + } else if m.Descriptor.Type == metricdata.TypeCumulativeDistribution { + // TODO: the metrics below cannot be integer gauge, and Sum statistic does not make any sense. + // libp2p.io/dht/kad/outbound_request_latency + // libp2p.io/dht/kad/inbound_request_latency + // Ignore? + dist := statDistribution{ + name: m.Descriptor.Name, + description: m.Descriptor.Description, + } + // check if we are processing a known DHT metric + for _, d := range m.TimeSeries { + label := labeler(m.Descriptor.LabelKeys, d.LabelValues, "instance_id") + dist.labels = append(dist.labels, label) + dist.values = append(dist.values, d.Points[0].Value.(*metricdata.Distribution).Sum) + } + s.metrics = append(s.metrics, &dist) + } + } + return nil +} diff --git a/util/metrics/opencensus_test.go b/util/metrics/opencensus_test.go new file mode 100644 index 0000000000..f5401af541 --- /dev/null +++ b/util/metrics/opencensus_test.go @@ -0,0 +1,147 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package metrics + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +// TestDHTOpenCensusMetrics ensures both count and distribution stats are properly converted to our metrics +func TestDHTOpenCensusMetrics(t *testing.T) { + partitiontest.PartitionTest(t) + + defaultBytesDistribution := view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + + keyMessageType := tag.MustNewKey("message_type") + keyPeerID := tag.MustNewKey("peer_id") + keyInstanceID := tag.MustNewKey("instance_id") + + sentMessages := stats.Int64("my_sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless) + receivedBytes := stats.Int64("my_received_bytes", "Total received bytes per RPC", stats.UnitBytes) + + receivedBytesView := &view.View{ + Measure: receivedBytes, + TagKeys: []tag.Key{keyMessageType, keyPeerID, keyInstanceID}, + Aggregation: defaultBytesDistribution, + } + sentMessagesView := &view.View{ + Measure: sentMessages, + TagKeys: []tag.Key{keyMessageType, keyPeerID, keyInstanceID}, + Aggregation: view.Count(), + } + + err := view.Register(receivedBytesView, sentMessagesView) + require.NoError(t, err) + defer view.Unregister(receivedBytesView, sentMessagesView) + + ctx := context.Background() + tags1 := []tag.Mutator{ + tag.Upsert(keyMessageType, "UNKNOWN"), + tag.Upsert(keyPeerID, "1234"), + tag.Upsert(keyInstanceID, fmt.Sprintf("%p", t)), + } + ctx1, _ := tag.New(ctx, tags1...) + + stats.Record(ctx1, + sentMessages.M(1), + receivedBytes.M(int64(100)), + ) + + tags2 := []tag.Mutator{ + tag.Upsert(keyMessageType, "ADD_PROVIDER"), + tag.Upsert(keyPeerID, "abcd"), + tag.Upsert(keyInstanceID, fmt.Sprintf("%p", t)), + } + ctx2, _ := tag.New(ctx, tags2...) 
+ + stats.Record(ctx2, + sentMessages.M(1), + receivedBytes.M(int64(123)), + ) + + // first check some metrics are collected when no names provided + // cannot assert on specific values because network tests might run in parallel with this package + // and produce some metric under specific configuration + require.Eventually(t, func() bool { + // stats are written by a background goroutine, give it a chance to finish + metrics := collectOpenCensusMetrics(nil) + return len(metrics) >= 2 + }, 10*time.Second, 20*time.Millisecond) + + // now assert on specific names and values + metrics := collectOpenCensusMetrics([]string{"my_sent_messages", "my_received_bytes"}) + require.Len(t, metrics, 2) + for _, m := range metrics { + var buf strings.Builder + m.WriteMetric(&buf, "") + promValue := buf.String() + if strings.Contains(promValue, "my_sent_messages") { + require.Contains(t, promValue, "my_sent_messages counter\n") + require.Contains(t, promValue, `peer_id="abcd"`) + require.Contains(t, promValue, `peer_id="1234"`) + require.Contains(t, promValue, `message_type="ADD_PROVIDER"`) + require.Contains(t, promValue, `message_type="UNKNOWN"`) + require.Contains(t, promValue, "} 1\n") + } else if strings.Contains(promValue, "my_received_bytes") { + require.Contains(t, promValue, "my_received_bytes gauge\n") + require.Contains(t, promValue, `peer_id="1234"`) + require.Contains(t, promValue, `peer_id="abcd"`) + require.Contains(t, promValue, `message_type="ADD_PROVIDER"`) + require.Contains(t, promValue, `message_type="UNKNOWN"`) + require.Contains(t, promValue, "} 123\n") + require.Contains(t, promValue, "} 100\n") + } else { + require.Fail(t, "not expected metric", promValue) + } + + values := make(map[string]float64) + m.AddMetric(values) + for k, v := range values { + require.True(t, strings.Contains(k, "message_type__ADD_PROVIDER") || strings.Contains(k, "message_type__UNKNOWN")) + require.True(t, strings.Contains(k, "peer_id__1234") || strings.Contains(k, "peer_id__abcd")) + if strings.Contains(k, "my_sent_messages") { + require.Equal(t, v, float64(1)) + } else if strings.Contains(k, "my_received_bytes") { + require.True(t, v == 100 || v == 123) + } else { + require.Fail(t, "not expected metric key", k) + } + } + } + + // ensure the exported gatherer works + reg := MakeRegistry() + reg.Register(&OpencensusDefaultMetrics) + defer reg.Deregister(&OpencensusDefaultMetrics) + + var buf strings.Builder + reg.WriteMetrics(&buf, "") + + require.Contains(t, buf.String(), "my_sent_messages") + require.Contains(t, buf.String(), "my_received_bytes") +} diff --git a/util/metrics/prometheus.go b/util/metrics/prometheus.go new file mode 100644 index 0000000000..b55f931001 --- /dev/null +++ b/util/metrics/prometheus.go @@ -0,0 +1,106 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +// Functions for Prometheus metrics conversion to our internal data type +// suitable for further reporting + +package metrics + +import ( + "strings" + + "github.com/prometheus/client_golang/prometheus" + iopc "github.com/prometheus/client_model/go" +) + +type defaultPrometheusGatherer struct { + names []string +} + +// WriteMetric return prometheus converted to algorand format. +// Supports only counter and gauge types and ignores go_ metrics. +func (pg *defaultPrometheusGatherer) WriteMetric(buf *strings.Builder, parentLabels string) { + metrics := collectPrometheusMetrics(pg.names) + for _, metric := range metrics { + metric.WriteMetric(buf, parentLabels) + } +} + +// AddMetric return prometheus data converted to algorand format. +// Supports only counter and gauge types and ignores go_ metrics. +func (pg *defaultPrometheusGatherer) AddMetric(values map[string]float64) { + metrics := collectPrometheusMetrics(pg.names) + for _, metric := range metrics { + metric.AddMetric(values) + } +} + +func collectPrometheusMetrics(names []string) []Metric { + var result []Metric + var namesMap map[string]struct{} + if len(names) > 0 { + namesMap = make(map[string]struct{}, len(names)) + for _, name := range names { + namesMap[name] = struct{}{} + } + } + + convertLabels := func(m *iopc.Metric) map[string]string { + var labels map[string]string + if lbls := m.GetLabel(); len(lbls) > 0 { + labels = make(map[string]string, len(lbls)) + for _, lbl := range lbls { + labels[lbl.GetName()] = lbl.GetValue() + } + } + return labels + } + metrics, _ := prometheus.DefaultGatherer.Gather() + for _, metric := range metrics { + if strings.HasPrefix(metric.GetName(), "go_") { + continue + } + if _, ok := namesMap[metric.GetName()]; len(namesMap) > 0 && ok || len(namesMap) == 0 { + if metric.GetType() == iopc.MetricType_COUNTER && metric.GetMetric() != nil { + counter := makeCounter(MetricName{metric.GetName(), metric.GetHelp()}) + ma := metric.GetMetric() + for _, m := range ma { + if m.GetCounter() == nil { + continue + } + val := uint64(m.GetCounter().GetValue()) + labels := convertLabels(m) + counter.AddUint64(val, labels) + } + result = append(result, counter) + } else if metric.GetType() == iopc.MetricType_GAUGE && metric.GetMetric() != nil { + gauge := makeGauge(MetricName{metric.GetName(), metric.GetHelp()}) + + ma := metric.GetMetric() + for _, m := range ma { + if m.GetGauge() == nil { + continue + } + val := uint64(m.GetGauge().GetValue()) + labels := convertLabels(m) + gauge.SetLabels(val, labels) + } + result = append(result, gauge) + } + } + } + return result +} diff --git a/util/metrics/prometheus_test.go b/util/metrics/prometheus_test.go new file mode 100644 index 0000000000..75ef94f97e --- /dev/null +++ b/util/metrics/prometheus_test.go @@ -0,0 +1,148 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
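Like the opencensus bridge, the Prometheus bridge above piggybacks on the client_golang default registerer: any non-go_ counter or gauge registered there (which is how libp2p publishes its metrics) is picked up by collectPrometheusMetrics and re-emitted through WriteMetric/AddMetric. A minimal sketch, not part of the patch; the counter name is made up for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/algorand/go-algorand/util/metrics"
)

func main() {
	// A hypothetical counter registered the same way libp2p registers its metrics.
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Example counter",
	})
	prometheus.MustRegister(c)
	c.Add(7)

	// Bridge it into algod's metrics output.
	reg := metrics.MakeRegistry()
	reg.Register(&metrics.PrometheusDefaultMetrics)
	defer reg.Deregister(&metrics.PrometheusDefaultMetrics)

	var buf strings.Builder
	reg.WriteMetrics(&buf, "")
	fmt.Print(buf.String()) // includes example_requests_total 7
}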
+ +// Functions for Prometheus metrics conversion to our internal data type +// suitable for further reporting + +package metrics + +import ( + "strings" + "testing" + "time" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestPrometheusMetrics(t *testing.T) { + partitiontest.PartitionTest(t) + + const metricNamespace = "test_metric" + + // gauge vec with labels + gaugeLabels := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "streams", + Help: "Number of Streams", + }, []string{"dir", "scope", "protocol"}) + + // gauge without labels + gauge := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "protocols_count", + Help: "Protocols Count", + }, + ) + + // counter with labels + counterLabels := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "identify_total", + Help: "Identify", + }, + []string{"dir"}, + ) + + // counter without labels + counter := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "counter_total", + Help: "Counter", + }, + ) + + prometheus.DefaultRegisterer.MustRegister(gaugeLabels) + prometheus.DefaultRegisterer.MustRegister(gauge) + prometheus.DefaultRegisterer.MustRegister(counterLabels) + prometheus.DefaultRegisterer.MustRegister(counter) + + defer prometheus.DefaultRegisterer.Unregister(gaugeLabels) + defer prometheus.DefaultRegisterer.Unregister(gauge) + defer prometheus.DefaultRegisterer.Unregister(counterLabels) + defer prometheus.DefaultRegisterer.Unregister(counter) + + // set some values + tags := []string{"outbound", "protocol", "/test/proto"} + gaugeLabels.WithLabelValues(tags...).Set(float64(1)) + + gauge.Set(float64(2)) + + tags = []string{"inbound"} + counterLabels.WithLabelValues(tags...).Add(float64(3)) + + counter.Add(float64(4)) + + // wait they collected and ready for gathering + require.Eventually(t, func() bool { + metrics := collectPrometheusMetrics(nil) + return len(metrics) >= 4 + }, 5*time.Second, 100*time.Millisecond) + + metrics := collectPrometheusMetrics([]string{ + metricNamespace + "_streams", + metricNamespace + "_protocols_count", + metricNamespace + "_identify_total", + metricNamespace + "_counter_total"}) + require.Len(t, metrics, 4) + + for _, m := range metrics { + buf := strings.Builder{} + m.WriteMetric(&buf, "") + promValue := buf.String() + if strings.Contains(promValue, metricNamespace+"_streams") { + require.Contains(t, promValue, metricNamespace+"_streams gauge\n") + require.Contains(t, promValue, metricNamespace+"_streams{") + // map/labels order is not guaranteed + require.Contains(t, promValue, "dir=\"outbound\"") + require.Contains(t, promValue, "protocol=\"/test/proto\"") + require.Contains(t, promValue, "scope=\"protocol\"") + require.Contains(t, promValue, "} 1\n") + } else if strings.Contains(promValue, metricNamespace+"_protocols_count") { + require.Contains(t, promValue, metricNamespace+"_protocols_count gauge\n") + require.Contains(t, promValue, metricNamespace+"_protocols_count 2\n") + } else if strings.Contains(promValue, metricNamespace+"_identify_total") { + require.Contains(t, promValue, metricNamespace+"_identify_total counter\n") + require.Contains(t, promValue, metricNamespace+"_identify_total{dir=\"inbound\"} 3\n") + } else if strings.Contains(promValue, metricNamespace+"_counter_total") { + require.Contains(t, promValue, 
metricNamespace+"_counter_total counter\n") + require.Contains(t, promValue, metricNamespace+"_counter_total 4\n") + } else { + require.Fail(t, "not expected metric", promValue) + } + + values := make(map[string]float64) + m.AddMetric(values) + require.Len(t, values, 1) + } + + // ensure the exported gatherer works + reg := MakeRegistry() + reg.Register(&PrometheusDefaultMetrics) + defer reg.Deregister(&PrometheusDefaultMetrics) + + var buf strings.Builder + reg.WriteMetrics(&buf, "") + + require.Contains(t, buf.String(), metricNamespace+"_streams") + require.Contains(t, buf.String(), metricNamespace+"_protocols_count") + require.Contains(t, buf.String(), metricNamespace+"_identify_total") + require.Contains(t, buf.String(), metricNamespace+"_counter_total") +} diff --git a/util/metrics/registry.go b/util/metrics/registry.go index 43078bb4c2..d525bc1833 100644 --- a/util/metrics/registry.go +++ b/util/metrics/registry.go @@ -37,6 +37,12 @@ func DefaultRegistry() *Registry { return defaultRegistry } +// PrometheusDefaultMetrics is the default prometheus gatherer implementing the Metric interface +var PrometheusDefaultMetrics = defaultPrometheusGatherer{} + +// OpencensusDefaultMetrics is the default prometheus gatherer implementing the Metric interface +var OpencensusDefaultMetrics = defaultOpencensusGatherer{} + func init() { defaultRegistry = MakeRegistry() } From 4ba009b3d60be88377d9661ce6eb96d8fa1fdeed Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Mon, 1 Jul 2024 13:32:54 -0400 Subject: [PATCH 20/82] network: use http.ResponseController instead of GetHTTPRequestConnection (#6044) --- agreement/gossip/network_test.go | 3 - components/mocks/mockNetwork.go | 5 -- network/gossipNode.go | 12 ---- network/hybridNetwork.go | 10 --- network/p2p/p2p.go | 50 +------------ network/p2p/p2p_test.go | 66 ----------------- network/p2p/streams.go | 20 ------ network/p2pNetwork.go | 18 ----- network/p2pNetwork_test.go | 9 +-- network/requestTracker.go | 120 +++---------------------------- network/requestTracker_test.go | 4 +- network/wsNetwork.go | 11 --- rpcs/ledgerService.go | 19 +++-- 13 files changed, 25 insertions(+), 322 deletions(-) diff --git a/agreement/gossip/network_test.go b/agreement/gossip/network_test.go index a3c5328716..2867734ff6 100644 --- a/agreement/gossip/network_test.go +++ b/agreement/gossip/network_test.go @@ -155,9 +155,6 @@ func (w *whiteholeNetwork) GetPeers(options ...network.PeerOption) []network.Pee } func (w *whiteholeNetwork) RegisterHTTPHandler(path string, handler http.Handler) { } -func (w *whiteholeNetwork) GetHTTPRequestConnection(request *http.Request) (conn network.DeadlineSettableConn) { - return nil -} func (w *whiteholeNetwork) Start() error { w.quit = make(chan struct{}) diff --git a/components/mocks/mockNetwork.go b/components/mocks/mockNetwork.go index f933a553a9..47b1a5b5e4 100644 --- a/components/mocks/mockNetwork.go +++ b/components/mocks/mockNetwork.go @@ -106,11 +106,6 @@ func (network *MockNetwork) RegisterHTTPHandler(path string, handler http.Handle // OnNetworkAdvance - empty implementation func (network *MockNetwork) OnNetworkAdvance() {} -// GetHTTPRequestConnection - empty implementation -func (network *MockNetwork) GetHTTPRequestConnection(request *http.Request) (conn network.DeadlineSettableConn) { - return nil -} - // GetGenesisID - empty implementation func (network *MockNetwork) GetGenesisID() string { if network.GenesisID == "" { diff --git a/network/gossipNode.go b/network/gossipNode.go index 
6a028ff193..1592641f70 100644 --- a/network/gossipNode.go +++ b/network/gossipNode.go @@ -20,7 +20,6 @@ import ( "context" "net/http" "strings" - "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/protocol" @@ -50,13 +49,6 @@ const ( PeersPhonebookArchivalNodes PeerOption = iota ) -// DeadlineSettableConn abstracts net.Conn and related types as deadline-settable -type DeadlineSettableConn interface { - SetDeadline(time.Time) error - SetReadDeadline(time.Time) error - SetWriteDeadline(time.Time) error -} - // GossipNode represents a node in the gossip network type GossipNode interface { Address() (string, bool) @@ -104,10 +96,6 @@ type GossipNode interface { // characteristics as with a watchdog timer. OnNetworkAdvance() - // GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same - // request that was provided to the http handler ( or provide a fallback Context() to that ) - GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) - // GetGenesisID returns the network-specific genesisID. GetGenesisID() string diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index f324deb73f..27fc6edbb0 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -212,16 +212,6 @@ func (n *HybridP2PNetwork) OnNetworkAdvance() { }) } -// GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same -// request that was provided to the http handler ( or provide a fallback Context() to that ) -func (n *HybridP2PNetwork) GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) { - conn = n.wsNetwork.GetHTTPRequestConnection(request) - if conn != nil { - return conn - } - return n.p2pNetwork.GetHTTPRequestConnection(request) -} - // GetGenesisID returns the network-specific genesisID. func (n *HybridP2PNetwork) GetGenesisID() string { return n.genesisID diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index f4ed670f3e..ac0489d5e1 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -38,7 +38,6 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - libp2phttp "github.com/libp2p/go-libp2p/p2p/http" "github.com/libp2p/go-libp2p/p2p/muxer/yamux" "github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/libp2p/go-libp2p/p2p/transport/tcp" @@ -67,8 +66,6 @@ type Service interface { ListPeersForTopic(topic string) []peer.ID Subscribe(topic string, val pubsub.ValidatorEx) (SubNextCancellable, error) Publish(ctx context.Context, topic string, data []byte) error - - GetStream(peer.ID) (network.Stream, bool) } // serviceImpl manages integration with libp2p and implements the Service interface @@ -137,47 +134,7 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. libp2p.Security(noise.ID, noise.New), disableMetrics, ) - return &StreamChainingHost{ - Host: host, - handlers: map[protocol.ID][]network.StreamHandler{}, - }, listenAddr, err -} - -// StreamChainingHost is a wrapper around host.Host that overrides SetStreamHandler -// to allow chaining multiple handlers for the same protocol. -// Note, there should be probably only single handler that writes/reads streams. 
-type StreamChainingHost struct { - host.Host - handlers map[protocol.ID][]network.StreamHandler - mutex deadlock.Mutex -} - -// SetStreamHandler overrides the host.Host.SetStreamHandler method for chaining multiple handlers. -// Function objects are not comparable so theoretically it could have duplicates. -// The main use case is to track HTTP streams for ProtocolIDForMultistreamSelect = "/http/1.1" -// so it could just filter for such protocol if there any issues with other protocols like kad or mesh. -func (h *StreamChainingHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) { - h.mutex.Lock() - defer h.mutex.Unlock() - - handlers := h.handlers[pid] - if len(handlers) == 0 { - // no other handlers, do not set a proxy handler - h.Host.SetStreamHandler(pid, handler) - h.handlers[pid] = append(handlers, handler) - return - } - // otherwise chain the handlers with a copy of the existing handlers - handlers = append(handlers, handler) - // copy to save it in the closure and call lock free - currentHandlers := make([]network.StreamHandler, len(handlers)) - copy(currentHandlers, handlers) - h.Host.SetStreamHandler(pid, func(s network.Stream) { - for _, h := range currentHandlers { - h(s) - } - }) - h.handlers[pid] = handlers + return host, listenAddr, err } // MakeService creates a P2P service instance @@ -186,7 +143,6 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho sm := makeStreamManager(ctx, log, h, wsStreamHandler) h.Network().Notify(sm) h.SetStreamHandler(AlgorandWsProtocol, sm.streamHandler) - h.SetStreamHandler(libp2phttp.ProtocolIDForMultistreamSelect, sm.streamHandlerHTTP) // set an empty handler for telemetryID/telemetryInstance protocol in order to allow other peers to know our telemetryID telemetryID := log.GetTelemetryGUID() @@ -294,10 +250,6 @@ func (s *serviceImpl) ClosePeer(peer peer.ID) error { return s.host.Network().ClosePeer(peer) } -func (s *serviceImpl) GetStream(peerID peer.ID) (network.Stream, bool) { - return s.streams.getStream(peerID) -} - // netAddressToListenAddress converts a netAddress in "ip:port" format to a listen address // that can be passed in to libp2p.ListenAddrStrings func netAddressToListenAddress(netAddress string) (string, error) { diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index fb14193a55..dab6aa5456 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -19,19 +19,14 @@ package p2p import ( "context" "fmt" - "sync/atomic" "testing" - "time" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/network/p2p/peerstore" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -86,67 +81,6 @@ func TestNetAddressToListenAddress(t *testing.T) { } } -func TestP2PStreamingHost(t *testing.T) { - partitiontest.PartitionTest(t) - - cfg := config.GetDefaultLocal() - dir := t.TempDir() - pstore, err := peerstore.NewPeerStore(nil, "") - require.NoError(t, err) - h, la, err := MakeHost(cfg, dir, pstore) - require.NoError(t, err) - - var h1calls atomic.Int64 - h1 := func(network.Stream) { - h1calls.Add(1) - } - var h2calls atomic.Int64 - h2 := func(network.Stream) { - h2calls.Add(1) - } - - ma, err := multiaddr.NewMultiaddr(la) - require.NoError(t, err) - h.Network().Listen(ma) - 
defer h.Close() - - h.SetStreamHandler(AlgorandWsProtocol, h1) - h.SetStreamHandler(AlgorandWsProtocol, h2) - - addrInfo := peer.AddrInfo{ - ID: h.ID(), - Addrs: h.Addrs(), - } - cpstore, err := peerstore.NewPeerStore([]*peer.AddrInfo{&addrInfo}, "") - require.NoError(t, err) - c, _, err := MakeHost(cfg, dir, cpstore) - require.NoError(t, err) - defer c.Close() - - s1, err := c.NewStream(context.Background(), h.ID(), AlgorandWsProtocol) - require.NoError(t, err) - s1.Write([]byte("hello")) - defer s1.Close() - - require.Eventually(t, func() bool { - return h1calls.Load() == 1 && h2calls.Load() == 1 - }, 5*time.Second, 100*time.Millisecond) - - // ensure a single handler also works as expected - h1calls.Store(0) - h.SetStreamHandler(algorandP2pHTTPProtocol, h1) - - s2, err := c.NewStream(context.Background(), h.ID(), algorandP2pHTTPProtocol) - require.NoError(t, err) - s2.Write([]byte("hello")) - defer s2.Close() - - require.Eventually(t, func() bool { - return h1calls.Load() == 1 - }, 5*time.Second, 100*time.Millisecond) - -} - // TestP2PGetPeerTelemetryInfo tests the GetPeerTelemetryInfo function func TestP2PGetPeerTelemetryInfo(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/network/p2p/streams.go b/network/p2p/streams.go index d16633adfd..e7277f4871 100644 --- a/network/p2p/streams.go +++ b/network/p2p/streams.go @@ -104,20 +104,6 @@ func (n *streamManager) streamHandler(stream network.Stream) { n.handler(n.ctx, remotePeer, stream, incoming) } -// streamHandlerHTTP tracks the ProtocolIDForMultistreamSelect = "/http/1.1" streams -func (n *streamManager) streamHandlerHTTP(stream network.Stream) { - n.streamsLock.Lock() - defer n.streamsLock.Unlock() - n.streams[stream.Conn().LocalPeer()] = stream -} - -func (n *streamManager) getStream(peerID peer.ID) (network.Stream, bool) { - n.streamsLock.Lock() - defer n.streamsLock.Unlock() - stream, ok := n.streams[peerID] - return stream, ok -} - // Connected is called when a connection is opened func (n *streamManager) Connected(net network.Network, conn network.Conn) { remotePeer := conn.RemotePeer() @@ -174,12 +160,6 @@ func (n *streamManager) Disconnected(net network.Network, conn network.Conn) { stream.Close() delete(n.streams, conn.RemotePeer()) } - - stream, ok = n.streams[conn.LocalPeer()] - if ok { - stream.Close() - delete(n.streams, conn.LocalPeer()) - } } // Listen is called when network starts listening on an addr diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 7ebbb5a665..1ad49bd045 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -19,7 +19,6 @@ package network import ( "context" "math/rand" - "net" "net/http" "strings" "sync" @@ -725,23 +724,6 @@ func (n *P2PNetwork) OnNetworkAdvance() { } } -// GetHTTPRequestConnection returns the underlying connection for the given request. 
Note that the request must be the same -// request that was provided to the http handler ( or provide a fallback Context() to that ) -func (n *P2PNetwork) GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) { - addr := request.Context().Value(http.LocalAddrContextKey).(net.Addr) - peerID, err := peer.Decode(addr.String()) - if err != nil { - n.log.Infof("GetHTTPRequestConnection failed to decode %s", addr.String()) - return nil - } - conn, ok := n.service.GetStream(peerID) - if !ok { - n.log.Warnf("GetHTTPRequestConnection no such stream for peer %s", peerID.String()) - return nil - } - return conn -} - // wsStreamHandler is a callback that the p2p package calls when a new peer connects and establishes a // stream for the websocket protocol. func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, stream network.Stream, incoming bool) { diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 5bd582ead0..3548dbd1cb 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -366,10 +366,6 @@ func (s *mockService) Publish(ctx context.Context, topic string, data []byte) er return nil } -func (s *mockService) GetStream(peer.ID) (network.Stream, bool) { - return nil, false -} - func makeMockService(id peer.ID, addrs []ma.Multiaddr) *mockService { return &mockService{ id: id, @@ -725,8 +721,9 @@ type p2phttpHandler struct { func (h *p2phttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Write([]byte(h.retData)) if r.URL.Path == "/check-conn" { - c := h.net.GetHTTPRequestConnection(r) - require.NotNil(h.tb, c) + rc := http.NewResponseController(w) + err := rc.SetWriteDeadline(time.Now().Add(10 * time.Second)) + require.NoError(h.tb, err) } } diff --git a/network/requestTracker.go b/network/requestTracker.go index 47eba90c7e..8c603e2a32 100644 --- a/network/requestTracker.go +++ b/network/requestTracker.go @@ -62,12 +62,10 @@ type TrackerRequest struct { otherTelemetryGUID string otherInstanceName string - connection net.Conn - noPrune bool } // makeTrackerRequest creates a new TrackerRequest. -func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime time.Time, conn net.Conn) *TrackerRequest { +func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime time.Time) *TrackerRequest { if remoteHost == "" { remoteHost, remotePort, _ = net.SplitHostPort(remoteAddr) } @@ -77,7 +75,6 @@ func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime ti remoteAddr: remoteAddr, remoteHost: remoteHost, remotePort: remotePort, - connection: conn, } } @@ -120,9 +117,8 @@ func (tr *TrackerRequest) remoteAddress() string { // hostIncomingRequests holds all the requests that are originating from a single host. type hostIncomingRequests struct { - remoteHost string - requests []*TrackerRequest // this is an ordered list, according to the requestsHistory.created - additionalHostRequests map[*TrackerRequest]struct{} // additional requests that aren't included in the "requests", and always assumed to be "alive". + remoteHost string + requests []*TrackerRequest // this is an ordered list, according to the requestsHistory.created } // findTimestampIndex finds the first an index (i) in the sorted requests array, where requests[i].created is greater than t. @@ -137,45 +133,6 @@ func (ard *hostIncomingRequests) findTimestampIndex(t time.Time) int { return i } -// convertToAdditionalRequest converts the given trackerRequest into a "additional request". 
-// unlike regular tracker requests, additional requests does not get pruned. -func (ard *hostIncomingRequests) convertToAdditionalRequest(trackerRequest *TrackerRequest) { - if _, has := ard.additionalHostRequests[trackerRequest]; has { - return - } - - i := sort.Search(len(ard.requests), func(i int) bool { - return ard.requests[i].created.After(trackerRequest.created) - }) - i-- - if i < 0 { - return - } - // we could have several entries with the same timestamp, so we need to consider all of them. - for ; i >= 0; i-- { - if ard.requests[i] == trackerRequest { - break - } - if ard.requests[i].created != trackerRequest.created { - // we can't find the item in the list. - return - } - } - if i < 0 { - return - } - // ok, item was found at index i. - copy(ard.requests[i:], ard.requests[i+1:]) - ard.requests[len(ard.requests)-1] = nil - ard.requests = ard.requests[:len(ard.requests)-1] - ard.additionalHostRequests[trackerRequest] = struct{}{} -} - -// removeTrackedConnection removes a trackerRequest from the additional requests map -func (ard *hostIncomingRequests) removeTrackedConnection(trackerRequest *TrackerRequest) { - delete(ard.additionalHostRequests, trackerRequest) -} - // add adds the trackerRequest at the correct index within the sorted array. func (ard *hostIncomingRequests) add(trackerRequest *TrackerRequest) { // find the new item index. @@ -197,7 +154,7 @@ func (ard *hostIncomingRequests) add(trackerRequest *TrackerRequest) { // countConnections counts the number of connection that we have that occurred after the provided specified time func (ard *hostIncomingRequests) countConnections(rateLimitingWindowStartTime time.Time) (count uint) { i := ard.findTimestampIndex(rateLimitingWindowStartTime) - return uint(len(ard.requests) - i + len(ard.additionalHostRequests)) + return uint(len(ard.requests) - i) } //msgp:ignore hostsIncomingMap @@ -232,9 +189,8 @@ func (him *hostsIncomingMap) addRequest(trackerRequest *TrackerRequest) { requestData, has := (*him)[trackerRequest.remoteHost] if !has { requestData = &hostIncomingRequests{ - remoteHost: trackerRequest.remoteHost, - requests: make([]*TrackerRequest, 0, 1), - additionalHostRequests: make(map[*TrackerRequest]struct{}), + remoteHost: trackerRequest.remoteHost, + requests: make([]*TrackerRequest, 0, 1), } (*him)[trackerRequest.remoteHost] = requestData } @@ -250,24 +206,6 @@ func (him *hostsIncomingMap) countOriginConnections(remoteHost string, rateLimit return 0 } -// convertToAdditionalRequest converts the given trackerRequest into a "additional request". -func (him *hostsIncomingMap) convertToAdditionalRequest(trackerRequest *TrackerRequest) { - requestData, has := (*him)[trackerRequest.remoteHost] - if !has { - return - } - requestData.convertToAdditionalRequest(trackerRequest) -} - -// removeTrackedConnection removes a trackerRequest from the additional requests map -func (him *hostsIncomingMap) removeTrackedConnection(trackerRequest *TrackerRequest) { - requestData, has := (*him)[trackerRequest.remoteHost] - if !has { - return - } - requestData.removeTrackedConnection(trackerRequest) -} - // RequestTracker tracks the incoming request connections type RequestTracker struct { downstreamHandler http.Handler @@ -300,29 +238,6 @@ func makeRequestsTracker(downstreamHandler http.Handler, log logging.Logger, con } } -// requestTrackedConnection used to track the active connections. In particular, it used to remove the -// tracked connection entry from the RequestTracker once a connection is closed. 
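This connection bookkeeping existed mainly so that HTTP handlers could reach the underlying net.Conn to set deadlines. Since Go 1.20 that is available directly on the response via http.ResponseController, which is what the remaining call sites in this patch (the p2p test handler and rpcs/ledgerService.go) switch to. A minimal sketch of the pattern with a hypothetical handler, not part of the patch:

package main

import (
	"log"
	"net/http"
	"time"
)

// slowHandler writes a potentially large payload; instead of pulling the raw
// connection out of a request tracker, it extends the write deadline through
// the per-response controller.
func slowHandler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	if err := rc.SetWriteDeadline(time.Now().Add(2 * time.Minute)); err != nil {
		log.Printf("unable to set write deadline: %v", err)
	}
	w.Write([]byte("payload")) // placeholder body
}

func main() {
	http.HandleFunc("/payload", slowHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}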
-type requestTrackedConnection struct { - net.Conn - tracker *RequestTracker -} - -func (c *requestTrackedConnection) UnderlyingConn() net.Conn { - return c.Conn -} - -// Close removes the connection from the tracker's connections map and call the underlaying Close function. -func (c *requestTrackedConnection) Close() error { - c.tracker.hostRequestsMu.Lock() - trackerRequest := c.tracker.acceptedConnections[c.Conn.LocalAddr()] - delete(c.tracker.acceptedConnections, c.Conn.LocalAddr()) - if trackerRequest != nil { - c.tracker.hostRequests.removeTrackedConnection(trackerRequest) - } - c.tracker.hostRequestsMu.Unlock() - return c.Conn.Close() -} - // Accept waits for and returns the next connection to the listener. func (rt *RequestTracker) Accept() (conn net.Conn, err error) { // the following for loop is a bit tricky : @@ -334,7 +249,7 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) { return } - trackerRequest := makeTrackerRequest(conn.RemoteAddr().String(), "", "", time.Now(), conn) + trackerRequest := makeTrackerRequest(conn.RemoteAddr().String(), "", "", time.Now()) rateLimitingWindowStartTime := trackerRequest.created.Add(-time.Duration(rt.config.ConnectionsRateLimitingWindowSeconds) * time.Second) rt.hostRequestsMu.Lock() @@ -376,7 +291,6 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) { // add an entry to the acceptedConnections so that the ServeHTTP could find the connection quickly. rt.acceptedConnections[conn.LocalAddr()] = trackerRequest rt.hostRequestsMu.Unlock() - conn = &requestTrackedConnection{Conn: conn, tracker: rt} return } } @@ -421,7 +335,7 @@ func (rt *RequestTracker) sendBlockedConnectionResponse(conn net.Conn, requestTi func (rt *RequestTracker) pruneAcceptedConnections(pruneStartDate time.Time) { localAddrToRemove := []net.Addr{} for localAddr, request := range rt.acceptedConnections { - if !request.noPrune && request.created.Before(pruneStartDate) { + if !request.created.Before(pruneStartDate) { localAddrToRemove = append(localAddrToRemove, localAddr) } } @@ -478,14 +392,6 @@ func (rt *RequestTracker) GetTrackedRequest(request *http.Request) (trackedReque return rt.httpConnections[localAddr] } -// GetRequestConnection return the underlying connection for the given request -func (rt *RequestTracker) GetRequestConnection(request *http.Request) net.Conn { - rt.httpConnectionsMu.Lock() - defer rt.httpConnectionsMu.Unlock() - localAddr := request.Context().Value(http.LocalAddrContextKey).(net.Addr) - return rt.httpConnections[localAddr].connection -} - func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.Request) { // this function is called only after we've fetched all the headers. on some malicious clients, this could get delayed, so we can't rely on the // tcp-connection established time to align with current time. @@ -510,20 +416,16 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http. } trackedRequest := rt.acceptedConnections[localAddr] + delete(rt.acceptedConnections, localAddr) if trackedRequest != nil { - // update the original tracker request so that it won't get pruned. 
- if !trackedRequest.noPrune { - trackedRequest.noPrune = true - rt.hostRequests.convertToAdditionalRequest(trackedRequest) - } // create a copy, so we can unlock - trackedRequest = makeTrackerRequest(trackedRequest.remoteAddr, trackedRequest.remoteHost, trackedRequest.remotePort, trackedRequest.created, trackedRequest.connection) + trackedRequest = makeTrackerRequest(trackedRequest.remoteAddr, trackedRequest.remoteHost, trackedRequest.remotePort, trackedRequest.created) } rt.hostRequestsMu.Unlock() // we have no request tracker ? no problem; create one on the fly. if trackedRequest == nil { - trackedRequest = makeTrackerRequest(request.RemoteAddr, "", "", time.Now(), nil) + trackedRequest = makeTrackerRequest(request.RemoteAddr, "", "", time.Now()) } // update the origin address. diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 158cf45336..d814507c78 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -51,7 +51,7 @@ func TestHostIncomingRequestsOrdering(t *testing.T) { now := time.Now() perm := rand.Perm(100) for i := 0; i < 100; i++ { - trackedRequest := makeTrackerRequest("remoteaddr", "host", "port", now.Add(time.Duration(perm[i])*time.Minute), nil) + trackedRequest := makeTrackerRequest("remoteaddr", "host", "port", now.Add(time.Duration(perm[i])*time.Minute)) hir.add(trackedRequest) } require.Equal(t, 100, len(hir.requests)) @@ -178,7 +178,7 @@ func TestRemoteAddress(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - tr := makeTrackerRequest("127.0.0.1:444", "", "", time.Now(), nil) + tr := makeTrackerRequest("127.0.0.1:444", "", "", time.Now()) require.Equal(t, "127.0.0.1:444", tr.remoteAddr) require.Equal(t, "127.0.0.1", tr.remoteHost) require.Equal(t, "444", tr.remotePort) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 8a43ad5234..f222d2ff27 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -1031,17 +1031,6 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo return http.StatusOK } -// GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same -// request that was provided to the http handler ( or provide a fallback Context() to that ) -// if the provided request has no associated connection, it returns nil. 
( this should not happen for any http request that was registered -// by WebsocketNetwork ) -func (wn *WebsocketNetwork) GetHTTPRequestConnection(request *http.Request) (conn DeadlineSettableConn) { - if wn.requestsTracker != nil { - conn = wn.requestsTracker.GetRequestConnection(request) - } - return -} - // ServerHTTP handles the gossip network functions over websockets func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *http.Request) { if !wn.config.EnableGossipService { diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go index 823895a417..d76273de62 100644 --- a/rpcs/ledgerService.go +++ b/rpcs/ledgerService.go @@ -34,7 +34,6 @@ import ( "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" - "github.com/algorand/go-algorand/network" ) const ( @@ -63,7 +62,6 @@ type LedgerForService interface { // httpGossipNode is a reduced interface for the gossipNode that only includes the methods needed by the LedgerService type httpGossipNode interface { RegisterHTTPHandler(path string, handler http.Handler) - GetHTTPRequestConnection(request *http.Request) (conn network.DeadlineSettableConn) } // LedgerService represents the Ledger RPC API @@ -211,17 +209,16 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R response.WriteHeader(http.StatusOK) return } - if conn := ls.net.GetHTTPRequestConnection(request); conn != nil { - maxCatchpointFileWritingDuration := 2 * time.Minute + rc := http.NewResponseController(response) + maxCatchpointFileWritingDuration := 2 * time.Minute - catchpointFileSize, err := cs.Size() - if err != nil || catchpointFileSize <= 0 { - maxCatchpointFileWritingDuration += maxCatchpointFileSize * time.Second / expectedWorstUploadSpeedBytesPerSecond - } else { - maxCatchpointFileWritingDuration += time.Duration(catchpointFileSize) * time.Second / expectedWorstUploadSpeedBytesPerSecond - } - conn.SetWriteDeadline(time.Now().Add(maxCatchpointFileWritingDuration)) + catchpointFileSize, err := cs.Size() + if err != nil || catchpointFileSize <= 0 { + maxCatchpointFileWritingDuration += maxCatchpointFileSize * time.Second / expectedWorstUploadSpeedBytesPerSecond } else { + maxCatchpointFileWritingDuration += time.Duration(catchpointFileSize) * time.Second / expectedWorstUploadSpeedBytesPerSecond + } + if wdErr := rc.SetWriteDeadline(time.Now().Add(maxCatchpointFileWritingDuration)); wdErr != nil { logging.Base().Warnf("LedgerService.ServeHTTP unable to set connection timeout") } From 3bcfedb34871fddba13bfe6125f92b60a975fd06 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:32:26 -0400 Subject: [PATCH 21/82] tests: extend TestP2PRelay logging (#6048) --- network/p2pNetwork_test.go | 48 +++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 3548dbd1cb..7c94be98e4 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -785,13 +785,20 @@ func TestP2PHTTPHandler(t *testing.T) { require.ErrorIs(t, err, limitcaller.ErrConnectionQueueingTimeout) } +// TestP2PRelay checks p2p nodes can properly relay messages: +// netA and netB are started with ForceFetchTransactions so it subscribes to the txn topic, +// both of them are connected and do not relay messages. 
+// Later, netB is forced to relay messages and netC is started with a listening address set +// so that it relays messages as well. +// The test checks messages from both netB and netC are received by netA. func TestP2PRelay(t *testing.T) { partitiontest.PartitionTest(t) cfg := config.GetDefaultLocal() cfg.ForceFetchTransactions = true log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + log.Debugln("Starting netA") + netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) err = netA.Start() @@ -806,7 +813,8 @@ func TestP2PRelay(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + log.Debugf("Starting netB with phonebook addresses %v", phoneBookAddresses) + netB, err := NewP2PNetwork(log.With("net", "netB"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) err = netB.Start() require.NoError(t, err) @@ -826,8 +834,7 @@ func TestP2PRelay(t *testing.T) { return netA.hasPeers() && netB.hasPeers() }, 2*time.Second, 50*time.Millisecond) - makeCounterHandler := func(numExpected int) ([]TaggedMessageProcessor, *atomic.Uint32, chan struct{}) { - var numActual atomic.Uint32 + makeCounterHandler := func(numExpected int, counter *atomic.Uint32, msgs *[][]byte) ([]TaggedMessageProcessor, chan struct{}) { counterDone := make(chan struct{}) counterHandler := []TaggedMessageProcessor{ { @@ -837,10 +844,13 @@ func TestP2PRelay(t *testing.T) { ProcessorHandleFunc }{ ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { - return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: nil} + return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: msg.Data} }), ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { - if count := numActual.Add(1); int(count) >= numExpected { + if msgs != nil { + *msgs = append(*msgs, msg.ValidatedMessage.([]byte)) + } + if count := counter.Add(1); int(count) >= numExpected { close(counterDone) } return OutgoingMessage{Action: Ignore} @@ -848,9 +858,10 @@ func TestP2PRelay(t *testing.T) { }, }, } - return counterHandler, &numActual, counterDone + return counterHandler, counterDone } - counterHandler, _, counterDone := makeCounterHandler(1) + var counter atomic.Uint32 + counterHandler, counterDone := makeCounterHandler(1, &counter, nil) netA.RegisterProcessors(counterHandler) // send 5 messages from both netB to netA @@ -866,10 +877,11 @@ func TestP2PRelay(t *testing.T) { case <-time.After(1 * time.Second): } - // add netC with listening address set, and enable relaying on netB - // ensure all messages are received by netA + // add a netC with listening address set and enable relaying on netB + // ensure all messages from netB and netC are received by netA cfg.NetAddress = "127.0.0.1:0" - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + log.Debugf("Starting netB with phonebook addresses %v", phoneBookAddresses) + netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) err = netC.Start() require.NoError(t, err) @@ -893,28 +905,32 @@ func TestP2PRelay(t *testing.T) { }, 2*time.Second, 50*time.Millisecond) const expectedMsgs = 
10 - counterHandler, count, counterDone := makeCounterHandler(expectedMsgs) + counter.Store(0) + var loggedMsgs [][]byte + counterHandler, counterDone = makeCounterHandler(expectedMsgs, &counter, &loggedMsgs) netA.ClearProcessors() netA.RegisterProcessors(counterHandler) for i := 0; i < expectedMsgs/2; i++ { - err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3, byte(i)}, true, nil) + err := netB.Relay(context.Background(), protocol.TxnTag, []byte{5, 6, 7, byte(i)}, true, nil) require.NoError(t, err) err = netC.Relay(context.Background(), protocol.TxnTag, []byte{11, 12, 10 + byte(i), 14}, true, nil) require.NoError(t, err) } // send some duplicate messages, they should be dropped for i := 0; i < expectedMsgs/2; i++ { - err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3, byte(i)}, true, nil) + err := netB.Relay(context.Background(), protocol.TxnTag, []byte{5, 6, 7, byte(i)}, true, nil) require.NoError(t, err) } select { case <-counterDone: - case <-time.After(2 * time.Second): - if c := count.Load(); c < expectedMsgs { + case <-time.After(3 * time.Second): + if c := counter.Load(); c < expectedMsgs { + t.Logf("Logged messages: %v", loggedMsgs) require.Failf(t, "One or more messages failed to reach destination network", "%d > %d", expectedMsgs, c) } else if c > expectedMsgs { + t.Logf("Logged messages: %v", loggedMsgs) require.Failf(t, "One or more messages that were expected to be dropped, reached destination network", "%d < %d", expectedMsgs, c) } } From 1b29523079bebaf1809306efdeb800bc5da70736 Mon Sep 17 00:00:00 2001 From: John Lee Date: Wed, 3 Jul 2024 12:33:40 -0400 Subject: [PATCH 22/82] CI: increase artifact upload timeout (#6050) --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index f431e91160..4057049dac 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -721,6 +721,7 @@ commands: export GOPATH="<< parameters.build_dir >>/go" export TRAVIS_BRANCH=${CIRCLE_BRANCH} scripts/travis/deploy_packages.sh + no_output_timeout: 20m - when: condition: equal: [ "amd64", << parameters.platform >> ] From b3c7bca66d3dcbccfd5cf7abc5d30620b175406b Mon Sep 17 00:00:00 2001 From: Gary Malouf <982483+gmalouf@users.noreply.github.com> Date: Mon, 8 Jul 2024 09:36:10 -0400 Subject: [PATCH 23/82] Config: Expose merged configuration for easier debugging (#6049) --- cmd/algod/main.go | 9 + daemon/algod/api/algod.oas2.json | 27 + daemon/algod/api/algod.oas3.yml | 26 + .../api/server/v2/generated/data/routes.go | 30 +- .../v2/generated/experimental/routes.go | 448 ++++++------- .../nonparticipating/private/routes.go | 466 +++++++------- .../nonparticipating/public/routes.go | 604 +++++++++--------- .../generated/participating/private/routes.go | 474 +++++++------- .../generated/participating/public/routes.go | 52 +- daemon/algod/api/server/v2/handlers.go | 5 + .../algod/api/server/v2/test/handlers_test.go | 16 + 11 files changed, 1136 insertions(+), 1021 deletions(-) diff --git a/cmd/algod/main.go b/cmd/algod/main.go index 09770cb6e0..c4d1c09bc8 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -17,6 +17,7 @@ package main import ( + "encoding/json" "flag" "fmt" "math/rand" @@ -174,6 +175,14 @@ func run() int { log.Fatalf("Cannot load config: %v", err) } + // log is not setup yet + fmt.Printf("Config loaded from %s\n", absolutePath) + fmt.Println("Configuration after loading/defaults merge: ") + err = json.NewEncoder(os.Stdout).Encode(cfg) + if err != nil { + fmt.Println("Error 
encoding config: ", err) + } + // set soft memory limit, if configured if cfg.GoMemLimit > 0 { debug.SetMemoryLimit(int64(cfg.GoMemLimit)) diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index e0827feb16..4cc1e0ced7 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -216,6 +216,33 @@ } } }, + "/debug/settings/config": { + "get": { + "description": "Returns the merged (defaults + overrides) config file in json.", + "tags": [ + "private" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "http" + ], + "summary": "Gets the merged config file.", + "operationId": "GetConfig", + "responses": { + "200": { + "description": "The merged config file in json.", + "schema": { + "type": "string" + } + }, + "default": { + "description": "Unknown Error" + } + } + } + }, "/v2/accounts/{address}": { "get": { "description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts", diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index 3fb04f5add..2f57b62453 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -2788,6 +2788,32 @@ }, "openapi": "3.0.1", "paths": { + "/debug/settings/config": { + "get": { + "description": "Returns the merged (defaults + overrides) config file in json.", + "operationId": "GetConfig", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "string" + } + } + }, + "description": "The merged config file in json." + }, + "default": { + "content": {}, + "description": "Unknown Error" + } + }, + "summary": "Gets the merged config file.", + "tags": [ + "private" + ] + } + }, "/debug/settings/pprof": { "get": { "description": "Retrieves the current settings for blocking and mutex profiles", diff --git a/daemon/algod/api/server/v2/generated/data/routes.go b/daemon/algod/api/server/v2/generated/data/routes.go index dc0159d20d..4c32b80945 100644 --- a/daemon/algod/api/server/v2/generated/data/routes.go +++ b/daemon/algod/api/server/v2/generated/data/routes.go @@ -320,21 +320,21 @@ var swaggerSpec = []string{ "+Omj/1j85eFnD3N48tkXDx/yL57wR198+gge/+WzJw/h0fLzLxaPi8dPHi+ePH7y+Wdf5J8+ebR48vkX", "/3HP8SEHMgEaUvs/nf2v7Kxcqezs5Xn22gHb4oRX4ltwe4O68lJhXUuH1BxPImy4KGdPw0//I5ywk1xt", "2uHDrzNfn2m2trYyT09Pr6+vT+Iupyt8+p9ZVefr0zAPVhvsyCsvz5sYfYrDwR1trce4qZ4UzvDbq68u", - "XrOzl+cnLcHMns4enjw8eeRLW0teidnT2af4E56eNe77KebXPDU+df5pVVHy/A/z2amnQ//XGniJSXTc", - "HxuwWuThkwZe7Pz/zTVfrUCf4AsN+unq8WmQOE7f++wIH/Z9O42jP07fd5JIFAd6huiGQ01O34fyuPsH", - "7JRG9XFlUYeJgO5rdrrAkjhTm0K8uvGloKpiTt+jsD36+6m3mKQ/otJDp+k0JGMZaUnP7tMfOyh8b7du", - "IfuHc22i8XJu83Vdnb7H/+DBiFZEWTxP7VaeopP49H0HEf7zABHd39vucYurjSogAKeWS6opvO/z6Xv6", - "N5oIthVo4SROzJzjf6UMZ6dYWm43/HknvUuzhFRemh+lAdKIQ1WBnczb520NrzgvQuOLncyDaBziHpED", - "PH74kKZ/gv+Z+dJLvewtp/48z0xTa36vYaaTNxP5a88m18BLj/jAnswQhkcfD4ZzSbGOjuHSxfBhPvvs", - "Y2LhXDoZhpcMW9L0n37ETQB9JXJgr2FTKc21KHfsR9mEa0aFcFMUeCnVtQyQO6mi3my43qG0vlFXYJiv", - "sRsRJ9Pg5CMK6UA3f0vDeK1xx0fezKp6UYp8Nqcsqe9QIrMp4SQYioYzBSNZO3j3VHxz8ExM34WuzLsn", - "Lc0kOA8kLKDhhwL7cH/D3vfdrDTVvdQGzf7FCP7FCO6QEdhay9EjGt1fmFsNKv+MNef5Gvbxg+FtGV3w", - "s0qlkkdc7GEWvoLJGK+46PKKNpxw9vTNtAJ/3rNBRusCjDvMJ0FhcdJ4q0/ohiOFM49+1Wiv99Uu//Du", - "D3G/P+MynOfOjpPrkutSgG6ogMthUZl/cYH/Z7gAVcfitK9zZqEsTXz2rcKzT14enzJTkvdtIh/oZDht", - "henOz6fBNpHSQbst33f+7OpVZl3bQl1Hs6BVn1xSQy3DfaxN/+/Tay5stlTaJ9bkSwt62NkCL099FZ3e", - "r23i+sEXzMYf/Rg/GU3+esq9upH6hrxurONAH0599SrfSKMQ6Rw+t5a12FKFfLaxUb1557gcFlr3LLg1", - 
"vDw9PcWnL2tl7Onsw/x9zygTf3zXEFaoDzqrtLjCOgbv5rNtprRYCcnLzFs12lJgs8cnD2cf/m8AAAD/", - "/xb9Ejq6CAEA", + "XrOzl+cnLcHMns4enjw8eeRLW0teidnT2af4E56eNe77KebXPDU+df5p81brw3zwraoosb775GnU/7UG", + "XmKCHffHBqwWefikgRc7/39zzVcr0Cf4eoN+unp8GqSR0/c+c8KHfd9O48iQ0/edBBPFgZ4h8uFQk9P3", + "oXTu/gE7ZVN9zFnUYSKg+5qdLrBcztSmEK9ufCmoxpjT9yiIj/5+6q0p6Y+oENFJOw2JWkZa0pP89McO", + "Ct/brVvI/uFcm2i8nNt8XVen7/E/eGiiFVGGz1O7lafoQD5930GE/zxARPf3tnvc4mqjCgjAqeWS6g3v", + "+3z6nv6NJoJtBVo4aRSz6vhfKfvZKZad2w1/3knv7iwhlbPmR2mAtOVQcWAn8/bpW8NHzovQ+GIn8yA2", + "h5hI5A6PHz6k6Z/gf2a+LFMvs8upP88z09Sh32u06eTURN7bs9c18NIDP7AnM4Th0ceD4VxSHKRjxnRp", + "fJjPPvuYWDiXTr7hJcOWNP2nH3ETQF+JHNhr2FRKcy3KHftRNqGcUZHcFAVeSnUtA+RO4qg3G653KMlv", + "1BUY5uvvRsTJNDjZicI9MASgpWG88rjjI29mVb0oRT6bUwbVdyit2ZTgEoxIw5mCAa0dvHsqvjl4Jqbv", + "Qlce3pOyZhKcB5IZ0PBDYX64v2Hv+y5YmupeaoNm/2IE/2IEd8gIbK3l6BGN7i/MuwaVf+Ka83wN+/jB", + "8LaMLvhZpVKJJS72MAtf3WSMV1x0eUUbajh7+mZa8T/v9SCDdgHGHeaToMw4Sb3VNXTDkcKZR59rtNf7", + "6pp/ePeHuN+fcRnOc2fHya3JdSlAN1TA5bDgzL+4wP8zXIAqZ3Ha1zmzUJYmPvtW4dknD5BPpynJMzeR", + "D3Syn7bCdOfn02C3SOmg3ZbvO3929Sqzrm2hrqNZ0OJP7qqhluE+1qb/9+k1FzZbKu2TbvKlBT3sbIGX", + "p77CTu/XNqn94Atm6o9+jJ+TJn895V7dSH1DXjfWcaAPp756lW+kUYiCDp9bq1tsxUI+29iv3rxzXA6L", + "sHsW3Bplnp6e4rOYtTL2dPZh/r5nsIk/vmsIK9QOnVVaXGGNg3fz2TZTWqyE5GXmrRptmbDZ45OHsw//", + "NwAA//+q4vIh1ggBAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index 5d9044ed56..83312b906f 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -131,230 +131,230 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL var swaggerSpec = []string{ "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuLnbg8TvZ2bd8LRLYkvKEAPgCckeLz", - "d79CN0CCJChRMxMnqdq/7BHxo9FoNPoXuj/McrWplARpzezph1nFNd+ABY1/8TxXtbSZKNxfBZhci8oK", - "JWdPwzdmrBZyNZvPhPu14nY9m88k30DbxvWfzzT8sxYaitlTq2uYz0y+hg13A9td5Vo3I22zlcr8EGc0", - "xPnz2cc9H3hRaDBmCOWPstwxIfOyLoBZzaXhuftk2LWwa2bXwjDfmQnJlASmlsyuO43ZUkBZmJOwyH/W", - "oHfRKv3k40v62IKYaVXCEM5narMQEgJU0ADVbAizihWwxEZrbpmbwcEaGlrFDHCdr9lS6QOgEhAxvCDr", - "zezp25kBWYDG3cpBXOF/lxrgV8gs1yuws/fz1OKWFnRmxSaxtHOPfQ2mLq1h2BbXuBJXIJnrdcJe1say", - "BTAu2etvn7HPP//8K7eQDbcWCk9ko6tqZ4/XRN1nT2cFtxA+D2mNlyuluSyypv3rb5/h/Bd+gVNbcWMg", - "fVjO3Bd2/nxsAaFjgoSEtLDCfehQv+uROBTtzwtYKg0T94Qa3+mmxPP/rruSc5uvKyWkTewLw6+MPid5", - "WNR9Hw9rAOi0rxymtBv07cPsq/cfHs0fPfz4L2/Psv/yf37x+ceJy3/WjHsAA8mGea01yHyXrTRwPC1r", - "Lof4eO3pwaxVXRZsza9w8/kGWb3vy1xfYp1XvKwdnYhcq7NypQzjnowKWPK6tCxMzGpZOjblRvPUzoRh", - "lVZXooBi7rjv9Vrka5ZzQ0NgO3YtytLRYG2gGKO19Or2HKaPMUocXDfCBy7oj4uMdl0HMAFb5AZZXioD", - "mVUHrqdw43BZsPhCae8qc9xlxd6sgeHk7gNdtog76Wi6LHfM4r4WjBvGWbia5kws2U7V7Bo3pxSX2N+v", - "xmFtwxzScHM696g7vGPoGyAjgbyFUiVwicgL526IMrkUq1qDYddrsGt/52kwlZIGmFr8A3Lrtv1/X/z4", - "A1OavQRj+Ape8fySgcxVAcUJO18yqWxEGp6WEIeu59g6PFypS/4fRjma2JhVxfPL9I1eio1IrOol34pN", - "vWGy3ixAuy0NV4hVTIOttRwDiEY8QIobvh1O+kbXMsf9b6ftyHKO2oSpSr5DhG349q8P5x4cw3hZsgpk", - "IeSK2a0clePc3IfBy7SqZTFBzLFuT6OL1VSQi6WAgjWj7IHET3MIHiGPg6cVviJwwiCj4DSzHABHwjZB", - "M+50uy+s4iuISOaE/eSZG3616hJkQ+hsscNPlYYroWrTdBqBEafeL4FLZSGrNCxFgsYuPDocg6E2ngNv", - "vAyUK2m5kFA45oxAKwvErEZhiibcr+8Mb/EFN/Dlk7E7vv06cfeXqr/re3d80m5jo4yOZOLqdF/9gU1L", - "Vp3+E/TDeG4jVhn9PNhIsXrjbpulKPEm+ofbv4CG2iAT6CAi3E1GrCS3tYan7+QD9xfL2IXlsuC6cL9s", - 
"6KeXdWnFhVi5n0r66YVaifxCrEaQ2cCaVLiw24b+ceOl2bHdJvWKF0pd1lW8oLyjuC527Pz52CbTmMcS", - "5lmj7caKx5ttUEaO7WG3zUaOADmKu4q7hpew0+Cg5fkS/9kukZ74Uv/q/qmq0vW21TKFWkfH/kpG84E3", - "K5xVVSly7pD42n92Xx0TAFIkeNviFC/Upx8iECutKtBW0KC8qrJS5bzMjOUWR/pXDcvZ09m/nLb2l1Pq", - "bk6jyV+4XhfYyYmsJAZlvKqOGOOVE33MHmbhGDR+QjZBbA+FJiFpEx0pCceCS7ji0p60KkuHHzQH+K2f", - "qcU3STuE754KNopwRg0XYEgCpob3DItQzxCtDNGKAumqVIvmh8/OqqrFIH4/qyrCB0qPIFAwg60w1tzH", - "5fP2JMXznD8/Yd/FY6MormS5c5cDiRrublj6W8vfYo1tya+hHfGeYbidSp+4rQlocGL+XVAcqhVrVTqp", - "5yCtuMZ/821jMnO/T+r85yCxGLfjxIWKlscc6Tj4S6TcfNajnCHheHPPCTvr970Z2bhR9hCMOW+xeNfE", - "g78ICxtzkBIiiCJq8tvDtea7mRcSMxT2hmTykwGikIqvhERo5059kmzDL2k/FOLdEQKYRi8iWiIJsjGh", - "epnTo/5kYGf5E1BramODJOok1VIYi3o1NmZrKFFw5jIQdEwqN6KMCRu+ZxENzNeaV0TL/guJXUKiPk+N", - "CNZbXrwT78QkzBG7jzYaoboxWz7IOpOQINfowfB1qfLLv3GzvoMTvghjDWkfp2Fr4AVotuZmnTg4Pdpu", - "R5tC364h0ixbRFOdNEt8oVbmDpZYqmNYV1U942Xpph6yrN5qceBJB7ksmWvMYCPQYO4VR7Kwk/7FvuH5", - "2okFLOdlOW9NRarKSriC0intQkrQc2bX3LaHH0cOeg2eIwOO2Vlg0Wq8mQlNbLqxRWhgG4430MZpM1XZ", - "7dNwUMM30JOC8EZUNVoRIkXj/HlYHVyBRJ7UDI3gN2tEa008+Imb23/CmaWixZEF0Ab3XYO/hl90gHat", - "2/tUtlMoXZDN2rrfhGa50jQE3fB+cvcf4LrtTNT5WaUh80NofgXa8NKtrreo+w353tXpPHAyC255dDI9", - "FaYVMOIc2A/FO9AJK82P+B9eMvfZSTGOklrqESiMqMidWtDF7FBFM7kGaG9VbEOmTFbx/PIoKJ+1k6fZ", - "zKST9w1ZT/0W+kU0O/RmKwpzV9uEg43tVfeEkO0qsKOBLLKX6URzTUHAG1UxYh89EIhT4GiEELW982vt", - "a7VNwfS12g6uNLWFO9kJN85kZv+12j73kCl9GPM49hSkuwVKvgGDt5uMGaebpfXLnS2Uvpk00btgJGu9", - "jYy7USNhat5DEjatq8yfzYTHghr0BmoDPPYLAf3hUxjrYOHC8t8AC8aNehdY6A5011hQm0qUcAekv04K", - "cQtu4PPH7OJvZ188evz3x1986Uiy0mql+YYtdhYM+8yb5ZixuxLuJ7UjlC7So3/5JPiouuOmxjGq1jls", - "eDUcinxfpP1SM+baDbHWRTOuugFwEkcEd7UR2hm5dR1oz2FRry7AWqfpvtJqeefccDBDCjps9KrSTrAw", - "XT+hl5ZOC9fkFLZW89MKW4IsKM7ArUMYpwNuFndCVGMbX7SzFMxjtICDh+LYbWqn2cVbpXe6vgvzBmit", - "dPIKrrSyKldl5uQ8oRIGile+BfMtwnZV/d8JWnbNDXNzo/eylsWIHcJu5fT7i4Z+s5UtbvbeYLTexOr8", - "vFP2pYv8VgupQGd2KxlSZ8c8stRqwzgrsCPKGt+BJflLbODC8k3143J5N9ZOhQMl7DhiA8bNxKiFk34M", - "5EpSMN8Bk40fdQp6+ogJXiY7DoDHyMVO5ugqu4tjO27N2giJfnuzk3lk2nIwllCsOmR5exPWGDpoqnsm", - "AY5Dxwv8jLb651Ba/q3Sb1rx9Tut6urO2XN/zqnL4X4x3htQuL7BDCzkquwGkK4c7CepNf4uC3rWGBFo", - "DQg9UuQLsVrbSF98pdVvcCcmZ0kBih/IWFS6PkOT0Q+qcMzE1uYORMl2sJbDObqN+RpfqNoyzqQqADe/", - "NmkhcyTkEGOdMETLxnIr2ieEYQtw1JXz2q22rhgGIA3ui7ZjxnM6oRmixoyEXzRxM9SKpqNwtlIDL3Zs", - "ASCZWvgYBx99gYvkGD1lg5jmRdwEv+jAVWmVgzFQZN4UfRC00I6uDrsHTwg4AtzMwoxiS65vDezl1UE4", - "L2GXYayfYZ99/7O5/zvAa5Xl5QHEYpsUevv2tCHU06bfR3D9yWOyI0sdUa0Tbx2DKMHCGAqPwsno/vUh", - "Guzi7dFyBRpDSn5Tig+T3I6AGlB/Y3q/LbR1NRLB7tV0J+G5DZNcqiBYpQYrubHZIbbsGnVsCW4FESdM", - "cWIceETwesGNpTAoIQu0adJ1gvOQEOamGAd4VA1xI/8cNJDh2Lm7B6WpTaOOmLqqlLZQpNaAHtnRuX6A", - "bTOXWkZjNzqPVaw2cGjkMSxF43tkeQ0Y/+C28b96j+5wcehTd/f8LonKDhAtIvYBchFaRdiNo3hHABGm", - "RTQRjjA9ymlCh+czY1VVOW5hs1o2/cbQdEGtz+xPbdshcZGTg+7tQoFBB4pv7yG/JsxS/PaaG+bhCC52", - "NOdQvNYQZncYMyNkDtk+ykcVz7WKj8DBQ1pXK80LyAoo+S4RHECfGX3eNwDueKvuKgsZBeKmN72l5BD3", - "uGdoheOZlPDI8AvL3RF0qkBLIL73gZELwLFTzMnT0b1mKJwruUVhPFw2bXViRLwNr5R1O+7pAUH2HH0K", - "wCN4aIa+OSqwc9bqnv0p/hOMn6CRI46fZAdmbAnt+EctYMQW7N84Reelx957HDjJNkfZ2AE+MnZkRwzT", - "r7i2IhcV6jrfw+7OVb/+BEnHOSvAclFCwaIPpAZWcX9GIaT9MW+mCk6yvQ3BHxjfEssJYTpd4C9hhzr3", - "K3qbEJk67kKXTYzq7icuGQIaIp6dCB43gS3Pbblzgppdw45dgwZm6gWFMAz9KVZVWTxA0j+zZ0bvnU36", - "Rve6iy9wqGh5qVgz0gn2w/empxh00OF1gUqpcoKFbICMJASTYkdYpdyuC//8KTyACZTUAdIzbXTNN9f/", - "PdNBM66A/aeqWc4lqly1hUamURoFBRQg3QxOBGvm9MGJLYaghA2QJolfHjzoL/zBA7/nwrAlXIc3g65h", - "Hx0PHqAd55UytnO47sAe6o7beeL6QMeVu/i8FtLnKYcjnvzIU3byVW/wxtvlzpQxnnDd8m/NAHoncztl", - 
"7TGNTIv2wnEn+XK68UGDdeO+X4hNXXJ7F14ruOJlpq5Aa1HAQU7uJxZKfnPFyx+bbvgeEnJHozlkOb7i", - "mzgWvHF96OGfG0dI4Q4wBf1PBQjOqdcFdTqgYraRqmKzgUJwC+WOVRpyoPduTnI0zVJPGEXC52suV6gw", - "aFWvfHArjYMMvzZkmtG1HAyRFKrsVmZo5E5dAD5MLTx5dOIUcKfS9S3kpMBc82Y+/8p1ys0c7UHfY5B0", - "ks1noxqvQ+pVq/EScrrvNidcBh15L8JPO/FEVwqizsk+Q3zF2+IOk9vc38Zk3w6dgnI4cRTx234cC/p1", - "6na5uwOhhwZiGioNBq+o2Exl6Ktaxm+0Q6jgzljYDC351PXvI8fv9ai+qGQpJGQbJWGXTEsiJLzEj8nj", - "hNfkSGcUWMb69nWQDvw9sLrzTKHG2+IXd7t/QvseK/Ot0nflEqUBJ4v3EzyQB93tfsqb+kl5WSZci/4F", - "Z58BmHkTrCs048aoXKDMdl6YuY8KJm+kf+7ZRf+r5l3KHZy9/rg9H1qcHABtxFBWjLO8FGhBVtJYXef2", - "neRoo4qWmgjiCsr4uNXyWWiSNpMmrJh+qHeSYwBfY7lKBmwsIWGm+RYgGC9NvVqBsT1dZwnwTvpWQrJa", - "Cotzbdxxyei8VKAxkuqEWm74ji0dTVjFfgWt2KK2XekfHygbK8rSO/TcNEwt30luWQncWPZSyDdbHC44", - "/cORlWCvlb5ssJC+3VcgwQiTpYPNvqOvGNfvl7/2Mf4Y7k6fQ9BpmzFh5pbZSZLyfz/796dvz7L/4tmv", - "D7Ov/sfp+w9PPt5/MPjx8ce//vX/dX/6/ONf7//7v6Z2KsCeej7rIT9/7jXj8+eo/kSh+n3YP5n9fyNk", - "liSyOJqjR1vsM0wV4Qnoftc4ZtfwTtqtdIR0xUtRON5yE3Lo3zCDs0ino0c1nY3oGcPCWo9UKm7BZViC", - "yfRY442lqGF8ZvqhOjol/dtzPC/LWtJWBumb3mGG+DK1nDfJCChP2VOGL9XXPAR5+j8ff/HlbN6+MG++", - "z+Yz//V9gpJFsU3lEShgm9IV40cS9wyr+M6ATXMPhD0ZSkexHfGwG9gsQJu1qD49pzBWLNIcLjxZ8jan", - "rTyXFODvzg+6OHfec6KWnx5uqwEKqOw6lb+oI6hhq3Y3AXphJ5VWVyDnTJzASd/mUzh90Qf1lcCXITBV", - "KzVFG2rOARFaoIoI6/FCJhlWUvTTe97gL39z5+qQHzgFV3/OVETvve++ecNOPcM09yilBQ0dJSFIqNL+", - "8WQnIMlxs/hN2Tv5Tj6HJVoflHz6Thbc8tMFNyI3p7UB/TUvuczhZKXY0/Ae8zm3/J0cSFqjiRWjR9Os", - "qhelyNllrJC05EnJsoYjvHv3lpcr9e7d+0FsxlB98FMl+QtNkDlBWNU286l+Mg3XXKd8X6ZJ9YIjUy6v", - "fbOSkK1qMpCGVEJ+/DTP41Vl+ikfhsuvqtItPyJD4xMauC1jxqrmPZoTUPyTXre/Pyh/MWh+HewqtQHD", - "ftnw6q2Q9j3L3tUPH36OL/vaHAi/+Cvf0eSugsnWldGUFH2jCi6c1EqMVc8qvkq52N69e2uBV7j7KC9v", - "0MZRlgy7dV4dhgcGOFS7gOaJ8+gGEBxHPw7GxV1Qr5DWMb0E/IRb2H2Afav9it7P33i7DrzB57VdZ+5s", - "J1dlHImHnWmyva2ckBWiMYxYobbqE+MtgOVryC99xjLYVHY373QPAT9e0AysQxjKZUcvDDGbEjooFsDq", - "quBeFOdy109rY+hFBQ76Gi5h90a1yZiOyWPTTatixg4qUmokXTpijY+tH6O/+T6qLDw09dlJ8PFmIIun", - "DV2EPuMHmUTeOzjEKaLopP0YQwTXCUQQ8Y+g4AYLdePdivRTyxMyB2nFFWRQipVYpNLw/sfQHxZgdVTp", - "Mw/6KORmQMPEkjlVfkEXq1fvNZcrcNezu1KV4SVlVU0GbaA+tAau7QK43Wvnl3FCigAdqpTX+PIaLXxz", - "twTYuv0WFi12Eq6dVoGGImrjo5dPxuPPCHAobghP6N5qCiejuq5HXSLjYLiVG+w2aq0PzYvpDOGi7xvA", - "lKXq2u2Lg0L5bJuU1CW6X2rDVzCiu8Teu4n5MDoePxzkkESSlEHUsi9qDCSBJMjUOHNrTp5hcF/cIUY1", - "sxeQGWYiB7H3GWESbY+wRYkCbBO5SnvPdceLSlmBx0BLsxbQshUFAxhdjMTHcc1NOI6YLzVw2UnS2W+Y", - "9mVfarrzKJYwSoraJJ4Lt2Gfgw70fp+gLmSlC6noYqV/Qlo5p3vh84XUdiiJomkBJaxo4dQ4EEqbMKnd", - "IAfHj8sl8pYsFZYYGagjAcDPAU5zecAY+UbY5BFSZByBjYEPODD7QcVnU66OAVL6hE88jI1XRPQ3pB/2", - "UaC+E0ZV5S5XMeJvzAMH8KkoWsmiF1GNwzAh58yxuSteOjbndfF2kEGGNFQoevnQfOjN/TFFY49riq78", - "o9ZEQsJNVhNLswHotKi9B+KF2mb0Qjmpiyy2C0fvybcL+F46dTApF909wxZqi+FceLVQrPwBWMbhCGBE", - "tpetMEiv2G9MziJg9k27X85NUaFBkvGG1oZcxgS9KVOPyJZj5PJZlF7uRgD0zFBtrQZvljhoPuiKJ8PL", - "vL3V5m3a1PAsLHX8x45QcpdG8De0j3UTwv2tTfw3nlwsnKhPkglvaFm6TYZC6lxR1sFjEhT2yaEDxB6s", - "vurLgUm0dmO9uniNsJZiJY75Dp2SQ7QZKAGV4KwjmmaXqUgBp8sD3uMXoVtkrMPd43J3Pwog1LASxkLr", - "NApxQb+HOZ5j+mSlluOrs5VeuvW9Vqq5/Mltjh07y/zkK8AI/KXQxmbocUsuwTX61qAR6VvXNC2BdkMU", - "qdiAKNIcF6e9hF1WiLJO06uf9/vnbtofmovG1Au8xYSkAK0FFsdIBi7vmZpi2/cu+AUt+AW/s/VOOw2u", - "qZtYO3LpzvEnORc9BraPHSQIMEUcw10bRekeBhk9OB9yx0gajWJaTvZ5GwaHqQhjH4xSC8/ex25+Gim5", - "ligNYPqFoFqtoAjpzYI/TEZJ5EolV1EVp6ralzPvhFHqOsw8tydpnQ/Dh7Eg/Ejcz4QsYJuGPtYKEPL2", - "ZR0m3MNJViApXUnaLJRETRzijy0iW90n9oX2HwAkg6Df9JzZbXQy7VKznbgBJfDC6yQGwvr2H8vhhnjU", - "zcfCpzuZT/cfIRwQaUrYqLDJMA3BCAPmVSWKbc/xRKOOGsH4UdblEWkLWYsf7AAGukHQSYLrpNL2odbe", - 
"wH6KOu+p08oo9toHFjv65rl/gF/UGj0YncjmYd72RlebuPbvf76wSvMVeC9URiDdaghczjFoiLKiG2YF", - "hZMUYrmE2PtibuI56AA3sLEXE0g3QWRpF00tpP3ySYqMDlBPC+NhlKUpJkELYz75N0MvV5DpI1NScyVE", - "W3MDV1Xyuf73sMt+5mXtlAyhTRue691O3cv3iF2/2nwPOxz5YNSrA+zArqDl6TUgDaYs/c0nEyWwvmc6", - "Kf5Rvexs4RE7dZbepTvaGl+UYZz421umU7Sgu5TbHIw2SMLBMmU3LtKxCe70QBfxfVI+tAmiOCyDRPJ+", - "PJUwoYTl8CpqclEcot03wMtAvLic2cf57HaRAKnbzI94ANevmgs0iWeMNCXPcCew50iU86rS6oqXmY+X", - "GLv8tbrylz82D+EVn1iTSVP2m2/OXrzy4H+cz/ISuM4aS8DoqrBd9adZFZVx2H+VULZvb+gkS1G0+U1G", - "5jjG4hoze/eMTYOiKG38THQUfczFMh3wfpD3+VAfWuKekB+omoif1udJAT/dIB9+xUUZnI0B2pHgdFzc", - "tMo6Sa4QD3DrYKEo5iu7U3YzON3p09FS1wGehHP9iKkp0xqH9IkrkRX54B9+59LTt0p3mL9/mZgMHvrt", - "xConZBMeR2K1Q/3KvjB1wkjw+mX1izuNDx7ER+3Bgzn7pfQfIgDx94X/HfWLBw+S3sOkGcsxCbRSSb6B", - "+80ri9GN+LQKuITraRf02dWmkSzVOBk2FEpRQAHd1x5711p4fBb+lwJKcD+dTFHS400ndMfATDlBF2Mv", - "EZsg0w2VzDRMyX5MNT6CdaSFzN6XZCBn7PAIyXqDDszMlCJPh3bIhXHsVVIwpWvMsPGItdaNWIuR2FxZ", - "i2gs12xKztQekNEcSWSaZNrWFncL5Y93LcU/a2CicFrNUoDGe6131QXlAEcdCKRpu5gfmPxU7fC3sYPs", - "8TcFW9A+I8he/93zxqcUFpoq+nNkBHg844Bx74ne9vThqZles627IZjT9JgppdMDo/POupE5kqXQhcmW", - "Wv0KaUcI+o8SiTCC41OgmfdXkKnIvT5LaZzKbUX3dvZD2z1dNx7b+FvrwmHRTdWxm1ym6VN93EbeROk1", - "6XTNHsljSlgcYdB9GjDCWvB4RcGwWAYlRB9xSeeJskB0XpilT2X8lvOUxm9PpYd58P615NcLnqoR43Qh", - "B1O0vZ04KatY6Bw2wDQ5Dmh2FkVwN20FZZKrQLc+iGFW2hvqNTTtZI2mVWCQomLVZU5hCqVRiWFqec0l", - "VRF3/Yhf+d4GyAXvel0rjXkgTTqkq4BcbJLm2Hfv3hb5MHynECtBBbJrA1EFZj8Qo2STSEW+inWTucOj", - "5nzJHs6jMvB+NwpxJYxYlIAtHlGLBTd4XTbu8KaLWx5IuzbY/PGE5utaFhoKuzaEWKNYo3uikNcEJi7A", - "XgNI9hDbPfqKfYYhmUZcwX2HRS8EzZ4++goDauiPh6lb1hc438eyC+TZIVg7TccYk0pjOCbpR01HXy81", - "wK8wfjvsOU3UdcpZwpb+Qjl8ljZc8hWk32dsDsBEfXE30Z3fw4skbwAYq9WOCZueHyx3/GnkzbdjfwQG", - "y9VmI+zGB+4ZtXH01JZXpknDcFTr39eLCnCFjxj/WoXwv56t6xOrMXwz8mYLo5R/QB9tjNY545T8sxRt", - "ZHqo18nOQ25hLKDV1M0i3Li53NJRlsRA9SWrtJAW7R+1XWZ/cWqx5rljfydj4GaLL58kClF1a7XI4wD/", - "5HjXYEBfpVGvR8g+yCy+L/tMKpltHEcp7rc5FqJTORqomw7JHIsL3T/0VMnXjZKNklvdITcecepbEZ7c", - "M+AtSbFZz1H0ePTKPjll1jpNHrx2O/TT6xdeytgonSoY0B53L3FosFrAFb6YS2+SG/OWe6HLSbtwG+h/", - "3/inIHJGYlk4y0lFIPJo7nss76T4n1+2mc/RsUovEXs2QKUT1k5vt/vE0YbHWd36/lsKGMNvI5ibjDYc", - "ZYiVkeh7Cq9v+vwe8UJ9kGjPOwbHR78w7XRwlOMfPECgHzyYezH4l8fdz8TeHzxIJyBOmtzcry0WbqMR", - "Y9/UHn6tEgawULWwCSjy+RESBsixS8p9cExw4Yeas26FuE8vRdzN+650tGn6FLx79xa/BDzgH31E/M7M", - "EjewfaUwfti7FTKTJFM036M4d86+VtuphNO7gwLx/AFQNIKSieY5XMmgAmjSXX8wXiSiUTfqAkrllMy4", - "KFBsz//z4Nktfr4H27Uoi5/b3G69i0Rzma+TUcIL1/HvJKN3rmBilck6I2suJZTJ4Ui3/XvQgRNa+j/U", - "1Hk2Qk5s269AS8vtLa4FvAtmACpM6NArbOkmiLHaTZvVpGUoV6pgOE9b1KJljsNSzqkSmon3zTjsprY+", - "bhXfgvuEQ0tRYhhm2m+MLTPN7UgCLax3HuoLuXGw/LghMwONDppxscGL2fBNVQKezCvQfIVdlYRed0yh", - "hiNHFSuYqdwnbIkJKxSztZZMLZfRMkBaoaHczVnFjaFBHrplwRbnnj199PBh0uyF2JmwUsJiWOaP7VIe", - "nWIT+uKLLFEpgKOAPQzrx5aijtnYIeH4mpL/rMHYFE/FD/RyFb2k7tamepJN7dMT9h1mPnJE3El1j+bK", - "kES4m1CzrkrFizkmN37zzdkLRrNSHyohT/UsV2it65J/0r0yPcFoyOw0kjln+jj7U3m4VRubNeUnU7kJ", - "XYu2QKboxdygHS/Gzgl7TibUpoA/TcIwRbbeQBFVuyQlHonD/cdanq/RNtmRgMZ55fRCrIGdtZ6b6PVh", - "U/0IGbaD29dipVKsc6bsGvS1MIAv8uEKuukQm9yg3jYe0iN2l6drKYlSTo4QRptaR8eiPQBHkmwIKkhC", - "1kP8kZYpqsd8bF3aC+yVfovRK3Lb8/qH5HohxTZ76Z0LOZdKihxLIaQkaUzdNs1NOaFqRNq/aGb+hCYO", - "V7K0bvMW2GNxtNhuYIQecUOXf/TVbSpRB/1pYetLrq3AGs/ZoJiHStfeISakAV/NyhFRzCeVTgQ1JR9C", - "NAEUR5IRZmUasXB+67794O3fmBTjUki0dHm0ef2MXFalEeiZlkxYtlJg/Hq6r3nMW9fnBLM0FrB9f/JC", - "rUR+IVY4BoXRuWVTzOhwqLMQQeojNl3bZ66tz53f/NwJB6NJz6rKTzpeBz0pSNqtHEVwKm4pBJJEyG3G", - "j0fbQ257Q7/xPnWEBlcYtQYV3sMDwmhqaXdH+cbplkRR2ILRi8pkAl0hE2C8EDK4UNMXRJ68EnBj8LyO", - 
"9DO55pZ0h0k87Q3wcuQBBL5QJh/8bYfqVw5wKME1hjnGt7EtAz7COJoGrcTP5Y6FQ+GoOxImnvGyCZ1O", - "FPVGqcoLUQU+LuqV+U4xDse4s/BksoOug8/3mu5YjePYm2gsR+GiLlZgM14UqdRWX+NXhl/DIzHYQl43", - "Raia14HdHOVDavMT5UqaerNnrtDgltNFdfMT1BDX7g87jJl2Fjv8N1WBaXxnfND00a9yQ4R0cVxi/uEr", - "45TU62g6M2KVTccE3im3R0c79c0Ive1/p5Qenuv+IV7j9rhcvEcp/vaNuzjixL2D+HS6Wpq8uhgLrvB7", - "SHjUZITsciW8ygZ1xjDqATcvsWU94EPDJOBXvBx5CR/7Suh+Jf/B2Hv4fDR9A7c+PZflbC8LGk15RLHC", - "Pe/L0IU4Fh9M4cF357Xwa92L0HHf3fcdTx3FiLXMYtRDdzMnWrvBx3rRvr8aS5EQ6nTg97geiI/imfs0", - "8HAlVB2ir0IMdFAJ6VefgqdT92Nk/cmXBb+312LUx/LG16+lZXqd/PufyQvLQFq9+wN4XAab3i8qk5B2", - "yTzVNmFN6cNJpRA7t+KUGjapcileNgy2MmItHVoalJ8ZkNXzKeLAAB8f57Pz4qgLM1VyZ0ajpI7dC7Fa", - "W8zY/zfgBehXByoStFUI8IhVyoi2AmnpBvMpYNc43MnUxwaOgEVcUWE4VghCvYLcYtnZNrhOAxxTX8FN", - "Fpw+/12ZYFydbt5k+IIE+6oQDGvNHrjjB4mTouRfVKfzZHrO/bMmhJpegF1z06Zr6b2Znvxyc7mEHLMi", - "701U9R9rkFESpHmwyyAsyyhvlWjeMWFe7+Otji1A+/JI7YUnqq9za3DG3rFfwu6eYR1qSBYObR7x3SRx", - "MGKAXGAhh/SYIdlHjQnTUAZiIYQE+1TMbXGM0ZzPUdq1G84VSNJdHG0qtj1TpoueT5rLdT0q7SM+yRnL", - "ZTWsmTyufzzHEtXGB8jxJvFwrKWz82HhnGufuBjTijW+k5DCGEz4LeQQpFlKcenrByBWyFN1zXURWtxJ", - "Uii6m0Qa6GUzs2gfcAyDHBKlGPAtVF4qJ0ZkYw/Kum8mmoDDe4YiQ9sEPgjXErSGonGJlMpAZlV48LEP", - "jn2ooPDXGyHBjJY/IuBGU1+/bnN7Yxk4jqmuuY96jRfINGy4g05HGbjH59yH7Gf0PTzCD2XADlqYGno9", - "XI82PN0RZoDEmOqXzN+Whx/338TYJKQEnQXPUz8dt+xmZMO8m0Wd0wUdH4zGIDc5d84eVpK00+TDVfZ0", - "hOiR/CXsTkkJCoV8ww7GQJPkRKBHCUd7m3yn5jeTgnt1J+D9vnnkKqXKbMTZcT7MId6n+EuRXwLmAGxC", - "3EdqtLPP0MbeeLOv17uQM7uqQEJx/4SxM0mPioJju1tesDe5vGf3zb/FWYua0vp7o9rJO5l+nYEJ9/Ut", - "uVkYZj8PM+BY3S2nokEOZKjeyrGQm2tMzt+t4nkyVSsfupr7VeRboiIoUjLJBXmsnuFBTxmOMAVClKsD", - "HZmceU8XM6VKxfLeJE2DGyqNqXgyBMiCnJItoIHCD55EQLIueuIUUuo7n/ROLZmG1ol80+x/wxLuKY2+", - "P3MzS5ffLZWGTjF215syfTYPXzCNJv5nIazmeneTHH2DEvID68kolg+GYzWRWO1C2misIQ7LUl1nyKyy", - "ps5FSrV17Uz3Mg5F19p+7lQvIIrr4sYLaju25gXLldaQxz3S7z0Jqo3SkJUKw7xSHuildXL3Bh95SVaq", - "FVNVrgqgejFpChqbq5aSo9gEUVRNEgVEO/hamPpEdDxxSnenkh8pQ1FrdUTt/Bzo5Xqb1YkWnZEvcyRi", - "GYzP4uQxRI2H8O6p/Z/mzUuxRboBnTryS2Z1DXPmW/RrZPuDzzWwjTCGQGlo6VqUJT4cF9vI89oELqRR", - "OyL2nmNY5ZXA2JtuEgGShit35zWZFWIecBGnPWJ2rVW9WkcJphs4g8qra68Qx6P8ZGoMj8IXZG6KJ2yj", - "jPWaJo3ULrkNOfssV9JqVZZdoxSJ6CtvaX/Jt2d5bl8odbng+eV91Gulss1Ki3l4X90PDmxn0r3UYt0L", - "OKNy5odT9VI7DJXzRDuZQfZY3NGF3SMw3x/moIdt7mfDhfXX1WWmaTXmTDJu1Ubk6TP154q2G42RS7Go", - "ZM4yqq1IWSawGR72+LJqgiuQRQ7RDJIni8OdMc8IvJMZ2Y37L0rg/XHZEjyjGbkoh8zFS1FZPirr9QBA", - "SOnps601FWSMJbGGq6gVpUpAF3kf0Im3CkYi3Q42N8KdA2XhVkANoh8bAD8j48OccstRJOVCbcP3+23y", - "uRsB/3E/lXeYx1iI10VLWpqCvEKimhGOkE5xvTce6g0+e19MjYpqiudOvOEjAMbjpDowTIqWOhaMJRcl", - "FFmq9uJ5Y6OaR5q2f5rVL4kujOfkOa9D6UM3dq3BJ04hEV93/V8Vd6SkmuZDS7IsYAv0ruNX0IpqGs4j", - "/wuUVPKwZwxQVVbCFXTCx3w2lxpFTXEFoa9pOrMCoEJvZN9GloqLiu/ynuHErz2LImumYDdpSSHE0k6x", - "A2aSpFFnKzM6JmbqUXIQXYmi5h38mWNFjq4Z0B3lBKoGOkIW9Mip0/xEI7wOA5yF/ilRJmDi/TQ+dDQL", - "SqNuHwM6GCdZm7FTL9NhknGqosbBgrMVjSOWSLzlG6bi13LcIDkk+VbdmrhPQskIsd9sIUepxus7UHiN", - "Z8RJ4bOeILVLgIK0AtclYW1fg2RSRSUmr7lpVJU2h2L4gSbGRkJ6bfoGTuU2mvH2O8twMGZ6ydRGFQnd", - "0OnNzfO/y0ncexBHx0vRiAH//G+P/StQt1c7sAGW8pZuP53sj0Ua/S3muficLeowUFmqa6oZGeuhzyH4", - "QYn6ggvIi+WiuZZD1Obcp/fsmzpEFK++4TumNP7jtM5/1rwUyx3yGQI/dGNmzR0JeccrRQT4KFA38X7x", - "ah4AC9YWFaaidYupY0bD7dwoEdDuIg/FfRTb8EuItwGDHYh/5tYxTlMv0HLhruzedg6x4BcfUrRseBFr", - "+pgosltGPaQOdr3/Z/sWLp4q5HerSp6HCqG+RFGXz2AV4EBcdg2b/Y8lh3wtkEBTWbglWh1e1xc3MJke", - "ybpSLxDGyq90wB5UXB1UnrnVMiZafns1NvY8M520lLvehalRNwOg4zqNh8CPy1Z+Gvwnc7iOLWMK+H8U", - "vI8Uqo3hpZq0nwDLnQwcCVjJWr1Q20zD0hwKMCFztVPndZu7I5hYhcw1cEMRN+c/esWzTVEqpFOEKSa0", - 
"8Wk2oxSwFLJllkJWtU3oMZipVO4ihMVGf0TriAttTEpwwuQVL3+8Aq1FMbZx7nRQSce4RERwdPi+CRNG", - "c6cOBxCm1eHwfWZrRo+buQucilBRuKaxXBZcF3FzIVkO2t377JrvzM09So1z4JBPiUfSTDdrQORdQtIm", - "QMqddwrf0t/TAMjv0PEzwWGDccEJZw2Zdqwa8c8MYfhTOGw2fJuVaoWvCEcOhM9Nix4+UgGVRDM4yWfT", - "1h3mMeJX2D8NpuX3jMgqnHXKFPvP/Y+4lahG/iSF3XvyyUbZf9ZJcbd0MANS5aoN/idiGZ7H1Etcn3wl", - "fo0bhM3wVCXQHkSbCCP+oa5dfGQXMQzCP+OOjeDTy511Iy1S733JMpChxcDsCe8H04ay89yHZw1NaQNT", - "AyFl7l9LH2lpI/t8uJdGwKPa9P6sd6dtQmbcOMfUiNv/PjqrVJXlU2I+qXJH4d0EHtIujCP0ETkBRtbd", - "hMeYppZNJ+9Rp6jNsWXyRovqHPJ2Vfk+pX/MTDTC0bsuCLVEXkaV29G6hS95GmPKvP/GrGsGa5gE40xD", - "Xms0E1/z3eGyYyMZoy/+dvbFo8d/f/zFl8w1YIVYgWmzjvfKdrVxgUL27T6fNhJwsDyb3oSQfYAQF/yP", - "4VFVsyn+rBG3NW1K0UHRsmPsy4kLIHEcE+WibrRXOE4b2v/H2q7UIu98x1Io+O33TKuyTFd9aOSqhAMl", - "tVuRC8VpIBVoI4x1jLDrARW2jYg2azQPYu7fK8omo2QOwX7sqUDYkZCr1ELGAmqRn+Hbbu81YrCtSs+r", - "yNOzb11eTyMLHQqNGBWzAFapyov2YslSEOELIh29rPWGT7SIRzGyDbOlaNkUIfrI8zTpxQWz93P7bjFX", - "m+b0bhMT4kU4lDcgzTH/xHjegptwkta0/4fhH4lEDHfGNZrl/ha8Iqkf3Kwo/yTQho/yE+SBAIy8tu28", - "k4weikWJiDV5CdCfEBzIffHjZetYPvgsBCEJHQ6AFz+fbds1Lxk8OL9zRt+XDVKipbwfo4TO8g+9yA2s", - "t7lIoi3yRhNrwRBbUkOxMHpubZ41r5hHtJLBY2etlGVOMy3LxCNpsuPgmYoJx6kE+oqXn55rfCu0sWeI", - "Dyhejz+Nil/KxkgmVJqb5el7wSfNHb2Kvbup5St8mP0f4PYoec/5obwTfnCboXEHK9avwq1Ab73ZNY5J", - "QVaPvmQLX2yj0pAL03fuXwfhpHkYClosfUArbO2Bl6iH1vmzsrcg42WIxGE/RO6txmfvIWyP6O/MVEZO", - "bpLKU9Q3IIsE/lI8Ki7Oe+C6uGVhhpulfYkSuB2Z9mVYdnjq8ii1ibt0agPDdU6+rTu4TVzU7dqm5iya", - "XN/h3bu3djEl1VC6FoPrjrmO7qQow1ElGX6DLEeEIz+GnzdFMT+P5b2l3K4jubl7+1GL8mDASifT+sf5", - "bAUSjDCYS/zvvnbMp71LAwSUeWF4VAnW26SLIcQk1tqZPJoqyqE+IX2675bIeY2vGvNaC7vDusHBgCb+", - "nszH9F2T28Pnhml8af7us+oSmtrtbSaQ2oTb9TvFS7yPyMUn3S2kyhP2DWX49gflr/cW/waf/+VJ8fDz", - "R/+2+MvDLx7m8OSLrx4+5F894Y+++vwRPP7LF08ewqPll18tHhePnzxePHn85Msvvso/f/Jo8eTLr/7t", - "nuNDDmQCNKT2fzr7P9lZuVLZ2avz7I0DtsUJr8T34PYGdeWlwrqWDqk5nkTYcFHOnoaf/lc4YSe52rTD", - "h19nvj7TbG1tZZ6enl5fX5/EXU5X+PQ/s6rO16dhHqw22JFXXp03MfoUh4M72lqPcVM9KZzht9ffXLxh", - "Z6/OT1qCmT2dPTx5ePLIl7aWvBKzp7PP8Sc8PWvc91PMr3lqfOr806qi5Pkf57NTT4f+rzXwEpPouD82", - "YLXIwycNvNj5/5trvlqBPsEXGvTT1ePTIHGcfvDZET7u+3YaR3+cfugkkSgO9GyiG5J+xxdKXaLbO8hA", - "90wvVuMkrr59XjgUU0sMsDDnLbMLJZTRrzx7+jZlX/FxklW9KEXO6IpGGnUbEJFQkxqkZRFoTJu15ftb", - "hueY2MPsq/cfvvjLx5Qg1QfkpXf6tV4OH3aLL7nwEcJJgOufNehdCxh65GcxGEOXYDpD2tayyhc38LOd", - "sJ98NAN+Jb7RRH36h19NcrnQaQQwN0QKrgYL77GOH4b3ITk8fvgwnG4vO0dkdeqpNUZ3178wiP05JmVB", - "p7h1QvBxi8kQH0OK/clQWiWHTSE5Rc5jSO2GX5JnBYPmmPZvYz1GfRwuIrl5I+K3JTDw37Bs0YSH1zTT", - "UPD4OOSIIycwhMvGxq9SkGnPhzCl6lN/nM+eHEkNe41QnRyhCfBf8tKBDEVIDUMQPPp0EJxLiup0Vwtd", - "gR/nsy8+JQ7OpWNevGTYMiqxm6B4eSnVtQwtnbxSbzZc71AasVP22GcyQn9haEd0T5cnd2f47YzYMhYb", - "qUALpxTycvb+46Hr5fRDKK2+/zLqlNX2MclRh4mX3L5mpwsspza1KZio8fhS0MxlTj/gCR39/dRb29Mf", - "0WBGkthpSOQ10pJStqQ/dlD4wW7dQvYP59pE4+Xc5uu6Ov2A/0GhKloRZYA+tVt5igFGpx86iPCfB4jo", - "/t52j1tcbVQBATi1XFI9+n2fTz/Qv9FEHcJshZqugPJN1OjZGvLLWfru66XHj3oxkjn5ooSCmNOTCR2k", - "snGnGx3o1yh+GPbj90wsGfSnECbMcMS5peShp1i1ddfiMvy8k3nyx+E2dxInjvx8GlSelGjbbfmh82f3", - "yJl1bQt1Hc2CxkKydA8hcx9r0//79JoL69R/n68Py7wPO1vg5akvztH7tc2HPfiCSb6jH+OXaMlfT7lH", - "9axSJkG2r/l15OE7w8YkIYCxXyvUKMZup222EBIpKL6hWhsBfRzKxoN7yck1GAwX3CzDXDuY8EMrXuTc", - "YHlxX+dmIK1/TB67Ty1tfM0LFvKkZKyVPc68JtpZ2n9LIjj9559u+gvQVyIH9gY2ldJci3LHfpLNE5kb", - "M9JvkTg1zy9RQm8IluInNb/uvrrR6bwP3TJOIQ0IMLtlay6L0r+UVzXWp3OUhW5RFQXmuAsolDGrlEYA", - "KD8kFBSqYE7YRRPIgWERdVByCriCUlXot8CsxzQJxyAPcvTFF0GX/89n28wd4hXIzLORbKGKna/7M9P8", - "2m7p0fuAV5FwOMLIBqJb6quXTkYahYDu8Lk1IMYGObQiNKa4t++dFov15L2BobUvPT09xRc+a2Xs6cwp", - 
"4V3bU/zxfYOwUAZ1VmlxheUaEGlKC6dblpk33rQVz2aPTx7OPv7/AAAA//8zEE3OoQkBAA==", + "d79CN0CCJChRMxMnqdq/7BHxo9FoNBr988MsV5tKSZDWzJ5+mFVc8w1Y0PgXz3NVS5uJwv1VgMm1qKxQ", + "cvY0fGPGaiFXs/lMuF8rbtez+UzyDbRtXP/5TMM/a6GhmD21uob5zORr2HA3sN1VrnUz0jZbqcwPcUZD", + "nD+ffdzzgReFBmOGUP4oyx0TMi/rApjVXBqeu0+GXQu7ZnYtDPOdmZBMSWBqyey605gtBZSFOQmL/GcN", + "ehet0k8+vqSPLYiZViUM4XymNgshIUAFDVDNhjCrWAFLbLTmlrkZHKyhoVXMANf5mi2VPgAqARHDC7Le", + "zJ6+nRmQBWjcrRzEFf53qQF+hcxyvQI7ez9PLW5pQWdWbBJLO/fY12Dq0hqGbXGNK3EFkrleJ+xlbSxb", + "AOOSvf72Gfv888+/cgvZcGuh8EQ2uqp29nhN1H32dFZwC+HzkNZ4uVKayyJr2r/+9hnOf+EXOLUVNwbS", + "h+XMfWHnz8cWEDomSEhICyvchw71ux6JQ9H+vICl0jBxT6jxnW5KPP/vuis5t/m6UkLaxL4w/Mroc5KH", + "Rd338bAGgE77ymFKu0HfPsy+ev/h0fzRw4//8vYs+y//5xeff5y4/GfNuAcwkGyY11qDzHfZSgPH07Lm", + "coiP154ezFrVZcHW/Ao3n2+Q1fu+zPUl1nnFy9rRici1OitXyjDuyaiAJa9Ly8LErJalY1NuNE/tTBhW", + "aXUlCijmjvter0W+Zjk3NAS2Y9eiLB0N1gaKMVpLr27PYfoYo8TBdSN84IL+uMho13UAE7BFbpDlpTKQ", + "WXXgego3DpcFiy+U9q4yx11W7M0aGE7uPtBli7iTjqbLcscs7mvBuGGchatpzsSS7VTNrnFzSnGJ/f1q", + "HNY2zCENN6dzj7rDO4a+ATISyFsoVQKXiLxw7oYok0uxqjUYdr0Gu/Z3ngZTKWmAqcU/ILdu2//3xY8/", + "MKXZSzCGr+AVzy8ZyFwVUJyw8yWTykak4WkJceh6jq3Dw5W65P9hlKOJjVlVPL9M3+il2IjEql7yrdjU", + "GybrzQK029JwhVjFNNhayzGAaMQDpLjh2+Gkb3Qtc9z/dtqOLOeoTZiq5DtE2IZv//pw7sExjJclq0AW", + "Qq6Y3cpROc7NfRi8TKtaFhPEHOv2NLpYTQW5WAooWDPKHkj8NIfgEfI4eFrhKwInDDIKTjPLAXAkbBM0", + "4063+8IqvoKIZE7YT5654VerLkE2hM4WO/xUabgSqjZNpxEYcer9ErhUFrJKw1IkaOzCo8MxGGrjOfDG", + "y0C5kpYLCYVjzgi0skDMahSmaML9753hLb7gBr58MnbHt18n7v5S9Xd9745P2m1slNGRTFyd7qs/sGnJ", + "qtN/wvswntuIVUY/DzZSrN6422YpSryJ/uH2L6ChNsgEOogId5MRK8ltreHpO/nA/cUydmG5LLgu3C8b", + "+ullXVpxIVbup5J+eqFWIr8QqxFkNrAmH1zYbUP/uPHS7Nhuk++KF0pd1lW8oLzzcF3s2PnzsU2mMY8l", + "zLPmtRs/PN5sw2Pk2B5222zkCJCjuKu4a3gJOw0OWp4v8Z/tEumJL/Wv7p+qKl1vWy1TqHV07K9kVB94", + "tcJZVZUi5w6Jr/1n99UxAaCHBG9bnOKF+vRDBGKlVQXaChqUV1VWqpyXmbHc4kj/qmE5ezr7l9NW/3JK", + "3c1pNPkL1+sCOzmRlcSgjFfVEWO8cqKP2cMsHIPGT8gmiO2h0CQkbaIjJeFYcAlXXNqT9snS4QfNAX7r", + "Z2rxTdIO4bv3BBtFOKOGCzAkAVPDe4ZFqGeIVoZoRYF0VapF88NnZ1XVYhC/n1UV4QOlRxAomMFWGGvu", + "4/J5e5Liec6fn7Dv4rFRFFey3LnLgUQNdzcs/a3lb7FGt+TX0I54zzDcTqVP3NYENDgx/y4oDp8Va1U6", + "qecgrbjGf/NtYzJzv0/q/OcgsRi348SFDy2POXrj4C/R4+azHuUMCcere07YWb/vzcjGjbKHYMx5i8W7", + "Jh78RVjYmIOUEEEUUZPfHq413828kJihsDckk58MEIVUfCUkQjt3zyfJNvyS9kMh3h0hgGneRURLJEE2", + "KlQvc3rUnwz0LH8Cak1tbJBEnaRaCmPxXY2N2RpKFJy5DAQdk8qNKGPChu9ZRAPzteYV0bL/QmKXkPie", + "p0YE6y0v3ol3YhLmiN1HG41Q3ZgtH2SdSUiQa/Rg+LpU+eXfuFnfwQlfhLGGtI/TsDXwAjRbc7NOHJwe", + "bbejTaFv1xBpli2iqU6aJb5QK3MHSyzVMayrqp7xsnRTD1lWb7U48KSDXJbMNWawEagw9w9H0rDT+4t9", + "w/O1EwtYzsty3qqKVJWVcAWle7QLKUHPmV1z2x5+HDm8a/AcGXDMzgKLVuPVTKhi040uQgPbcLyBNu41", + "U5XdPg0HNXwDPSkIb0RVoxYhemicPw+rgyuQyJOaoRH8Zo2orYkHP3Fz+084s1S0ONIA2mC+a/DX8IsO", + "0K51e5/KdgqlC9JZW/eb0CxXmoagG95P7v4DXLediTo/qzRkfgjNr0AbXrrV9RZ1vyHfuzqdB05mwS2P", + "TqanwvQDjDgH9kPxDnRCS/Mj/oeXzH12UoyjpJZ6BAojKjKnFnQxO1TRTK4B6lsV25Aqk1U8vzwKymft", + "5Gk2M+nkfUPaU7+FfhHNDr3ZisLc1TbhYGN71T0hpLsK7Gggi+xlOtFcUxDwRlWM2EcPBOIUOBohRG3v", + "/Fr7Wm1TMH2ttoMrTW3hTnbCjTOZ2X+tts89ZEofxjyOPQXpboGSb8Dg7SZjxulmae1yZwulbyZN9C4Y", + "yVprI+Nu1EiYmveQhE3rKvNnM2GxoAa9gVoHj/1CQH/4FMY6WLiw/DfAgnGj3gUWugPdNRbUphIl3AHp", + "r5NC3IIb+Pwxu/jb2RePHv/98RdfOpKstFppvmGLnQXDPvNqOWbsroT7ydcRShfp0b98EmxU3XFT4xhV", + "6xw2vBoORbYvev1SM+baDbHWRTOuugFwEkcEd7UR2hmZdR1oz2FRry7AWvfSfaXV8s654WCGFHTY6FWl", + "nWBhunZCLy2dFq7JKWyt5qcVtgRZkJ+BW4cw7g24WdwJUY1tfNHOUjCP0QIOHopjt6mdZhdvld7p+i7U", + "G6C10skruNLKqlyVmZPzhEooKF75Fsy3CNtV9X8naNk1N8zNjdbLWhYjegi7ldPvLxr6zVa2uNl7g9F6", + 
"E6vz807Zly7y21dIBTqzW8mQOjvqkaVWG8ZZgR1R1vgOLMlfYgMXlm+qH5fLu9F2KhwooccRGzBuJkYt", + "nPRjIFeSnPkOqGz8qFPQ00dMsDLZcQA8Ri52MkdT2V0c23Ft1kZItNubncwj1ZaDsYRi1SHL26uwxtBB", + "U90zCXAcOl7gZ9TVP4fS8m+VftOKr99pVVd3zp77c05dDveL8daAwvUNamAhV2XXgXTlYD9JrfF3WdCz", + "RolAa0DokSJfiNXaRu/FV1r9BndicpYUoPiBlEWl6zNUGf2gCsdMbG3uQJRsB2s5nKPbmK/xhaot40yq", + "AnDza5MWMkdcDtHXCV20bCy3on5CGLYAR105r91q64qhA9Lgvmg7ZjynE5ohasyI+0XjN0OtaDpyZys1", + "8GLHFgCSqYX3cfDeF7hIjt5TNohpXsRN8IsOXJVWORgDReZV0QdBC+3o6rB78ISAI8DNLMwotuT61sBe", + "Xh2E8xJ2Gfr6GfbZ9z+b+78DvFZZXh5ALLZJobevTxtCPW36fQTXnzwmO9LUEdU68dYxiBIsjKHwKJyM", + "7l8fosEu3h4tV6DRpeQ3pfgwye0IqAH1N6b320JbVyMe7P6Z7iQ8t2GSSxUEq9RgJTc2O8SWXaOOLsGt", + "IOKEKU6MA48IXi+4seQGJWSBOk26TnAeEsLcFOMAjz5D3Mg/hxfIcOzc3YPS1KZ5jpi6qpS2UKTWgBbZ", + "0bl+gG0zl1pGYzdvHqtYbeDQyGNYisb3yPIvYPyD28b+6i26w8WhTd3d87skKjtAtIjYB8hFaBVhN/bi", + "HQFEmBbRRDjC9CincR2ez4xVVeW4hc1q2fQbQ9MFtT6zP7Vth8RFRg66twsFBg0ovr2H/JowS/7ba26Y", + "hyOY2FGdQ/5aQ5jdYcyMkDlk+ygfn3iuVXwEDh7SulppXkBWQMl3CecA+szo874BcMfb566ykJEjbnrT", + "W0oOfo97hlY4nkkJjwy/sNwdQfcUaAnE9z4wcgE4doo5eTq61wyFcyW3KIyHy6atToyIt+GVsm7HPT0g", + "yJ6jTwF4BA/N0DdHBXbO2rdnf4r/BOMnaOSI4yfZgRlbQjv+UQsY0QX7GKfovPTYe48DJ9nmKBs7wEfG", + "juyIYvoV11bkosK3zvewu/OnX3+CpOGcFWC5KKFg0Qd6BlZxf0YupP0xb/YUnKR7G4I/UL4llhPcdLrA", + "X8IO39yvKDYhUnXcxVs2Maq7n7hkCGjweHYieNwEtjy35c4JanYNO3YNGpipF+TCMLSnWFVl8QBJ+8ye", + "Gb11Nmkb3WsuvsChouWlfM3oTbAfvje9h0EHHf4tUClVTtCQDZCRhGCS7wirlNt14cOfQgBMoKQOkJ5p", + "o2m+uf7vmQ6acQXsP1XNci7xyVVbaGQapVFQQAHSzeBEsGZO75zYYghK2AC9JPHLgwf9hT944PdcGLaE", + "6xAz6Br20fHgAepxXiljO4frDvSh7ridJ64PNFy5i8+/Qvo85bDHkx95yk6+6g3eWLvcmTLGE65b/q0Z", + "QO9kbqesPaaRad5eOO4kW07XP2iwbtz3C7GpS27vwmoFV7zM1BVoLQo4yMn9xELJb654+WPTDeMhIXc0", + "mkOWYxTfxLHgjetDgX9uHCGFO8Dk9D8VIDinXhfU6cATs/VUFZsNFIJbKHes0pADxbs5ydE0Sz1h5Amf", + "r7lc4YNBq3rlnVtpHGT4tSHVjK7lYIikUGW3MkMld+oC8G5qIeTRiVPA3ZOuryGnB8w1b+bzUa5TbuZo", + "D/oWg6SRbD4bffE6pF61L15CTjduc8Jl0JH3Ivy0E080pSDqnOwzxFe8Le4wuc39bVT27dApKIcTRx6/", + "7ccxp1/33C53dyD00EBMQ6XB4BUVq6kMfVXLOEY7uArujIXNUJNPXf8+cvxej74XlSyFhGyjJOySaUmE", + "hJf4MXmc8Joc6YwCy1jf/hukA38PrO48U6jxtvjF3e6f0L7Fynyr9F2ZRGnAyeL9BAvkQXO7n/KmdlJe", + "lgnToo/g7DMAM2+cdYVm3BiVC5TZzgsz917BZI304Z5d9L9q4lLu4Oz1x+3Z0OLkAKgjhrJinOWlQA2y", + "ksbqOrfvJEcdVbTUhBNXeIyPay2fhSZpNWlCi+mHeic5OvA1mqukw8YSEmqabwGC8tLUqxUY23vrLAHe", + "Sd9KSFZLYXGujTsuGZ2XCjR6Up1Qyw3fsaWjCavYr6AVW9S2K/1jgLKxoiy9Qc9Nw9TyneSWlcCNZS+F", + "fLPF4YLRPxxZCfZa6csGC+nbfQUSjDBZ2tnsO/qKfv1++Wvv44/u7vQ5OJ22GRNmbpmdJCn/97N/f/r2", + "LPsvnv36MPvqf5y+//Dk4/0Hgx8ff/zrX/9f96fPP/71/r//a2qnAuyp8FkP+flz/zI+f47Pn8hVvw/7", + "J9P/b4TMkkQWe3P0aIt9hqkiPAHd7yrH7BreSbuVjpCueCkKx1tuQg79G2ZwFul09KimsxE9ZVhY65GP", + "iltwGZZgMj3WeGMpauifmQ5UR6Okjz3H87KsJW1lkL4pDjP4l6nlvElGQHnKnjKMVF/z4OTp/3z8xZez", + "eRth3nyfzWf+6/sEJYtim8ojUMA29VaMgyTuGVbxnQGb5h4Ie9KVjnw74mE3sFmANmtRfXpOYaxYpDlc", + "CFnyOqetPJfk4O/OD5o4d95yopafHm6rAQqo7DqVv6gjqGGrdjcBem4nlVZXIOdMnMBJX+dTuPeid+or", + "gS+DY6pWasprqDkHRGiBKiKsxwuZpFhJ0U8vvMFf/ubOn0N+4BRc/TlTHr33vvvmDTv1DNPco5QWNHSU", + "hCDxlPbBkx2HJMfN4piyd/KdfA5L1D4o+fSdLLjlpwtuRG5OawP6a15ymcPJSrGnIR7zObf8nRxIWqOJ", + "FaOgaVbVi1Lk7DJ+kLTkScmyhiO8e/eWlyv17t37gW/G8Pngp0ryF5ogc4Kwqm3mU/1kGq65Ttm+TJPq", + "BUemXF77ZiUhW9WkIA2phPz4aZ7Hq8r0Uz4Ml19VpVt+RIbGJzRwW8aMVU08mhNQfEiv298flL8YNL8O", + "epXagGG/bHj1Vkj7nmXv6ocPP8fIvjYHwi/+ync0uatgsnZlNCVFX6mCC6dnJfqqZxVfpUxs7969tcAr", + "3H2Ulzeo4yhLht06UYchwACHahfQhDiPbgDBcXRwMC7ugnqFtI7pJeAn3MJuAPat9iuKn7/xdh2Iwee1", + "XWfubCdXZRyJh51psr2tnJAVvDGMWOFr1SfGWwDL15Bf+oxlsKnsbt7pHhx+vKAZWIcwlMuOIgwxmxIa", + 
"KBbA6qrgXhTnctdPa2MoogIHfQ2XsHuj2mRMx+Sx6aZVMWMHFSk1ki4dscbH1o/R33zvVRYCTX12Egze", + "DGTxtKGL0Gf8IJPIeweHOEUUnbQfY4jgOoEIIv4RFNxgoW68W5F+anlC5iCtuIIMSrESi1Qa3v8Y2sMC", + "rI4qfeZB74XcDGiYWDL3lF/Qxeqf95rLFbjr2V2pyvCSsqomnTbwPbQGru0CuN2r55dxQooAHT4przHy", + "GjV8c7cE2Lr9FhY1dhKu3asCFUXUxnsvn4z7nxHgUNwQntC9fSmcjL51PeoSGQfDrdxgt3nWete8mM4Q", + "Lvq+AUxZqq7dvjgolM+2SUldovulNnwFI2+X2Ho3MR9Gx+KHgxySSJIyiFr2RY2BJJAEmRpnbs3JMwzu", + "izvE+MzsOWSGmchA7G1GmETbI2xRogDbeK7S3nPdsaJSVuAx0NKsBbRsRcEARhcj8XFccxOOI+ZLDVx2", + "knT2G6Z92Zea7jzyJYySojaJ58Jt2Oegg3e/T1AXstKFVHTxo39CWjn39sLwhdR2KImiaQElrGjh1DgQ", + "Spswqd0gB8ePyyXylizllhgpqCMBwM8B7uXygDGyjbDJI6TIOAIbHR9wYPaDis+mXB0DpPQJn3gYG6+I", + "6G9IB/aRo74TRlXlLlcxYm/MAwfwqShayaLnUY3DMCHnzLG5K146Nuff4u0ggwxp+KDo5UPzrjf3xx4a", + "e0xTdOUftSYSEm6ymliaDUCnRe09EC/UNqMI5eRbZLFdOHpPxi5gvHTqYFIuunuGLdQW3bnwaiFf+QOw", + "jMMRwIh0L1thkF6x35icRcDsm3a/nJuiQoMk4xWtDbmMCXpTph6RLcfI5bMovdyNAOipodpaDV4tcVB9", + "0BVPhpd5e6vN27SpISwsdfzHjlByl0bwN9SPdRPC/a1N/DeeXCycqE+SCW+oWbpNhkLqXFHWwWMSFPbJ", + "oQPEHqy+6suBSbR2fb26eI2wlmIljvkOjZJDtBkoAR/BWUc0zS5TngLuLQ94j1+EbpGyDnePy939yIFQ", + "w0oYC63RKPgF/R7qeI7pk5Vajq/OVnrp1vdaqebyJ7M5duws85OvAD3wl0Ibm6HFLbkE1+hbg0qkb13T", + "tATadVGkYgOiSHNcnPYSdlkhyjpNr37e75+7aX9oLhpTL/AWE5IctBZYHCPpuLxnavJt37vgF7TgF/zO", + "1jvtNLimbmLtyKU7x5/kXPQY2D52kCDAFHEMd20UpXsYZBRwPuSOkTQa+bSc7LM2DA5TEcY+6KUWwt7H", + "bn4aKbmWKA1gOkJQrVZQhPRmwR4moyRypZKrqIpTVe3LmXfCKHUdZp7bk7TOu+HDmBN+JO5nQhawTUMf", + "vwoQ8jayDhPu4SQrkJSuJK0WSqImdvHHFpGu7hPbQvsBAEkn6Dc9Y3brnUy71GwnbkAJvPBvEgNhffuP", + "5XBDPOrmY+7Tncyn+48QDog0JWxU2GSYhmCEAfOqEsW2Z3iiUUeVYPwo7fKItIWsxQ92AANdJ+gkwXVS", + "aXtXa69gP8U376l7lZHvtXcsdvTNcx+AX9QaLRgdz+Zh3vbmrTZx7d//fGGV5ivwVqiMQLrVELicY9AQ", + "ZUU3zApyJynEcgmx9cXcxHLQAW6gYy8mkG6CyNImmlpI++WTFBkdoJ4WxsMoS1NMghbGbPJvhlauINNH", + "qqTmSoi25gamqmS4/vewy37mZe0eGUKb1j3Xm526l+8Ru361+R52OPJBr1cH2IFdQc3Ta0AaTGn6m08m", + "SmB9z3RS/OPzsrOFR+zUWXqX7mhrfFGGceJvb5lO0YLuUm5zMFonCQfLlN24SPsmuNMDXcT3SfnQJoji", + "sAwSyfvxVMKEEpbDq6jJRXGIdt8ALwPx4nJmH+ez23kCpG4zP+IBXL9qLtAkntHTlCzDHceeI1HOq0qr", + "K15m3l9i7PLX6spf/tg8uFd84pdMmrLffHP24pUH/+N8lpfAddZoAkZXhe2qP82qqIzD/quEsn17RSdp", + "iqLNbzIyxz4W15jZu6dsGhRFaf1noqPofS6WaYf3g7zPu/rQEve4/EDVePy0Nk9y+Ok6+fArLspgbAzQ", + "jjin4+KmVdZJcoV4gFs7C0U+X9mdspvB6U6fjpa6DvAknOtHTE2ZfnFIn7gSWZF3/uF3Lj19q3SH+fvI", + "xKTz0G8nVjkhm/A44qsd6lf2hakTRoLXL6tf3Gl88CA+ag8ezNkvpf8QAYi/L/zv+L548CBpPUyqsRyT", + "QC2V5Bu430RZjG7Ep32AS7iedkGfXW0ayVKNk2FDoeQFFNB97bF3rYXHZ+F/KaAE99PJlEd6vOmE7hiY", + "KSfoYiwSsXEy3VDJTMOU7PtUYxCsIy1k9r4kAxljh0dI1hs0YGamFHnatUMujGOvkpwpXWOGjUe0tW7E", + "Woz45spaRGO5ZlNypvaAjOZIItMk07a2uFsof7xrKf5ZAxOFe9UsBWi813pXXXgc4KgDgTStF/MDk52q", + "Hf42epA99qagC9qnBNlrv3ve2JTCQlNFf470AI9nHDDuPd7bnj48NVM027rrgjntHTOldHpgdN5YNzJH", + "shS6MNlSq18hbQhB+1EiEUYwfApU8/4KMuW512cpjVG5rejezn5ou6e/jcc2/tZv4bDopurYTS7T9Kk+", + "biNv8ug16XTNHsljj7DYw6AbGjDCWvB4Rc6wWAYleB9xSeeJskB0IszSpzKO5Tyl8dtT6WEexL+W/HrB", + "UzVi3FvIwRRtb8dPyioWOocNME2OA5qdRR7cTVtBmeQq0K0NYpiV9obvGpp28oumfcAgRcVPlzm5KZRG", + "JYap5TWXVEXc9SN+5XsbIBO863WtNOaBNGmXrgJysUmqY9+9e1vkQ/edQqwEFciuDUQVmP1AjJJNIhX5", + "KtZN5g6PmvMleziPysD73SjElTBiUQK2eEQtFtzgddmYw5subnkg7dpg88cTmq9rWWgo7NoQYo1izdsT", + "hbzGMXEB9hpAsofY7tFX7DN0yTTiCu47LHohaPb00VfoUEN/PEzdsr7A+T6WXSDPDs7aaTpGn1QawzFJ", + "P2ra+3qpAX6F8dthz2mirlPOErb0F8rhs7Thkq8gHZ+xOQAT9cXdRHN+Dy+SrAFgrFY7Jmx6frDc8aeR", + "mG/H/ggMlqvNRtiNd9wzauPoqS2vTJOG4ajWv68XFeAKH9H/tQrufz1d1yd+xvDNSMwWein/gDbaGK1z", + "xin5Zylaz/RQr5Odh9zCWECrqZtFuHFzuaWjLImO6ktWaSEt6j9qu8z+4p7FmueO/Z2MgZstvnySKETV", + 
"rdUijwP8k+NdgwF9lUa9HiH7ILP4vuwzqWS2cRyluN/mWIhO5aijbtolc8wvdP/QUyVfN0o2Sm51h9x4", + "xKlvRXhyz4C3JMVmPUfR49Er++SUWes0efDa7dBPr194KWOjdKpgQHvcvcShwWoBVxgxl94kN+Yt90KX", + "k3bhNtD/vv5PQeSMxLJwlpMPgciiuS9Y3knxP79sM5+jYZUiEXs6QKUT2k6vt/vE3obHad369ltyGMNv", + "I5ibjDYcZYiVEe97cq9v+vwe/kJ9kGjPOwrHR78w7d7gKMc/eIBAP3gw92LwL4+7n4m9P3iQTkCcVLm5", + "X1ss3OZFjH1Te/i1SijAQtXCxqHI50dIKCDHLin3wTHBhR9qzroV4j69FHE38V1pb9P0KXj37i1+CXjA", + "P/qI+J2ZJW5gG6Uwfti7FTKTJFM03yM/d86+VtuphNO7gwLx/AFQNIKSieo5XMmgAmjSXH/QXySiUTfq", + "AkrlHplxUaBYn//nwbNb/HwPtmtRFj+3ud16F4nmMl8nvYQXruPfSUbvXMHEKpN1RtZcSiiTw9Hb9u/h", + "DZx4pf9DTZ1nI+TEtv0KtLTc3uJawLtgBqDChA69wpZughir3bRZTVqGcqUKhvO0RS1a5jgs5ZwqoZmI", + "b8ZhN7X1fqsYC+4TDi1FiW6Yabsxtsw0tyMJtLDeeagv5MbB8uOG1Aw0OmjGxQYvZsM3VQl4Mq9A8xV2", + "VRJ63TGFGo4cVaxgpnKfsCUmrFDM1loytVxGywBphYZyN2cVN4YGeeiWBVuce/b00cOHSbUXYmfCSgmL", + "YZk/tkt5dIpN6IsvskSlAI4C9jCsH1uKOmZjh4Tja0r+swZjUzwVP1DkKlpJ3a1N9SSb2qcn7DvMfOSI", + "uJPqHtWVIYlwN6FmXZWKF3NMbvzmm7MXjGalPlRCnupZrlBb1yX/pHlleoLRkNlpJHPO9HH2p/JwqzY2", + "a8pPpnITuhZtgUzR87lBPV6MnRP2nFSoTQF/moRhimy9gSKqdkmPeCQO9x9reb5G3WRHAhrnldMLsQZ2", + "1lpuoujDpvoRMmwHt6/FSqVY50zZNehrYQAj8uEKuukQm9ygXjce0iN2l6drKYlSTo4QRptaR8eiPQBH", + "kmxwKkhC1kP8kZopqsd8bF3aC+yVjsXoFbntWf1Dcr2QYpu99MaFnEslRY6lEFKSNKZum2amnFA1Im1f", + "NDN/QhOHK1lat4kF9lgcLbYbGKFH3NDkH311m0rUQX9a2PqSayuwxnM2KOah0rU3iAlpwFezckQU80ml", + "E05NyUCIxoHiSDLCrEwjGs5v3bcfvP4bk2JcComaLo82/z4jk1VpBFqmJROWrRQYv55uNI956/qcYJbG", + "ArbvT16olcgvxArHIDc6t2zyGR0OdRY8SL3Hpmv7zLX1ufObnzvuYDTpWVX5ScfroCcFSbuVowhO+S0F", + "R5IIuc348Wh7yG2v6zfep47Q4Aq91qDCe3hAGE0t7e4o37i3JVEUtmAUUZlMoCtkAowXQgYTavqCyJNX", + "Am4MnteRfibX3NLbYRJPewO8HAmAwAhlssHfdqh+5QCHElxjmGN8G9sy4COMo2nQSvxc7lg4FI66I2Hi", + "GS8b1+lEUW+UqrwQVWBwUa/Md4pxOMadhZDJDroOhu813bEax7E30ViOwkVdrMBmvChSqa2+xq8Mv4Yg", + "MdhCXjdFqJrowG6O8iG1+YlyJU292TNXaHDL6aK6+QlqiGv3hx3GTDuLHf6bqsA0vjPeafroqNzgIV0c", + "l5h/GGWcknodTWdGrLLpmMA75fboaKe+GaG3/e+U0kO47h8iGrfH5eI9SvG3b9zFESfuHfin09XS5NVF", + "X3CF30PCoyYjZJcr4VU2qDOGXg+4eYkt6wEfGiYBv+LlSCR8bCuh+5XsB2Px8Plo+gZufXouy9leFjSa", + "8oh8hXvWl6EJccw/mNyD785q4de6F6HjtrvvO5Y68hFrmcWohe5mRrR2g4+1on1/NZYiIdTpwO9xPRDv", + "xTP3aeDhSqg6eF8FH+jwJKRffQqeTt2PkfUnIwt+b6vFqI3lja9fS8v0b/LvfyYrLANp9e4PYHEZbHq/", + "qExC2iX1VNuENaUPJ5VC7NyKU2rYpMqleNkw6MqItXRoaVB+ZkBWz6eIAwN8fJzPzoujLsxUyZ0ZjZI6", + "di/Eam0xY//fgBegXx2oSNBWIcAjVikj2gqkpRvMp4Bd43AnU4MNHAGLuKLCcKzghHoFucWys61znQY4", + "pr6CmywYff67MsH4c7qJyfAFCfZVIRjWmj1wxw8SJ0XJv6hO58n0nPtnjQs1RYBdc9Oma+nFTE+O3Fwu", + "IcesyHsTVf3HGmSUBGke9DIIyzLKWyWaOCbM63281rEFaF8eqb3wRPV1bg3OWBz7JezuGdahhmTh0CaI", + "7yaJgxEDZAILOaTHFMnea0yYhjIQC8El2KdibotjjOZ8jtKu3XCuQJLu4mhTse2ZMl30fNJcrutRaR8x", + "JGcsl9WwZvL4++M5lqg23kGON4mH41c6Ox8Wzrn2iYsxrVhjOwkpjMGE30IOQZqlFJe+fgBihSxV11wX", + "ocWdJIWiu0mkgV42M4s2gGPo5JAoxYCxUHmpnBiRjQWUdWMmGofDe4Y8Q9sEPgjXErSGojGJlMpAZlUI", + "+NgHxz5UkPvrjZBgRssfEXCjqa9ft7m9sQwcx1TX3Hu9xgtkGjbcQaejDNzjc+5D9jP6HoLwQxmwgxqm", + "hl4P16MNoTvCDJAYU/2S+dvycHD/TZRNQkrQWbA89dNxy25GNsy7WdQ5XdDxwWgUcpNz5+xhJUk9TT5c", + "Ze+NEAXJX8LulB5BoZBv2MEYaJKcCPQo4Whvk+9U/WZScK/uBLzfN49cpVSZjRg7zoc5xPsUfynyS8Ac", + "gI2L+0iNdvYZ6tgba/b1ehdyZlcVSCjunzB2JimoKBi2u+UFe5PLe3bf/Fuctagprb9Xqp28k+noDEy4", + "r2/JzcIw+3mYAcfqbjkVDXIgQ/VWjrncXGNy/m4Vz5Opr/KhqblfRb4lKoIiJZNckMXqGR70lOIIUyBE", + "uTrQkMmZt3QxU6qUL+9N0jS4odKYiidDgCzIKdkCGij84EkEJOuiJ04hpb7zSe/Ukmlojcg3zf43LOGe", + "etH3Z25m6fK7pdLQKcbuelOmzybwBdNo4n8WwmqudzfJ0TcoIT/Qnoxi+aA7VuOJ1S6k9cYa4rAs1XWG", + "zCpr6lyknraunelexqHoWtvPneoFRH5d3HhBbcfWvGC50hryuEc63pOg2igNWanQzStlgV5aJ3dvMMhL", + 
"slKtmKpyVQDVi0lT0NhctZQcxSaIvGqSKCDawWhh6hPR8cQp3Z1KdqQMRa3VEbXzc6DI9TarEy06I1vm", + "iMcyGJ/FyWOIGg/h3VP7P82bl2KLdAM6deSXzOoa5sy36NfI9gefa2AbYQyB0tDStShLDBwX28jy2jgu", + "pFE7Ivaeo1vllUDfm24SAZKGK3fnNZkVYh5wEac9YnatVb1aRwmmGzjDk1fX/kEcj/KTqdE9CiPI3BRP", + "2EYZ61+aNFK75Nbl7LNcSatVWXaVUiSir7ym/SXfnuW5faHU5YLnl/fxXSuVbVZazEN8dd85sJ1J91KL", + "dS/gjMqZH07VS+3QVc4T7WQG2WNxRxd2j8B8f5iDHta5nw0X1l9Xl5mmnzFnknGrNiJPn6k/l7fdqI9c", + "ikUlc5ZRbUXKMoHN8LDHl1XjXIEscohmkDxZHO6MeUbgjczIbtx/UQLvj8uW4BnNyEU5ZC5eisryUVmv", + "BwBCSqHPttZUkDGWxBquolaUKgFN5H1AJ94q6Il0O9jcCHcOlIVbATXwfmwA/IyUD3PKLUeelAu1Dd/v", + "t8nnbgT8x/1U3mEeYy5eFy1paXLyColqRjhCOsX1Xn+oNxj2vpjqFdUUz514w0cAjPtJdWCY5C11LBhL", + "LkooslTtxfNGRzWPXto+NKtfEl0Yz8lzXofSh27sWoNPnEIivu7avyruSEk1zYeaZFnAFiiu41fQimoa", + "ziP7C5RU8rCnDFBVVsIVdNzHfDaXGkVNcQWhr2k6swKgQmtkX0eW8ouK7/Ke4sSvPYs8a6ZgN6lJIcTS", + "TrEDapKkUmcrMzomZupRchBdiaLmHfyZY0WOrhrQHeUEqgZvhCy8I6dO8xON8DoMcBb6p0SZgIn30/jQ", + "0Swojbp9DOign2Rtxk69TLtJxqmKGgMLzlY0hlgi8ZZvmIpfy3GF5JDk2+fWxH0SSkaI/WYLOUo1/r0D", + "hX/xjBgpfNYTpHYJUNCrwHVJaNvXIJlUUYnJa26ap0qbQzH8QBNjIyH9a/oGRuXWm/H2O8twMGZ6ydRG", + "HxK6odObq+d/l5O49yCOjpeiEQM+/G+P/itQt392YAMs5S3dfjrZH4s0+lvMc/E5W9RhoLJU11QzMn6H", + "PodgByXqCyYgL5aL5loOXptzn96zr+oQkb/6hu+Y0viPe3X+s+alWO6QzxD4oRsza+5IyBteySPAe4G6", + "ifeLV/MAWNC2qDAVrVtMHTMabudGiYB2F3ko7qPYhl9CvA3o7ED8M7eOcZp6gZoLd2X3tnOIBb/4kKJl", + "w4v4pY+JIrtl1EPqYNf7f7axcPFUIb9bVfI8VAj1JYq6fAarAAfismvY7A+WHPK1QAJNZeGWaHWIri9u", + "oDI9knWlIhDGyq90wB5UXB1UnrnVMiZqfns1NvaEmU5ayl3vwlSvmwHQcZ3GQ+DHZSs/Df6TOVzHljEF", + "/D8K3kcK1cbwUk3aT4DlTgaOBKykrV6obaZhaQ45mJC62j3ndZu7I6hYhcw1cEMeN+c/+odnm6JUSPcQ", + "Jp/QxqbZjFLAUsiWWQpZ1TbxjsFMpXIXISxW+iNaR0xoY1KCEyavePnjFWgtirGNc6eDSjrGJSKCocP3", + "Tagwmjt1OIAw7RsO4zNbNXrczF3gVISK3DWN5bLguoibC8ly0O7eZ9d8Z25uUWqMA4dsSjySZrpZAyLr", + "EpI2AVLuvFH4lvaeBkB+h4afCQYb9AtOGGtItWPViH1mCMOfwmCz4dusVCuMIhw5ED43LVr46AmoJKrB", + "ST6btu4wjxG/wv5pMC2/Z0RW4axTpth/7n/ErcRn5E9S2L0nn3SU/bBO8rulgxmQKlet8z8Ry/A8piJx", + "ffKVOBo3CJshVCXQHkSbCCP2oa5efGQX0Q3Ch3HHSvDp5c66nhapeF/SDGSoMTB73PvBtK7sPPfuWUNV", + "2kDVQEiZ+2jpIzVtpJ8P99IIeFSb3p/17rSNy4wb55gacfvjo7NKVVk+xeeTKncU3kzgIe3COEIfkRFg", + "ZN2Ne4xpatl08h51itocWyZvtKjOIWtXle979I+piUY4etcEoZbIy6hyO2q3MJKnUabM+zFmXTVYwyQY", + "ZxryWqOa+JrvDpcdG8kYffG3sy8ePf774y++ZK4BK8QKTJt1vFe2q/ULFLKv9/m0noCD5dn0JoTsA4S4", + "YH8MQVXNpvizRtzWtClFB0XLjtEvJy6AxHFMlIu60V7hOK1r/x9ru1KLvPMdS6Hgt98zrcoyXfWhkasS", + "BpTUbkUmFPcCqUAbYaxjhF0LqLCtR7RZo3oQc/9eUTYZJXMI+mNPBcKOuFylFjLmUIv8DGO7vdWIwbYq", + "Pa8iS8++dfl3GmnoUGhEr5gFsEpVXrQXS5aCCCOIdBRZ6xWfqBGPfGQbZkvesilC9J7nadKLC2bv5/bd", + "Yq42zendJibEi3Aob0CaY/aJ8bwFN+EkrWr/D8M/EokY7oxrNMv9LXhF8n1ws6L8k0AbBuUnyAMBGIm2", + "7cRJRoFiUSJiTVYCtCcEA3Jf/HjZGpYPhoUgJKHDAfDi8Nm2XRPJ4MH5nTP6vmyQEi3l/RgldJZ/KCI3", + "sN7mIom2yCtNrAVDbEkNxcIo3No8a6KYR14lg2BnrZRl7mValokgadLj4JmKCcc9CfQVLz891/hWaGPP", + "EB9QvB4PjYojZWMkEyrNzfL0veCT5o6iYu9uavkKA7P/A9weJe85P5Q3wg9uM1TuYMX6VbgVKNabXeOY", + "5GT16Eu28MU2Kg25MH3j/nUQTprAUNBi6R1aYWsPRKIeWufPyt6CjJfBE4f9EJm3Gpu9h7A9or8zUxk5", + "uUkqT1HfgCwS+EvxqLg474Hr4paFGW6W9iVK4HZk2pdh2eGpy6PUJu7SqQ0M1zn5tu7gNnFRt2ubmrNo", + "cn2Hd+/e2sWUVEPpWgyuO+Y6upOiDEeVZPgNshwRjvwYft4Uxfw8lveWcruO5Obu7UctyoMOK51M6x/n", + "sxVIMMJgLvG/+9oxn/YuDRBQ5oXhUSVYb5MuhhCTWGtn8miqKIf6hPTpvlsi5zVGNea1FnaHdYODAk38", + "PZmP6bsmt4fPDdPY0vzdZ9UlNLXb20wgtQm363eKl3gfkYlPultIlSfsG8rw7Q/KX+8t/g0+/8uT4uHn", + "j/5t8ZeHXzzM4ckXXz18yL96wh999fkjePyXL548hEfLL79aPC4eP3m8ePL4yZdffJV//uTR4smXX/3b", + "PceHHMgEaEjt/3T2f7KzcqWys1fn2RsHbIsTXonvwe0NvpWXCutaOqTmeBJhw0U5exp++l/hhJ3katMO", + 
"H36d+fpMs7W1lXl6enp9fX0SdzldYeh/ZlWdr0/DPFhtsCOvvDpvfPTJDwd3tNUe46Z6UjjDb6+/uXjD", + "zl6dn7QEM3s6e3jy8OSRL20teSVmT2ef4094eta476eYX/PU+NT5p02s1sf54FtVUWJ998nTqP9rDbzE", + "BDvujw1YLfLwSQMvdv7/5pqvVqBPMHqDfrp6fBqkkdMPPnPCx33fTmPPkNMPnQQTxYGejedD0ib5QqlL", + "NIkH+eie6flxnMSVuc8Lh35qic4X5rxlhKG8MtqcZ0/fpnQv3oeyqhelyBld30i/bnMi8mrShrTsAxVt", + "s7a0f8sMHYN7mH31/sMXf/mYErL6gLz0BsHWAuJdcjHKCwMUTgJc/6xB71rA0Fo/i8EYmgvT2dO2llW+", + "8IGf7YT95D0d8CvxlMYj1AeFNYnnQqcRwNwQKbgaLLzHGn/o+ofk8Pjhw3DyvVwdkdWpp9YY3V3bw8Av", + "6Jh0Bp3C1wmhyC0mQ3wMKfYnQymXHDaF5ORVj+62G35JVhd0qGPax816jHofXURyEz/ityUw99+wpNGE", + "oGyaaSiUfBxyy5ETGFxpY8VYKUjt592bUrWrP85nT46khr0Kqk7+0AT4L3npQIYipI0hCB59OgjOJXl8", + "umuHrseP89kXnxIH59IxL14ybBmV301QvLyU6lqGlk6WqTcbrncoqdgpe+yzHKEtMbQjuqeLlbsz/HZG", + "bBkLkVSghXsw8nL2/uOh6+X0Qyi7vv8y6pTc9v7KUYeJl9y+ZqcLLLU2tSmYqPH4UlAFZk4/4Akd/f3U", + "a+LTH1GZRlLaaUjyNdKS0rmkP3ZQ+MFu3UL2D+faROPl3Obrujr9gP9BgStaEWWHPrVbeYrOR6cfOojw", + "nweI6P7edo9bXG1UAQE4tVxSrfp9n08/0L/RRB3CbIWaroDyTdTo2Rryy1n67uulzo96MZJH+aKEgpjT", + "kwkdpLJxpxsd6Ncofhj24/dMLBn0pxAmzHDEuaXEoqdY0XXX4jL8vJN58sfhNneSKo78fBqeQynRttvy", + "Q+fP7pEz69oW6jqaBRWJpAUfQuY+1qb/9+k1FzZbKu1z+WEJ+GFnC7w89YU7er+2ubIHXzABePRjHKWW", + "/PWUe1TPKmUSZPuaX0fWvzNsTBICGPu1whfF2O20zRZCIgXFN1SrP6CPQ9l4cC85uQYd5YIJZpiHB5OB", + "aMWLnBssPe5r4Ayk9Y/JY/eppY2vecFCDpWMtbLHmX+ldpb235IITv/5p5v+AvSVyIG9gU2lNNei3LGf", + "ZBM+c2NG+i0Sp+b5JUroDcGSb6Xm192IHJ3OCdEt8RRShACzW7bmsih9FL2qsXadoyw0marIacddQKHE", + "WaU0AkC5I6EgNwZzwi4aJw90majDI6eAKyhVhTYNzIhMk3B0ACEjYHwRdPn/fLbN3CFegcw8G8kWqtj5", + "mkAzza/tlgLiB7yKhMMRRjYQ3VJfvXQy0ig4e4fPrXIxVtahFqFR0719716xWGveKxha3dPT01OM/lkr", + "Y09n7hHe1UvFH983CAslUmeVFldYygGRprRwb8sy88qbthra7PHJw9nH/x8AAP//7vEvy70JAQA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go index 7e8da1ba91..aee8f09bc9 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/private/routes.go @@ -21,6 +21,9 @@ import ( // ServerInterface represents all server handlers. type ServerInterface interface { + // Gets the merged config file. + // (GET /debug/settings/config) + GetConfig(ctx echo.Context) error // (GET /debug/settings/pprof) GetDebugSettingsProf(ctx echo.Context) error @@ -43,6 +46,17 @@ type ServerInterfaceWrapper struct { Handler ServerInterface } +// GetConfig converts echo context to params. +func (w *ServerInterfaceWrapper) GetConfig(ctx echo.Context) error { + var err error + + ctx.Set(Api_keyScopes, []string{""}) + + // Invoke the callback with all the unmarshalled arguments + err = w.Handler.GetConfig(ctx) + return err +} + // GetDebugSettingsProf converts echo context to params. func (w *ServerInterfaceWrapper) GetDebugSettingsProf(ctx echo.Context) error { var err error @@ -158,6 +172,7 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL Handler: si, } + router.GET(baseURL+"/debug/settings/config", wrapper.GetConfig, m...) router.GET(baseURL+"/debug/settings/pprof", wrapper.GetDebugSettingsProf, m...) router.PUT(baseURL+"/debug/settings/pprof", wrapper.PutDebugSettingsProf, m...) router.DELETE(baseURL+"/v2/catchup/:catchpoint", wrapper.AbortCatchup, m...) 
@@ -169,231 +184,232 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuLk7g8Tt7t2r4EIlsS3lAAHwDOSPH5", - "u1+hGyBBEpSomYmT1O5f9oj40Wg0Go3++X6Wq02lJEhrZk/fzyqu+QYsaPyL57mqpc1E4f4qwORaVFYo", - "OXsavjFjtZCr2Xwm3K8Vt+vZfCb5Bto2rv98puGftdBQzJ5aXcN8ZvI1bLgb2O4q17oZaZutVOaHOKMh", - "zp/PPuz5wItCgzFDKH+Q5Y4JmZd1AcxqLg3P3SfDroVdM7sWhvnOTEimJDC1ZHbdacyWAsrCnIRF/rMG", - "vYtW6ScfX9KHFsRMqxKGcD5Tm4WQEKCCBqhmQ5hVrIAlNlpzy9wMDtbQ0CpmgOt8zZZKHwCVgIjhBVlv", - "Zk/fzAzIAjTuVg7iCv+71AC/Qma5XoGdvZunFre0oDMrNomlnXvsazB1aQ3DtrjGlbgCyVyvE/ZdbSxb", - "AOOSvfr6Gfv000+/cAvZcGuh8EQ2uqp29nhN1H32dFZwC+HzkNZ4uVKayyJr2r/6+hnOf+EXOLUVNwbS", - "h+XMfWHnz8cWEDomSEhICyvchw71ux6JQ9H+vICl0jBxT6jxnW5KPP/vuis5t/m6UkLaxL4w/Mroc5KH", - "Rd338bAGgE77ymFKu0HfPMy+ePf+0fzRww//8uYs+0//52effpi4/GfNuAcwkGyY11qDzHfZSgPH07Lm", - "coiPV54ezFrVZcHW/Ao3n2+Q1fu+zPUl1nnFy9rRici1OitXyjDuyaiAJa9Ly8LErJalY1NuNE/tTBhW", - "aXUlCijmjvter0W+Zjk3NAS2Y9eiLB0N1gaKMVpLr27PYfoQo8TBdSN84IL+uMho13UAE7BFbpDlpTKQ", - "WXXgego3DpcFiy+U9q4yx11W7PUaGE7uPtBli7iTjqbLcscs7mvBuGGchatpzsSS7VTNrnFzSnGJ/f1q", - "HNY2zCENN6dzj7rDO4a+ATISyFsoVQKXiLxw7oYok0uxqjUYdr0Gu/Z3ngZTKWmAqcU/ILdu2//3xQ/f", - "M6XZd2AMX8FLnl8ykLkqoDhh50smlY1Iw9MS4tD1HFuHhyt1yf/DKEcTG7OqeH6ZvtFLsRGJVX3Ht2JT", - "b5isNwvQbkvDFWIV02BrLccAohEPkOKGb4eTvta1zHH/22k7spyjNmGqku8QYRu+/evDuQfHMF6WrAJZ", - "CLliditH5Tg392HwMq1qWUwQc6zb0+hiNRXkYimgYM0oeyDx0xyCR8jj4GmFrwicMMgoOM0sB8CRsE3Q", - "jDvd7gur+AoikjlhP3rmhl+tugTZEDpb7PBTpeFKqNo0nUZgxKn3S+BSWcgqDUuRoLELjw7HYKiN58Ab", - "LwPlSlouJBSOOSPQygIxq1GYogn3v3eGt/iCG/j8ydgd336duPtL1d/1vTs+abexUUZHMnF1uq/+wKYl", - "q07/Ce/DeG4jVhn9PNhIsXrtbpulKPEm+ofbv4CG2iAT6CAi3E1GrCS3tYanb+UD9xfL2IXlsuC6cL9s", - "6Kfv6tKKC7FyP5X00wu1EvmFWI0gs4E1+eDCbhv6x42XZsd2m3xXvFDqsq7iBeWdh+tix86fj20yjXks", - "YZ41r9344fF6Gx4jx/aw22YjR4AcxV3FXcNL2Glw0PJ8if9sl0hPfKl/df9UVel622qZQq2jY38lo/rA", - "qxXOqqoUOXdIfOU/u6+OCQA9JHjb4hQv1KfvIxArrSrQVtCgvKqyUuW8zIzlFkf6Vw3L2dPZv5y2+pdT", - "6m5Oo8lfuF4X2MmJrCQGZbyqjhjjpRN9zB5m4Rg0fkI2QWwPhSYhaRMdKQnHgku44tKetE+WDj9oDvAb", - "P1OLb5J2CN+9J9gowhk1XIAhCZga3jMsQj1DtDJEKwqkq1Itmh8+OauqFoP4/ayqCB8oPYJAwQy2wlhz", - "H5fP25MUz3P+/IR9E4+NoriS5c5dDiRquLth6W8tf4s1uiW/hnbEe4bhdip94rYmoMGJ+XdBcfisWKvS", - "ST0HacU1/ptvG5OZ+31S5z8HicW4HScufGh5zNEbB3+JHjef9ChnSDhe3XPCzvp9b0Y2bpQ9BGPOWyze", - "NfHgL8LCxhykhAiiiJr89nCt+W7mhcQMhb0hmfxogCik4ishEdq5ez5JtuGXtB8K8e4IAUzzLiJaIgmy", - "UaF6mdOj/mSgZ/kTUGtqY4Mk6iTVUhiL72pszNZQouDMZSDomFRuRBkTNnzPIhqYrzWviJb9FxK7hMT3", - "PDUiWG958U68E5MwR+w+2miE6sZs+SDrTEKCXKMHw5elyi//xs36Dk74Iow1pH2chq2BF6DZmpt14uD0", - "aLsdbQp9u4ZIs2wRTXXSLPGFWpk7WGKpjmFdVfWMl6WbesiyeqvFgScd5LJkrjGDjUCFuX84koad3l/s", - "K56vnVjAcl6W81ZVpKqshCso3aNdSAl6zuya2/bw48jhXYPnyIBjdhZYtBqvZkIVm250ERrYhuMNtHGv", - "mars9mk4qOEb6ElBeCOqGrUI0UPj/HlYHVyBRJ7UDI3gN2tEbU08+Imb23/CmaWixZEG0AbzXYO/hl90", - "gHat2/tUtlMoXZDO2rrfhGa50jQE3fB+cvcf4LrtTNT5SaUh80NofgXa8NKtrreo+w353tXpPHAyC255", - "dDI9FaYfYMQ5sB+Kd6ATWpof8D+8ZO6zk2IcJbXUI1AYUZE5taCL2aGKZnINUN+q2IZUmazi+eVRUD5r", - "J0+zmUkn7yvSnvot9Itoduj1VhTmrrYJBxvbq+4JId1VYEcDWWQv04nmmoKA16pixD56IBCnwNEIIWp7", - "59fal2qbgulLtR1caWoLd7ITbpzJzP5LtX3uIVP6MOZx7ClIdwuUfAMGbzcZM043S2uXO1sofTNponfB", - "SNZaGxl3o0bC1LyHJGxaV5k/mwmLBTXoDdQ6eOwXAvrDpzDWwcKF5b8BFowb9S6w0B3orrGgNpUo4Q5I", - "f50U4hbcwKeP2cXfzj579Pjnx5997kiy0mql+YYtdhYM+8Sr5ZixuxLuJ19HKF2kR//8SbBRdcdNjWNU", - "rXPY8Go4FNm+6PVLzZhrN8RaF8246gbASRwR3NVGaGdk1nWgPYdFvboAa91L96VWyzvnhoMZUtBho5eV", - 
"doKF6doJvbR0Wrgmp7C1mp9W2BJkQX4Gbh3CuDfgZnEnRDW28UU7S8E8Rgs4eCiO3aZ2ml28VXqn67tQ", - "b4DWSiev4Eorq3JVZk7OEyqhoHjpWzDfImxX1f+doGXX3DA3N1ova1mM6CHsVk6/v2jo11vZ4mbvDUbr", - "TazOzztlX7rIb18hFejMbiVD6uyoR5ZabRhnBXZEWeMbsCR/iQ1cWL6pflgu70bbqXCghB5HbMC4mRi1", - "cNKPgVxJcuY7oLLxo05BTx8xwcpkxwHwGLnYyRxNZXdxbMe1WRsh0W5vdjKPVFsOxhKKVYcsb6/CGkMH", - "TXXPJMBx6HiBn1FX/xxKy79W+nUrvn6jVV3dOXvuzzl1OdwvxlsDCtc3qIGFXJVdB9KVg/0ktcbfZUHP", - "GiUCrQGhR4p8IVZrG70XX2r1G9yJyVlSgOIHUhaVrs9QZfS9KhwzsbW5A1GyHazlcI5uY77GF6q2jDOp", - "CsDNr01ayBxxOURfJ3TRsrHcivoJYdgCHHXlvHarrSuGDkiD+6LtmPGcTmiGqDEj7heN3wy1ounIna3U", - "wIsdWwBIphbex8F7X+AiOXpP2SCmeRE3wS86cFVa5WAMFJlXRR8ELbSjq8PuwRMCjgA3szCj2JLrWwN7", - "eXUQzkvYZejrZ9gn3/5k7v8O8FpleXkAsdgmhd6+Pm0I9bTp9xFcf/KY7EhTR1TrxFvHIEqwMIbCo3Ay", - "un99iAa7eHu0XIFGl5LflOLDJLcjoAbU35jebwttXY14sPtnupPw3IZJLlUQrFKDldzY7BBbdo06ugS3", - "gogTpjgxDjwieL3gxpIblJAF6jTpOsF5SAhzU4wDPPoMcSP/FF4gw7Fzdw9KU5vmOWLqqlLaQpFaA1pk", - "R+f6HrbNXGoZjd28eaxitYFDI49hKRrfI8u/gPEPbhv7q7foDheHNnV3z++SqOwA0SJiHyAXoVWE3diL", - "dwQQYVpEE+EI06OcxnV4PjNWVZXjFjarZdNvDE0X1PrM/ti2HRIXGTno3i4UGDSg+PYe8mvCLPlvr7lh", - "Ho5gYkd1DvlrDWF2hzEzQuaQ7aN8fOK5VvEROHhI62qleQFZASXfJZwD6DOjz/sGwB1vn7vKQkaOuOlN", - "byk5+D3uGVrheCYlPDL8wnJ3BN1ToCUQ3/vAyAXg2Cnm5OnoXjMUzpXcojAeLpu2OjEi3oZXyrod9/SA", - "IHuOPgXgETw0Q98cFdg5a9+e/Sn+A4yfoJEjjp9kB2ZsCe34Ry1gRBfsY5yi89Jj7z0OnGSbo2zsAB8Z", - "O7IjiumXXFuRiwrfOt/C7s6ffv0JkoZzVoDlooSCRR/oGVjF/Rm5kPbHvNlTcJLubQj+QPmWWE5w0+kC", - "fwk7fHO/pNiESNVxF2/ZxKjufuKSIaDB49mJ4HET2PLcljsnqNk17Ng1aGCmXpALw9CeYlWVxQMk7TN7", - "ZvTW2aRtdK+5+AKHipaX8jWjN8F++F73HgYddPi3QKVUOUFDNkBGEoJJviOsUm7XhQ9/CgEwgZI6QHqm", - "jab55vq/ZzpoxhWw/1A1y7nEJ1dtoZFplEZBAQVIN4MTwZo5vXNiiyEoYQP0ksQvDx70F/7ggd9zYdgS", - "rkPMoGvYR8eDB6jHeamM7RyuO9CHuuN2nrg+0HDlLj7/CunzlMMeT37kKTv5sjd4Y+1yZ8oYT7hu+bdm", - "AL2TuZ2y9phGpnl74biTbDld/6DBunHfL8SmLrm9C6sVXPEyU1egtSjgICf3Ewslv7ri5Q9NN4yHhNzR", - "aA5ZjlF8E8eC164PBf65cYQU7gCT0/9UgOCcel1QpwNPzNZTVWw2UAhuodyxSkMOFO/mJEfTLPWEkSd8", - "vuZyhQ8GreqVd26lcZDh14ZUM7qWgyGSQpXdygyV3KkLwLuphZBHJ04Bd0+6voacHjDXvJnPR7lOuZmj", - "PehbDJJGsvls9MXrkHrVvngJOd24zQmXQUfei/DTTjzRlIKoc7LPEF/xtrjD5Db3t1HZt0OnoBxOHHn8", - "th/HnH7dc7vc3YHQQwMxDZUGg1dUrKYy9FUt4xjt4Cq4MxY2Q00+df155Pi9Gn0vKlkKCdlGSdgl05II", - "Cd/hx+RxwmtypDMKLGN9+2+QDvw9sLrzTKHG2+IXd7t/QvsWK/O10ndlEqUBJ4v3EyyQB83tfsqb2kl5", - "WSZMiz6Cs88AzLxx1hWacWNULlBmOy/M3HsFkzXSh3t20f+yiUu5g7PXH7dnQ4uTA6COGMqKcZaXAjXI", - "Shqr69y+lRx1VNFSE05c4TE+rrV8Fpqk1aQJLaYf6q3k6MDXaK6SDhtLSKhpvgYIyktTr1ZgbO+tswR4", - "K30rIVkthcW5Nu64ZHReKtDoSXVCLTd8x5aOJqxiv4JWbFHbrvSPAcrGirL0Bj03DVPLt5JbVgI3ln0n", - "5OstDheM/uHISrDXSl82WEjf7iuQYITJ0s5m39BX9Ov3y197H390d6fPwem0zZgwc8vsJEn5v5/8+9M3", - "Z9l/8uzXh9kX/+P03fsnH+4/GPz4+MNf//r/uj99+uGv9//9X1M7FWBPhc96yM+f+5fx+XN8/kSu+n3Y", - "P5r+fyNkliSy2JujR1vsE0wV4Qnoflc5ZtfwVtqtdIR0xUtRON5yE3Lo3zCDs0ino0c1nY3oKcPCWo98", - "VNyCy7AEk+mxxhtLUUP/zHSgOholfew5npdlLWkrg/RNcZjBv0wt500yAspT9pRhpPqaBydP/+fjzz6f", - "zdsI8+b7bD7zX98lKFkU21QegQK2qbdiHCRxz7CK7wzYNPdA2JOudOTbEQ+7gc0CtFmL6uNzCmPFIs3h", - "QsiS1zlt5bkkB393ftDEufOWE7X8+HBbDVBAZdep/EUdQQ1btbsJ0HM7qbS6Ajln4gRO+jqfwr0XvVNf", - "CXwZHFO1UlNeQ805IEILVBFhPV7IJMVKin564Q3+8jd3/hzyA6fg6s+Z8ui9981Xr9mpZ5jmHqW0oKGj", - "JASJp7QPnuw4JDluFseUvZVv5XNYovZByadvZcEtP11wI3JzWhvQX/KSyxxOVoo9DfGYz7nlb+VA0hpN", - "rBgFTbOqXpQiZ5fxg6QlT0qWNRzh7ds3vFypt2/fDXwzhs8HP1WSv9AEmROEVW0zn+on03DNdcr2ZZpU", - "Lzgy5fLaNysJ2aomBWlIJeTHT/M8XlWmn/JhuPyqKt3yIzI0PqGB2zJmrGri0ZyA4kN63f5+r/zFoPl1", - "0KvUBgz7ZcOrN0Ladyx7Wz98+ClG9rU5EH7xV76jyV0Fk7Uroykp+koVXDg9K9FXPav4KmVie/v2jQVe", - 
"4e6jvLxBHUdZMuzWiToMAQY4VLuAJsR5dAMIjqODg3FxF9QrpHVMLwE/4RZ2A7BvtV9R/PyNt+tADD6v", - "7TpzZzu5KuNIPOxMk+1t5YSs4I1hxApfqz4x3gJYvob80mcsg01ld/NO9+Dw4wXNwDqEoVx2FGGI2ZTQ", - "QLEAVlcF96I4l7t+WhtDERU46Cu4hN1r1SZjOiaPTTetihk7qEipkXTpiDU+tn6M/uZ7r7IQaOqzk2Dw", - "ZiCLpw1dhD7jB5lE3js4xCmi6KT9GEME1wlEEPGPoOAGC3Xj3Yr0U8sTMgdpxRVkUIqVWKTS8P59aA8L", - "sDqq9JkHvRdyM6BhYsncU35BF6t/3msuV+CuZ3elKsNLyqqadNrA99AauLYL4Havnl/GCSkCdPikvMbI", - "a9Twzd0SYOv2W1jU2Em4dq8KVBRRG++9fDLuf0aAQ3FDeEL39qVwMvrW9ahLZBwMt3KD3eZZ613zYjpD", - "uOj7BjBlqbp2++KgUD7bJiV1ie6X2vAVjLxdYuvdxHwYHYsfDnJIIknKIGrZFzUGkkASZGqcuTUnzzC4", - "L+4Q4zOz55AZZiIDsbcZYRJtj7BFiQJs47lKe891x4pKWYHHQEuzFtCyFQUDGF2MxMdxzU04jpgvNXDZ", - "SdLZb5j2ZV9quvPIlzBKitokngu3YZ+DDt79PkFdyEoXUtHFj/4JaeXc2wvDF1LboSSKpgWUsKKFU+NA", - "KG3CpHaDHBw/LJfIW7KUW2KkoI4EAD8HuJfLA8bINsImj5Ai4whsdHzAgdn3Kj6bcnUMkNInfOJhbLwi", - "or8hHdhHjvpOGFWVu1zFiL0xDxzAp6JoJYueRzUOw4ScM8fmrnjp2Jx/i7eDDDKk4YOilw/Nu97cH3to", - "7DFN0ZV/1JpISLjJamJpNgCdFrX3QLxQ24wilJNvkcV24eg9GbuA8dKpg0m56O4ZtlBbdOfCq4V85Q/A", - "Mg5HACPSvWyFQXrFfmNyFgGzb9r9cm6KCg2SjFe0NuQyJuhNmXpEthwjl0+i9HI3AqCnhmprNXi1xEH1", - "QVc8GV7m7a02b9OmhrCw1PEfO0LJXRrB31A/1k0I97c28d94crFwoj5KJryhZuk2GQqpc0VZB49JUNgn", - "hw4Qe7D6si8HJtHa9fXq4jXCWoqVOOY7NEoO0WagBHwEZx3RNLtMeQq4tzzgPX4RukXKOtw9Lnf3IwdC", - "DSthLLRGo+AX9Huo4zmmT1ZqOb46W+mlW98rpZrLn8zm2LGzzI++AvTAXwptbIYWt+QSXKOvDSqRvnZN", - "0xJo10WRig2IIs1xcdpL2GWFKOs0vfp5v33upv2+uWhMvcBbTEhy0FpgcYyk4/Keqcm3fe+CX9CCX/A7", - "W++00+Cauom1I5fuHH+Sc9FjYPvYQYIAU8Qx3LVRlO5hkFHA+ZA7RtJo5NNyss/aMDhMRRj7oJdaCHsf", - "u/lppORaojSA6QhBtVpBEdKbBXuYjJLIlUquoipOVbUvZ94Jo9R1mHluT9I674YPY074kbifCVnANg19", - "/CpAyNvIOky4h5OsQFK6krRaKIma2MUfW0S6uo9sC+0HACSdoF/3jNmtdzLtUrOduAEl8MK/SQyE9e0/", - "lsMN8aibj7lPdzKf7j9COCDSlLBRYZNhGoIRBsyrShTbnuGJRh1VgvGjtMsj0hayFj/YAQx0naCTBNdJ", - "pe1drb2C/RTfvKfuVUa+196x2NE3z30AflFrtGB0PJuHedubt9rEtX/704VVmq/AW6EyAulWQ+ByjkFD", - "lBXdMCvInaQQyyXE1hdzE8tBB7iBjr2YQLoJIkubaGoh7edPUmR0gHpaGA+jLE0xCVoYs8m/Hlq5gkwf", - "qZKaKyHamhuYqpLh+t/CLvuJl7V7ZAhtWvdcb3bqXr5H7PrV5lvY4cgHvV4dYAd2BTVPrwBpMKXpbz6Z", - "KIH1PdNJ8Y/Py84WHrFTZ+lduqOt8UUZxom/vWU6RQu6S7nNwWidJBwsU3bjIu2b4E4PdBHfJ+VDmyCK", - "wzJIJO/HUwkTSlgOr6ImF8Uh2n0NvAzEi8uZfZjPbucJkLrN/IgHcP2yuUCTeEZPU7IMdxx7jkQ5ryqt", - "rniZeX+Jsctfqyt/+WPz4F7xkV8yacp+/dXZi5ce/A/zWV4C11mjCRhdFbar/jSrojIO+68SyvbtFZ2k", - "KYo2v8nIHPtYXGNm756yaVAUpfWfiY6i97lYph3eD/I+7+pDS9zj8gNV4/HT2jzJ4afr5MOvuCiDsTFA", - "O+KcjoubVlknyRXiAW7tLBT5fGV3ym4Gpzt9OlrqOsCTcK4fMDVl+sUhfeJKZEXe+YffufT0tdId5u8j", - "E5POQ7+dWOWEbMLjiK92qF/ZF6ZOGAlev6x+cafxwYP4qD14MGe/lP5DBCD+vvC/4/viwYOk9TCpxnJM", - "ArVUkm/gfhNlMboRH/cBLuF62gV9drVpJEs1ToYNhZIXUED3tcfetRYen4X/pYAS3E8nUx7p8aYTumNg", - "ppygi7FIxMbJdEMlMw1Tsu9TjUGwjrSQ2fuSDGSMHR4hWW/QgJmZUuRp1w65MI69SnKmdI0ZNh7R1roR", - "azHimytrEY3lmk3JmdoDMpojiUyTTNva4m6h/PGupfhnDUwU7lWzFKDxXutddeFxgKMOBNK0XswPTHaq", - "dvjb6EH22JuCLmifEmSv/e55Y1MKC00V/TnSAzyeccC493hve/rw1EzRbOuuC+a0d8yU0umB0Xlj3cgc", - "yVLowmRLrX6FtCEE7UeJRBjB8ClQzfsryJTnXp+lNEbltqJ7O/uh7Z7+Nh7b+Fu/hcOim6pjN7lM06f6", - "uI28yaPXpNM1eySPPcJiD4NuaMAIa8HjFTnDYhmU4H3EJZ0nygLRiTBLn8o4lvOUxm9PpYd5EP9a8usF", - "T9WIcW8hB1O0vR0/KatY6Bw2wDQ5Dmh2FnlwN20FZZKrQLc2iGFW2hu+a2jayS+a9gGDFBU/XebkplAa", - "lRimltdcUhVx14/4le9tgEzwrte10pgH0qRdugrIxSapjn379k2RD913CrESVCC7NhBVYPYDMUo2iVTk", - "q1g3mTs8as6X7OE8KgPvd6MQV8KIRQnY4hG1WHCD12VjDm+6uOWBtGuDzR9PaL6uZaGhsGtDiDWKNW9P", - "FPIax8QF2GsAyR5iu0dfsE/QJdOIK7jvsOiFoNnTR1+gQw398TB1y/oC5/tYdoE8Ozhrp+kYfVJpDMck", - "/ahp7+ulBvgVxm+HPaeJuk45S9jSXyiHz9KGS76CdHzG5gBM1Bd3E835PbxIsgaAsVrtmLDp+cFyx59G", - 
"Yr4d+yMwWK42G2E33nHPqI2jp7a8Mk0ahqNa/75eVIArfET/1yq4//V0XR/5GcM3IzFb6KX8PdpoY7TO", - "Gafkn6VoPdNDvU52HnILYwGtpm4W4cbN5ZaOsiQ6qi9ZpYW0qP+o7TL7i3sWa5479ncyBm62+PxJohBV", - "t1aLPA7wj453DQb0VRr1eoTsg8zi+7JPpJLZxnGU4n6bYyE6laOOummXzDG/0P1DT5V83SjZKLnVHXLj", - "Eae+FeHJPQPekhSb9RxFj0ev7KNTZq3T5MFrt0M/vnrhpYyN0qmCAe1x9xKHBqsFXGHEXHqT3Ji33Atd", - "TtqF20D/+/o/BZEzEsvCWU4+BCKL5r5geSfF//Rdm/kcDasUidjTASqd0HZ6vd1H9jY8TuvWt9+Swxh+", - "G8HcZLThKEOsjHjfk3t90+f38Bfqg0R73lE4PvqFafcGRzn+wQME+sGDuReDf3nc/Uzs/cGDdALipMrN", - "/dpi4TYvYuyb2sMvVUIBFqoWNg5FPj9CQgE5dkm5D44JLvxQc9atEPfxpYi7ie9Ke5umT8Hbt2/wS8AD", - "/tFHxO/MLHED2yiF8cPerZCZJJmi+R75uXP2pdpOJZzeHRSI5w+AohGUTFTP4UoGFUCT5vqD/iIRjbpR", - "F1Aq98iMiwLF+vw/D57d4ud7sF2Lsvipze3Wu0g0l/k66SW8cB1/Jhm9cwUTq0zWGVlzKaFMDkdv25/D", - "GzjxSv+HmjrPRsiJbfsVaGm5vcW1gHfBDECFCR16hS3dBDFWu2mzmrQM5UoVDOdpi1q0zHFYyjlVQjMR", - "34zDbmrr/VYxFtwnHFqKEt0w03ZjbJlpbkcSaGG981BfyI2D5ccNqRlodNCMiw1ezIZvqhLwZF6B5ivs", - "qiT0umMKNRw5qljBTOU+YUtMWKGYrbVkarmMlgHSCg3lbs4qbgwN8tAtC7Y49+zpo4cPk2ovxM6ElRIW", - "wzJ/aJfy6BSb0BdfZIlKARwF7GFYP7QUdczGDgnH15T8Zw3GpngqfqDIVbSSulub6kk2tU9P2DeY+cgR", - "cSfVPaorQxLhbkLNuioVL+aY3Pj1V2cvGM1KfaiEPNWzXKG2rkv+SfPK9ASjIbPTSOac6ePsT+XhVm1s", - "1pSfTOUmdC3aApmi53ODerwYOyfsOalQmwL+NAnDFNl6A0VU7ZIe8Ugc7j/W8nyNusmOBDTOK6cXYg3s", - "rLXcRNGHTfUjZNgObl+LlUqxzpmya9DXwgBG5MMVdNMhNrlBvW48pEfsLk/XUhKlnBwhjDa1jo5FewCO", - "JNngVJCErIf4IzVTVI/52Lq0F9grHYvRK3Lbs/qH5HohxTb7zhsXci6VFDmWQkhJ0pi6bZqZckLViLR9", - "0cz8CU0crmRp3SYW2GNxtNhuYIQecUOTf/TVbSpRB/1pYetLrq3AGs/ZoJiHStfeICakAV/NyhFRzCeV", - "Tjg1JQMhGgeKI8kIszKNaDi/dt++9/pvTIpxKSRqujza/PuMTFalEWiZlkxYtlJg/Hq60Tzmjetzglka", - "C9i+O3mhViK/ECscg9zo3LLJZ3Q41FnwIPUem67tM9fW585vfu64g9GkZ1XlJx2vg54UJO1WjiI45bcU", - "HEki5Dbjx6PtIbe9rt94nzpCgyv0WoMK7+EBYTS1tLujfOXelkRR2IJRRGUyga6QCTBeCBlMqOkLIk9e", - "CbgxeF5H+plcc0tvh0k87TXwciQAAiOUyQZ/26H6lQMcSnCNYY7xbWzLgI8wjqZBK/FzuWPhUDjqjoSJ", - "Z7xsXKcTRb1RqvJCVIHBRb0y3ynG4Rh3FkImO+g6GL7XdMdqHMfeRGM5Chd1sQKb8aJIpbb6Er8y/BqC", - "xGALed0UoWqiA7s5yofU5ifKlTT1Zs9cocEtp4vq5ieoIa7dH3YYM+0sdvhvqgLT+M54p+mjo3KDh3Rx", - "XGL+YZRxSup1NJ0ZscqmYwLvlNujo536ZoTe9r9TSg/hun+IaNwel4v3KMXfvnIXR5y4d+CfTldLk1cX", - "fcEVfg8Jj5qMkF2uhFfZoM4Yej3g5iW2rAd8aJgE/IqXI5Hwsa2E7leyH4zFw+ej6Ru49em5LGd7WdBo", - "yiPyFe5ZX4YmxDH/YHIPvjurhV/rXoSO2+6+7VjqyEesZRajFrqbGdHaDT7Wivbt1ViKhFCnA7/H9UC8", - "F8/cp4GHK6Hq4H0VfKDDk5B+9Sl4OnU/RtafjCz4va0WozaW175+LS3Tv8m//YmssAyk1bs/gMVlsOn9", - "ojIJaZfUU20T1pQ+nFQKsXMrTqlhkyqX4mXDoCsj1tKhpUH5mQFZPZ8iDgzw8WE+Oy+OujBTJXdmNErq", - "2L0Qq7XFjP1/A16AfnmgIkFbhQCPWKWMaCuQlm4wnwJ2jcOdTA02cAQs4ooKw7GCE+oV5BbLzrbOdRrg", - "mPoKbrJg9PnvygTjz+kmJsMXJNhXhWBYa/bAHT9InBQl/6I6nSfTc+6fNS7UFAF2zU2brqUXMz05cnO5", - "hByzIu9NVPX3NcgoCdI86GUQlmWUt0o0cUyY1/t4rWML0L48Unvhierr3BqcsTj2S9jdM6xDDcnCoU0Q", - "300SByMGyAQWckiPKZK915gwDWUgFoJLsE/F3BbHGM35HKVdu+FcgSTdxdGmYtszZbro+aS5XNej0j5i", - "SM5YLqthzeTx98dzLFFtvIMcbxIPx690dj4snHPtExdjWrHGdhJSGIMJv4UcgjRLKS59/QDEClmqrrku", - "Qos7SQpFd5NIA71sZhZtAMfQySFRigFjofJSOTEiGwso68ZMNA6H9wx5hrYJfBCuJWgNRWMSKZWBzKoQ", - "8LEPjn2oIPfXGyHBjJY/IuBGU1+/anN7Yxk4jqmuufd6jRfINGy4g05HGbjH59yH7Gf0PQThhzJgBzVM", - "Db0erkcbQneEGSAxpvol87fl4eD+myibhJSgs2B56qfjlt2MbJh3s6hzuqDjg9Eo5CbnztnDSpJ6mny4", - "yt4bIQqSv4TdKT2CQiHfsIMx0CQ5EehRwtHeJt+p+s2k4F7dCXi/bx65SqkyGzF2nA9ziPcp/lLkl4A5", - "ABsX95Ea7ewT1LE31uzr9S7kzK4qkFDcP2HsTFJQUTBsd8sL9iaX9+y++bc4a1FTWn+vVDt5K9PRGZhw", - "X9+Sm4Vh9vMwA47V3XIqGuRAhuqtHHO5ucbk/N0qnidTX+VDU3O/inxLVARFSia5IIvVMzzoKcURpkCI", - "cnWgIZMzb+liplQpX96bpGlwQ6UxFU+GAFmQU7IFNFD4wZMISNZFT5xCSn3nk96pJdPQGpFvmv1vWMI9", - 
"9aLvz9zM0uV3S6WhU4zd9aZMn03gC6bRxP8shNVc726So29QQn6gPRnF8kF3rMYTq11I6401xGFZqusM", - "mVXW1LlIPW1dO9O9jEPRtbafO9ULiPy6uPGC2o6tecFypTXkcY90vCdBtVEaslKhm1fKAr20Tu7eYJCX", - "ZKVaMVXlqgCqF5OmoLG5aik5ik0QedUkUUC0g9HC1Cei44lTujuV7EgZilqrI2rn50CR621WJ1p0RrbM", - "EY9lMD6Lk8cQNR7Cu6f2f5o3L8UW6QZ06sgvmdU1zJlv0a+R7Q8+18A2whgCpaGla1GWGDgutpHltXFc", - "SKN2ROw9R7fKK4G+N90kAiQNV+7OazIrxDzgIk57xOxaq3q1jhJMN3CGJ6+u/YM4HuVHU6N7FEaQuSme", - "sI0y1r80aaR2ya3L2Se5klarsuwqpUhEX3lN+3d8e5bn9oVSlwueX97Hd61UtllpMQ/x1X3nwHYm3Ust", - "1r2AMypnfjhVL7VDVzlPtJMZZI/FHV3YPQLz3WEOeljnfjZcWH9dXWaafsacScat2og8fab+XN52oz5y", - "KRaVzFlGtRUpywQ2w8MeX1aNcwWyyCGaQfJkcbgz5hmBNzIju3H/RQm8Py5bgmc0IxflkLl4KSrLR2W9", - "HgAIKYU+21pTQcZYEmu4ilpRqgQ0kfcBnXiroCfS7WBzI9w5UBZuBdTA+7EB8BNSPswptxx5Ui7UNny/", - "3yafuxHwH/ZTeYd5jLl4XbSkpcnJKySqGeEI6RTXe/2hXmPY+2KqV1RTPHfiDR8BMO4n1YFhkrfUsWAs", - "uSihyFK1F88bHdU8emn70Kx+SXRhPCfPeR1KH7qxaw0+cQqJ+Lpr/6q4IyXVNB9qkmUBW6C4jl9BK6pp", - "OI/sL1BSycOeMkBVWQlX0HEf89lcahQ1xRWEvqbpzAqACq2RfR1Zyi8qvst7ihO/9izyrJmC3aQmhRBL", - "O8UOqEmSSp2tzOiYmKlHyUF0JYqad/BnjhU5umpAd5QTqBq8EbLwjpw6zY80wqswwFnonxJlAibeTeND", - "R7OgNOr2MaCDfpK1GTv1Mu0mGacqagwsOFvRGGKJxFu+YSp+LccVkkOSb59bE/dJKBkh9qst5CjV+PcO", - "FP7FM2Kk8FlPkNolQEGvAtcloW1fg2RSRSUmr7lpniptDsXwA02MjYT0r+kbGJVbb8bb7yzDwZjpJVMb", - "fUjohk5vrp7/XU7i3oM4Ol6KRgz48L89+q9A3f7ZgQ2wlLd0++lkfyzS6G8xz8XnbFGHgcpSXVPNyPgd", - "+hyCHZSoL5iAvFgumms5eG3OfXrPvqpDRP7qG75jSuM/7tX5z5qXYrlDPkPgh27MrLkjIW94JY8A7wXq", - "Jt4vXs0DYEHbosJUtG4xdcxouJ0bJQLaXeShuI9iG34J8TagswPxz9w6xmnqBWou3JXd284hFvziQ4qW", - "DS/ilz4miuyWUQ+pg13v/9nGwsVThfxuVcnzUCHUlyjq8hmsAhyIy65hsz9YcsjXAgk0lYVbotUhur64", - "gcr0SNaVikAYK7/SAXtQcXVQeeZWy5io+e3V2NgTZjppKXe9C1O9bgZAx3UaD4Efl638OPhP5nAdW8YU", - "8P8oeB8pVBvDSzVpPwKWOxk4ErCStnqhtpmGpTnkYELqavec123ujqBiFTLXwA153Jz/4B+ebYpSId1D", - "mHxCG5tmM0oBSyFbZilkVdvEOwYzlcpdhLBY6Y9oHTGhjUkJTpi84uUPV6C1KMY2zp0OKukYl4gIhg7f", - "N6HCaO7U4QDCtG84jM9s1ehxM3eBUxEqctc0lsuC6yJuLiTLQbt7n13znbm5RakxDhyyKfFImulmDYis", - "S0jaBEi580bhW9p7GgD5HRp+Jhhs0C84Yawh1Y5VI/aZIQx/CoPNhm+zUq0winDkQPjctGjhoyegkqgG", - "J/ls2rrDPEb8CvunwbT8nhFZhbNOmWL/uf8BtxKfkT9KYfeefNJR9sM6ye+WDmZAqly1zv9ELMPzmIrE", - "9clX4mjcIGyGUJVAexBtIozYh7p68ZFdRDcIH8YdK8Gnlzvrelqk4n1JM5ChxsDsce8H07qy89y7Zw1V", - "aQNVAyFl7qOlj9S0kX4+3Esj4FFten/Wu9M2LjNunGNqxO2Pj84qVWX5FJ9PqtxReDOBh7QL4wh9REaA", - "kXU37jGmqWXTyXvUKWpzbJm80aI6h6xdVb7v0T+mJhrh6F0ThFoiL6PK7ajdwkieRpky78eYddVgDZNg", - "nGnIa41q4mu+O1x2bCRj9MXfzj579Pjnx599zlwDVogVmDbreK9sV+sXKGRf7/NxPQEHy7PpTQjZBwhx", - "wf4YgqqaTfFnjbitaVOKDoqWHaNfTlwAieOYKBd1o73CcVrX/j/WdqUWeec7lkLBb79nWpVluupDI1cl", - "DCip3YpMKO4FUoE2wljHCLsWUGFbj2izRvUg5v69omwySuYQ9MeeCoQdcblKLWTMoRb5GcZ2e6sRg21V", - "el5Flp596/LvNNLQodCIXjELYJWqvGgvliwFEUYQ6Siy1is+USMe+cg2zJa8ZVOE6D3P06QXF8zez+27", - "xVxtmtO7TUyIF+FQ3oA0x+wT43kLbsJJWtX+H4Z/JBIx3BnXaJb7W/CK5PvgZkX5J4E2DMpPkAcCMBJt", - "24mTjALFokTEmqwEaE8IBuS++PFda1g+GBaCkIQOB8CLw2fbdk0kgwfnd87o+12DlGgp78YoobP8QxG5", - "gfU2F0m0RV5pYi0YYktqKBZG4dbmWRPFPPIqGQQ7a6Uscy/TskwESZMeB89UTDjuSaCvePnxucbXQht7", - "hviA4tV4aFQcKRsjmVBpbpan7wWfNHcUFXt3U8uXGJj9d3B7lLzn/FDeCD+4zVC5gxXrV+FWoFhvdo1j", - "kpPVo8/ZwhfbqDTkwvSN+9dBOGkCQ0GLpXdoha09EIl6aJ0/KXsLMl4GTxz2fWTeamz2HsL2iP7OTGXk", - "5CapPEV9A7JI4C/Fo+LivAeui1sWZrhZ2pcogduRaV+GZYenLo9Sm7hLpzYwXOfk27qD28RF3a5tas6i", - "yfUd3r59YxdTUg2lazG47pjr6E6KMhxVkuE3yHJEOPJj+HlTFPPTWN5byu06kpu7tx+1KA86rHQyrX+Y", - "z1YgwQiDucR/9rVjPu5dGiCgzAvDo0qw3iZdDCEmsdbO5NFUUQ71CenTfbdEzmuMasxrLewO6wYHBZr4", - "OZmP6Zsmt4fPDdPY0vzdZ9UlNLXb20wgtQm36zeKl3gfkYlPultIlSfsK8rw7Q/KX+8t/g0+/cuT4uGn", - 
"j/5t8ZeHnz3M4clnXzx8yL94wh998ekjePyXz548hEfLz79YPC4eP3m8ePL4yeeffZF/+uTR4snnX/zb", - "PceHHMgEaEjt/3T2f7KzcqWys5fn2WsHbIsTXolvwe0NvpWXCutaOqTmeBJhw0U5exp++l/hhJ3katMO", - "H36d+fpMs7W1lXl6enp9fX0SdzldYeh/ZlWdr0/DPFhtsCOvvDxvfPTJDwd3tNUe46Z6UjjDb6++unjN", - "zl6en7QEM3s6e3jy8OSRL20teSVmT2ef4k94eta476eYX/PU+NT5p1Xlk+cnzXavfMWlLsWFzghsk33d", - "7TYlZfcp8E1c1/q8QNqyw8T9WH8N3bIQwMcPH4Zd8TJPdPWcYhzI0/ezaVXuh5Phzvdzaizq1UsHc0jl", - "0iT388YJjzO0XxLCmv2iJzBfGdSsa3HFLczefZjPqjqBzq8wyMPsw9k8SghP0KiyaDA+wOjL+r8IRj/M", - "Z6eeT86evnd/rYGXmOTJ/bFxhJqHTxp4sfP/N9d8tQJ94tfpfrp6fBok4tP3PnvHh33fTmPvpNP3nSQn", - "xYGewfvmUJPT96F88/4BO6V7vd9j1GEioPuanS6wZNPUphCvbnwpSPPm9D0+Bkd/P/UavfRHfJQTtz8N", - "yYJGWlJaiPTHDgrf261byP7hXJtovJzbfF1Xp+/xP0i2H+i0l5DKKkTlIjhrm8+ZsIwvlMZqwDZfO24Q", - "ypAKE7UcHPkz1+sZQRCquqOry+zpm2EsEg7Ewkh4Xbq7oL3NOjO1Aguq9iOm0IhjnfatUPbmYfbFu/eP", - "5o8efvgXJ3T5Pz/79MNET+5nzbjsopGoJjZ8d0uON9AftIukTWoY2FDg9bQwHmvit6o3EGuQcaDWYG/4", - "odyODPjJHfL4bg7cBH//khcshOzj3I8+3tznkvyVndBEwt2H+eyzj7n6c+lInpcMW0bFo4db/6O8lOpa", - "hpZOEq83G6534RibDlNgfrNR3uvdgPOZVDJK7CdXJGaoVNqEEX5jLL8Bv7lwvf6b33QaDixOGBNGmj9f", - "aDzyMaHLpKmrBiHbafBz58UVl3kIDGo99XG/SPL2hNE4g9YGlnUZUmJUpVhS/XalyjCRqavKcZwlNw1l", - "+fAA93ijiP5maFbLXEly48FIjGCMxMh8NGiaS1F1uoiloypfWZyigk7Cpv+zBr1rd30j3Cus3d6Bo9lv", - "ycIJj3fAwrsD3TELf3wkG/3zr/i/9qX15OFfPh4EIZHOa7EBVds/66V5QTfYrS5NL8NTLYhTu5Wn6Gp8", - "+r7zXPGfB8+V7u9t97jF1UYVEJ4Qark0qFrZ9/n0Pf0bTQTbCrTYgKQS8f5XujlOsUD5bvjzTubJH4fr", - "6OQIHvn5NGj3Uq/kbsv3nT+7Lz+zrm2hrqmmZlJeweuTl2zDJV9RQHmjEHP3oB+gTV/Mfqiai8rHkTKO", - "peBUbVuNJYVV+ODyxqaMN1rjWbQSEidA4yDOwpeuK48ucF+N8WQoG3nIvlcFDGWj1EXoYexchs1RSNU9", - "vO3FOGS8H447KGjEJAv8kIzcx9r0/z695sI6CcrnEUaMDjtb4OWpLxrW+7Wt0zH4gsVHoh/jCPnkr6e8", - "ey66GhS3ZWMdB+qV1FevQRhpFAI7wufWkBAr5pFcGpX8m3du1w3oq0BJrZ756ekpRvqtlbGnKIl2ddDx", - "x3fNRodyyM2Gu2/bTGmxEpKXmVeStZUPZ49PHs4+/P8AAAD//50s/LmpDQEA", + "H4sIAAAAAAAC/+y9e5PbtpI4+lVQ2q3yY8UZ23GyJ751au/ETnLmxklcHid7d23fE4hsSThDATwAOCPF", + "19/9V+gGSJAEJWpm4iS1+5c9Ih6NRqPR6OeHWa42lZIgrZk9+zCruOYbsKDxL57nqpY2E4X7qwCTa1FZ", + "oeTsWfjGjNVCrmbzmXC/VtyuZ/OZ5Bto27j+85mGf9ZCQzF7ZnUN85nJ17DhbmC7q1zrZqRttlKZH+KM", + "hjh/Mfu45wMvCg3GDKH8UZY7JmRe1gUwq7k0PHefDLsWds3sWhjmOzMhmZLA1JLZdacxWwooC3MSFvnP", + "GvQuWqWffHxJH1sQM61KGML5XG0WQkKAChqgmg1hVrEClthozS1zMzhYQ0OrmAGu8zVbKn0AVAIihhdk", + "vZk9ezszIAvQuFs5iCv871ID/AqZ5XoFdvZ+nlrc0oLOrNgklnbusa/B1KU1DNviGlfiCiRzvU7Y97Wx", + "bAGMS/b6m+fss88++9ItZMOthcIT2eiq2tnjNVH32bNZwS2Ez0Na4+VKaS6LrGn/+pvnOP+FX+DUVtwY", + "SB+WM/eFnb8YW0DomCAhIS2scB861O96JA5F+/MClkrDxD2hxne6KfH8v+uu5Nzm60oJaRP7wvAro89J", + "HhZ138fDGgA67SuHKe0Gffso+/L9h8fzx48+/svbs+y//Z+ff/Zx4vKfN+MewECyYV5rDTLfZSsNHE/L", + "msshPl57ejBrVZcFW/Mr3Hy+QVbv+zLXl1jnFS9rRyci1+qsXCnDuCejApa8Li0LE7Nalo5NudE8tTNh", + "WKXVlSigmDvue70W+Zrl3NAQ2I5di7J0NFgbKMZoLb26PYfpY4wSB9eN8IEL+uMio13XAUzAFrlBlpfK", + "QGbVgesp3DhcFiy+UNq7yhx3WbE3a2A4uftAly3iTjqaLssds7ivBeOGcRaupjkTS7ZTNbvGzSnFJfb3", + "q3FY2zCHNNyczj3qDu8Y+gbISCBvoVQJXCLywrkbokwuxarWYNj1Guza33kaTKWkAaYW/4Dcum3/fy5+", + "/IEpzb4HY/gKXvH8koHMVQHFCTtfMqlsRBqelhCHrufYOjxcqUv+H0Y5mtiYVcXzy/SNXoqNSKzqe74V", + "m3rDZL1ZgHZbGq4Qq5gGW2s5BhCNeIAUN3w7nPSNrmWO+99O25HlHLUJU5V8hwjb8O1fH809OIbxsmQV", + "yELIFbNbOSrHubkPg5dpVctigphj3Z5GF6upIBdLAQVrRtkDiZ/mEDxCHgdPK3xF4IRBRsFpZjkAjoRt", + "gmbc6XZfWMVXEJHMCfvJMzf8atUlyIbQ2WKHnyoNV0LVpuk0AiNOvV8Cl8pCVmlYigSNXXh0OAZDbTwH", + "3ngZKFfSciGhcMwZgVYWiFmNwhRNuP+9M7zFF9zAF0/H7vj268TdX6r+ru/d8Um7jY0yOpKJq9N99Qc2", + "LVl1+k94H8ZzG7HK6OfBRorVG3fbLEWJN9E/3P4FNNQGmUAHEeFuMmIlua01PHsnH7q/WMYuLJcF14X7", + 
"ZUM/fV+XVlyIlfuppJ9eqpXIL8RqBJkNrMkHF3bb0D9uvDQ7ttvku+KlUpd1FS8o7zxcFzt2/mJsk2nM", + "YwnzrHntxg+PN9vwGDm2h902GzkC5CjuKu4aXsJOg4OW50v8Z7tEeuJL/av7p6pK19tWyxRqHR37KxnV", + "B16tcFZVpci5Q+Jr/9l9dUwA6CHB2xaneKE++xCBWGlVgbaCBuVVlZUq52VmLLc40r9qWM6ezf7ltNW/", + "nFJ3cxpN/tL1usBOTmQlMSjjVXXEGK+c6GP2MAvHoPETsglieyg0CUmb6EhJOBZcwhWX9qR9snT4QXOA", + "3/qZWnyTtEP47j3BRhHOqOECDEnA1PCeYRHqGaKVIVpRIF2VatH8cP+sqloM4vezqiJ8oPQIAgUz2Apj", + "zQNcPm9PUjzP+YsT9m08NoriSpY7dzmQqOHuhqW/tfwt1uiW/BraEe8Zhtup9InbmoAGJ+bfBcXhs2Kt", + "Sif1HKQV1/hvvm1MZu73SZ3/HCQW43acuPCh5TFHbxz8JXrc3O9RzpBwvLrnhJ31+96MbNwoewjGnLdY", + "vGviwV+EhY05SAkRRBE1+e3hWvPdzAuJGQp7QzL5yQBRSMVXQiK0c/d8kmzDL2k/FOLdEQKY5l1EtEQS", + "ZKNC9TKnR/3JQM/yJ6DW1MYGSdRJqqUwFt/V2JitoUTBmctA0DGp3IgyJmz4nkU0MF9rXhEt+y8kdgmJ", + "73lqRLDe8uKdeCcmYY7YfbTRCNWN2fJB1pmEBLlGD4avSpVf/o2b9R2c8EUYa0j7OA1bAy9AszU368TB", + "6dF2O9oU+nYNkWbZIprqpFniS7Uyd7DEUh3DuqrqOS9LN/WQZfVWiwNPOshlyVxjBhuBCnP/cCQNO72/", + "2Nc8XzuxgOW8LOetqkhVWQlXULpHu5AS9JzZNbft4ceRw7sGz5EBx+wssGg1Xs2EKjbd6CI0sA3HG2jj", + "XjNV2e3TcFDDN9CTgvBGVDVqEaKHxvmLsDq4Aok8qRkawW/WiNqaePATN7f/hDNLRYsjDaAN5rsGfw2/", + "6ADtWrf3qWynULognbV1vwnNcqVpCLrh/eTuP8B125mo836lIfNDaH4F2vDSra63qAcN+d7V6TxwMgtu", + "eXQyPRWmH2DEObAfinegE1qaH/E/vGTus5NiHCW11CNQGFGRObWgi9mhimZyDVDfqtiGVJms4vnlUVA+", + "bydPs5lJJ+9r0p76LfSLaHbozVYU5q62CQcb26vuCSHdVWBHA1lkL9OJ5pqCgDeqYsQ+eiAQp8DRCCFq", + "e+fX2ldqm4LpK7UdXGlqC3eyE26cycz+K7V94SFT+jDmcewpSHcLlHwDBm83GTNON0trlztbKH0zaaJ3", + "wUjWWhsZd6NGwtS8hyRsWleZP5sJiwU16A3UOnjsFwL6w6cw1sHCheW/ARaMG/UusNAd6K6xoDaVKOEO", + "SH+dFOIW3MBnT9jF384+f/zk708+/8KRZKXVSvMNW+wsGHbfq+WYsbsSHiRfRyhdpEf/4mmwUXXHTY1j", + "VK1z2PBqOBTZvuj1S82YazfEWhfNuOoGwEkcEdzVRmhnZNZ1oL2ARb26AGvdS/eVVss754aDGVLQYaNX", + "lXaChenaCb20dFq4JqewtZqfVtgSZEF+Bm4dwrg34GZxJ0Q1tvFFO0vBPEYLOHgojt2mdppdvFV6p+u7", + "UG+A1konr+BKK6tyVWZOzhMqoaB45Vsw3yJsV9X/naBl19wwNzdaL2tZjOgh7FZOv79o6Ddb2eJm7w1G", + "602szs87ZV+6yG9fIRXozG4lQ+rsqEeWWm0YZwV2RFnjW7Akf4kNXFi+qX5cLu9G26lwoIQeR2zAuJkY", + "tXDSj4FcSXLmO6Cy8aNOQU8fMcHKZMcB8Bi52MkcTWV3cWzHtVkbIdFub3Yyj1RbDsYSilWHLG+vwhpD", + "B011zyTAceh4iZ9RV/8CSsu/UfpNK75+q1Vd3Tl77s85dTncL8ZbAwrXN6iBhVyVXQfSlYP9JLXG32VB", + "zxslAq0BoUeKfClWaxu9F19p9RvciclZUoDiB1IWla7PUGX0gyocM7G1uQNRsh2s5XCObmO+xheqtowz", + "qQrAza9NWsgccTlEXyd00bKx3Ir6CWHYAhx15bx2q60rhg5Ig/ui7ZjxnE5ohqgxI+4Xjd8MtaLpyJ2t", + "1MCLHVsASKYW3sfBe1/gIjl6T9kgpnkRN8EvOnBVWuVgDBSZV0UfBC20o6vD7sETAo4AN7Mwo9iS61sD", + "e3l1EM5L2GXo62fY/e9+Ng9+B3itsrw8gFhsk0JvX582hHra9PsIrj95THakqSOqdeKtYxAlWBhD4VE4", + "Gd2/PkSDXbw9Wq5Ao0vJb0rxYZLbEVAD6m9M77eFtq5GPNj9M91JeG7DJJcqCFapwUpubHaILbtGHV2C", + "W0HECVOcGAceEbxecmPJDUrIAnWadJ3gPCSEuSnGAR59hriRfw4vkOHYubsHpalN8xwxdVUpbaFIrQEt", + "sqNz/QDbZi61jMZu3jxWsdrAoZHHsBSN75HlX8D4B7eN/dVbdIeLQ5u6u+d3SVR2gGgRsQ+Qi9Aqwm7s", + "xTsCiDAtoolwhOlRTuM6PJ8Zq6rKcQub1bLpN4amC2p9Zn9q2w6Ji4wcdG8XCgwaUHx7D/k1YZb8t9fc", + "MA9HMLGjOof8tYYwu8OYGSFzyPZRPj7xXKv4CBw8pHW10ryArICS7xLOAfSZ0ed9A+COt89dZSEjR9z0", + "preUHPwe9wytcDyTEh4ZfmG5O4LuKdASiO99YOQCcOwUc/J0dK8ZCudKblEYD5dNW50YEW/DK2Xdjnt6", + "QJA9R58C8AgemqFvjgrsnLVvz/4U/wXGT9DIEcdPsgMztoR2/KMWMKIL9jFO0XnpsfceB06yzVE2doCP", + "jB3ZEcX0K66tyEWFb53vYHfnT7/+BEnDOSvAclFCwaIP9Ays4v6MXEj7Y97sKThJ9zYEf6B8SywnuOl0", + "gb+EHb65X1FsQqTquIu3bGJUdz9xyRDQ4PHsRPC4CWx5bsudE9TsGnbsGjQwUy/IhWFoT7GqyuIBkvaZ", + "PTN662zSNrrXXHyBQ0XLS/ma0ZtgP3xveg+DDjr8W6BSqpygIRsgIwnBJN8RVim368KHP4UAmEBJHSA9", + "00bTfHP93zMdNOMK2H+pmuVc4pOrttDINEqjoIACpJvBiWDNnN45scUQlLABeknil4cP+wt/+NDvuTBs", + "CdchZtA17KPj4UPU47xSxnYO1x3oQ91xO09cH2i4cheff4X0ecphjyc/8pSdfNUbvLF2uTNljCdct/xb", + 
"M4DeydxOWXtMI9O8vXDcSbacrn/QYN247xdiU5fc3oXVCq54makr0FoUcJCT+4mFkl9f8fLHphvGQ0Lu", + "aDSHLMcovoljwRvXhwL/3DhCCneAyel/KkBwTr0uqNOBJ2brqSo2GygEt1DuWKUhB4p3c5KjaZZ6wsgT", + "Pl9zucIHg1b1yju30jjI8GtDqhldy8EQSaHKbmWGSu7UBeDd1ELIoxOngLsnXV9DTg+Ya97M56Ncp9zM", + "0R70LQZJI9l8NvridUi9al+8hJxu3OaEy6Aj70X4aSeeaEpB1DnZZ4iveFvcYXKb+9uo7NuhU1AOJ448", + "ftuPY06/7rld7u5A6KGBmIZKg8ErKlZTGfqqlnGMdnAV3BkLm6Emn7r+feT4vR59LypZCgnZRknYJdOS", + "CAnf48fkccJrcqQzCixjfftvkA78PbC680yhxtviF3e7f0L7FivzjdJ3ZRKlASeL9xMskAfN7X7Km9pJ", + "eVkmTIs+grPPAMy8cdYVmnFjVC5QZjsvzNx7BZM10od7dtH/qolLuYOz1x+3Z0OLkwOgjhjKinGWlwI1", + "yEoaq+vcvpMcdVTRUhNOXOExPq61fB6apNWkCS2mH+qd5OjA12iukg4bS0ioab4BCMpLU69WYGzvrbME", + "eCd9KyFZLYXFuTbuuGR0XirQ6El1Qi03fMeWjiasYr+CVmxR2670jwHKxoqy9AY9Nw1Ty3eSW1YCN5Z9", + "L+SbLQ4XjP7hyEqw10pfNlhI3+4rkGCEydLOZt/SV/Tr98tfex9/dHenz8HptM2YMHPL7CRJ+f/u/8ez", + "t2fZf/Ps10fZl/92+v7D048PHg5+fPLxr3/9/7s/ffbxrw/+419TOxVgT4XPesjPX/iX8fkLfP5Ervp9", + "2D+Z/n8jZJYkstibo0db7D6mivAE9KCrHLNreCftVjpCuuKlKBxvuQk59G+YwVmk09Gjms5G9JRhYa1H", + "PipuwWVYgsn0WOONpaihf2Y6UB2Nkj72HM/Lspa0lUH6pjjM4F+mlvMmGQHlKXvGMFJ9zYOTp//zyedf", + "zOZthHnzfTaf+a/vE5Qsim0qj0AB29RbMQ6SuGdYxXcGbJp7IOxJVzry7YiH3cBmAdqsRfXpOYWxYpHm", + "cCFkyeuctvJckoO/Oz9o4tx5y4lafnq4rQYooLLrVP6ijqCGrdrdBOi5nVRaXYGcM3ECJ32dT+Hei96p", + "rwS+DI6pWqkpr6HmHBChBaqIsB4vZJJiJUU/vfAGf/mbO38O+YFTcPXnTHn03vv26zfs1DNMc49SWtDQ", + "URKCxFPaB092HJIcN4tjyt7Jd/IFLFH7oOSzd7Lglp8uuBG5Oa0N6K94yWUOJyvFnoV4zBfc8ndyIGmN", + "JlaMgqZZVS9KkbPL+EHSkiclyxqO8O7dW16u1Lt37we+GcPng58qyV9ogswJwqq2mU/1k2m45jpl+zJN", + "qhccmXJ57ZuVhGxVk4I0pBLy46d5Hq8q00/5MFx+VZVu+REZGp/QwG0ZM1Y18WhOQPEhvW5/f1D+YtD8", + "OuhVagOG/bLh1Vsh7XuWvasfPfoMI/vaHAi/+Cvf0eSugsnaldGUFH2lCi6cnpXoq55VfJUysb1799YC", + "r3D3UV7eoI6jLBl260QdhgADHKpdQBPiPLoBBMfRwcG4uAvqFdI6ppeAn3ALuwHYt9qvKH7+xtt1IAaf", + "13adubOdXJVxJB52psn2tnJCVvDGMGKFr1WfGG8BLF9DfukzlsGmsrt5p3tw+PGCZmAdwlAuO4owxGxK", + "aKBYAKurgntRnMtdP62NoYgKHPQ1XMLujWqTMR2Tx6abVsWMHVSk1Ei6dMQaH1s/Rn/zvVdZCDT12Ukw", + "eDOQxbOGLkKf8YNMIu8dHOIUUXTSfowhgusEIoj4R1Bwg4W68W5F+qnlCZmDtOIKMijFSixSaXj/c2gP", + "C7A6qvSZB70XcjOgYWLJ3FN+QRerf95rLlfgrmd3pSrDS8qqmnTawPfQGri2C+B2r55fxgkpAnT4pLzG", + "yGvU8M3dEmDr9ltY1NhJuHavClQUURvvvXwy7n9GgENxQ3hC9/alcDL61vWoS2QcDLdyg93mWetd82I6", + "Q7jo+wYwZam6dvvioFA+2yYldYnul9rwFYy8XWLr3cR8GB2LHw5ySCJJyiBq2Rc1BpJAEmRqnLk1J88w", + "uC/uEOMzs+eQGWYiA7G3GWESbY+wRYkCbOO5SnvPdceKSlmBx0BLsxbQshUFAxhdjMTHcc1NOI6YLzVw", + "2UnS2W+Y9mVfarrzyJcwSoraJJ4Lt2Gfgw7e/T5BXchKF1LRxY/+CWnl3NsLwxdS26EkiqYFlLCihVPj", + "QChtwqR2gxwcPy6XyFuylFtipKCOBAA/B7iXy0PGyDbCJo+QIuMIbHR8wIHZDyo+m3J1DJDSJ3ziYWy8", + "IqK/IR3YR476ThhVlbtcxYi9MQ8cwKeiaCWLnkc1DsOEnDPH5q546dicf4u3gwwypOGDopcPzbvePBh7", + "aOwxTdGVf9SaSEi4yWpiaTYAnRa190C8UNuMIpSTb5HFduHoPRm7gPHSqYNJuejuGbZQW3TnwquFfOUP", + "wDIORwAj0r1shUF6xX5jchYBs2/a/XJuigoNkoxXtDbkMiboTZl6RLYcI5f7UXq5GwHQU0O1tRq8WuKg", + "+qArngwv8/ZWm7dpU0NYWOr4jx2h5C6N4G+oH+smhPtbm/hvPLlYOFGfJBPeULN0mwyF1LmirIPHJCjs", + "k0MHiD1YfdWXA5No7fp6dfEaYS3FShzzHRolh2gzUAI+grOOaJpdpjwF3Fse8B6/CN0iZR3uHpe7B5ED", + "oYaVMBZao1HwC/o91PEc0ycrtRxfna300q3vtVLN5U9mc+zYWeYnXwF64C+FNjZDi1tyCa7RNwaVSN+4", + "pmkJtOuiSMUGRJHmuDjtJeyyQpR1ml79vN+9cNP+0Fw0pl7gLSYkOWgtsDhG0nF5z9Tk2753wS9pwS/5", + "na132mlwTd3E2pFLd44/ybnoMbB97CBBgCniGO7aKEr3MMgo4HzIHSNpNPJpOdlnbRgcpiKMfdBLLYS9", + "j938NFJyLVEawHSEoFqtoAjpzYI9TEZJ5EolV1EVp6ralzPvhFHqOsw8tydpnXfDhzEn/Ejcz4QsYJuG", + "Pn4VIORtZB0m3MNJViApXUlaLZRETezijy0iXd0ntoX2AwCSTtBvesbs1juZdqnZTtyAEnjh3yQGwvr2", + "H8vhhnjUzcfcpzuZT/cfIRwQaUrYqLDJMA3BCAPmVSWKbc/wRKOOKsH4UdrlEWkLWYsf7AAGuk7QSYLr", + 
"pNL2rtZewX6Kb95T9yoj32vvWOzom+c+AL+oNVowOp7Nw7ztzVtt4tq/+/nCKs1X4K1QGYF0qyFwOceg", + "IcqKbpgV5E5SiOUSYuuLuYnloAPcQMdeTCDdBJGlTTS1kPaLpykyOkA9LYyHUZammAQtjNnk3wytXEGm", + "j1RJzZUQbc0NTFXJcP3vYJf9zMvaPTKENq17rjc7dS/fI3b9avMd7HDkg16vDrADu4Kap9eANJjS9Def", + "TJTA+p7ppPjH52VnC4/YqbP0Lt3R1viiDOPE394ynaIF3aXc5mC0ThIOlim7cZH2TXCnB7qI75PyoU0Q", + "xWEZJJL346mECSUsh1dRk4viEO2+AV4G4sXlzD7OZ7fzBEjdZn7EA7h+1VygSTyjpylZhjuOPUeinFeV", + "Vle8zLy/xNjlr9WVv/yxeXCv+MQvmTRlv/n67OUrD/7H+Swvgeus0QSMrgrbVX+aVVEZh/1XCWX79opO", + "0hRFm99kZI59LK4xs3dP2TQoitL6z0RH0ftcLNMO7wd5n3f1oSXucfmBqvH4aW2e5PDTdfLhV1yUwdgY", + "oB1xTsfFTausk+QK8QC3dhaKfL6yO2U3g9OdPh0tdR3gSTjXj5iaMv3ikD5xJbIi7/zD71x6+kbpDvP3", + "kYlJ56HfTqxyQjbhccRXO9Sv7AtTJ4wEr19Wv7jT+PBhfNQePpyzX0r/IQIQf1/43/F98fBh0nqYVGM5", + "JoFaKsk38KCJshjdiE/7AJdwPe2CPrvaNJKlGifDhkLJCyig+9pj71oLj8/C/1JACe6nkymP9HjTCd0x", + "MFNO0MVYJGLjZLqhkpmGKdn3qcYgWEdayOx9SQYyxg6PkKw3aMDMTCnytGuHXBjHXiU5U7rGDBuPaGvd", + "iLUY8c2VtYjGcs2m5EztARnNkUSmSaZtbXG3UP5411L8swYmCveqWQrQeK/1rrrwOMBRBwJpWi/mByY7", + "VTv8bfQge+xNQRe0Twmy1373orEphYWmiv4c6QEezzhg3Hu8tz19eGqmaLZ11wVz2jtmSun0wOi8sW5k", + "jmQpdGGypVa/QtoQgvajRCKMYPgUqOb9FWTKc6/PUhqjclvRvZ390HZPfxuPbfyt38Jh0U3VsZtcpulT", + "fdxG3uTRa9Lpmj2Sxx5hsYdBNzRghLXg8YqcYbEMSvA+4pLOE2WB6ESYpU9lHMt5SuO3p9LDPIh/Lfn1", + "gqdqxLi3kIMp2t6On5RVLHQOG2CaHAc0O4s8uJu2gjLJVaBbG8QwK+0N3zU07eQXTfuAQYqKny5zclMo", + "jUoMU8trLqmKuOtH/Mr3NkAmeNfrWmnMA2nSLl0F5GKTVMe+e/e2yIfuO4VYCSqQXRuIKjD7gRglm0Qq", + "8lWsm8wdHjXnS/ZoHpWB97tRiCthxKIEbPGYWiy4weuyMYc3XdzyQNq1weZPJjRf17LQUNi1IcQaxZq3", + "Jwp5jWPiAuw1gGSPsN3jL9l9dMk04goeOCx6IWj27PGX6FBDfzxK3bK+wPk+ll0gzw7O2mk6Rp9UGsMx", + "ST9q2vt6qQF+hfHbYc9poq5TzhK29BfK4bO04ZKvIB2fsTkAE/XF3URzfg8vkqwBYKxWOyZsen6w3PGn", + "kZhvx/4IDJarzUbYjXfcM2rj6Kktr0yThuGo1r+vFxXgCh/R/7UK7n89XdcnfsbwzUjMFnop/4A22hit", + "c8Yp+WcpWs/0UK+TnYfcwlhAq6mbRbhxc7mloyyJjupLVmkhLeo/arvM/uKexZrnjv2djIGbLb54mihE", + "1a3VIo8D/JPjXYMBfZVGvR4h+yCz+L7svlQy2ziOUjxocyxEp3LUUTftkjnmF7p/6KmSrxslGyW3ukNu", + "POLUtyI8uWfAW5Jis56j6PHolX1yyqx1mjx47Xbop9cvvZSxUTpVMKA97l7i0GC1gCuMmEtvkhvzlnuh", + "y0m7cBvof1//pyByRmJZOMvJh0Bk0dwXLO+k+J+/bzOfo2GVIhF7OkClE9pOr7f7xN6Gx2nd+vZbchjD", + "byOYm4w2HGWIlRHve3Kvb/r8Hv5CfZBozzsKx8e/MO3e4CjHP3yIQD98OPdi8C9Pup+JvT98mE5AnFS5", + "uV9bLNzmRYx9U3v4lUoowELVwsahyOdHSCggxy4p98ExwYUfas66FeI+vRRxN/FdaW/T9Cl49+4tfgl4", + "wD/6iPidmSVuYBulMH7YuxUykyRTNN8jP3fOvlLbqYTTu4MC8fwBUDSCkonqOVzJoAJo0lx/0F8kolE3", + "6gJK5R6ZcVGgWJ//58GzW/x8D7ZrURY/t7ndeheJ5jJfJ72EF67j30lG71zBxCqTdUbWXEook8PR2/bv", + "4Q2ceKX/Q02dZyPkxLb9CrS03N7iWsC7YAagwoQOvcKWboIYq920WU1ahnKlCobztEUtWuY4LOWcKqGZ", + "iG/GYTe19X6rGAvuEw4tRYlumGm7MbbMNLcjCbSw3nmoL+TGwfLjhtQMNDpoxsUGL2bDN1UJeDKvQPMV", + "dlUSet0xhRqOHFWsYKZyn7AlJqxQzNZaMrVcRssAaYWGcjdnFTeGBnnklgVbnHv27PGjR0m1F2JnwkoJ", + "i2GZP7ZLeXyKTeiLL7JEpQCOAvYwrB9bijpmY4eE42tK/rMGY1M8FT9Q5CpaSd2tTfUkm9qnJ+xbzHzk", + "iLiT6h7VlSGJcDehZl2VihdzTG785uuzl4xmpT5UQp7qWa5QW9cl/6R5ZXqC0ZDZaSRzzvRx9qfycKs2", + "NmvKT6ZyE7oWbYFM0fO5QT1ejJ0T9oJUqE0Bf5qEYYpsvYEiqnZJj3gkDvcfa3m+Rt1kRwIa55XTC7EG", + "dtZabqLow6b6ETJsB7evxUqlWOdM2TXoa2EAI/LhCrrpEJvcoF43HtIjdpenaymJUk6OEEabWkfHoj0A", + "R5JscCpIQtZD/JGaKarHfGxd2gvslY7F6BW57Vn9Q3K9kGKbfe+NCzmXSoocSyGkJGlM3TbNTDmhakTa", + "vmhm/oQmDleytG4TC+yxOFpsNzBCj7ihyT/66jaVqIP+tLD1JddWYI3nbFDMQ6VrbxAT0oCvZuWIKOaT", + "SiecmpKBEI0DxZFkhFmZRjSc37hvP3j9NybFuBQSNV0ebf59Riar0gi0TEsmLFspMH493Wge89b1OcEs", + "jQVs35+8VCuRX4gVjkFudG7Z5DM6HOoseJB6j03X9rlr63PnNz933MFo0rOq8pOO10FPCpJ2K0cRnPJb", + "Co4kEXKb8ePR9pDbXtdvvE8docEVeq1BhffwgDCaWtrdUb52b0uiKGzBKKIymUBXyAQYL4UMJtT0BZEn", + 
"rwTcGDyvI/1Mrrmlt8MknvYGeDkSAIERymSDv+1Q/coBDiW4xjDH+Da2ZcBHGEfToJX4udyxcCgcdUfC", + "xHNeNq7TiaLeKFV5IarA4KJeme8U43CMOwshkx10HQzfa7pjNY5jb6KxHIWLuliBzXhRpFJbfYVfGX4N", + "QWKwhbxuilA10YHdHOVDavMT5UqaerNnrtDgltNFdfMT1BDX7g87jJl2Fjv8N1WBaXxnvNP00VG5wUO6", + "OC4x/zDKOCX1OprOjFhl0zGBd8rt0dFOfTNCb/vfKaWHcN0/RDRuj8vFe5Tib1+7iyNO3DvwT6erpcmr", + "i77gCr+HhEdNRsguV8KrbFBnDL0ecPMSW9YDPjRMAn7Fy5FI+NhWQvcr2Q/G4uHz0fQN3Pr0XJazvSxo", + "NOUR+Qr3rC9DE+KYfzC5B9+d1cKvdS9Cx21333UsdeQj1jKLUQvdzYxo7QYfa0X77mosRUKo04Hf43og", + "3otn7tPAw5VQdfC+Cj7Q4UlIv/oUPJ26HyPrT0YW/N5Wi1Ebyxtfv5aW6d/k3/1MVlgG0urdH8DiMtj0", + "flGZhLRL6qm2CWtKH04qhdi5FafUsEmVS/GyYdCVEWvp0NKg/MyArF5MEQcG+Pg4n50XR12YqZI7Mxol", + "dexeitXaYsb+vwEvQL86UJGgrUKAR6xSRrQVSEs3mE8Bu8bhTqYGGzgCFnFFheFYwQn1CnKLZWdb5zoN", + "cEx9BTdZMPr8b2WC8ed0E5PhCxLsq0IwrDV74I4fJE6Kkn9Rnc6T6Tn3zxoXaooAu+amTdfSi5meHLm5", + "XEKOWZH3Jqr6zzXIKAnSPOhlEJZllLdKNHFMmNf7eK1jC9C+PFJ74Ynq69wanLE49kvY3TOsQw3JwqFN", + "EN9NEgcjBsgEFnJIjymSvdeYMA1lIBaCS7BPxdwWxxjN+RylXbvhXIEk3cXRpmLbM2W66PmkuVzXo9I+", + "YkjOWC6rYc3k8ffHCyxRbbyDHG8SD8evdHY+LJxz7RMXY1qxxnYSUhiDCb+FHII0Sykuff0AxApZqq65", + "LkKLO0kKRXeTSAO9bGYWbQDH0MkhUYoBY6HyUjkxIhsLKOvGTDQOh/cMeYa2CXwQriVoDUVjEimVgcyq", + "EPCxD459qCD31xshwYyWPyLgRlNfv25ze2MZOI6prrn3eo0XyDRsuINORxm4x+fch+zn9D0E4YcyYAc1", + "TA29Hq5HG0J3hBkgMab6JfO35eHg/psom4SUoLNgeeqn45bdjGyYd7Ooc7qg44PRKOQm587Zw0qSepp8", + "uMreGyEKkr+E3Sk9gkIh37CDMdAkORHoUcLR3ibfqfrNpOBe3Ql4v28euUqpMhsxdpwPc4j3Kf5S5JeA", + "OQAbF/eRGu3sPurYG2v29XoXcmZXFUgoHpwwdiYpqCgYtrvlBXuTy3t23/xbnLWoKa2/V6qdvJPp6AxM", + "uK9vyc3CMPt5mAHH6m45FQ1yIEP1Vo653Fxjcv5uFc+Tqa/yoam5X0W+JSqCIiWTXJDF6jke9JTiCFMg", + "RLk60JDJmbd0MVOqlC/vTdI0uKHSmIonQ4AsyCnZAhoo/OBJBCTroidOIaW+80nv1JJpaI3IN83+Nyzh", + "nnrR92duZunyu6XS0CnG7npTps8m8AXTaOJ/FsJqrnc3ydE3KCE/0J6MYvmgO1bjidUupPXGGuKwLNV1", + "hswqa+pcpJ62rp3pXsah6Frbz53qBUR+Xdx4QW3H1rxgudIa8rhHOt6ToNooDVmp0M0rZYFeWid3bzDI", + "S7JSrZiqclUA1YtJU9DYXLWUHMUmiLxqkigg2sFoYeoT0fHEKd2dSnakDEWt1RG183OgyPU2qxMtOiNb", + "5ojHMhifxcljiBoP4d1T+z/Nm5dii3QDOnXkl8zqGubMt+jXyPYHn2tgG2EMgdLQ0rUoSwwcF9vI8to4", + "LqRROyL2nqNb5ZVA35tuEgGShit35zWZFWIecBGnPWJ2rVW9WkcJphs4w5NX1/5BHI/yk6nRPQojyNwU", + "T9lGGetfmjRSu+TW5ex+rqTVqiy7SikS0Vde0/49357luX2p1OWC55cP8F0rlW1WWsxDfHXfObCdSfdS", + "i3Uv4IzKmR9O1Uvt0FXOE+1kBtljcUcXdo/AfH+Ygx7WuZ8NF9ZfV5eZpp8xZ5JxqzYiT5+pP5e33aiP", + "XIpFJXOWUW1FyjKBzfCwx5dV41yBLHKIZpA8WRzujHlG4I3MyG7cf1EC74/LluAZzchFOWQuXorK8lFZ", + "rwcAQkqhz7bWVJAxlsQarqJWlCoBTeR9QCfeKuiJdDvY3Ah3DpSFWwE18H5sALxPyoc55ZYjT8qF2obv", + "D9rkczcC/uN+Ku8wjzEXr4uWtDQ5eYVENSMcIZ3ieq8/1BsMe19M9YpqiudOvOEjAMb9pDowTPKWOhaM", + "JRclFFmq9uJ5o6OaRy9tH5rVL4kujOfkOa9D6UM3dq3BJ04hEV937V8Vd6SkmuZDTbIsYAsU1/EraEU1", + "DeeR/QVKKnnYUwaoKivhCjruYz6bS42ipriC0Nc0nVkBUKE1sq8jS/lFxXd5T3Hi155FnjVTsJvUpBBi", + "aafYATVJUqmzlRkdEzP1KDmIrkRR8w7+zLEiR1cN6I5yAlWDN0IW3pFTp/mJRngdBjgL/VOiTMDE+2l8", + "6GgWlEbdPgZ00E+yNmOnXqbdJONURY2BBWcrGkMskXjLN0zFr+W4QnJI8u1za+I+CSUjxH69hRylGv/e", + "gcK/eEaMFD7rCVK7BCjoVeC6JLTta5BMqqjE5DU3zVOlzaEYfqCJsZGQ/jV9A6Ny6814+51lOBgzvWRq", + "ow8J3dDpzdXzv8tJ3HsQR8dL0YgBH/63R/8VqNs/O7ABlvKWbj+d7I9FGv0t5rn4nC3qMFBZqmuqGRm/", + "Q19AsIMS9QUTkBfLRXMtB6/NuU/v2Vd1iMhffcN3TGn8x706/1nzUix3yGcI/NCNmTV3JOQNr+QR4L1A", + "3cT7xat5ACxoW1SYitYtpo4ZDbdzo0RAu4s8FPdRbMMvId4GdHYg/plbxzhNvUDNhbuye9s5xIJffEjR", + "suFF/NLHRJHdMuohdbDr/X+1sXDxVCG/W1XyPFQI9SWKunwGqwAH4rJr2OwPlhzytUACTWXhlmh1iK4v", + "bqAyPZJ1pSIQxsqvdMAeVFwdVJ651TIman57NTb2hJlOWspd78JUr5sB0HGdxkPgx2UrPw3+kzlcx5Yx", + "Bfw/Ct5HCtXG8FJN2k+A5U4GjgSspK1eqG2mYWkOOZiQuto953WbuyOoWIXMNXBDHjfnP/qHZ5uiVEj3", + 
"ECaf0Mam2YxSwFLIllkKWdU28Y7BTKVyFyEsVvojWkdMaGNSghMmr3j54xVoLYqxjXOng0o6xiUigqHD", + "902oMJo7dTiAMO0bDuMzWzV63Mxd4FSEitw1jeWy4LqImwvJctDu3mfXfGdublFqjAOHbEo8kma6WQMi", + "6xKSNgFS7rxR+Jb2ngZAfoeGnwkGG/QLThhrSLVj1Yh9ZgjDn8Jgs+HbrFQrjCIcORA+Ny1a+OgJqCSq", + "wUk+m7buMI8Rv8L+aTAtv2dEVuGsU6bYf+5/xK3EZ+RPUti9J590lP2wTvK7pYMZkCpXrfM/EcvwPKYi", + "cX3ylTgaNwibIVQl0B5Emwgj9qGuXnxkF9ENwodxx0rw6eXOup4WqXhf0gxkqDEwe9z7wbSu7Dz37llD", + "VdpA1UBImfto6SM1baSfD/fSCHhUm96f9e60jcuMG+eYGnH746OzSlVZPsXnkyp3FN5M4CHtwjhCH5ER", + "YGTdjXuMaWrZdPIedYraHFsmb7SoziFrV5Xve/SPqYlGOHrXBKGWyMuocjtqtzCSp1GmzPsxZl01WMMk", + "GGca8lqjmvia7w6XHRvJGH3xt7PPHz/5+5PPv2CuASvECkybdbxXtqv1CxSyr/f5tJ6Ag+XZ9CaE7AOE", + "uGB/DEFVzab4s0bc1rQpRQdFy47RLycugMRxTJSLutFe4Tita/8fa7tSi7zzHUuh4LffM63KMl31oZGr", + "EgaU1G5FJhT3AqlAG2GsY4RdC6iwrUe0WaN6EHP/XlE2GSVzCPpjTwXCjrhcpRYy5lCL/Axju73ViMG2", + "Kj2vIkvPvnX5dxpp6FBoRK+YBbBKVV60F0uWgggjiHQUWesVn6gRj3xkG2ZL3rIpQvSe52nSiwtm7+f2", + "3WKuNs3p3SYmxItwKG9AmmP2ifG8BTfhJK1q/w/DPxKJGO6MazTL/S14RfJ9cLOi/JNAGwblJ8gDARiJ", + "tu3ESUaBYlEiYk1WArQnBANyX/z4vjUsHwwLQUhChwPgxeGzbbsmksGD8ztn9P2+QUq0lPdjlNBZ/qGI", + "3MB6m4sk2iKvNLEWDLElNRQLo3Br87yJYh55lQyCnbVSlrmXaVkmgqRJj4NnKiYc9yTQV7z89FzjG6GN", + "PUN8QPF6PDQqjpSNkUyoNDfL0/eST5o7ioq9u6nlKwzM/k9we5S85/xQ3gg/uM1QuYMV61fhVqBYb3aN", + "Y5KT1eMv2MIX26g05ML0jfvXQThpAkNBi6V3aIWtPRCJemidPyt7CzJeBk8c9kNk3mps9h7C9oj+zkxl", + "5OQmqTxFfQOySOAvxaPi4rwHrotbFma4WdqXKIHbkWlfhmWHpy6PUpu4S6c2MFzn5Nu6g9vERd2ubWrO", + "osn1Hd69e2sXU1INpWsxuO6Y6+hOijIcVZLhN8hyRDjyY/h5UxTz81jeW8rtOpKbu7cftSgPOqx0Mq1/", + "nM9WIMEIg7nE/+5rx3zauzRAQJkXhkeVYL1NuhhCTGKtncmjqaIc6hPSp/tuiZzXGNWY11rYHdYNDgo0", + "8fdkPqZvm9wePjdMY0vzd59Vl9DUbm8zgdQm3K7fKl7ifUQmPuluIVWesK8pw7c/KH+9t/h3+OwvT4tH", + "nz3+98VfHn3+KIenn3/56BH/8il//OVnj+HJXz5/+ggeL7/4cvGkePL0yeLpk6dffP5l/tnTx4unX3z5", + "7/ccH3IgE6Ahtf+z2f+bnZUrlZ29Os/eOGBbnPBKfAdub/CtvFRY19IhNceTCBsuytmz8NP/HU7YSa42", + "7fDh15mvzzRbW1uZZ6en19fXJ3GX0xWG/mdW1fn6NMyD1QY78sqr88ZHn/xwcEdb7TFuqieFM/z2+uuL", + "N+zs1flJSzCzZ7NHJ49OHvvS1pJXYvZs9hn+hKdnjft+ivk1T41PnX/axmol7Xav0WU9COd6BQW730Td", + "/FtjuTUPQvDOUpR4ZfzDEDE2qzgvkLh8jdIZVl1DZywE68mjR2EvvKQTXTinGP3x7MOsrW3fFyYGSH3T", + "ApyErK35OFz0T/JSqmvJMBkgHaB6s+F6RyvoYCMaHLeJrwwq2bW44hZm713vPs6ryhcsGEM5VrnqnvLQ", + "GQmkyXjvThglwvdlB0wK5cNiCbfE/t7kkIPJEruDjV45mEP6nCahojcIeZyhzZgQ1pwRUjsMED2fVXUC", + "nV9jYI3Zh7N5lISfoFFl0WB8gNFX9f8QjDrS9XfT7NkH99caeImJtdwfG0eoefikgRc7/39zzVcr0Cd+", + "ne6nqyen4RVy+sFnTPm479tp7BF2+qGTWKY40DN4PB1qcvohlMzeP2CnXLL3NY06TAR0X7PTBZbJmtoU", + "4tWNLwVp3px+wAf46O+nXoua/oiKELphT0OCppGWlIoj/bGDwg926xayfzjXJhov5zZf19XpB/wPku1H", + "Ou0lpDI5UYkOztrmcyYs4wulsQKzzdeOG4TSr8JELQdH/sz1ek4QhEr66F40e/Z2GP+FA7EwEooo7v5t", + "JYjOTK2QiOaUiCk0InCnfSsIv32Uffn+w+P540cf/8UJuv7Pzz/7ONF7/nkzLrtopNiJDd/fkuMNdDbt", + "ImmTGgY2fGR4WhiP7/Fb1RuINcg4UN+xN/zwrYQM+Okd8vhu3uEEf/+KFyykScC5H3+6uc8l+Yg7QZUE", + "6o/z2eefcvXn0pE8L4NIdkPh7YwOf8wUmN/slPA2n0klo2SKckVihkqlqhjhN8byG/CbC9frf/lNp+HA", + "yodxeKRt9cXdI78eukyaWnYQMsyG2AJeXHGZh2CsNjoC94skb08YjQNubWBZlyENSVWKJdXMV6oME5m6", + "qhzHWXLTUJYPyXAPZsqi0AzNapkrSa5TGP0SDMCYDQGNyOZSVJ0uYumoyldzp0isk7Dp/6xB79pd3wj3", + "8h28mVrnvt+ShRMe74CFdwe6Yxb+5Eg2+udf8f/sS+vpo798OghC8qI3YgOqtn/WS/OCbrBbXZpehqf6", + "G6d2K0/Rvfv0Q+e54j8Pnivd39vucYurjSogPCHUcmlQtbLv8+kH+jeaCLYVaLEBSWX5/a90c5xiUfjd", + "8OedzJM/DtfRycs88vNp0KimXsndlh86f3ZffmZd20JdUx3TpLyC1ycv2YZLvqIg/kYJ6e5BP0CbMpr9", + "WDUXlY/dZRzL76natlpiCmXxAf2NHR9vtMabayUkToAGWZyFL11XHl3gvgLmUId44SH7QRUwlI1SF6GH", + "sXMZNkchVWvy/d1oJyPG+/G4g4KGY/J6GJKR+1ib/t+n11xYJ0H53M2I0WFnC7w89YXaer+2tVEGX7Dg", + 
"S/RjnJUg+esp756LrgbFbdlYx4F6JfXVaxBGGoVgmvC5Nd7ExhAkl8YM8va923UD+ipQUqvbf3Z6itGV", + "a2XsKUqiXb1//PF9s9GhBHWz4e7bNlNarITkZeaVZG21ydmTk0ezj/8nAAD//1Xaw+sdDwEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go index 31d870e45e..b2f8e4a070 100644 --- a/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/nonparticipating/public/routes.go @@ -746,308 +746,308 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9/XfbtrIo+q9g6d618nFFOUnTnt281XWfm6Stb/O1Yrf7nNPktRA5krBNAdwAKEvN", - "y/9+FwYACZKgRNmyk7T+KbFIAoPBYDDf82GUimUhOHCtRk8+jAoq6RI0SPyLpqkouU5YZv7KQKWSFZoJ", - "PnrinxGlJePz0XjEzK8F1YvReMTpEup3zPfjkYR/l0xCNnqiZQnjkUoXsKRmYL0pzNvVSOtkLhI3xLEd", - "4uTZ6OOWBzTLJCjVhfI1zzeE8TQvMyBaUq5oah4pcsH0gugFU8R9TBgnggMRM6IXjZfJjEGeqYlf5L9L", - "kJtglW7y/iV9rEFMpMihC+dTsZwyDh4qqICqNoRoQTKY4UsLqomZwcDqX9SCKKAyXZCZkDtAtUCE8AIv", - "l6Mnv40U8Awk7lYKbIX/nUmAPyHRVM5Bj96PY4ubaZCJZsvI0k4c9iWoMteK4Lu4xjlbASfmqwl5WSpN", - "pkAoJ29/eEq++uqrb81CllRryByR9a6qnj1ck/189GSUUQ3+cZfWaD4XkvIsqd5/+8NTnP/ULXDoW1Qp", - "iB+WY/OEnDzrW4D/MEJCjGuY4z40qN98ETkU9c9TmAkJA/fEvnzQTQnn/6S7klKdLgrBuI7sC8GnxD6O", - "8rDg8208rAKg8X5hMCXNoL89SL59/+Hh+OGDj//jt+Pkv92fX3/1ceDyn1bj7sBA9MW0lBJ4uknmEiie", - "lgXlXXy8dfSgFqLMM7KgK9x8ukRW774l5lvLOlc0Lw2dsFSK43wuFKGOjDKY0TLXxE9MSp4bNmVGc9RO", - "mCKFFCuWQTY23PdiwdIFSamyQ+B75ILluaHBUkHWR2vx1W05TB9DlBi4LoUPXNDni4x6XTswAWvkBkma", - "CwWJFjuuJ3/jUJ6R8EKp7yq132VFzhZAcHLzwF62iDtuaDrPN0TjvmaEKkKJv5rGhM3IRpTkAjcnZ+f4", - "vVuNwdqSGKTh5jTuUXN4+9DXQUYEeVMhcqAckefPXRdlfMbmpQRFLhagF+7Ok6AKwRUQMf0XpNps+/85", - "ff2KCEleglJ0Dm9oek6ApyKDbEJOZoQLHZCGoyXEofmybx0Ortgl/y8lDE0s1byg6Xn8Rs/ZkkVW9ZKu", - "2bJcEl4upyDNlvorRAsiQZeS9wFkR9xBiku67k56Jkue4v7X0zZkOUNtTBU53SDClnT93YOxA0cRmuek", - "AJ4xPid6zXvlODP3bvASKUqeDRBztNnT4GJVBaRsxiAj1ShbIHHT7IKH8f3gqYWvABw/SC841Sw7wOGw", - "jtCMOd3mCSnoHAKSmZBfHHPDp1qcA68InUw3+KiQsGKiVNVHPTDi1NslcC40JIWEGYvQ2KlDh2Ew9h3H", - "gZdOBkoF15RxyAxzRqCFBsusemEKJtyu73Rv8SlV8M3jvju+fjpw92eivetbd3zQbuNLiT2SkavTPHUH", - "Ni5ZNb4foB+Gcys2T+zPnY1k8zNz28xYjjfRv8z+eTSUCplAAxH+blJszqkuJTx5x++bv0hCTjXlGZWZ", - "+WVpf3pZ5pqdsrn5Kbc/vRBzlp6yeQ8yK1ijChd+trT/mPHi7Fivo3rFCyHOyyJcUNpQXKcbcvKsb5Pt", - "mPsS5nGl7YaKx9naKyP7fqHX1Ub2ANmLu4KaF89hI8FAS9MZ/rOeIT3RmfzT/FMUuflaF7MYag0duysZ", - "zQfOrHBcFDlLqUHiW/fYPDVMAKwiQes3jvBCffIhALGQogCpmR2UFkWSi5TmidJU40j/U8Js9GT0P45q", - "+8uR/VwdBZO/MF+d4kdGZLViUEKLYo8x3hjRR21hFoZB4yNkE5btodDEuN1EQ0rMsOAcVpTrSa2yNPhB", - "dYB/czPV+LbSjsV3SwXrRTixL05BWQnYvnhHkQD1BNFKEK0okM5zMa1+uHtcFDUG8flxUVh8oPQIDAUz", - "WDOl1T1cPq1PUjjPybMJ+TEcG0VxwfONuRysqGHuhpm7tdwtVtmW3BrqEe8ogtsp5MRsjUeDEfMPQXGo", - "VixEbqSenbRiXv7JvRuSmfl90MdfBomFuO0nLlS0HOasjoO/BMrN3RbldAnHmXsm5Lj97eXIxoyyhWDU", - "SY3FQxMP/sI0LNVOSgggCqjJbQ+Vkm5GTkhMUNjrkskvCiyFFHTOOEI7NuoTJ0t6bvdDIN4NIYCq9CJL", - "S1aCrEyoTuZ0qJ907CxfALXGNtZLokZSzZnSqFfjy2QBOQrOlHuCDknlUpQxYMO3LKKC+ULSwtKye2LF", - "LsZRn7cvWVivePEOvBOjMAfsPthohOrSbHkn64xCglyjBcP3uUjPf6JqcYATPvVjdWkfpyELoBlIsqBq", - "ETk4LdquRxtC3+ZFpFkyDaaaVEt8IebqAEvMxT6sqyie0jw3U3dZVmu1OPCgg5znxLxMYMnQYO4UR2th", - "t/oXeU7ThRELSErzfFybikSR5LCC3CjtjHOQY6IXVNeHH0f2eg2eIwWG2WkgwWqcmQlNbLKyRUggS4o3", - "0NJoM0Xe/KbioIouoSUF4Y0oSrQiBIrGyTO/OlgBR55UDY3gV2tEa004+MTM7R7hzFzYxVkLoPbuuwp/", - 
"Fb9oAG3eru9TXk8hZGZt1tr8xiRJhbRD2BveTW7+A1TWH1vqvFtISNwQkq5AKpqb1bUWda8i30Odzh0n", - "M6OaBifTUWFcAbOcA79D8Q5kxErzGv9Dc2IeGynGUFJNPQyFERG4UzN7MRtU2ZnMC2hvFWRpTZmkoOn5", - "XlA+rSePs5lBJ++5tZ66LXSLqHbobM0ydahtwsH69qp5QqztyrOjjiyylekEcw1BwJkoiGUfLRAsp8DR", - "LELE+uDX2vdiHYPpe7HuXGliDQfZCTPOYGb/vVg/c5AJuRvzOPYQpJsFcroEhbcbDxmnmaX2yx1Phbyc", - "NNG6YDipvY2EmlEDYWrcQhK+WhaJO5sRj4V9oTVQHeCxXQhoDx/DWAMLp5peAxaUGfUQWGgOdGgsiGXB", - "cjgA6S+iQtyUKvjqETn96fjrh49+f/T1N4YkCynmki7JdKNBkbvOLEeU3uRwL6odoXQRH/2bx95H1Rw3", - "No4SpUxhSYvuUNb3ZbVf+xox73Wx1kQzrroCcBBHBHO1WbQT69Y1oD2DaTk/Ba2NpvtGitnBuWFnhhh0", - "+NKbQhrBQjX9hE5aOsrMK0ew1pIeFfgm8MzGGZh1MGV0wOX0IETVt/FZPUtGHEYz2Hko9t2meppNuFVy", - "I8tDmDdASiGjV3AhhRapyBMj5zERMVC8cW8Q94bfrqL9u4WWXFBFzNzovSx51mOH0Gs+/P6yQ5+teY2b", - "rTeYXW9kdW7eIfvSRH6thRQgE73mBKmzYR6ZSbEklGT4IcoaP4K28hdbwqmmy+L1bHYYa6fAgSJ2HLYE", - "ZWYi9g0j/ShIBbfBfDtMNm7UIehpI8Z7mXQ/AA4jpxueoqvsEMe235q1ZBz99mrD08C0ZWDMIZs3yPLq", - "Jqw+dNip7qgIOAYdL/Ax2uqfQa7pD0Ke1eLrj1KUxcHZc3vOocuhbjHOG5CZb70ZmPF53gwgnRvYJ7E1", - "fpIFPa2MCHYNCD1S5As2X+hAX3wjxTXcidFZYoDiA2ssys03XZPRK5EZZqJLdQBRsh6s5nCGbkO+Rqei", - "1IQSLjLAzS9VXMjsCTnEWCcM0dKh3Ir2CabIFAx1pbQ0qy0LggFInfui/jChqT2hCaJG9YRfVHEz9i07", - "nQ1nyyXQbEOmAJyIqYtxcNEXuEiK0VPai2lOxI3wiwZchRQpKAVZ4kzRO0Hz79mrQ2/BEwKOAFezECXI", - "jMorA3u+2gnnOWwSjPVT5O7Pv6p7nwBeLTTNdyAW34mht21P60I9bPptBNeePCQ7a6mzVGvEW8MgctDQ", - "h8K9cNK7f22IOrt4dbSsQGJIybVSvJ/kagRUgXrN9H5VaMuiJ4LdqelGwjMbxikXXrCKDZZTpZNdbNm8", - "1LAlmBUEnDDGiXHgHsHrBVXahkExnqFN014nOI8VwswU/QD3qiFm5F+9BtIdOzX3IFelqtQRVRaFkBqy", - "2BrQI9s71ytYV3OJWTB2pfNoQUoFu0buw1IwvkOW04DxD6or/6vz6HYXhz51c89voqhsAFEjYhsgp/6t", - "ALthFG8PIEzViLaEw1SLcqrQ4fFIaVEUhlvopOTVd31oOrVvH+tf6ne7xGWdHPbezgQodKC49x3kFxaz", - "Nn57QRVxcHgXO5pzbLxWF2ZzGBPFeArJNspHFc+8FR6BnYe0LOaSZpBkkNNNJDjAPib28bYBcMdrdVdo", - "SGwgbnzTa0r2cY9bhhY4nooJjwSfkNQcQaMK1ATivt4xcgY4dow5OTq6Uw2Fc0W3yI+Hy7ZbHRkRb8OV", - "0GbHHT0gyI6jDwG4Bw/V0JdHBX6c1Lpne4r/AuUmqOSI/SfZgOpbQj3+XgvosQW7HKfgvLTYe4sDR9lm", - "LxvbwUf6jmyPYfoNlZqlrEBd52fYHFz1a08QdZyTDDRlOWQkeGDVwCL8ntgQ0vaYl1MFB9neuuB3jG+R", - "5fgwnSbw57BBnfuNzU0ITB2H0GUjo5r7iXKCgPqIZyOCh6/AmqY63xhBTS9gQy5AAlHl1IYwdP0pWhRJ", - "OEDUP7NlRuedjfpGt7qLT3GoYHmxWDOrE2yH76ylGDTQ4XSBQoh8gIWsg4woBINiR0ghzK4zl/7kE2A8", - "JTWAdEwbXfPV9X9HNdCMKyD/JUqSUo4qV6mhkmmEREEBBUgzgxHBqjldcGKNIchhCVaTxCf377cXfv++", - "23OmyAwufM6gebGNjvv30Y7zRijdOFwHsIea43YSuT7QcWUuPqeFtHnK7ognN/KQnXzTGrzydpkzpZQj", - "XLP8KzOA1slcD1l7SCPDor1w3EG+nGZ8UGfduO+nbFnmVB/CawUrmidiBVKyDHZycjcxE/z5iuavq88w", - "HxJSQ6MpJClm8Q0cC87MNzbxz4zDODMH2Ab9DwUITuxXp/ajHSpmHanKlkvIGNWQb0ghIQWb72YkR1Ut", - "dUJsJHy6oHyOCoMU5dwFt9pxkOGXyppmZMk7Q0SFKr3mCRq5YxeAC1PzKY9GnAJqVLq2hdwqMBe0ms9l", - "uQ65mYM9aHsMok6y8ahX4zVIXdUar0VOM29zwGXQkPcC/NQTD3SlIOqM7NPFV7gt5jCZzb0ek309dAzK", - "7sRBxG/9sC/o16jb+eYAQo8diEgoJCi8okIzlbJPxSzM0fahghulYdm15NtPf+85fm979UXBc8YhWQoO", - "m2hZEsbhJT6MHie8Jns+RoGl79u2DtKAvwVWc54h1HhV/OJut09o22OlfhDyUC5RO+Bg8X6AB3Knu91N", - "eVk/Kc3ziGvRZXC2GYAaV8G6TBKqlEgZymwnmRq7qGDrjXTpnk30v6nyUg5w9trjtnxoYXEAtBFDXhBK", - "0pyhBVlwpWWZ6necoo0qWGokiMsr4/1Wy6f+lbiZNGLFdEO94xQD+CrLVTRgYwYRM80PAN54qcr5HJRu", - "6TozgHfcvcU4KTnTONfSHJfEnpcCJEZSTeybS7ohM0MTWpA/QQoyLXVT+scEZaVZnjuHnpmGiNk7TjXJ", - "gSpNXjJ+tsbhvNPfH1kO+kLI8woL8dt9DhwUU0k82OxH+xTj+t3yFy7GH8Pd7WMfdFpXTBiZZTaKpPx/", - "d//3k9+Ok/+myZ8Pkm//19H7D48/3rvf+fHRx++++/+bP3318bt7//t/xnbKwx5Ln3WQnzxzmvHJM1R/", - "glD9Nuw3Zv9fMp5EiSyM5mjRFrmLpSIcAd1rGsf0At5xveaGkFY0Z5nhLZchh/YN0zmL9nS0qKaxES1j", - "mF/rnkrFFbgMiTCZFmu8tBTVjc+MJ6qjU9LlnuN5mZXcbqWXvm0epo8vE7NxVYzA1il7QjBTfUF9kKf7", - 
"89HX34zGdYZ59Xw0Hrmn7yOUzLJ1rI5ABuuYrhgmSdxRpKAbBTrOPRD2aCidje0Ih13CcgpSLVhx85xC", - "aTaNczifsuRsTmt+wm2Avzk/6OLcOM+JmN083FoCZFDoRax+UUNQw7fq3QRohZ0UUqyAjwmbwKRt88mM", - "vuiC+nKgMx+YKoUYog1V58ASmqeKAOvhQgYZVmL000pvcJe/Org65AaOwdWeMxbRe+fH52fkyDFMdceW", - "tLBDB0UIIqq0S55sBCQZbhbmlL3j7/gzmKH1QfAn73hGNT2aUsVSdVQqkN/TnPIUJnNBnvh8zGdU03e8", - "I2n1FlYMkqZJUU5zlpLzUCGpydMWy+qO8O7dbzSfi3fv3ndiM7rqg5sqyl/sBIkRhEWpE1fqJ5FwQWXM", - "96WqUi84sq3ltW1WK2SL0hpIfSkhN36c59GiUO2SD93lF0Vulh+QoXIFDcyWEaVFlY9mBBSX0mv295Vw", - "F4OkF96uUipQ5I8lLX5jXL8nybvywYOvMLOvroHwh7vyDU1uChhsXektSdE2quDCrVqJsepJQecxF9u7", - "d79poAXuPsrLS7Rx5DnBzxpZhz7BAIeqF1ClOPdugIVj7+RgXNyp/cqXdYwvAR/hFjYTsK+0X0H+/KW3", - "a0cOPi31IjFnO7oqZUjc70xV7W1uhCwfjaHYHLVVVxhvCiRdQHruKpbBstCbceNzH/DjBE3POpiytexs", - "hiFWU0IHxRRIWWTUieKUb9plbZTNqMBB38I5bM5EXYxpnzo2zbIqqu+gIqUG0qUh1vDYujHam++iynyi", - "qatOgsmbniyeVHThv+k/yFbkPcAhjhFFo+xHHyKojCDCEn8PCi6xUDPelUg/tjzGU+CarSCBnM3ZNFaG", - "959df5iH1VClqzzoopCrARVhM2JU+am9WJ16Lymfg7mezZUqFM1tVdVo0AbqQwugUk+B6q12fh4WpPDQ", - "oUp5gZnXaOEbmyXA2uw302ix43BhtAo0FNl3XPTypD/+zAIO2SXh8Z/XmsKkV9d1qItUHPS3coXdSq11", - "oXkhnSFc9vkSsGSpuDD7YqAQrtqmLeoS3C+lonPo0V1C793AehgNjx8OsksiicogYtYWNTqSQBRk+3Ji", - "1hw9w2CemEOMamYrINPPZB3EzmeERbQdwqY5CrBV5KrdeyobXlRbFbgPtDhrAclrUdCD0cRIeBwXVPnj", - "iPVSPZcdJJ1dY9mXbaXpToJYwqAoalV4zt+GbQ7a0ftdgTpflc6XoguV/gFl5YzuhekLse0QHEXTDHKY", - "24Xblz2h1AWT6g0ycLyezZC3JLGwxMBAHQgAbg4wmst9QqxvhAweIUbGAdgY+IADk1ciPJt8vg+Q3BV8", - "on5svCKCvyGe2GcD9Y0wKgpzubIef2PqOYArRVFLFq2IahyGMD4mhs2taG7YnNPF60E6FdJQoWjVQ3Oh", - "N/f6FI0tril75e+1JiskXGY1oTTrgY6L2lsgnop1YjOUo7rIdD019B7NXcB86djBtLXo7igyFWsM58Kr", - "xcbK74ClHw4PRmB7WTOF9Irf9clZFpht026Xc2NUqJBknKG1Ipc+QW/I1D2yZR+53A3Ky10KgJYZqu7V", - "4MwSO80HTfGke5nXt9q4Lpvq08Jix7/vCEV3qQd/XftYsyDcT3Xhv/7iYv5E3UglvK5l6SoVCu3Hha06", - "uE+BwjY5NIDYgtU3bTkwitZmrFcTrwHWYqzEMN+uU7KLNgU5oBKcNETT5DwWKWB0ecB7/NR/FhjrcPco", - "39wLAgglzJnSUDuNfFzQpzDHUyyfLMSsf3W6kDOzvrdCVJe/dZvjh41l3vgKMAJ/xqTSCXrcokswL/2g", - "0Ij0g3k1LoE2QxRtswGWxTkuTnsOmyRjeRmnVzfvz8/MtK+qi0aVU7zFGLcBWlNsjhENXN4ytY1t37rg", - "F3bBL+jB1jvsNJhXzcTSkEtzji/kXLQY2DZ2ECHAGHF0d60XpVsYZJBw3uWOgTQaxLRMtnkbOocp82Pv", - "jFLzae99N78dKbqWoAxgPENQzOeQ+fJm3h/GgyJyueDzoItTUWyrmTchtnQdVp7bUrTOheFDXxB+IO4n", - "jGewjkMfagUIeZ1ZhwX3cJI5cFuuJG4WiqImDPHHNwJb3Q37QtsJANEg6LOWM7uOTra7VG0nbkAONHM6", - "iQK/vu3HsrshDnXjvvDpRuXT7UcIB0SaYjpobNItQ9DDgGlRsGzdcjzZUXuNYHQv63KPtIWsxQ22AwPN", - "IOgowTVKabtQa2dgP0Kd98hoZTb22gUWG/qmqUvAz0qJHoxGZHO3bnulqw1c+8+/nmoh6RycFyqxIF1p", - "CFzOPmgIqqIropkNJ8nYbAah90VdxnPQAK5jY88GkG6EyOIumpJx/c3jGBntoJ4axt0oi1NMhBb6fPJn", - "XS+Xl+kDU1J1JQRbcwlXVTRd/2fYJL/SvDRKBpOqDs91bqfm5bvHrq+WP8MGR94Z9WoA27EraHl6C0iD", - "MUt/9UgFBazvqEaJf1QvG1u4x04dx3fpQFvjmjL0E399yzSaFjSXcpWDUQdJGFiG7MZpPDbBnB5oIr5N", - "yrs2gWW7ZZBA3g+nYsq3sOxeRVUtil20ewY098SLyxl9HI+uFgkQu83ciDtw/aa6QKN4xkhT6xluBPbs", - "iXJaFFKsaJ64eIm+y1+Klbv88XUfXnHDmkycss+eH79448D/OB6lOVCZVJaA3lXhe8UXsyrbxmH7VWKr", - "fTtDp7UUBZtfVWQOYywusLJ3y9jUaYpSx88ER9HFXMziAe87eZ8L9bFL3BLyA0UV8VP7PG3ATzPIh64o", - "y72z0UPbE5yOixvWWSfKFcIBrhwsFMR8JQdlN53THT8dNXXt4Ek412ssTRnXOLgrXImsyAX/0INLTz8I", - "2WD+LjMxGjx0fWKVEbItHntitX3/yrYwNSFW8Ppj/oc5jffvh0ft/v0x+SN3DwIA8fep+x31i/v3o97D", - "qBnLMAm0UnG6hHtVlkXvRtysAs7hYtgFfbxaVpKl6CfDikJtFJBH94XD3oVkDp+Z+yWDHMxPkyFKerjp", - "Ft0hMENO0GlfJmIVZLq0LTMVEbwdU41JsIa0kNm7lgzWGds9QrxcogMzUTlL46EdfKoMe+U2mNK8TPDl", - "HmutGbFkPbG5vGTBWOa1ITVTW0AGc0SRqaJlW2vcTYU73iVn/y6BsMxoNTMGEu+11lXnlQMctSOQxu1i", - "bmDrp6qHv4odZIu/yduCthlBtvrvnlU+Jb/QWNOfPSPAwxk7jHtL9LajD0fNNptt0QzBHKbHDGmd7hmd", - 
"c9b1zBFthc5UMpPiT4g7QtB/FCmE4R2fDM28fwKPRe61WUrlVK47utez79ru4bpx38ZfWRf2i666jl3m", - "Mo2f6v028jJKr4qXa3ZI7lPCwgiDZmpAD2vB4xUEw2IbFB99RLk9T7YKRCPDLH4qw1zOIzt+fSodzJ38", - "15xeTGmsR4zRhQxMwfY24qS0IP5jvwGqqnFgZydBBHf1LrOV5AqQtQ+iW5X2knqNnXawRlMrMEhRoeoy", - "tmEKuRKRYUp+QbntIm6+s/zKfa3AuuDNVxdCYh1IFQ/pyiBly6g59t2737K0G76TsTmzDbJLBUEHZjcQ", - "scUmkYpcF+uqcodDzcmMPBgHbeDdbmRsxRSb5oBvPLRvTKnC67Jyh1efmOUB1wuFrz8a8Pqi5JmETC+U", - "RawSpNI9UcirAhOnoC8AOHmA7z38ltzFkEzFVnDPYNEJQaMnD7/FgBr7x4PYLesanG9j2RnybB+sHadj", - "jEm1Yxgm6UaNR1/PJMCf0H87bDlN9tMhZwnfdBfK7rO0pJzOIZ6fsdwBk/0WdxPd+S28cOsNAKWl2BCm", - "4/ODpoY/9eR8G/ZnwSCpWC6ZXrrAPSWWhp7q9sp2Uj+c7fXv+kV5uPxDjH8tfPhfy9Z1w2oMXfbkbGGU", - "8iv00YZoHRNqi3/mrI5M9/06yYmvLYwNtKq+WRY3Zi6zdJQlMVB9RgrJuEb7R6lnyT+MWixpatjfpA/c", - "ZPrN40gjqmavFr4f4DeOdwkK5CqOetlD9l5mcd+Su1zwZGk4SnavrrEQnMreQN14SGZfXOj2oYdKvmaU", - "pJfcyga50YBTX4nw+JYBr0iK1Xr2ose9V3bjlFnKOHnQ0uzQL29fOCljKWSsYUB93J3EIUFLBivMmItv", - "khnzinsh80G7cBXoP238kxc5A7HMn+WoIhB4NLclyxsp/teXdeVzdKzaTMSWDVDIiLXT2e1uONpwP6tb", - "239rA8bwWQ/mBqMNR+lipSf63obXV998inihNkh2zxsGx4d/EGl0cJTj799HoO/fHzsx+I9HzceWvd+/", - "Hy9AHDW5mV9rLFxFI8ZvY3v4vYgYwHzXwiqgyNVHiBgg+y4p88AwwakbakyaHeJuXoo4TH5XPNo0fgre", - "vfsNn3g84B9tRHxiZokbWGcp9B/2ZofMKMlk1fMgzp2S78V6KOG07iBPPJ8BinpQMtA8hyvpdACNuut3", - "xosENGpGnUIujJIZNgUK7flfDp7N4sdbsF2yPPu1ru3Wukgk5ekiGiU8NR/+bmX0xhVsWWW0z8iCcg55", - "dDir2/7udeCIlv4vMXSeJeMD3213oLXLbS2uBrwJpgfKT2jQy3RuJgix2iybVZVlyOciIzhP3dSiZo7d", - "Vs6xFpqR/GYcdllqF7eKueCu4NCM5RiGGfcb45uJpLqngBb2O/f9hcw42H5cWTODHR0koWyJF7OiyyIH", - "PJkrkHSOnwoOrc+xhBqOHHSsIKowj/BNLFghiC4lJ2I2C5YBXDMJ+WZMCqqUHeSBWRasce7Rk4cPHkTN", - "XoidASu1WPTLfF0v5eERvmKfuCZLthXAXsDuhvVjTVH7bGyXcFxPyX+XoHSMp+IDm7mKXlJza9t+klXv", - "0wn5ESsfGSJulLpHc6UvItwsqFkWuaDZGIsbnz0/fkHsrPYb20Le9rOco7WuSf5R98rwAqO+slNP5Zzh", - "42wv5WFWrXRStZ+M1SY0b9QNMlkr5gbteCF2JuSZNaFWDfztJARLZMslZEG3S6vEI3GY/2hN0wXaJhsS", - "UD+vHN6I1bOz2nMTZB9W3Y+QYRu4XS9W24p1TIRegLxgCjAjH1bQLIdY1QZ1tnFfHrG5PFlybillsocw", - "WvU62hftHjgryfqggihkLcTvaZmy/Zj37Ut7il/FczFaTW5bXn9fXM+X2CYvnXMhpVxwlmIrhJgkjaXb", - "hrkpB3SNiPsX1cid0MjhirbWrXKBHRZ7m+16RugQ13X5B0/NplrqsH9qWLuWa3PQynE2yMa+07VziDGu", - "wHWzMkQU8kkhI0FN0USIKoBiTzLCqkw9Fs4fzLNXzv6NRTHOGUdLl0Ob08+syypXDD3TnDBN5gKUW08z", - "m0f9Zr6ZYJXGDNbvJy/EnKWnbI5j2DA6s2wbM9od6thHkLqITfPuU/Ouq51f/dwIB7OTHheFm7S/D3pU", - "kNRr3ovgWNySDyQJkFuNH462hdy2hn7jfWoIDVYYtQYF3sMdwqh6aTdHeW50S0tR+AaxGZXRArqMR8B4", - "wbh3ocYviDR6JeDG4Hnt+U6lkmqrOwziaWdA854ECMxQtj74qw7V7hxgUIJr9HP0b2PdBryHcVQv1BI/", - "5RviD4Wh7kCYeErzKnQ60tQbpSonRGWYXNRq8x1jHIZxJz5lsoGunel71efYjWPfm6ivRuG0zOagE5pl", - "sdJW3+NTgk99khisIS2rJlRVdmCzRnmX2txEqeCqXG6Zy79wxemCvvkRagh79/sdxko70w3+G+vA1L8z", - "Lmh676xcHyGd7VeYv5tlHJN6DU0nis2T4ZjAO+Xq6Kinvhyh198flNJ9uu5nkY3b4nLhHsX423NzcYSF", - "ezvx6fZqqerqYiy4wOe+4FFVEbLJlfAq6/QZw6gH3LzIlrWA9y9GAV/RvCcTPvSV2PvV+g/68uHT3vIN", - "VLvyXJqSrSyot+SRjRVueV+6LsS++GAbHnw4r4Vb61aE9vvufm546myMWM0sej10l3Oi1Ru8rxft51Vf", - "iQTfpwOfh/1AXBTP2JWBhxUTpY++8jHQXiW0v7oSPI2+Hz3rj2YWfGqvRa+P5cz1r7XLdDr5z79aLywB", - "ruXmM/C4dDa93VQmIu1a81T9CqlaHw5qhdi4FYf0sIm1S3GyobeVWdbSoKVO+5kOWT0bIg508PFxPDrJ", - "9rowYy13RnaU2LF7weYLjRX7fwKagXyzoyNB3YUAj1ghFKs7kOZmMFcCdoHDTYYmGxgCZmFHhe5YPgh1", - "BanGtrN1cJ0E2Ke/gpnMO31uOxP0q9NVToZrSLCtC0G31+yOO75TOCko/mX7dE6G19w/rkKobQbYBVV1", - "uZZWzvTgzM3ZDFKsiry1UNU/F8CDIkhjb5dBWGZB3SpW5TFhXe/9rY41QNvqSG2FJ+ivc2Vw+vLYz2Fz", - "R5EGNUQbh1ZJfJcpHIwYsC4wX0O6z5DsosaYqigDseBDgl0p5ro5Rm/N56Ds2iXn8iRpLo66FNuWKeNN", - "zwfNZT7dq+wjpuT01bLq9kzu1z+eYYtq5QLkaFV4ONTSyUm3cc6FK1yMZcUq34kvYQzK/+ZrCNpZcnbu", - 
"+gcgVqyn6oLKzL9xkKJQ9m5icaBn1cysTuDoBjlEWjFgLlSaCyNGJH0JZc2ciSrg8I6ykaF1AR+EawZS", - "Qla5RHKhINHCJ3xsg2MbKmz466WQoHrbH1ngektfv61re2MbOIqlrqmLeg0XSCQsqYFOBhW4++fchuyn", - "9rlPwvdtwHZamCp63d2P1qfuMNVBYkj1M+Juy93J/ZcxNjHOQSbe89Qux82bFdmw7mZWpvaCDg9GZZAb", - "XDtnCyuJ2mnS7ipbOkKQJH8OmyOrBPlGvn4HQ6Ct5GRBDwqOtjb5oOY3FYN7fhDwPm0duUKIPOlxdpx0", - "a4i3Kf6cpeeANQCrEPeeHu3kLtrYK2/2xWLja2YXBXDI7k0IOeY2qcg7tpvtBVuT8zt62/xrnDUrbVl/", - "Z1SbvOPx7AwsuC+vyM38MNt5mALD6q44lR1kR4XqNe8LubnA4vzNLp6ToVp519Xc7iJfE5WFIiaTnFqP", - "1VM86DHDEZZACGp1oCOTEufpIioXsVjey5RpMEPFMRVOhgBp4EOqBVRQuMGjCIj2RY+cQlv6zhW9EzMi", - "oXYiX7b6X7eFe0yjb89czdLkdzMhodGM3XxtK31WiS9YRhP/M2VaUrm5TI2+Tgv5jvWkF8s7w7GqSKx6", - "IXU0VheHeS4uEmRWSdXnIqbamvdU8zL2Tdfq78ypnkIQ10WVE9Q2ZEEzkgopIQ2/iOd7WqiWQkKSCwzz", - "inmgZ9rI3UtM8uIkF3MiilRkYPvFxCmob66Sc4piEwRRNVEUWNrBbGH7TUDHA6c0d6r1IyUoas336J2f", - "gs1cr6s62UUn1pfZE7EMylVxchiyL3fh3dL7P86bZ2yNdAMyduRnRMsSxsS90e6R7Q4+lUCWTCkLSkVL", - "FyzPMXGcrQPPaxW4EEdtj9h7gmGVK4axN80iAlYaLsydV1VWCHnAaVj2iOiFFOV8ERSYruD0Kq8snUIc", - "jvKLKjE8CjPIzBSPyVIo7TRNO1K95Drk7G4quJYiz5tGKSuiz52l/SVdH6epfiHE+ZSm5/dQr+VCVyvN", - "xj6/uh0cWM8kW6XFmhdwYtuZ7y7Va9/DUDlHtIMZZIvF7d3YPQDz/W4OutvmftxdWHtdTWYaV2OOOaFa", - "LFkaP1NfVrRdb4xcjEVFa5bZ3oq2ygS+hoc9vKyq4ApkkV00A6fR5nDHxDEC52RGdmP+ixJ4e1wyA8do", - "ei7KLnNxUlSS9sp6LQAQUpv6rEtpGzKGkljFVcTclkpAF3kb0IG3CkYiXQ02M8LBgdJwJaA60Y8VgHet", - "8WFsa8vZSMqpWPvn9+ric5cC/uN2Km8wj74Qr9OatKQN8vKFano4QrzE9dZ4qDNMe58OjYqqmucOvOED", - "APrjpBowDIqW2heMGWU5ZEms9+JJZaMaB5q2S81qt0RnynHylJa+9aEZu5TgCqdYEV82/V8FNaQkqte7", - "lmSewRpsXsefIIXtaTgO/C+Q25aHLWOAKJIcVtAIH3PVXEoUNdkK/Leq+phkAAV6I9s2slhcVHiXtwwn", - "bu1JEFkzBLtRS4pFrN0pssNMEjXqrHlij4kaepQMRCuWlbSBP7WvyNE0A5qjHEFVR0dIvB45dJpf7Ahv", - "/QDH/vuYKOMx8X4YH9qbBcVRt40B7YyTLFXfqefxMMmwVFHlYMHZssoRa0m85huqoBe83yDZJfla3Rq4", - "T0zwALHP15CiVOP0HcicxtPjpHBVT5DaOUBmtQLzScTavgBOuAhaTF5QVakqdQ1F/4OdGF9i3GnTl3Aq", - "19GMV99ZgoMR1Sqm1qtIyIpOL2+e/yQncetB7B0vRiMKXPrfFvuXp26nduAL2Mqbm/00sj82aXS3mOPi", - "YzIt/UB5Li5sz8hQD30G3g9qqc+7gJxYzqpr2Udtjl15z7apgwXx6ku6IULiP0br/HdJczbbIJ+x4PvP", - "iFpQQ0LO8WojAlwUqJl4u3g19oB5a4vwU9l1s6FjBsNtzCgB0OYi9819BFnScwi3AYMdLP9MtWGcqpyi", - "5cJc2a3t7GLBLd6XaFnSLNT0sVBks426Lx1svv5/6ly4cCpf363Iaeo7hLoWRU0+g12APXHpBSy3J0t2", - "+ZongaqzcE200mfXZ5cwme7JumIZCH3tVxpgdzqudjrPXGkZAy2/rR4bW9JMBy3l0LswNOqmA3TYp3EX", - "+GHbypvBf7SGa98yhoD/ueC9p1FtCK/tSXsDWG5U4IjAaq3VU7FOJMzUrgATa6426rysa3d4EyvjqQSq", - "bMTNyWuneNYlShk3irCNCa18mtUoGcwYr5kl40WpI3oMVirlmwBhodEf0drjQuuTEowwuaL56xVIybK+", - "jTOnw7Z0DFtEeEeH+zZiwqju1O4ATNU6HOZn1mb08DVzgdsmVDZcU2nKMyqz8HXGSQrS3Pvkgm7U5T1K", - "lXNgl0+JBtJMs2pA4F1C0raA5BvnFL6iv6cCkB7Q8TPAYYNxwRFnjTXtaNHjn+nC8EU4bJZ0neRijlmE", - "PQfC1aZFD59VAQVHM7iVz4at28+j2J+wfRosy+8YkRY465Aptp/717iVqEb+wpneevKtjbKd1mnjbu3B", - "9Ejl8zr43xJL9zzGMnFd8ZUwG9cLmz5VxdMeBJsIPf6hpl28ZxcxDMKlcYdG8OHtzpqRFrF8X2sZSNBi", - "oLaE94OqQ9lp6sKzuqa0jqnBImXssqX3tLRZ+7y/l3rAs73p3VlvTluFzJhx9ukRtz0/OilEkaRDYj5t", - "547MuQkcpE0Ye+gjcAL0rLsKj1FVL5tG3aNGU5t92+T1NtXZ5e0q0m1Kf5+ZqIejN10QYoa8zHZuR+sW", - "ZvJUxpRxO8esaQarmAShREJaSjQTX9DN7rZjPRWjT386/vrho98fff0NMS+QjM1B1VXHW2276rhAxtt2", - "n5uNBOwsT8c3wVcfsIjz/kefVFVtijtrltuquqRop2nZPvblyAUQOY6RdlGX2iscpw7t/7y2K7bIg+9Y", - "DAXXv2dS5Hm860MlV0UcKLHdClwoRgMpQCqmtGGETQ8o03VEtFqgeRBr/65sNRnBU/D2Y0cFTPeEXMUW", - "0hdQi/wMc7ud14jAusgdr7Kenm3rcnqatdCh0IhRMVMghSicaM9mJAYRZhDJILPWGT7RIh7EyFbM1kbL", - "xgjRRZ7HSS9smL2d2zebueo4pzebGBEv/KG8BGn2+Sf66xZchpPUpv3Phn9ECjEcjGtUy70OXhHVDy7X", - "lH8QaN2k/Ah5IAA92baNPMkgUSwoRCytlwD9Cd6B3BY/XtaO5Z1pIQiJ/2AHeGH6bP1elcngwPnEFX1f", - 
"VkgJlvK+jxIay9+VketZb3WRBFvkjCZag7JsSXTFwiDdWj2tsph7tJJOsrMUQhOjmeZ5JEna2nHwTIWE", - "Y1QCuaL5zXONH5hU+hjxAdnb/tSoMFM2RLJFpbpcnb4XdNDcQVbs4abmbzAx+59g9ih6z7mhnBO+c5uh", - "cQc71s/9rWBzvckFjmmDrB5+Q6au2UYhIWWq7dy/8MJJlRgKks1cQCus9Y5M1F3r/FXoK5DxzEfikFeB", - "e6vy2TsI6yP6iZlKz8mNUnmM+jpkEcFfjEeFzXl3XBdXbMxwubIvQQG3Pcu+dNsOD12eLW1iLp1SQXed", - "g2/rBm4jF3W9tqE1iwb3d3j37jc9HVJqKN6LwXyOtY4O0pRhr5YM11DlyOLIjeHmjVHMr311b21t157a", - "3K39KFm+M2ClUWn943g0Bw6KKawl/rvrHXOzd6mHwFZe6B5VC+tVysVYxETW2pg8mCqooT6gfLr7LFLz", - "GrMa01IyvcG+wd6Axn6P1mP6sart4WrDVL40d/dpcQ5V7/a6Ekip/O36o6A53kfWxcfNLSTyCXluK3y7", - "g/Ldnel/wFf/eJw9+Orhf0z/8eDrByk8/vrbBw/ot4/pw2+/egiP/vH14wfwcPbNt9NH2aPHj6aPHz3+", - "5utv068eP5w+/ubb/7hj+JAB2QLqS/s/Gf1ncpzPRXL85iQ5M8DWOKEF+xnM3qCuPBPY19IgNcWTCEvK", - "8tET/9P/60/YJBXLenj/68j1ZxottC7Uk6Oji4uLSfjJ0RxT/xMtynRx5OfBboMNeeXNSRWjb+NwcEdr", - "6zFuqiOFY3z29vnpGTl+czKpCWb0ZPRg8mDy0LW25rRgoyejr/AnPD0L3PcjrK95pFzp/KOisMXzP45H", - "R44O3V8LoDkW0TF/LEFLlvpHEmi2cf9XF3Q+BznBDA370+rRkZc4jj646ggfzeRR16CtpR4U0PbBhkU5", - "zVnq65AxZW3ENohehQ1krfG8VGMytS2GfZwuzzAKyRYcUGGb7ZPM4NJ+flLzM98lGV3Hoye/RSpW+eQO", - "37w3jCsLIs7+z+nrV0RI4jSfNzQ9rxJbfCZTnb0VJjKZLyeetP9dgtzUpOeY4nhUd/kHXi4Nf3EZMks1", - "L5rFW2uBK2YQ6uDaz2woJqD5qpZJzdPQ+hdAUnNow3UfJN++//D1Pz6OBgCChXUUYC/HP2ie/2EtaLDG", - "4NlWcM24L+xpXNfGwA/qnRyjsap6Gnxev9Osef4HFxz+6NsGB1h0H2iemxcFh9gevMdug0gseBwfPXjg", - "eZCT8APojtyZCmYZVObfOhCqUTxJXGKgLq+yj95W5S8lLexZPPYhwpuicuHYlyaGJT0+4EKbRTqvvNz2", - "cJ1Ff08zIl2KMi7l4Re7lBNuwz3NnWPvxo/j0ddf8N6ccMNzaE7wzaCVb/ei+YWfc3HB/ZtGLiqXSyo3", - "KPXoihe2e8/QuUK/KbJIe7aDCmt8Pnr/sffWOwrjGo8+NMojZVe6E60jpdG5acc1eUf1cU4cyyaeuR/u", - "HhcFhnWeVs+Pi8J2BsdQAWB4+8GaKa3uTciP4dcN/4eFxLo/GnH/vlG2b7/dcIcHLTejl3aj8MDt/f1p", - "7+/jph2EZcA1mzGUyWPANE7BVpg6AUlXvUC7eUBBGaR9Y56rEthOtEhce7WBY7hG+4frHTig+omd6X1M", - "S9zJqG9x14O7PjEpgLeSmOrGhTfDmn013eomaVwZ18i4v3Ch7yXNDZ0Ey211rTl5disM/q2Ewarq5txK", - "Z0VxAPHQJ2fseuXog6skeQipEdXjQfJiqHkH3wbx9XdbHOfehBy337kcW3GVOHdKgua9Wxnwc5ABbSnT", - "XdKfo+NPKveFqV37ZFo1BBbz+6CPv3BB72+MrF7JzkC6W6a7BPvsyGuOWV8bW/1LymkOabcS2t9aQqvq", - "Y19JRgvDW49cpYFAYruSga9twGO6ksSaNdIDzoYlRTDn3h7hcR3Kb1iMjVF20clq7JVHdMZavdJu1rij", - "WnZFrB8h1GG/35w82yVdfUGmoMGtjiO3QHxvrpuXRj0Tb2/GMzGMNz1+8PjmIAh34ZXQ5Ae8xa+ZQ14r", - "S4uT1b4sbBtHOpqK9S6uxFtsqSpCZw5tg0dVtUbHwXPzto3xuItZvc3mWPcm5Hv3al3pw2Wtz4VhVD4b", - "jMq5/cjwOoMMcsf/+QTHvzMhP2COo1ZjDFXD5Al8kXH95OGjrx67VyS9sJFg7fem3zx+cvzdd+61QjKu", - "MWTA6jmd15WWTxaQ58J94O6I7rjmwZP//K//nkwmd3ayVbH+fvPKdtP9XHjrOFbVsCKAvt36wjcppq27", - "Lsc7UXcjHv7vxTp6C4j17S30yW4hg/2/xO0zbZKRU0QrY2ej384BbyN7TPa5j8bu/sFEjeoymZBXwrU+", - "K3MqbQ0YLJOryLykknINkE08pWKWnbLF6tKcYXkASRTIFchEsaocdSmhKlRSSFhhhH1dyLUBwW5Gj3G4", - "ny2Tf0nXQWr8tLqmtXBLRrPnkq4J9vLQRIEe2yppa/Ldd+TBuNZe8twMkFSIiTHXJV2PbtDqVxHb0NI/", - "zxx2hNwd3otjD7Eg1dJPVUOyVjX+7pz7i5XcLbm7jT0Q59zb8VM7dkI7gmswttWCYAU7jRWPVVkU+aau", - "dWukPC9CxVmcmWGoceAz9hHsNE1HldA2em8P8a0R4EqspE1Qe7INzFlVRx9QLw95RufcYs7d38tdGviO", - "pFh655EgM9DpwqX7tlAfYU/SpRz286Yl42xpoHwwvnapBnexW+M47O+cUZtkP6SFWJCJiQ48kBEifo3/", - "oTnWzWMzW77dN/XwFQvRNeUqYFdNVa3ybdssu5B/nxVc0EaT2N1QPq0n7wpkiJZD+D9vEbwfgjvM8bmr", - "aGCPl1vEXyEpwKuSCXkl6qRzq0H9JV2P13mzX/eCXgkO1sduJF9Li7fu1ErsMIzDIsVXG7H6S91C67Ii", - "yJGv0rNVDvnJvLRDFhlye2PFny/xCv8pWsuoccuYtU12llKoRxvCnM2LtudBWOxk8im1mE/CTz9D1eZT", - "cKybYTF4SD2fcWIBPyzTwQI+lpiPCl9tqY8DvTAvB3KZrWk0mBtpUYWhQaRyEJlCLvhcfZ6saBt1xPES", - "oRJbp8q2Tumsf/I3PLtPXV8T37DfVYtSjKdAlFgCqgxGRndFpy2E/7g5CDVb+u7cPExv/cTc5esHX93c", - "9KcgVywFcgbLQkgqWb4hv/Cqf8lVuJ0i1O15aA2OMAfG0dvUrCqWhiWQrsAEXXf8uNXY2a3ruojKylWi", - 
"1CBtRbxWmyrWYdIxezAyjBdm6gPIc7mYf2ninMf60ELOT2meI7p2OZlw4EFRynlu9xOWTOu6LUR4u5Ln", - "NF1UezuurXtV8z5fP3zcqjiJI7tObjbVX4HZZw0kWE1grQBp25Br7M20pBiwvCxzzYq8+U3V3RK7/UTC", - "kCxtho0CTp751VnnrJjVQ7fp11cbd4NPzNzuEc7MhV0clYC8u7KttBpITRpA275XPvw66Fbkei65YoZM", - "tqpL1rEzRQFU1h9byr9bSEjcEJKuQCqKh7W1qHu3ovrnIaqvXTnjz0RQj/oor8rrL38VNaKoP+g1yz7u", - "lsuDisB7iuSMByJ5yC7sWbu8LL47iqLdPfzkWZioIqqaWV5A6AHFoGjPXK3/NRroAsEiLWLm9LCSW0B9", - "GUsnsbosEjEbV3GaRiEVsyfkHb9P1IL6Ksvuz0dff9PjxDHzuOpzXTdOPZB5bIcZ4sv5oj1Th5U4Kvw+", - "uend3m8TxyOWrSOl5HkG66B7SbO7sbsP7yhS0I3P6OhUUyziFZUrxTQcdgnmmlILVtx81V6l2TRettxb", - "4qou+Sf8+8oga0vLGqmh+BTVWscjLQEyKPRiZxFnfKveTXDlnJlyjXdsqd0xYROY2Eq0dYO0bA7uYqIk", - "BzqrOp0JMSSPL+AzhtA8VQRYDxcyRJKO0g/KvEiUN28nrfPd7EXnkdcWij+pEKY/lRCWtKSwJlo+nUyG", - "LRvGQeRVIYUWqchtGGVZFELq6nSrySDLA/QJeg3DQx/hXkmYW7NM7XTpnOFbB7ABNClbfTEunTOPpphP", - "J7aoS5aWrecawtLOREE63fkNCJ+Ur90qlTF+1nL/fOneH91Legd2BqVUp4uyOPqA/8HSuh/rnF1sOqKO", - "9JofYZvJow9bo2uRpeZGNpG2X0nDpNtpWhmNkX2Bn9e9UX4Qst0QfGf0bAtp4/alb1tmYhhuhD1ejzb5", - "t1bCtrrOWht+9WiQyIid81qVpAga7VW0G3Tc8VUmbJvNCAnfRi99Xguq/YkzxjNCg21s2Zqq1vxeB/jH", - "F7voT+GivPmQra+/4HP2SmhysixyWALXkF0t8J20OZy/PbZet/sJBu7q70bHd+/88Mb3OT2VLLLzgt9D", - "7wmqGIGfjkosK2Tu6utRd25v8s/7Jn9aeVtDMry9l7+ce1n6TKTbK/jzv4K/+mJXc40xTAOv5Es4h5vX", - "cK2J73khd4QBZ8NqGQ62+ZVR9W6vUv0gpO8rd3uLf6FOUbuTgwOxhlhodlli3ZSHyDr7rKAfZmfI84il", - "oe+gjqtYL4b1GkXKsDvPSabGLqjMGifcKb4VfD5rwSfY61u559b08IWZHnqkHKf15/kQQWNfAWi1FBl4", - "x6qYzVx95D7pp9n00ZCn0nRZEPvlpDcO+4wt4dS8+dpOcdArtga7JRa1wDPIUpAKnqkBURxu1MveQ+ho", - "6gfgxj2b1Q54WFzlpMmlSfZtUH6xQwmkjXyFzTp9nWiHjAxWxBDg5ABke/TB/ovmtEKoyGpOPQF3Nuau", - "2xZb+NqO2wCQvEEh1FbQ9l+JGXlg61+XHJPc667clGdEy40RVH25Pwk0J2kjubWCo3tyTntPzk5VoLO6", - "njXFdQFRn9BDRjC0Cgv8fOMH4CnljuS7CNKCUMJhTjVbgXf5T26LUV36NnOloLYwwDGhWWZPY70JsAK5", - "IaqcKiPr8GaO0h3VPC97MAxYFyCZuaJpXjvgrZpwZCtNbYsjOrVvXPHSavEiW99KNqMW/c3qql+JGXnJ", - "UimO87moYuHVRmlYdnpeu09/7+lX4A0J3ZhVwXPGIVkKHuvE/BqfvsSHsa+xWlffx2fmYd+3rfu2CX8L", - "rOY8Q+7kq+L3Mzn9Vwp0aa1WQiGk0W6nG5t/gfS/51Hyh2bD0+5J2vA0cGq5h8FAYXPmxs9HPh2h0ao5", - "+uaHxp+uIp17Uy1KnYmLYBa0AdhwxiHFqFD43jPJo7a5NbMnmbpeq9t1epsCPMTOVvU00oK3ftjfhfdv", - "moTtnDMhkbicxhVI1VLkbjOx/1KZ2IP3fS9ubFvO7+JopTqs7PJKZGDHrdNxzdGPNUHhIgPXGb8rslRh", - "kfGUIX9/1e+1kjhSWs4XmpQF0SKWLlJ/mNDUMtnEKkLxCYOyw1ZdwukWdAWE5hJoZpRX4ERMzaLrmxQX", - "SRUWfvY5Jy74Myo0BXAVUqSgFGSJb/qyCzT/ng1V11vwhIAjwNUsRAkyo/LKwJ6vdsJ5DpsElWFF7v78", - "q1GtbxxeKzRuR6wtNxtBbzvtugv1sOm3EVx78pDsbEK3pVpMkRPLIgeXJBdB4V446d2/NkSdXbw6WjCL", - "jF0zxftJrkZAFajXTO9XhbYsEnN/d0F8ap+esSVKYpxy4S2QscFyqnSyiy2bl8K1KLOCgBPGODEO3KOa", - "vqBKv3X50hmWebTXCc5jZWwzRT/A5ha1ukVk5F/tw9jYqbkPuSoVcSP4HCjIYmvgsN4y1ytYV3Nh7RQ/", - "dpVkZW2Bu0buw1IwvkNW0PmGUB34/c1wkcWhpZI6U0YXlQ0gakRsA+TUvxVgN3T49wDCVI1oSzhYyT+k", - "nKkQOVBuc1VFURhuoZOSV9/1oenUvn2sf6nf7RKXrYVh7+1MgAoT4BzkFxazCk25C6qIg4Ms6bnLkZu7", - "TqZdmM1hTLDMUrKN8tG4a94Kj8DOQ1oWc0kzSDLIacTo8ot9TOzjbQPgjnvyTFZCQzLFGinxTa8pWfYa", - "k6qhBY6nYsIjwSckNUfQKM81gbivd4ycAY4dY06Oju5UQ+Fc0S3y4+Gy7Vb3GLDMGGbHHT0gyI6jDwG4", - "Bw/V0JdHBX6c1OaD9hT/BcpNUMkR+0+yAdW3hHr8vRbQNvyFF1jjpmix9xYHjrLNXja2g4/0HdmYqfGL", - "dAu0o5yuMcmuaWoNFMDJZZTbowvKdDIT0grSCZ1pkDtD5/9JmXec+/Rd4aquEBzB3ZtuHGTyYT85x0Us", - "CMRdF4ZEXCUpc4dR8pAsGS+1fSJKPbblryXQdGGE9tAGa0fCjsCuSJOEOZVZjt1iZ9W9KaQt+qRbFzwC", - "HclHbGr8Zt0/CDmoqH6zdCRlmpRcszxoLFTp7Z+f9fLWInFrkbi1SNxaJG4tErcWiVuLxK1F4tYicWuR", - "uLVI3Fok/r4WiU9VJinxEoev2MgFT9rBlLexlH+pqvLVVeUNJGiduKBMuzb5vkpBv91iD0OQBpojDlgO", - "/dHdNuj07PnxC6JEKVMgqYGQcVLk1KgGsNZV0+YpVfDNY59qaK9OurSd3/F+NS989Yic/nTsK44uXGXM", - 
"5rt3j228GlF6k8M91xYNeGYlUd8fDbhBumuPRv2V4Js7u1bXLMfIeEWe49vPYAW5KEDaYoZEyxK6Fp8z", - "oPlTh5sdBp9/msldqO0fZrQ/xg2jl0PbkhZezPdrpYpQm3FJngU5mH/MaK7gj740TDvekhax/srVxWdN", - "QchMvhfZpnVCzK4d4QY2z0Zdd5RxKjeRKlHdFIg2aWhh2JUjrK4t6+PBq+N2ibZLZrsoLCat2zL48dH7", - "qDxaFrbasM5QNlF31qKTUSzHtF0LdVQBOKgwIKZJ2D0hb+13n7YMIELkjljNzD+bKMbmmxXTwHeNEuFY", - "z5eaS+ARHz29ePbHhrCzMgXCtCK+wO7u62U8WidmpDnwxDGgZCqyTdJgX6PGLZQxRZWC5XT3TRTyTzxx", - "1eVjnmy/pz7NNfIsWNw2nhwSzTpxDLiHO280DObNFbZwRMeeA4xfN4vuY6MhCMTxp5hRqcX79mV69TSb", - "W8Z3y/iC09iSCBh3BcnbTGRyjYxPbmTJ+3ne8zWkpQEuPMl30TqPLjlY64aTNYNpOZ8bbaHro8M2Ojge", - "E/wTsUK73KFccD8KsoNX3fKvmqTeHq7LXYK88bu+MuM93A7KN+jMWBaUb7zLFxLFlmVucWibSh+W0dqa", - "4bES07Xtr8+q/cab/ALbrbtqm79btJALqojdX8hIyTOX8dSpbb3mw+uc2KHP1rxm01trmtj1Rlbn5h1y", - "RfhdbqaaK1KATPSa2wPVOEyug4E9uZ+0lvbttXFz14ZNVIceBtutxl8zhAPdHjLga3h9BD2X6sS8Ricm", - "2kwnbDxDi0Z/ikvYnMm+edDAks7wzfiS2tzi/KeQF4SSNGfoXRVcaVmm+h2n6L8JFjbpxp54Q3U/73vq", - "X4m7ECMePjfUO04xyKjy6kR54AwiLowfADyLVeV8Dsrw0ZCAZgDvuHuLcVJyo4WJGVmyVIrEptaa82Vk", - "l4l9c0k3ZIYVTQT5E6QgU3PrB7tubclKszx3wS5mGiJm7zjVJAeqNHnJDAc2w/lyClXIGegLIc8rLMR7", - "9cyBg2IqiRtmfrRPsR2OW743AKIx0z6u21jcbB8cDzvLeiE/eYYxaliNOWcq7L/Yhv3GfONLxpMokZ0t", - "gLhwsTZtkbtYA84R0L2m40gv4B03t58WBDk+1Zcjh7YHqHMW7eloUU1jI1qOIr/WQerfQbgMiTCZW7fL", - "XyiFNKAD79nEjbf19Vt7v6eLpXHlArYG7buQ7VPXPrHnJadANIxkrQI37o2zBshb/RdfflnJw+uSHo0H", - "0ya7A3bZVbNBHuLNb/iY0Fzwua2raLRLgfvEeFFqDAC/TgMerGieiBVIyTJQA1fKBH++ovnr6rOP4xGs", - "IU20pCkk1qIwFGtn5htLp9hokDPNaJ6gVj0UIDixX53aj3bcx0G30eUSMkY15BtSSEghs4XImCK1Pj+x", - "BRpIuqB8jle3FOV8YV+z41yAhKoxo1Gh20PEC8GseWKL0nVhPHaNmsO6vUDTRaRxDF5wRmf3BJU1elIN", - "3INGydE+JX086hW0DVJXdeicRU6TzQyQIhryQICfeuJD1Gi9Jfpbov/SiT5WUhFRN2tZKyy+wm25ZrPW", - "dRcQvUEr2SepLnxbov+vXqLfcyBFKJG0oYPEe8NRRZgmF1gWaQrE3F8lWuddwz2nr2OmXXDUXaVN5drz", - "pQvKuKupU+U1IBxGJV4umda+Pe21GDYtM0OLpkEHpKVkeoNaCy3Y7+dg/v/eiP0K5MorNKXMR09GC62L", - "J0dHuUhpvhBKH40+jsNnqvXwfQX/B6+LFJKtjH71EcEWks0ZN3fuBZ3PQdYmxNGjyYPRx/8bAAD//1zt", - "z0/tvgEA", + "H4sIAAAAAAAC/+z9e3fbNrMojH8VLJ+zVi5HlJM07X6a3+o6PzfpxadpmhW7fc7eTd8WIkcStimADwDa", + "Uvvmu78LgwtBEpQoW3GSp/4rsUgCg8FgMPf56ygXq0pw4FodPfvrqKKSrkCDxL9onoua64wV5q8CVC5Z", + "pZngR8/8M6K0ZHxxNDli5teK6uXR5IjTFTTvmO8nRxL+VTMJxdEzLWuYHKl8CStqBtabyrwdRlpnC5G5", + "IU7sEKcvjt5teUCLQoJSfSh/4uWGMJ6XdQFES8oVzc0jRa6YXhK9ZIq4jwnjRHAgYk70svUymTMoCzX1", + "i/xXDXITrdJNPrykdw2ImRQl9OF8LlYzxsFDBQGosCFEC1LAHF9aUk3MDAZW/6IWRAGV+ZLMhdwBqgUi", + "hhd4vTp69uuRAl6AxN3KgV3if+cS4E/INJUL0Ee/TVKLm2uQmWarxNJOHfYlqLrUiuC7uMYFuwROzFdT", + "8mOtNJkBoZy8+fY5+eyzz740C1lRraFwRDa4qmb2eE3286NnRwXV4B/3aY2WCyEpL7Lw/ptvn+P8Z26B", + "Y9+iSkH6sJyYJ+T0xdAC/IcJEmJcwwL3oUX95ovEoWh+nsFcSBi5J/blg25KPP8H3ZWc6nxZCcZ1Yl8I", + "PiX2cZKHRZ9v42EBgNb7lcGUNIP++ij78re/Hk8eP3r3P349yf7L/fn5Z+9GLv95GHcHBpIv5rWUwPNN", + "tpBA8bQsKe/j442jB7UUdVmQJb3EzacrZPXuW2K+tazzkpa1oROWS3FSLoQi1JFRAXNal5r4iUnNS8Om", + "zGiO2glTpJLikhVQTAz3vVqyfElyquwQ+B65YmVpaLBWUAzRWnp1Ww7TuxglBq5r4QMX9PEio1nXDkzA", + "GrlBlpdCQabFjuvJ3ziUFyS+UJq7Su13WZHzJRCc3Dywly3ijhuaLssN0bivBaGKUOKvpglhc7IRNbnC", + "zSnZBX7vVmOwtiIGabg5rXvUHN4h9PWQkUDeTIgSKEfk+XPXRxmfs0UtQZGrJeilu/MkqEpwBUTM/hty", + "bbb9/5z99IoISX4EpegCXtP8ggDPRQHFlJzOCRc6Ig1HS4hD8+XQOhxcqUv+v5UwNLFSi4rmF+kbvWQr", + "lljVj3TNVvWK8Ho1A2m21F8hWhAJupZ8CCA74g5SXNF1f9JzWfMc97+ZtiXLGWpjqirpBhG2ouuvHk0c", + "OIrQsiQV8ILxBdFrPijHmbl3g5dJUfNihJijzZ5GF6uqIGdzBgUJo2yBxE2zCx7G94OnEb4icPwgg+CE", + "WXaAw2GdoBlzus0TUtEFRCQzJT875oZPtbgAHgidzDb4qJJwyUStwkcDMOLU2yVwLjRklYQ5S9DYmUOH", + "YTD2HceBV04GygXXlHEoDHNGoIUGy6wGYYom3K7v9G/xGVXwxdOhO755OnL356K761t3fNRu40uZPZKJ", + 
"q9M8dQc2LVm1vh+hH8ZzK7bI7M+9jWSLc3PbzFmJN9F/m/3zaKgVMoEWIvzdpNiCU11LePaWPzR/kYyc", + "acoLKgvzy8r+9GNdanbGFuan0v70UixYfsYWA8gMsCYVLvxsZf8x46XZsV4n9YqXQlzUVbygvKW4zjbk", + "9MXQJtsx9yXMk6DtxorH+dorI/t+oddhIweAHMRdRc2LF7CRYKCl+Rz/Wc+Rnuhc/mn+qarSfK2reQq1", + "ho7dlYzmA2dWOKmqkuXUIPGNe2yeGiYAVpGgzRvHeKE++ysCsZKiAqmZHZRWVVaKnJaZ0lTjSP9Twvzo", + "2dH/OG7sL8f2c3UcTf7SfHWGHxmR1YpBGa2qPcZ4bUQftYVZGAaNj5BNWLaHQhPjdhMNKTHDgku4pFxP", + "G5WlxQ/CAf7VzdTg20o7Ft8dFWwQ4cS+OANlJWD74j1FItQTRCtBtKJAuijFLPxw/6SqGgzi85OqsvhA", + "6REYCmawZkqrB7h82pykeJ7TF1PyXTw2iuKClxtzOVhRw9wNc3druVss2JbcGpoR7ymC2ynk1GyNR4MR", + "8w9BcahWLEVppJ6dtGJe/t69G5OZ+X3Ux58GicW4HSYuVLQc5qyOg79Eys39DuX0CceZe6bkpPvt9cjG", + "jLKFYNRpg8VDEw/+wjSs1E5KiCCKqMltD5WSbo6ckJihsNcnk58VWAqp6IJxhHZi1CdOVvTC7odAvBtC", + "ABX0IktLVoIMJlQnczrUT3t2lk+AWlMb6yVRI6mWTGnUq/FlsoQSBWfKPUHHpHItyhix4VsWEWC+krSy", + "tOyeWLGLcdTn7UsW1htevCPvxCTMEbuPNhqhujZb3sk6k5Ag1+jA8HUp8ovvqVoe4ITP/Fh92sdpyBJo", + "AZIsqVomDk6HtpvRxtC3eRFplsyiqaZhiS/FQh1giaXYh3VV1XNalmbqPsvqrBYHHnWQy5KYlwmsGBrM", + "neJoLexW/yLf0HxpxAKS07KcNKYiUWUlXEJplHbGOcgJ0Uuqm8OPI3u9Bs+RAsPsNJBoNc7MhCY2GWwR", + "EsiK4g20MtpMVba/CRxU0RV0pCC8EUWNVoRI0Th94VcHl8CRJ4WhEfywRrTWxINPzdzuEc7MhV2ctQBq", + "774L+Av8ogW0ebu5T3kzhZCFtVlr8xuTJBfSDmFveDe5+Q9Q2XxsqfN+JSFzQ0h6CVLR0qyus6gHgXwP", + "dTp3nMyCahqdTEeFaQXMcg78DsU7kAkrzU/4H1oS89hIMYaSGuphKIyIyJ1a2IvZoMrOZF5Ae6sgK2vK", + "JBXNL/aC8nkzeZrNjDp531jrqdtCt4iwQ+drVqhDbRMONrRX7RNibVeeHfVkka1MJ5prDALORUUs++iA", + "YDkFjmYRItYHv9a+FusUTF+Lde9KE2s4yE6YcUYz+6/F+oWDTMjdmMexxyDdLJDTFSi83XjMOM0sjV/u", + "ZCbk9aSJzgXDSeNtJNSMGglTkw6S8NW6ytzZTHgs7AudgZoAj+1CQHf4FMZaWDjT9D1gQZlRD4GF9kCH", + "xoJYVayEA5D+MinEzaiCz56Qs+9PPn/85Pcnn39hSLKSYiHpisw2GhS578xyROlNCQ+S2hFKF+nRv3jq", + "fVTtcVPjKFHLHFa06g9lfV9W+7WvEfNeH2ttNOOqA4CjOCKYq82inVi3rgHtBczqxRlobTTd11LMD84N", + "ezOkoMOXXlfSCBaq7Sd00tJxYV45hrWW9LjCN4EXNs7ArIMpowOuZgchqqGNL5pZCuIwWsDOQ7HvNjXT", + "bOKtkhtZH8K8AVIKmbyCKym0yEWZGTmPiYSB4rV7g7g3/HZV3d8ttOSKKmLmRu9lzYsBO4Re8/H3lx36", + "fM0b3Gy9wex6E6tz847ZlzbyGy2kApnpNSdInS3zyFyKFaGkwA9R1vgOtJW/2ArONF1VP83nh7F2Chwo", + "YcdhK1BmJmLfMNKPglxwG8y3w2TjRh2Dni5ivJdJDwPgMHK24Tm6yg5xbIetWSvG0W+vNjyPTFsGxhKK", + "RYssb27CGkKHneqeSoBj0PESH6Ot/gWUmn4r5Hkjvn4nRV0dnD135xy7HOoW47wBhfnWm4EZX5TtANKF", + "gX2aWuMHWdDzYESwa0DokSJfssVSR/riaynew52YnCUFKD6wxqLSfNM3Gb0ShWEmulYHECWbwRoOZ+g2", + "5mt0JmpNKOGiANz8WqWFzIGQQ4x1whAtHcutaJ9giszAUFdOa7PauiIYgNS7L5oPM5rbE5ohatRA+EWI", + "m7Fv2elsOFspgRYbMgPgRMxcjIOLvsBFUoye0l5McyJugl+04KqkyEEpKDJnit4Jmn/PXh16C54QcAQ4", + "zEKUIHMqbwzsxeVOOC9gk2GsnyL3f/hFPfgA8GqhabkDsfhOCr1de1of6nHTbyO47uQx2VlLnaVaI94a", + "BlGChiEU7oWTwf3rQtTbxZuj5RIkhpS8V4r3k9yMgAKo75nebwptXQ1EsDs13Uh4ZsM45cILVqnBSqp0", + "tostm5datgSzgogTpjgxDjwgeL2kStswKMYLtGna6wTnsUKYmWIY4EE1xIz8i9dA+mPn5h7kqlZBHVF1", + "VQmpoUitAT2yg3O9gnWYS8yjsYPOowWpFewaeQhL0fgOWU4Dxj+oDv5X59HtLw596uae3yRR2QKiQcQ2", + "QM78WxF24yjeAUCYahBtCYepDuWE0OHJkdKiqgy30FnNw3dDaDqzb5/on5t3+8RlnRz23i4EKHSguPcd", + "5FcWszZ+e0kVcXB4Fzuac2y8Vh9mcxgzxXgO2TbKRxXPvBUfgZ2HtK4WkhaQFVDSTSI4wD4m9vG2AXDH", + "G3VXaMhsIG560xtK9nGPW4YWOJ5KCY8En5DcHEGjCjQE4r7eMXIBOHaKOTk6uheGwrmSW+THw2XbrU6M", + "iLfhpdBmxx09IMiOo48BeAAPYejrowI/zhrdszvFf4JyEwQ5Yv9JNqCGltCMv9cCBmzBLscpOi8d9t7h", + "wEm2OcjGdvCRoSM7YJh+TaVmOatQ1/kBNgdX/boTJB3npABNWQkFiR5YNbCKvyc2hLQ75vVUwVG2tz74", + "PeNbYjk+TKcN/AVsUOd+bXMTIlPHIXTZxKjmfqKcIKA+4tmI4PErsKa5LjdGUNNL2JArkEBUPbMhDH1/", + "ihZVFg+Q9M9smdF5Z5O+0a3u4jMcKlpeKtbM6gTb4TvvKAYtdDhdoBKiHGEh6yEjCcGo2BFSCbPrzKU/", + "+QQYT0ktIB3TRtd8uP7vqRaacQXkP0VNcspR5ao1BJlGSBQUUIA0MxgRLMzpghMbDEEJK7CaJD55+LC7", + 
"8IcP3Z4zReZw5XMGzYtddDx8iHac10Lp1uE6gD3UHLfTxPWBjitz8TktpMtTdkc8uZHH7OTrzuDB22XO", + "lFKOcM3yb8wAOidzPWbtMY2Mi/bCcUf5ctrxQb11476fsVVdUn0IrxVc0jITlyAlK2AnJ3cTM8G/uaTl", + "T+EzzIeE3NBoDlmOWXwjx4Jz841N/DPjMM7MAbZB/2MBglP71Zn9aIeK2USqstUKCkY1lBtSScjB5rsZ", + "yVGFpU6JjYTPl5QvUGGQol644FY7DjL8WlnTjKx5b4ikUKXXPEMjd+oCcGFqPuXRiFNAjUrXtZBbBeaK", + "hvlcluuYmznag67HIOkkmxwNarwGqZeNxmuR087bHHEZtOS9CD/NxCNdKYg6I/v08RVvizlMZnPfj8m+", + "GToFZX/iKOK3eTgU9GvU7XJzAKHHDkQkVBIUXlGxmUrZp2Ie52j7UMGN0rDqW/Ltp78PHL83g/qi4CXj", + "kK0Eh02yLAnj8CM+TB4nvCYHPkaBZejbrg7Sgr8DVnueMdR4U/zibndPaNdjpb4V8lAuUTvgaPF+hAdy", + "p7vdTXldPykty4Rr0WVwdhmAmoRgXSYJVUrkDGW200JNXFSw9Ua6dM82+l+HvJQDnL3uuB0fWlwcAG3E", + "UFaEkrxkaEEWXGlZ5/otp2ijipaaCOLyyviw1fK5fyVtJk1YMd1QbznFAL5guUoGbMwhYab5FsAbL1W9", + "WIDSHV1nDvCWu7cYJzVnGudameOS2fNSgcRIqql9c0U3ZG5oQgvyJ0hBZrVuS/+YoKw0K0vn0DPTEDF/", + "y6kmJVClyY+Mn69xOO/090eWg74S8iJgIX27L4CDYipLB5t9Z59iXL9b/tLF+GO4u33sg06biglHZpmt", + "Iin/z/3//ezXk+y/aPbno+zL/3X8219P3z142Pvxybuvvvp/2z999u6rB//7f6Z2ysOeSp91kJ++cJrx", + "6QtUf6JQ/S7st2b/XzGeJYksjubo0Ba5j6UiHAE9aBvH9BLecr3mhpAuackKw1uuQw7dG6Z3Fu3p6FBN", + "ayM6xjC/1j2VihtwGZJgMh3WeG0pqh+fmU5UR6ekyz3H8zKvud1KL33bPEwfXybmk1CMwNYpe0YwU31J", + "fZCn+/PJ518cTZoM8/D8aHLknv6WoGRWrFN1BApYp3TFOEniniIV3SjQae6BsCdD6WxsRzzsClYzkGrJ", + "qtvnFEqzWZrD+ZQlZ3Na81NuA/zN+UEX58Z5TsT89uHWEqCASi9T9Ytaghq+1ewmQCfspJLiEviEsClM", + "uzafwuiLLqivBDr3galSiDHaUDgHltA8VURYjxcyyrCSop9OeoO7/NXB1SE3cAqu7pypiN57331zTo4d", + "w1T3bEkLO3RUhCChSrvkyVZAkuFmcU7ZW/6Wv4A5Wh8Ef/aWF1TT4xlVLFfHtQL5NS0pz2G6EOSZz8d8", + "QTV9y3uS1mBhxShpmlT1rGQ5uYgVkoY8bbGs/ghv3/5Ky4V4+/a3XmxGX31wUyX5i50gM4KwqHXmSv1k", + "Eq6oTPm+VCj1giPbWl7bZrVCtqitgdSXEnLjp3kerSrVLfnQX35VlWb5ERkqV9DAbBlRWoR8NCOguJRe", + "s7+vhLsYJL3ydpVagSJ/rGj1K+P6N5K9rR89+gwz+5oaCH+4K9/Q5KaC0daVwZIUXaMKLtyqlRirnlV0", + "kXKxvX37qwZa4e6jvLxCG0dZEvyslXXoEwxwqGYBIcV5cAMsHHsnB+PizuxXvqxjegn4CLewnYB9o/2K", + "8uevvV07cvBprZeZOdvJVSlD4n5nQrW3hRGyfDSGYgvUVl1hvBmQfAn5hatYBqtKbyatz33AjxM0Petg", + "ytaysxmGWE0JHRQzIHVVUCeKU77plrVRNqMCB30DF7A5F00xpn3q2LTLqqihg4qUGkmXhljjY+vG6G6+", + "iyrziaauOgkmb3qyeBbown8zfJCtyHuAQ5wiilbZjyFEUJlAhCX+ARRcY6FmvBuRfmp5jOfANbuEDEq2", + "YLNUGd5/9v1hHlZDla7yoItCDgMqwubEqPIze7E69V5SvgBzPZsrVSha2qqqyaAN1IeWQKWeAdVb7fw8", + "LkjhoUOV8gozr9HCNzFLgLXZb6bRYsfhymgVaCiy77jo5elw/JkFHIprwuM/bzSF6aCu61CXqDjob+WA", + "3aDWutC8mM4QLvt8BViyVFyZfTFQCFdt0xZ1ie6XWtEFDOgusfduZD2MlscPB9klkSRlEDHviho9SSAJ", + "sn05M2tOnmEwT8whRjWzE5DpZ7IOYuczwiLaDmGzEgXYELlq957KlhfVVgUeAi3NWkDyRhT0YLQxEh/H", + "JVX+OGK9VM9lR0ln77Hsy7bSdKdRLGFUFDUUnvO3YZeD9vR+V6DOV6XzpehipX9EWTmje2H6Qmo7BEfR", + "tIASFnbh9mVPKE3BpGaDDBw/zefIW7JUWGJkoI4EADcHGM3lISHWN0JGj5Ai4whsDHzAgckrEZ9NvtgH", + "SO4KPlE/Nl4R0d+QTuyzgfpGGBWVuVzZgL8x9xzAlaJoJItORDUOQxifEMPmLmlp2JzTxZtBehXSUKHo", + "1ENzoTcPhhSNLa4pe+XvtSYrJFxnNbE064FOi9pbIJ6JdWYzlJO6yGw9M/SezF3AfOnUwbS16O4pMhNr", + "DOfCq8XGyu+AZRgOD0Zke1kzhfSK3w3JWRaYbdNul3NTVKiQZJyhNZDLkKA3ZuoB2XKIXO5H5eWuBUDH", + "DNX0anBmiZ3mg7Z40r/Mm1tt0pRN9WlhqeM/dISSuzSAv759rF0Q7vum8N9wcTF/om6lEl7fsnSTCoX2", + "48pWHdynQGGXHFpAbMHq664cmERrO9arjdcIaylWYphv3ynZR5uCElAJzlqiaXaRihQwujzgPX7mP4uM", + "dbh7lG8eRAGEEhZMaWicRj4u6EOY4ymWTxZiPrw6Xcm5Wd8bIcLlb93m+GFrmbe+AozAnzOpdIYet+QS", + "zEvfKjQifWteTUug7RBF22yAFWmOi9NewCYrWFmn6dXN+8MLM+2rcNGoeoa3GOM2QGuGzTGSgctbprax", + "7VsX/NIu+CU92HrHnQbzqplYGnJpz/GJnIsOA9vGDhIEmCKO/q4NonQLg4wSzvvcMZJGo5iW6TZvQ+8w", + "FX7snVFqPu196Oa3IyXXEpUBTGcIisUCCl/ezPvDeFRErhR8EXVxqqptNfOmxJauw8pzW4rWuTB8GArC", + "j8T9jPEC1mnoY60AIW8y67DgHk6yAG7LlaTNQknUxCH++EZkq7tlX2g3ASAZBH3ecWY30cl2l8J24gaU", + 
"QAunkyjw69t+LPsb4lA3GQqfblU+3X6EcECkKaajxib9MgQDDJhWFSvWHceTHXXQCEb3si4PSFvIWtxg", + "OzDQDoJOElyrlLYLtXYG9mPUeY+NVmZjr11gsaFvmrsE/KKW6MFoRTb367YHXW3k2n/45UwLSRfgvFCZ", + "BelGQ+By9kFDVBVdEc1sOEnB5nOIvS/qOp6DFnA9G3sxgnQTRJZ20dSM6y+epshoB/U0MO5GWZpiErQw", + "5JM/73u5vEwfmZLClRBtzTVcVcl0/R9gk/1Cy9ooGUyqJjzXuZ3al+8eu365+gE2OPLOqFcD2I5dQcvT", + "G0AaTFn6wyMVFbC+p1ol/lG9bG3hHjt1kt6lA22Na8owTPzNLdNqWtBeyk0ORhMkYWAZsxtn6dgEc3qg", + "jfguKe/aBFbslkEieT+eiinfwrJ/FYVaFLto9xxo6YkXl3P0bnJ0s0iA1G3mRtyB69fhAk3iGSNNrWe4", + "FdizJ8ppVUlxScvMxUsMXf5SXLrLH1/34RW3rMmkKfv8m5OXrx347yZHeQlUZsESMLgqfK/6ZFZl2zhs", + "v0pstW9n6LSWomjzQ0XmOMbiCit7d4xNvaYoTfxMdBRdzMU8HfC+k/e5UB+7xC0hP1CFiJ/G52kDftpB", + "PvSSstI7Gz20A8HpuLhxnXWSXCEe4MbBQlHMV3ZQdtM73enT0VDXDp6Ec/2EpSnTGgd3hSuRFbngH3pw", + "6elbIVvM32UmJoOH3p9YZYRsi8eBWG3fv7IrTE2JFbz+WPxhTuPDh/FRe/hwQv4o3YMIQPx95n5H/eLh", + "w6T3MGnGMkwCrVScruBByLIY3IjbVcA5XI27oE8uV0GyFMNkGCjURgF5dF857F1J5vBZuF8KKMH8NB2j", + "pMebbtEdAzPmBJ0NZSKGINOVbZmpiODdmGpMgjWkhczetWSwztj+EeL1Ch2YmSpZng7t4DNl2Cu3wZTm", + "ZYIvD1hrzYg1G4jN5TWLxjKvjamZ2gEymiOJTJUs29rgbibc8a45+1cNhBVGq5kzkHivda46rxzgqD2B", + "NG0XcwNbP1Uz/E3sIFv8Td4WtM0IstV/9yL4lPxCU01/9owAj2fsMe4t0duOPhw122y2ZTsEc5weM6Z1", + "umd0zlk3MEeyFTpT2VyKPyHtCEH/UaIQhnd8MjTz/gk8FbnXZSnBqdx0dG9m37Xd43XjoY2/sS7sFx26", + "jl3nMk2f6v028jpKr0qXa3ZIHlLC4giDdmrAAGvB4xUFw2IbFB99RLk9T7YKRCvDLH0q41zOYzt+cyod", + "zL3815JezWiqR4zRhQxM0fa24qS0IP5jvwEq1Diws5Mogju8y2wluQpk44PoV6W9pl5jpx2t0TQKDFJU", + "rLpMbJhCqURimJpfUW67iJvvLL9yXyuwLnjz1ZWQWAdSpUO6CsjZKmmOffv21yLvh+8UbMFsg+xaQdSB", + "2Q1EbLFJpCLXxTpU7nCoOZ2TR5OoDbzbjYJdMsVmJeAbj+0bM6rwugzu8PCJWR5wvVT4+pMRry9rXkgo", + "9FJZxCpBgu6JQl4ITJyBvgLg5BG+9/hLch9DMhW7hAcGi04IOnr2+EsMqLF/PErdsq7B+TaWXSDP9sHa", + "aTrGmFQ7hmGSbtR09PVcAvwJw7fDltNkPx1zlvBNd6HsPksryukC0vkZqx0w2W9xN9Gd38ELt94AUFqK", + "DWE6PT9oavjTQM63YX8WDJKL1YrplQvcU2Jl6Klpr2wn9cPZXv+uX5SHyz/E+NfKh/91bF23rMbQ1UDO", + "FkYpv0IfbYzWCaG2+GfJmsh036+TnPrawthAK/TNsrgxc5mloyyJgepzUknGNdo/aj3P/mHUYklzw/6m", + "Q+Bmsy+eJhpRtXu18P0Av3W8S1AgL9OolwNk72UW9y25zwXPVoajFA+aGgvRqRwM1E2HZA7FhW4feqzk", + "a0bJBsmtbpEbjTj1jQiPbxnwhqQY1rMXPe69slunzFqmyYPWZod+fvPSSRkrIVMNA5rj7iQOCVoyuMSM", + "ufQmmTFvuBeyHLULN4H+w8Y/eZEzEsv8WU4qApFHc1uyvJHif/mxqXyOjlWbidixAQqZsHY6u90tRxvu", + "Z3Xr+m9twBg+G8DcaLThKH2sDETf2/D68M2HiBfqgmT3vGVwfPwHkUYHRzn+4UME+uHDiROD/3jSfmzZ", + "+8OH6QLESZOb+bXBwk00Yvw2tYdfi4QBzHctDAFFrj5CwgA5dEmZB4YJztxQE9LuEHf7UsRh8rvS0abp", + "U/D27a/4xOMB/+gi4gMzS9zAJkth+LC3O2QmSaYIz6M4d0q+FuuxhNO5gzzxfAQoGkDJSPMcrqTXATTp", + "rt8ZLxLRqBl1BqUwSmbcFCi25386eDaLn2zBds3K4pemtlvnIpGU58tklPDMfPi7ldFbV7Bllck+I0vK", + "OZTJ4axu+7vXgRNa+n+LsfOsGB/5brcDrV1uZ3EN4G0wPVB+QoNepkszQYzVdtmsUJahXIiC4DxNU4uG", + "OfZbOadaaCbym3HYVa1d3CrmgruCQ3NWYhhm2m+Mb2aS6oECWtjv3PcXMuNg+3FlzQx2dJCEshVezIqu", + "qhLwZF6CpAv8VHDofI4l1HDkqGMFUZV5hG9iwQpBdC05EfN5tAzgmkkoNxNSUaXsII/MsmCNcx89e/zo", + "UdLshdgZsVKLRb/Mn5qlPD7GV+wT12TJtgLYC9jdsL5rKGqfje0Tjusp+a8alE7xVHxgM1fRS2pubdtP", + "MvQ+nZLvsPKRIeJWqXs0V/oiwu2CmnVVClpMsLjx+TcnL4md1X5jW8jbfpYLtNa1yT/pXhlfYNRXdhqo", + "nDN+nO2lPMyqlc5C+8lUbULzRtMgk3VibtCOF2NnSl5YE2po4G8nIVgiW66giLpdWiUeicP8R2uaL9E2", + "2ZKAhnnl+Easnp01npso+zB0P0KGbeB2vVhtK9YJEXoJ8oopwIx8uIR2OcRQG9TZxn15xPbyZM25pZTp", + "HsJo6HW0L9o9cFaS9UEFScg6iN/TMmX7Me/bl/YMv0rnYnSa3Ha8/r64ni+xTX50zoWccsFZjq0QUpI0", + "lm4b56Yc0TUi7V9UR+6EJg5XsrVuyAV2WBxstusZoUNc3+UfPTWbaqnD/qlh7VquLUArx9mgmPhO184h", + "xrgC183KEFHMJ4VMBDUlEyFCAMWeZIRVmQYsnN+aZ6+c/RuLYlwwjpYuhzann1mXVakYeqY5YZosBCi3", + "nnY2j/rVfDPFKo0FrH+bvhQLlp+xBY5hw+jMsm3MaH+oEx9B6iI2zbvPzbuudn74uRUOZic9qSo36XAf", + 
"9KQgqdd8EMGpuCUfSBIhN4wfj7aF3LaGfuN9aggNLjFqDSq8h3uEEXppt0f5xuiWlqLwDWIzKpMFdBlP", + "gPGSce9CTV8QefJKwI3B8zrwncol1VZ3GMXTzoGWAwkQmKFsffA3HarbOcCgBNfo5xjexqYN+ADjCC80", + "Ej/lG+IPhaHuSJh4TssQOp1o6o1SlROiCkwu6rT5TjEOw7gznzLZQtfO9L3wOXbj2PcmGqpROKuLBeiM", + "FkWqtNXX+JTgU58kBmvI69CEKmQHtmuU96nNTZQLrurVlrn8CzecLuqbn6CGuHe/32GstDPb4L+pDkzD", + "O+OCpvfOyvUR0sV+hfn7WcYpqdfQdKbYIhuPCbxTbo6OZurrEXrz/UEp3afrfhTZuB0uF+9Rir99Yy6O", + "uHBvLz7dXi2hri7Gggt87gsehYqQba6EV1mvzxhGPeDmJbasA7x/MQn4JS0HMuFjX4m9X63/YCgfPh8s", + "30C1K8+lKdnKggZLHtlY4Y73pe9CHIoPtuHBh/NauLVuReiw7+6HlqfOxog1zGLQQ3c9J1qzwft60X64", + "HCqR4Pt04PO4H4iL4pm4MvBwyUTto698DLRXCe2vrgRPq+/HwPqTmQUf2msx6GM5d/1r7TKdTv7DL9YL", + "S4BrufkIPC69Te82lUlIu9Y81bxCQuvDUa0QW7fimB42qXYpTjb0tjLLWlq01Gs/0yOrF2PEgR4+3k2O", + "Tou9LsxUy50jO0rq2L1ki6XGiv3fAy1Avt7RkaDpQoBHrBKKNR1ISzOYKwG7xOGmY5MNDAGzuKNCfywf", + "hHoJuca2s01wnQTYp7+Cmcw7fe46Ewyr0yEnwzUk2NaFoN9rdscd3yucFBX/sn06p+Nr7p+EEGqbAXZF", + "VVOupZMzPTpzcz6HHKsiby1U9c8l8KgI0sTbZRCWeVS3ioU8Jqzrvb/VsQFoWx2prfBE/XVuDM5QHvsF", + "bO4p0qKGZOPQkMR3ncLBiAHrAvM1pIcMyS5qjKlAGYgFHxLsSjE3zTEGaz5HZdeuOZcnSXNxNKXYtkyZ", + "bno+ai7z6V5lHzElZ6iWVb9n8rD+8QJbVCsXIEdD4eFYSyen/cY5V65wMZYVC74TX8IYlP/N1xC0s5Ts", + "wvUPQKxYT9UVlYV/4yBFoezdxNJAz8PMrEng6Ac5JFoxYC5UXgojRmRDCWXtnIkQcHhP2cjQpoAPwjUH", + "KaEILpFSKMi08Akf2+DYhgob/notJKjB9kcWuMHS12+a2t7YBo5iqWvqol7jBRIJK2qgk1EF7uE5tyH7", + "uX3uk/B9G7CdFqZAr7v70frUHaZ6SIypfk7cbbk7uf86xibGOcjMe5665bh5uyIb1t0s6txe0PHBCAa5", + "0bVztrCSpJ0m76+yoyNESfIXsDm2SpBv5Ot3MAbaSk4W9KjgaGeTD2p+Uym4FwcB78PWkauEKLMBZ8dp", + "v4Z4l+IvWH4BWAMwhLgP9Ggn99HGHrzZV8uNr5ldVcCheDAl5ITbpCLv2G63F+xMzu/pbfOvcdaitmX9", + "nVFt+panszOw4L68ITfzw2znYQoMq7vhVHaQHRWq13wo5OYKi/O3u3hOx2rlfVdzt4t8Q1QWipRMcmY9", + "Vs/xoKcMR1gCIarVgY5MSpyni6hSpGJ5r1OmwQyVxlQ8GQKkgY+pFhCgcIMnEZDsi544hbb0nSt6J+ZE", + "QuNEvm71v34L95RG3505zNLmd3MhodWM3XxtK32GxBcso4n/mTEtqdxcp0Zfr4V8z3oyiOWd4VghEqtZ", + "SBON1cdhWYqrDJlVFvpcpFRb855qX8a+6VrznTnVM4jiuqhygtqGLGlBciEl5PEX6XxPC9VKSMhKgWFe", + "KQ/0XBu5e4VJXpyUYkFElYsCbL+YNAUNzVVzTlFsgiiqJokCSzuYLWy/ieh45JTmTrV+pAxFrcUevfNz", + "sJnrTVUnu+jM+jIHIpZBuSpODkP25T68W3r/p3nznK2RbkCmjvycaFnDhLg3uj2y3cGnEsiKKWVBCbR0", + "xcoSE8fZOvK8hsCFNGoHxN5TDKu8ZBh70y4iYKXhytx5obJCzAPO4rJHRC+lqBfLqMB0gNOrvLJ2CnE8", + "ys+qxvAozCAzUzwlK6G00zTtSM2Sm5Cz+7ngWoqybBulrIi+cJb2H+n6JM/1SyEuZjS/eIB6LRc6rLSY", + "+PzqbnBgM5PslBZrX8CZbWe+u1SvfQ9D5RzRjmaQHRa3d2P3CMzfdnPQ3Tb3k/7CuutqM9O0GnPCCdVi", + "xfL0mfq0ou0GY+RSLCpZs8z2VrRVJvA1POzxZRWCK5BF9tEMnCabw50QxwickxnZjfkvSuDdcckcHKMZ", + "uCj7zMVJUVk+KOt1AEBIbeqzrqVtyBhLYoGriIUtlYAu8i6gI28VjES6GWxmhIMDpeFGQPWiHwOA963x", + "YWJry9lIyplY++cPmuJz1wL+3XYqbzGPoRCvs4a0pA3y8oVqBjhCusT11nioc0x7n42NigrNc0fe8BEA", + "w3FSLRhGRUvtC8acshKKLNV78TTYqCaRpu1Ss7ot0ZlynDyntW99aMauJbjCKVbEl23/V0UNKYnwet+S", + "zAtYg83r+BOksD0NJ5H/BUrb8rBjDBBVVsIltMLHXDWXGkVNdgn+WxU+JgVAhd7Iro0sFRcV3+Udw4lb", + "exZF1ozBbtKSYhFrd4rsMJMkjTprntljosYeJQPRJStq2sKf2lfkaJsBzVFOoKqnI2Rejxw7zc92hDd+", + "gBP/fUqU8Zj4bRwf2psFpVG3jQHtjJOs1dCp5+kwybhUUXCw4GxFcMRaEm/4hqroFR82SPZJvlG3Ru4T", + "EzxC7DdryFGqcfoOFE7jGXBSuKonSO0coLBagfkkYW1fAidcRC0mr6gKqkpTQ9H/YCfGlxh32vQ1nMpN", + "NOPNd5bgYER1iqkNKhIy0On1zfMf5CRuPYiD46VoRIFL/9ti//LU7dQOfAFbeXOzn0b2xyaN7hZzXHxC", + "ZrUfqCzFle0ZGeuhL8D7QS31eReQE8tZuJZ91ObElffsmjpYFK++ohsiJP5jtM5/1bRk8w3yGQu+/4yo", + "JTUk5ByvNiLARYGaibeLVxMPmLe2CD+VXTcbO2Y03MaMEgFtLnLf3EeQFb2AeBsw2MHyz1wbxqnqGVou", + "zJXd2c4+FtzifYmWFS1iTR8LRbbbqPvSwebr/1+TCxdP5eu7VSXNfYdQ16KozWewC7AnLr2E1fZkyT5f", + "8yQQOgs3RCt9dn1xDZPpnqwrlYEw1H6lBXav42qv88yNljHS8tvpsbElzXTUUg69C2OjbnpAx30ad4Ef", + 
"t628Hfwna7gOLWMM+B8L3gca1cbw2p60t4DlVgWOBKzWWj0T60zCXO0KMLHmaqPOy6Z2hzexMp5LoMpG", + "3Jz+5BTPpkQp40YRtjGhwacZRilgznjDLBmvap3QY7BSKd9ECIuN/ojWARfakJRghMlLWv50CVKyYmjj", + "zOmwLR3jFhHe0eG+TZgwwp3aH4CpRofD/MzGjB6/Zi5w24TKhmsqTXlBZRG/zjjJQZp7n1zRjbq+Ryk4", + "B3b5lGgkzbSrBkTeJSRtC0i5cU7hG/p7AoD0gI6fEQ4bjAtOOGusaUeLAf9MH4ZPwmGzouusFAvMIhw4", + "EK42LXr4rAooOJrBrXw2bt1+HsX+hO3TYFl+x4i0wFnHTLH93P+EW4lq5M+c6a0n39oou2mdNu7WHkyP", + "VL5ogv8tsfTPYyoT1xVfibNxvbDpU1U87UG0iTDgH2rbxQd2EcMgXBp3bAQf3+6sHWmRyve1loEMLQZq", + "S3g/qCaUneYuPKtvSuuZGixSJi5bek9Lm7XP+3tpADzbm96d9fa0IWTGjLNPj7jt+dFZJaosHxPzaTt3", + "FM5N4CBtwzhAH5ETYGDdITxGhV42rbpHraY2+7bJG2yqs8vbVeXblP4hM9EAR2+7IMQceZnt3I7WLczk", + "CcaUSTfHrG0GC0yCUCIhryWaia/oZnfbsYGK0Wffn3z++MnvTz7/gpgXSMEWoJqq4522XU1cIONdu8/t", + "RgL2lqfTm+CrD1jEef+jT6oKm+LOmuW2qikp2mtato99OXEBJI5jol3UtfYKx2lC+z+u7Uot8uA7lkLB", + "+98zKcoy3fUhyFUJB0pqtyIXitFAKpCKKW0YYdsDynQTEa2WaB7E2r+XtpqM4Dl4+7GjAqYHQq5SCxkK", + "qEV+hrndzmtEYF2VjldZT8+2dTk9zVroUGjEqJgZkEpUTrRnc5KCCDOIZJRZ6wyfaBGPYmQDs7XRsilC", + "dJHnadKLG2Zv5/btZq46zenNJibEC38or0GaQ/6J4boF1+EkjWn/o+EfiUIMB+MaYbnvg1ck9YPrNeUf", + "BVo/KT9BHgjAQLZtK08yShSLChFL6yVAf4J3IHfFjx8bx/LOtBCExH+wA7w4fbZ5L2QyOHA+cEXfHwNS", + "oqX8NkQJreXvysj1rDdcJNEWOaOJ1qAsWxJ9sTBKt1bPQxbzgFbSS3aWQmhiNNOyTCRJWzsOnqmYcIxK", + "IC9peftc41smlT5BfEDxZjg1Ks6UjZFsUamuV6fvJR01d5QVe7ip+WtMzP4nmD1K3nNuKOeE791maNzB", + "jvULfyvYXG9yhWPaIKvHX5CZa7ZRSciZ6jr3r7xwEhJDQbK5C2iFtd6Ribprnb8IfQMynvtIHPIqcm8F", + "n72DsDmiH5ipDJzcJJWnqK9HFgn8pXhU3Jx3x3Vxw8YM1yv7EhVw27PsS7/t8Njl2dIm5tKpFfTXOfq2", + "buE2cVE3axtbs2h0f4e3b3/VszGlhtK9GMznWOvoIE0Z9mrJ8B6qHFkcuTHcvCmK+WWo7q2t7TpQm7uz", + "HzUrdwastCqtv5scLYCDYgprif/uesfc7l3qIbCVF/pH1cJ6k3IxFjGJtbYmj6aKaqiPKJ/uPkvUvMas", + "xryWTG+wb7A3oLHfk/WYvgu1PVxtmOBLc3efFhcQerc3lUBq5W/X7wQt8T6yLj5ubiFRTsk3tsK3Oyhf", + "3Zv9B3z2j6fFo88e/8fsH48+f5TD08+/fPSIfvmUPv7ys8fw5B+fP30Ej+dffDl7Ujx5+mT29MnTLz7/", + "Mv/s6ePZ0y++/I97hg8ZkC2gvrT/s6P/m52UC5GdvD7Nzg2wDU5oxX4AszeoK88F9rU0SM3xJMKKsvLo", + "mf/p/+9P2DQXq2Z4/+uR6890tNS6Us+Oj6+urqbxJ8cLTP3PtKjz5bGfB7sNtuSV16chRt/G4eCONtZj", + "3FRHCif47M03Z+fk5PXptCGYo2dHj6aPpo9da2tOK3b07Ogz/AlPzxL3/Rjrax4rVzr/OORqvZv0nlWV", + "LaxvHjkadX8tgZZYYMf8sQItWe4fSaDFxv1fXdHFAuQUszfsT5dPjr00cvyXq5zwzgCWdBvaOutRcW0f", + "iFjVs5LlvkYZU9Z+bAPsVdxc1hrWazUhM9t+2Mfw8gIjlGwxAhW34D4tDJ7t56cNr/MdlNGtfPTs10Q1", + "K5/44Rv7xjFnUTTa/zn76RURkjit6DXNL0LSi89yajK74iQn8+XUk/2/apCbhiwdw5wcqdAdHHi9MrzH", + "Zc+s1KJqF3ZthLGUsaiHaz+zoaboPIQ6Jw2/Q8tgBEnDvQ1HfpR9+dtfn//j3dEIQLDojgLs8/gHLcs/", + "rHUN1hhY2wm8mQyFRE2auhn4QbOTEzRkhafR58077Xrof3DB4Y+hbXCAJfeBlqV5UXBI7cFv2IkQiQWP", + "6pNHjzx/ctJ/BN2xO1PRLKNaAFjnQhjFk8Q1BurzMfvoTSiNKWllz+KJDx/eVMG9Y1+aGnb19IALbRfw", + "vPFyu8P1Fv01LYh06cu4lMef7FJOuQ0FNfeRvTffTY4+/4T35pQbnkNLgm9GbX77F83P/IKLK+7fNDJT", + "vVpRuUGJSAde2O1LQxcKfarIIu3Zjqqv8cXRb+8Gb73jOObx+K9W6aTiRneidbK0ujrtuCbvqSHOiWPZ", + "pDT3w/2TqsKQz7Pw/KSqbNdwDCMAhrcfrJnS6sGUfBd/3fKNWEisa6SVE+CbaPvW3C1XedSOM3lpt4oS", + "3N3fH/b+PmnbSFgBXLM5Q3k9BUzrFGyFqResdNMLtJ8jFJVI2jceOpTHdqJF5lqvjRzDNeE/XF/BEZVR", + "7Ey/pTTInYz6DncDuBsSkyJ4g8TUNDW8HdbsK+2Gm6R1ZbxHxv2JC30/0tLQSbTcTkeb0xd3wuDfShgM", + "FTkXVjqrqgOIhz5xY9crx3+5KpOHkBpRPR4lL8aad/RtFHt/v8NxHkzJSfed67EVV6VzpyRo3ruTAT8G", + "GdCWOd0l/Tk6/qByX5z2tU8WVktgMb+P+vgTF/T+xsgalOwMpLtlumuwz5685pj1e2Or/5ZymkPanYT2", + "t5bQQu3sG8locejrsatCEElsNzLwdQ14TAdJrF0/PeJsWG4E8/HtEZ40Yf6Gxdj4ZRe5rCZeeURHrdUr", + "7WZNeqplX8T6DmId9uvN6Ytd0tUnZAoa3QY5cQuk9+Z989KkZ+LN7XgmxvGmp4+e3h4E8S68Epp8i7f4", + "e+aQ75WlpclqXxa2jSMdz8R6F1fiHbYUCtSZQ9viUaEO6SR6bt628R/3MeO33TjrwZR87V5tqoC4jPaF", + 
"MIzKZ4pRubAfGV5nkEHu+T+f4fj3puRbzH/UaoJhbJhYgS8yrp89fvLZU/eKpFc2Sqz73uyLp89OvvrK", + "vVZJxjWGDFg9p/e60vLZEspSuA/cHdEf1zx49n//87+m0+m9nWxVrL/evLKddj8W3jpJVTwMBDC0W5/4", + "JqW0ddcBeSfqbsXD/7VYJ28Bsb67hT7YLWSw/29x+8zaZOQU0WDsbPXiOeBtZI/JPvfRxN0/mMQRLpMp", + "eSVcW7S6pNLWh8ESuoosaiop1wDF1FMqZuApW8guLxmWDpBEgbwEmSkWSlXXEkIRk0rCJUbfN0VeWxDs", + "ZvQYo/vRMvkf6TpKm5+Fa1oLt2Q0e67ommCfD00U6ImtoLYmX31FHk0a7aUszQBZQEyKua7o+ugWrX6B", + "2MaWBXrhsCPk7tBfHHuMBamRfkJ9yUbV+Ltz7k9Wcrfk7jb2QJxzb8dP49iJ7Qiu+dhWC4IV7DRWQ1Z1", + "VZWbpg6ukfK8CJVmcWaGscaBj9hHsNM0nVRCu+i9O8R3RoAbsZIuQe3JNjCfVR3/hXp5zDN65xbz8f5e", + "7tLIdyTFyjuPBJmDzpcuFbiD+gR7ki4dcZg3rRhnKwPlo8l7l2pwF/v1j+PezwW1Cfhj2otFWZrowAOZ", + "IOKf8D+0xJp6bG5Lu/uGH76aIbqmXHXs0HDVKt+2BbML+fcZwxVtNZDdDeXzZvK+QIZoOYT/8w7B+yG4", + "xxy/cdUO7PFyi/h3SArwqmRGXokmId1qUP+Wrsf3ebO/7wW9Ehysj91IvpYW79ypQewwjMMixVcisfpL", + "017ruiLIsa/gs1UO+d68tEMWGXN7YzWgT/EK/z5Z56h1y5i1TXeWWWhGG8OczYu2H0JcCGX6IbWYD8JP", + "P0LV5kNwrNthMXhIPZ9xYgE/LNPB4j6WmI8rX4lpiAO9NC9HcpmtdzSaG2kRwtAgUVWIzKAUfKE+Tla0", + "jTrSeElQia1hZduq9NY//Rue3eeu54lv5u8qSSnGcyBKrABVBiOju4LUFsJ/3B6Emq18524ep7d+YO7y", + "+aPPbm/6M5CXLAdyDqtKSCpZuSE/89Db5CbcThHq9jy2BieYA+PobWpXHMvj8kg3YIKuc37aauzs1k3N", + "RGXlKlFrkLZaXqeFFesx6ZQ9GBnGSzP1AeS5Uiw+NXHOY31skefntCwRXbucTDjwqCjlsrT7CSumddMy", + "Ir5dyTc0X4a9nTTWvdDYz9cWn3SqUeLIrsubTfVXYPZZA4lWE1krQNoW5Rr7Nq0oBiyv6lKzqmx/Ezpf", + "YiegRBiSpc24icDpC78665wV82boLv36SuRu8KmZ2z3Cmbmwi6MSkHcH20qnudS0BbTtieXDr6NORq4f", + "kyt0yGSn8mQTO1NVQGXzsaX8+5WEzA0h6SVIRfGwdhb14E5U/zhE9bUrdfyRCOpJH+VNef31r6JWFPVf", + "es2Kd7vl8qha8J4iOeORSB6zC3vWri+L746i6HYWP30RJ6qIUE/LCwgDoBgU7Zmr9b+ORrpAsEiLmDs9", + "rOYWUF/i0kmsLotEzCchTtMopGL+jLzlD4laUl+B2f355PMvBpw4Zh5Xma7vxmkGMo/tMGN8OZ+0Z+qw", + "EkfA77Pb3u39NnFyxIp1osw8L2AddTZpdz529+E9RSq68RkdvUqLVbraclBM42FXYK4ptWTV7Vf0VZrN", + "0iXNvSUudNA/5V8Hg6wtO2ukhupDVHKdHGkJUECllzsLPONbzW6CK/XMlGvKY8vwTgibwtRWqW2apxUL", + "cBcTJSXQeeiCJsSYPL6IzxhC81QRYT1eyBhJOkk/KPMiUd6+nbTJd7MXnUdeVyj+oEKY/lBCWNaRwtpo", + "+XAyGbZzmESRV5UUWuSitGGUdVUJqcPpVtNRlgcYEvRahochwr2RMLdmhdrp0jnHtw5gA2hTtvpkXDrn", + "Hk0pn05qUdcsO9vMNYalnYuK9Dr3GxA+KF+7UypT/Kzj/vnUvT96kPQO7AzKqc6XdXX8F/4Hy+6+a3J2", + "sSGJOtZrfowtKI//2hpdiyy1NLKJtL1MWibdXkPLZIzsS/y86ZvyrZDdZuE7o2c7SJt0L33bThPDcBPs", + "8f1ok39rJWyr66yz4TePBkmM2DuvoSRF1IQv0G7UjcdXmbAtOBMkfBe99HEtqPEnzhkvCI22sWNrCm37", + "vQ7wj0920R/CRXn7IVuff8Ln7JXQ5HRVlbACrqG4WeA76XI4f3tsvW73Ewzc1d+Pju/f+fGN73N6giyy", + "84LfQ++JqhiBn45KLCtk7ur3o+7c3eQf903+PHhbYzK8u5c/nXtZ+kykuyv447+CP/tkV/MeY5hGXsnX", + "cA63r+FGE9/zQu4JA86G1TEcbPMro+rdXaX6Vkjfc+7uFv9EnaJ2J0cHYo2x0OyyxLopD5F19lFBP87O", + "UJYJS8PQQZ2EWC+G9RpFzrA7z2mhJi6ozBon3Cm+E3w+asEn2us7uefO9PCJmR4GpByn9ZflGEFjXwHo", + "ciUK8I5VMZ+7+shD0k+7IaQhT6XpqiL2y+lgHPY5W8GZefMnO8VBr9gG7I5Y1AHPIEtBLnihRkRxuFGv", + "ew+ho2kYgFv3bIYd8LC4yknTa5Psm6j8Yo8SSBf5Cht5+jrRDhkFXBJDgNMDkO3xX/ZfNKdVQiVWc+YJ", + "uLcx99222MLXdtwWgOQ1CqG2grb/SszJI1v/uuaY5N507Ka8IFpujKDqy/1JoCXJW8mtAY7+yTkbPDk7", + "VYHe6gbWlNYFRHNCDxnB0Cks8MOtH4DnlDuS7yNIC0IJhwXV7BK8y396V4zq2reZKwW1hQFOCC0Kexqb", + "TYBLkBui6pkysg5v5yjdU+3zsgfDgHUFkpkrmpaNA96qCce20tS2OKIz+8YNL60OL7L1rWQ7atHfrK76", + "lZiTH1kuxUm5ECEWXm2UhlWvH7b79PeBfgXekNCPWRW8ZByyleCpLs0/4dMf8WHqa6zWNfTxuXk49G3n", + "vm3D3wGrPc+YO/mm+P1ITv+NAl06q5VQCWm029nG5l8g/e95lPyh2fC8f5I2PI+cWu5hNFDcnLn187FP", + "R2i1ak6++VfrT1eRzr2plrUuxFU0C9oAbDjjmGJUKHzvmeTR2Nza2ZNMvV+r2/v0NkV4SJ2t8DTRgrd5", + "ONyF92+ahO2cMzGRuJzGS5Cqo8jdZWL/W2Vij973vbixbTm/i6PV6rCyyytRgB23Scc1Rz/VBIWLAlxn", + "/L7IEsIi0ylD/v5q3uskceS0Xiw1qSuiRSpdpPkwo7llsplVhNITRmWHrbqE0y3pJRBaSqCFUV6BEzEz", + 
"i25uUlwkVVj42eecuODPpNAUwVVJkYNSUGS+6csu0Px7NlRdb8ETAo4Ah1mIEmRO5Y2BvbjcCecFbDJU", + "hhW5/8MvRrW+dXit0LgdsbbcbAK93bTrPtTjpt9GcN3JY7KzCd2WajFFTqyqElySXAKFe+FkcP+6EPV2", + "8eZowSwy9p4p3k9yMwIKoL5ner8ptHWVmfu7D+Jz+/ScrVAS45QLb4FMDVZSpbNdbNm8FK9FmRVEnDDF", + "iXHgAdX0JVX6jcuXLrDMo71OcB4rY5sphgE2t6jVLRIj/2IfpsbOzX3IVa2IG8HnQEGRWgOH9Za5XsE6", + "zIW1U/zYIcnK2gJ3jTyEpWh8h6yo8w2hOvL7m+ESi0NLJXWmjD4qW0A0iNgGyJl/K8Ju7PAfAISpBtGW", + "cLCSf0w5MyFKoNzmqoqqMtxCZzUP3w2h6cy+faJ/bt7tE5ethWHv7UKAihPgHORXFrMKTblLqoiDg6zo", + "hcuRW7hOpn2YzWHMsMxSto3y0bhr3oqPwM5DWlcLSQvICihpwujys31M7ONtA+COe/LMLoWGbIY1UtKb", + "3lCyHDQmhaEFjqdSwiPBJyQ3R9Aozw2BuK93jFwAjp1iTo6O7oWhcK7kFvnxcNl2qwcMWGYMs+OOHhBk", + "x9HHADyAhzD09VGBH2eN+aA7xX+CchMEOWL/STaghpbQjL/XArqGv/gCa90UHfbe4cBJtjnIxnbwkaEj", + "mzI1fpJugW6U03tMsmubWiMFcHod5fb4ijKdzYW0gnRG5xrkztD5f1LmHec+fVe4qisER3D3phsHmXzc", + "T85xEQsCcdeFIRFXScrcYZQ8JivGa22fiFpPbPlrCTRfGqE9tsHakbAjsCvSJGFBZVFit9h5uDeFtEWf", + "dOeCR6AT+Yhtjd+s+1shRxXVb5eOpEyTmmtWRo2Fgt7+8Vkv7ywSdxaJO4vEnUXiziJxZ5G4s0jcWSTu", + "LBJ3Fok7i8SdReLva5H4UGWSMi9x+IqNXPCsG0x5F0v5b1VVPlxV3kCC1okryrRrk++rFAzbLfYwBGmg", + "JeKAlTAc3W2DTs+/OXlJlKhlDiQ3EDJOqpIa1QDWOjRtnlEFXzz1qYb26qQr2/kd71fzwmdPyNn3J77i", + "6NJVxmy/e//ExqsRpTclPHBt0YAXVhL1/dGAG6S79mjUXwm+ubNrdc1KjIxX5Bt8+wVcQikqkLaYIdGy", + "hr7F5xxo+dzhZofB559mchdq+4cZ7Y9Jy+jl0LailRfz/VqpItRmXJIXUQ7mH3NaKvhjKA3TjreiVaq/", + "crj4rCkImcnXoth0TojZtWPcwPbZaOqOMk7lJlElqp8C0SUNLQy7coTVt2W9O3h13D7R9slsF4WlpHVb", + "Bj89+hCVJ8vChg3rDWUTdecdOjlK5Zh2a6EeBQBHFQbENAm7J+SN/e7DlgFEiNwRa5j5RxPF2H4zMA18", + "1ygRjvV8qrkEHvHJ04tnf2IIu6hzIEwr4gvs7r5eJkfrzIy0AJ45BpTNRLHJWuzrqHULFUxRpWA1230T", + "xfwTT1y4fMyT7ffUh7lGXkSL28aTY6JZZ44BD3DnjYbRvDlgC0d07DnC+Ptm0UNsNAaBOP6UMip1eN++", + "TK+ZZnPH+O4YX3QaOxIB464geZeJTN8j45MbWfNhnvfNGvLaABef5PtonUeXHKx1y8lawKxeLIy20PfR", + "YRsdHI8J/oFYoV3uWC64HwXZwUO3/JsmqXeH63OXKG/8vq/M+AC3g/INOjNWFeUb7/KFTLFVXVoc2qbS", + "h2W0tmZ4qsR0Y/sbsmq/9ia/yHbrrtr27xYt5IoqYvcXClLzwmU89Wpbr/n4Oid26PM1b9j01pomdr2J", + "1bl5x1wRfpfbqeaKVCAzveb2QLUOk+tgYE/uB62lfXdt3N61YRPVYYDB9qvxNwzhQLeHjPgaXh9Rz6Um", + "Ma/ViYm20wlbz9CiMZziEjdnsm8eNLCkN3w7vqQxtzj/KZQVoSQvGXpXBVda1rl+yyn6b6KFTfuxJ95Q", + "Pcz7nvtX0i7EhIfPDfWWUwwyCl6dJA+cQ8KF8S2AZ7GqXixAGT4aE9Ac4C13bzFOam60MDEnK5ZLkdnU", + "WnO+jOwytW+u6IbMsaKJIH+CFGRmbv1o160tWWlWli7YxUxDxPwtp5qUQJUmPzLDgc1wvpxCCDkDfSXk", + "RcBCulfPAjgoprK0YeY7+xTb4bjlewMgGjPt46aNxe32wfGws2IQ8tMXGKOG1ZhLpuL+i13Yb803vmI8", + "SxLZ+RKICxfr0ha5jzXgHAE9aDuO9BLecnP7aUGQ41N9PXLoeoB6Z9Gejg7VtDai4yjyax2l/h2Ey5AE", + "k7lzu/wbpZBGdOA9m7jxtr5+Z+/3dLG0rlzA1qBDF7J96tonDrzkFIiWkaxT4Ma9cd4Ceav/4tMvK3l4", + "XdKj8WDaZH/APrtqN8hDvPkNnxBaCr6wdRWNdilwnxivao0B4O/TgAeXtMzEJUjJClAjV8oE/+aSlj+F", + "z95NjmANeaYlzSGzFoWxWDs331g6xUaDnGlGywy16rEAwan96sx+tOM+jrqNrlZQMKqh3JBKQg6FLUTG", + "FGn0+akt0EDyJeULvLqlqBdL+5od5wokhMaMRoXuDpEuBLPmmS1K14fxxDVqjuv2As2XicYxeMEZnd0T", + "VNHqSTVyD1olR4eU9MnRoKBtkHrZhM5Z5LTZzAgpoiUPRPhpJj5EjdY7or8j+k+d6FMlFRF18461wuIr", + "3pb3bNZ63wVEb9FK9kGqC9+V6P93L9HvOZAilEja0kHSveGoIkyTKyyLNANi7q8arfOu4Z7T1zHTLjrq", + "rtKmcu358iVl3NXUCXkNCIdRiVcrprVvT/teDJuWmaFF06AD8loyvUGthVbs9wsw///NiP0K5KVXaGpZ", + "Hj07WmpdPTs+LkVOy6VQ+vjo3SR+pjoPfwvw/+V1kUqyS6NfvUOwhWQLxs2de0UXC5CNCfHoyfTR0bv/", + "LwAA///BlGwvCb8BAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/private/routes.go b/daemon/algod/api/server/v2/generated/participating/private/routes.go index 981fddde61..3a53efec1e 100644 --- a/daemon/algod/api/server/v2/generated/participating/private/routes.go +++ 
b/daemon/algod/api/server/v2/generated/participating/private/routes.go @@ -21,6 +21,9 @@ import ( // ServerInterface represents all server handlers. type ServerInterface interface { + // Gets the merged config file. + // (GET /debug/settings/config) + GetConfig(ctx echo.Context) error // (GET /debug/settings/pprof) GetDebugSettingsProf(ctx echo.Context) error @@ -52,6 +55,17 @@ type ServerInterfaceWrapper struct { Handler ServerInterface } +// GetConfig converts echo context to params. +func (w *ServerInterfaceWrapper) GetConfig(ctx echo.Context) error { + var err error + + ctx.Set(Api_keyScopes, []string{""}) + + // Invoke the callback with all the unmarshalled arguments + err = w.Handler.GetConfig(ctx) + return err +} + // GetDebugSettingsProf converts echo context to params. func (w *ServerInterfaceWrapper) GetDebugSettingsProf(ctx echo.Context) error { var err error @@ -219,6 +233,7 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL Handler: si, } + router.GET(baseURL+"/debug/settings/config", wrapper.GetConfig, m...) router.GET(baseURL+"/debug/settings/pprof", wrapper.GetDebugSettingsProf, m...) router.PUT(baseURL+"/debug/settings/pprof", wrapper.PutDebugSettingsProf, m...) router.GET(baseURL+"/v2/participation", wrapper.GetParticipationKeys, m...) @@ -233,235 +248,236 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9a5PcNpLgX0HUboQeV+yWZNk71sXEXluyPb2WbYVa9t6upJtBkVlVmGYBHADsrrJO", - "//0CmQAJkmAVq7stj+P8SeoiHolEIpEvZH6Y5WpTKQnSmtmzD7OKa74BCxr/4nmuamkzUbi/CjC5FpUV", - "Ss6ehW/MWC3kajafCfdrxe16Np9JvoG2jes/n2n4Ry00FLNnVtcwn5l8DRvuBra7yrVuRtpmK5X5Ic5o", - "iPMXs497PvCi0GDMEMofZbljQuZlXQCzmkvDc/fJsGth18yuhWG+MxOSKQlMLZlddxqzpYCyMCdhkf+o", - "Qe+iVfrJx5f0sQUx06qEIZzP1WYhJASooAGq2RBmFStgiY3W3DI3g4M1NLSKGeA6X7Ol0gdAJSBieEHW", - "m9mztzMDsgCNu5WDuML/LjXAL5BZrldgZ+/nqcUtLejMik1iaece+xpMXVrDsC2ucSWuQDLX64R9XxvL", - "FsC4ZK+/ec4+++yzL91CNtxaKDyRja6qnT1eE3WfPZsV3EL4PKQ1Xq6U5rLImvavv3mO81/4BU5txY2B", - "9GE5c1/Y+YuxBYSOCRIS0sIK96FD/a5H4lC0Py9gqTRM3BNqfKebEs//m+5Kzm2+rpSQNrEvDL8y+pzk", - "YVH3fTysAaDTvnKY0m7Qt4+yL99/eDx//Ojjv7w9y/7b//n5Zx8nLv95M+4BDCQb5rXWIPNdttLA8bSs", - "uRzi47WnB7NWdVmwNb/CzecbZPW+L3N9iXVe8bJ2dCJyrc7KlTKMezIqYMnr0rIwMatl6diUG81TOxOG", - "VVpdiQKKueO+12uRr1nODQ2B7di1KEtHg7WBYozW0qvbc5g+xihxcN0IH7igf15ktOs6gAnYIjfI8lIZ", - "yKw6cD2FG4fLgsUXSntXmeMuK/ZmDQwndx/oskXcSUfTZbljFve1YNwwzsLVNGdiyXaqZte4OaW4xP5+", - "NQ5rG+aQhpvTuUfd4R1D3wAZCeQtlCqBS0ReOHdDlMmlWNUaDLteg137O0+DqZQ0wNTi75Bbt+3/cfHj", - "D0xp9j0Yw1fwiueXDGSuCihO2PmSSWUj0vC0hDh0PcfW4eFKXfJ/N8rRxMasKp5fpm/0UmxEYlXf863Y", - "1Bsm680CtNvScIVYxTTYWssxgGjEA6S44dvhpG90LXPc/3bajiznqE2YquQ7RNiGb//8aO7BMYyXJatA", - "FkKumN3KUTnOzX0YvEyrWhYTxBzr9jS6WE0FuVgKKFgzyh5I/DSH4BHyOHha4SsCJwwyCk4zywFwJGwT", - "NONOt/vCKr6CiGRO2E+eueFXqy5BNoTOFjv8VGm4Eqo2TacRGHHq/RK4VBaySsNSJGjswqPDMRhq4znw", - "xstAuZKWCwmFY84ItLJAzGoUpmjC/frO8BZfcANfPB2749uvE3d/qfq7vnfHJ+02NsroSCauTvfVH9i0", - "ZNXpP0E/jOc2YpXRz4ONFKs37rZZihJvor+7/QtoqA0ygQ4iwt1kxEpyW2t49k4+dH+xjF1YLguuC/fL", - "hn76vi6tuBAr91NJP71UK5FfiNUIMhtYkwoXdtvQP268NDu226Re8VKpy7qKF5R3FNfFjp2/GNtkGvNY", - "wjxrtN1Y8XizDcrIsT3sttnIESBHcVdx1/ASdhoctDxf4j/bJdITX+pf3D9VVbretlqmUOvo2F/JaD7w", - "ZoWzqipFzh0SX/vP7qtjAkCKBG9bnOKF+uxDBGKlVQXaChqUV1VWqpyXmbHc4kj/qmE5ezb7l9PW/nJK", - "3c1pNPlL1+sCOzmRlcSgjFfVEWO8cqKP2cMsHIPGT8gmiO2h0CQkbaIjJeFYcAlXXNqTVmXp8IPmAL/1", - 
"M7X4JmmH8N1TwUYRzqjhAgxJwNTwnmER6hmilSFaUSBdlWrR/HD/rKpaDOL3s6oifKD0CAIFM9gKY80D", - "XD5vT1I8z/mLE/ZtPDaK4kqWO3c5kKjh7oalv7X8LdbYlvwa2hHvGYbbqfSJ25qABifm3wXFoVqxVqWT", - "eg7Simv8F982JjP3+6TOvw8Si3E7TlyoaHnMkY6Dv0TKzf0e5QwJx5t7TthZv+/NyMaNsodgzHmLxbsm", - "HvxFWNiYg5QQQRRRk98erjXfzbyQmKGwNySTnwwQhVR8JSRCO3fqk2Qbfkn7oRDvjhDANHoR0RJJkI0J", - "1cucHvUnAzvL74BaUxsbJFEnqZbCWNSrsTFbQ4mCM5eBoGNSuRFlTNjwPYtoYL7WvCJa9l9I7BIS9Xlq", - "RLDe8uKdeCcmYY7YfbTRCNWN2fJB1pmEBLlGD4avSpVf/oWb9R2c8EUYa0j7OA1bAy9AszU368TB6dF2", - "O9oU+nYNkWbZIprqpFniS7Uyd7DEUh3DuqrqOS9LN/WQZfVWiwNPOshlyVxjBhuBBnOvOJKFnfQv9jXP", - "104sYDkvy3lrKlJVVsIVlE5pF1KCnjO75rY9/Dhy0GvwHBlwzM4Ci1bjzUxoYtONLUID23C8gTZOm6nK", - "bp+Ggxq+gZ4UhDeiqtGKECka5y/C6uAKJPKkZmgEv1kjWmviwU/c3P4TziwVLY4sgDa47xr8NfyiA7Rr", - "3d6nsp1C6YJs1tb9JjTLlaYh6Ib3k7v/ANdtZ6LO+5WGzA+h+RVow0u3ut6iHjTke1en88DJLLjl0cn0", - "VJhWwIhzYD8U70AnrDQ/4n94ydxnJ8U4SmqpR6AwoiJ3akEXs0MVzeQaoL1VsQ2ZMlnF88ujoHzeTp5m", - "M5NO3tdkPfVb6BfR7NCbrSjMXW0TDja2V90TQrarwI4GsshephPNNQUBb1TFiH30QCBOgaMRQtT2zq+1", - "r9Q2BdNXaju40tQW7mQn3DiTmf1XavvCQ6b0Yczj2FOQ7hYo+QYM3m4yZpxultYvd7ZQ+mbSRO+Ckaz1", - "NjLuRo2EqXkPSdi0rjJ/NhMeC2rQG6gN8NgvBPSHT2Gsg4ULy38FLBg36l1goTvQXWNBbSpRwh2Q/jop", - "xC24gc+esIu/nH3++Mlfn3z+hSPJSquV5hu22Fkw7L43yzFjdyU8SGpHKF2kR//iafBRdcdNjWNUrXPY", - "8Go4FPm+SPulZsy1G2Kti2ZcdQPgJI4I7mojtDNy6zrQXsCiXl2AtU7TfaXV8s654WCGFHTY6FWlnWBh", - "un5CLy2dFq7JKWyt5qcVtgRZUJyBW4cwTgfcLO6EqMY2vmhnKZjHaAEHD8Wx29ROs4u3Su90fRfmDdBa", - "6eQVXGllVa7KzMl5QiUMFK98C+ZbhO2q+r8TtOyaG+bmRu9lLYsRO4Tdyun3Fw39Zitb3Oy9wWi9idX5", - "eafsSxf5rRZSgc7sVjKkzo55ZKnVhnFWYEeUNb4FS/KX2MCF5Zvqx+XybqydCgdK2HHEBoybiVELJ/0Y", - "yJWkYL4DJhs/6hT09BETvEx2HACPkYudzNFVdhfHdtyatRES/fZmJ/PItOVgLKFYdcjy9iasMXTQVPdM", - "AhyHjpf4GW31L6C0/Bul37Ti67da1dWds+f+nFOXw/1ivDegcH2DGVjIVdkNIF052E9Sa/xNFvS8MSLQ", - "GhB6pMiXYrW2kb74Sqtf4U5MzpICFD+Qsah0fYYmox9U4ZiJrc0diJLtYC2Hc3Qb8zW+ULVlnElVAG5+", - "bdJC5kjIIcY6YYiWjeVWtE8IwxbgqCvntVttXTEMQBrcF23HjOd0QjNEjRkJv2jiZqgVTUfhbKUGXuzY", - "AkAytfAxDj76AhfJMXrKBjHNi7gJftGBq9IqB2OgyLwp+iBooR1dHXYPnhBwBLiZhRnFllzfGtjLq4Nw", - "XsIuw1g/w+5/97N58BvAa5Xl5QHEYpsUevv2tCHU06bfR3D9yWOyI0sdUa0Tbx2DKMHCGAqPwsno/vUh", - "Guzi7dFyBRpDSn5Vig+T3I6AGlB/ZXq/LbR1NRLB7tV0J+G5DZNcqiBYpQYrubHZIbbsGnVsCW4FESdM", - "cWIceETwesmNpTAoIQu0adJ1gvOQEOamGAd4VA1xI/8cNJDh2Lm7B6WpTaOOmLqqlLZQpNaAHtnRuX6A", - "bTOXWkZjNzqPVaw2cGjkMSxF43tkeQ0Y/+C28b96j+5wcehTd/f8LonKDhAtIvYBchFaRdiNo3hHABGm", - "RTQRjjA9ymlCh+czY1VVOW5hs1o2/cbQdEGtz+xPbdshcZGTg+7tQoFBB4pv7yG/JsxS/PaaG+bhCC52", - "NOdQvNYQZncYMyNkDtk+ykcVz7WKj8DBQ1pXK80LyAoo+S4RHECfGX3eNwDueKvuKgsZBeKmN72l5BD3", - "uGdoheOZlPDI8AvL3RF0qkBLIL73gZELwLFTzMnT0b1mKJwruUVhPFw2bXViRLwNr5R1O+7pAUH2HH0K", - "wCN4aIa+OSqwc9bqnv0p/guMn6CRI46fZAdmbAnt+EctYMQW7N84Reelx957HDjJNkfZ2AE+MnZkRwzT", - "r7i2IhcV6jrfwe7OVb/+BEnHOSvAclFCwaIPpAZWcX9GIaT9MW+mCk6yvQ3BHxjfEssJYTpd4C9hhzr3", - "K3qbEJk67kKXTYzq7icuGQIaIp6dCB43gS3Pbblzgppdw45dgwZm6gWFMAz9KVZVWTxA0j+zZ0bvnU36", - "Rve6iy9wqGh5qVgz0gn2w/empxh00OF1gUqpcoKFbICMJASTYkdYpdyuC//8KTyACZTUAdIzbXTNN9f/", - "PdNBM66A/ZeqWc4lqly1hUamURoFBRQg3QxOBGvm9MGJLYaghA2QJolfHj7sL/zhQ7/nwrAlXIc3g65h", - "Hx0PH6Id55UytnO47sAe6o7beeL6QMeVu/i8FtLnKYcjnvzIU3byVW/wxtvlzpQxnnDd8m/NAHoncztl", - "7TGNTIv2wnEn+XK68UGDdeO+X4hNXXJ7F14ruOJlpq5Aa1HAQU7uJxZKfn3Fyx+bbvgeEnJHozlkOb7i", - "mzgWvHF96OGfG0dI4Q4wBf1PBQjOqdcFdTqgYraRqmKzgUJwC+WOVRpyoPduTnI0zVJPGEXC52suV6gw", - "aFWvfHArjYMMvzZkmtG1HAyRFKrsVmZo5E5dAD5MLTx5dOIUcKfS9S3kpMBc82Y+/8p1ys0c7UHfY5B0", - "ks1noxqvQ+pVq/EScrrvNidcBh15L8JPO/FEVwqizsk+Q3zF2+IOk9vcX8dk3w6dgnI4cRTx234cC/p1", - 
"6na5uwOhhwZiGioNBq+o2Exl6Ktaxm+0Q6jgzljYDC351PWvI8fv9ai+qGQpJGQbJWGXTEsiJHyPH5PH", - "Ca/Jkc4osIz17esgHfh7YHXnmUKNt8Uv7nb/hPY9VuYbpe/KJUoDThbvJ3ggD7rb/ZQ39ZPysky4Fv0L", - "zj4DMPMmWFdoxo1RuUCZ7bwwcx8VTN5I/9yzi/5XzbuUOzh7/XF7PrQ4OQDaiKGsGGd5KdCCrKSxus7t", - "O8nRRhUtNRHEFZTxcavl89AkbSZNWDH9UO8kxwC+xnKVDNhYQsJM8w1AMF6aerUCY3u6zhLgnfSthGS1", - "FBbn2rjjktF5qUBjJNUJtdzwHVs6mrCK/QJasUVtu9I/PlA2VpSld+i5aZhavpPcshK4sex7Id9scbjg", - "9A9HVoK9VvqywUL6dl+BBCNMlg42+5a+Yly/X/7ax/hjuDt9DkGnbcaEmVtmJ0nK/7n/78/enmX/zbNf", - "HmVf/o/T9x+efnzwcPDjk49//vP/7f702cc/P/j3f03tVIA99XzWQ37+wmvG5y9Q/YlC9fuwfzL7/0bI", - "LElkcTRHj7bYfUwV4QnoQdc4ZtfwTtqtdIR0xUtRON5yE3Lo3zCDs0ino0c1nY3oGcPCWo9UKm7BZViC", - "yfRY442lqGF8ZvqhOjol/dtzPC/LWtJWBumb3mGG+DK1nDfJCChP2TOGL9XXPAR5+j+ffP7FbN6+MG++", - "z+Yz//V9gpJFsU3lEShgm9IV40cS9wyr+M6ATXMPhD0ZSkexHfGwG9gsQJu1qD49pzBWLNIcLjxZ8jan", - "rTyXFODvzg+6OHfec6KWnx5uqwEKqOw6lb+oI6hhq3Y3AXphJ5VWVyDnTJzASd/mUzh90Qf1lcCXITBV", - "KzVFG2rOARFaoIoI6/FCJhlWUvTTe97gL39z5+qQHzgFV3/OVETvvW+/fsNOPcM09yilBQ0dJSFIqNL+", - "8WQnIMlxs/hN2Tv5Tr6AJVoflHz2Thbc8tMFNyI3p7UB/RUvuczhZKXYs/Ae8wW3/J0cSFqjiRWjR9Os", - "qhelyNllrJC05EnJsoYjvHv3lpcr9e7d+0FsxlB98FMl+QtNkDlBWNU286l+Mg3XXKd8X6ZJ9YIjUy6v", - "fbOSkK1qMpCGVEJ+/DTP41Vl+ikfhsuvqtItPyJD4xMauC1jxqrmPZoTUPyTXre/Pyh/MWh+HewqtQHD", - "/rbh1Vsh7XuWvasfPfoMX/a1ORD+5q98R5O7CiZbV0ZTUvSNKrhwUisxVj2r+CrlYnv37q0FXuHuo7y8", - "QRtHWTLs1nl1GB4Y4FDtAponzqMbQHAc/TgYF3dBvUJax/QS8BNuYfcB9q32K3o/f+PtOvAGn9d2nbmz", - "nVyVcSQedqbJ9rZyQlaIxjBihdqqT4y3AJavIb/0GctgU9ndvNM9BPx4QTOwDmEolx29MMRsSuigWACr", - "q4J7UZzLXT+tjaEXFTjoa7iE3RvVJmM6Jo9NN62KGTuoSKmRdOmINT62foz+5vuosvDQ1GcnwcebgSye", - "NXQR+owfZBJ57+AQp4iik/ZjDBFcJxBBxD+Cghss1I13K9JPLU/IHKQVV5BBKVZikUrD+59Df1iA1VGl", - "zzzoo5CbAQ0TS+ZU+QVdrF6911yuwF3P7kpVhpeUVTUZtIH60Bq4tgvgdq+dX8YJKQJ0qFJe48trtPDN", - "3RJg6/ZbWLTYSbh2WgUaiqiNj14+GY8/I8ChuCE8oXurKZyM6roedYmMg+FWbrDbqLU+NC+mM4SLvm8A", - "U5aqa7cvDgrls21SUpfofqkNX8GI7hJ77ybmw+h4/HCQQxJJUgZRy76oMZAEkiBT48ytOXmGwX1xhxjV", - "zF5AZpiJHMTeZ4RJtD3CFiUKsE3kKu091x0vKmUFHgMtzVpAy1YUDGB0MRIfxzU34ThivtTAZSdJZ79i", - "2pd9qenOo1jCKClqk3gu3IZ9DjrQ+32CupCVLqSii5X+CWnlnO6FzxdS26EkiqYFlLCihVPjQChtwqR2", - "gxwcPy6XyFuyVFhiZKCOBAA/BzjN5SFj5Bthk0dIkXEENgY+4MDsBxWfTbk6BkjpEz7xMDZeEdHfkH7Y", - "R4H6ThhVlbtcxYi/MQ8cwKeiaCWLXkQ1DsOEnDPH5q546dic18XbQQYZ0lCh6OVD86E3D8YUjT2uKbry", - "j1oTCQk3WU0szQag06L2HogXapvRC+WkLrLYLhy9J98u4Hvp1MGkXHT3DFuoLYZz4dVCsfIHYBmHI4AR", - "2V62wiC9Yr8xOYuA2Tftfjk3RYUGScYbWhtyGRP0pkw9IluOkcv9KL3cjQDomaHaWg3eLHHQfNAVT4aX", - "eXurzdu0qeFZWOr4jx2h5C6N4G9oH+smhPtLm/hvPLlYOFGfJBPe0LJ0mwyF1LmirIPHJCjsk0MHiD1Y", - "fdWXA5No7cZ6dfEaYS3FShzzHTolh2gzUAIqwVlHNM0uU5ECTpcHvMcvQrfIWIe7x+XuQRRAqGEljIXW", - "aRTign4LczzH9MlKLcdXZyu9dOt7rVRz+ZPbHDt2lvnJV4AR+Euhjc3Q45Zcgmv0jUEj0jeuaVoC7YYo", - "UrEBUaQ5Lk57CbusEGWdplc/73cv3LQ/NBeNqRd4iwlJAVoLLI6RDFzeMzXFtu9d8Eta8Et+Z+uddhpc", - "UzexduTSneN3ci56DGwfO0gQYIo4hrs2itI9DDJ6cD7kjpE0GsW0nOzzNgwOUxHGPhilFp69j938NFJy", - "LVEawPQLQbVaQRHSmwV/mIySyJVKrqIqTlW1L2feCaPUdZh5bk/SOh+GD2NB+JG4nwlZwDYNfawVIOTt", - "yzpMuIeTrEBSupK0WSiJmjjEH1tEtrpP7AvtPwBIBkG/6Tmz2+hk2qVmO3EDSuCF10kMhPXtP5bDDfGo", - "m4+FT3cyn+4/Qjgg0pSwUWGTYRqCEQbMq0oU257jiUYdNYLxo6zLI9IWshY/2AEMdIOgkwTXSaXtQ629", - "gf0Udd5Tp5VR7LUPLHb0zXP/AL+oNXowOpHNw7ztja42ce3f/XxhleYr8F6ojEC61RC4nGPQEGVFN8wK", - "CicpxHIJsffF3MRz0AFuYGMvJpBugsjSLppaSPvF0xQZHaCeFsbDKEtTTIIWxnzyb4ZeriDTR6ak5kqI", - "tuYGrqrkc/3vYJf9zMvaKRlCmzY817udupfvEbt+tfkOdjjywahXB9iBXUHL02tAGkxZ+ptPJkpgfc90", - "UvyjetnZwiN26iy9S3e0Nb4owzjxt7dMp2hBdym3ORhtkISDZcpuXKRjE9zpgS7i+6R8aBNEcVgGieT9", - 
"eCphQgnL4VXU5KI4RLtvgJeBeHE5s4/z2e0iAVK3mR/xAK5fNRdoEs8YaUqe4U5gz5Eo51Wl1RUvMx8v", - "MXb5a3XlL39sHsIrPrEmk6bsN1+fvXzlwf84n+UlcJ01loDRVWG76nezKirjsP8qoWzf3tBJlqJo85uM", - "zHGMxTVm9u4ZmwZFUdr4mego+piLZTrg/SDv86E+tMQ9IT9QNRE/rc+TAn66QT78iosyOBsDtCPB6bi4", - "aZV1klwhHuDWwUJRzFd2p+xmcLrTp6OlrgM8Cef6EVNTpjUO6RNXIivywT/8zqWnb5TuMH//MjEZPPTr", - "iVVOyCY8jsRqh/qVfWHqhJHg9bfV39xpfPgwPmoPH87Z30r/IQIQf1/431G/ePgw6T1MmrEck0ArleQb", - "eNC8shjdiE+rgEu4nnZBn11tGslSjZNhQ6EUBRTQfe2xd62Fx2fhfymgBPfTyRQlPd50QncMzJQTdDH2", - "ErEJMt1QyUzDlOzHVOMjWEdayOx9SQZyxg6PkKw36MDMTCnydGiHXBjHXiUFU7rGDBuPWGvdiLUYic2V", - "tYjGcs2m5EztARnNkUSmSaZtbXG3UP5411L8owYmCqfVLAVovNd6V11QDnDUgUCatov5gclP1Q5/GzvI", - "Hn9TsAXtM4Ls9d+9aHxKYaGpoj9HRoDHMw4Y957obU8fnprpNdu6G4I5TY+ZUjo9MDrvrBuZI1kKXZhs", - "qdUvkHaEoP8okQgjOD4Fmnl/AZmK3OuzlMap3FZ0b2c/tN3TdeOxjb+1LhwW3VQdu8llmj7Vx23kTZRe", - "k07X7JE8poTFEQbdpwEjrAWPVxQMi2VQQvQRl3SeKAtE54VZ+lTGbzlPafz2VHqYB+9fS3694KkaMU4X", - "cjBF29uJk7KKhc5hA0yT44BmZ1EEd9NWUCa5CnTrgxhmpb2hXkPTTtZoWgUGKSpWXeYUplAalRimltdc", - "UhVx14/4le9tgFzwrte10pgH0qRDugrIxSZpjn337m2RD8N3CrESVCC7NhBVYPYDMUo2iVTkq1g3mTs8", - "as6X7NE8KgPvd6MQV8KIRQnY4jG1WHCD12XjDm+6uOWBtGuDzZ9MaL6uZaGhsGtDiDWKNbonCnlNYOIC", - "7DWAZI+w3eMv2X0MyTTiCh44LHohaPbs8ZcYUEN/PErdsr7A+T6WXSDPDsHaaTrGmFQawzFJP2o6+nqp", - "AX6B8dthz2mirlPOErb0F8rhs7Thkq8g/T5jcwAm6ou7ie78Hl4keQPAWK12TNj0/GC5408jb74d+yMw", - "WK42G2E3PnDPqI2jp7a8Mk0ahqNa/75eVIArfMT41yqE//VsXZ9YjeGbkTdbGKX8A/poY7TOGafkn6Vo", - "I9NDvU52HnILYwGtpm4W4cbN5ZaOsiQGqi9ZpYW0aP+o7TL7k1OLNc8d+zsZAzdbfPE0UYiqW6tFHgf4", - "J8e7BgP6Ko16PUL2QWbxfdl9qWS2cRyleNDmWIhO5WigbjokcywudP/QUyVfN0o2Sm51h9x4xKlvRXhy", - "z4C3JMVmPUfR49Er++SUWes0efDa7dBPr196KWOjdKpgQHvcvcShwWoBV/hiLr1Jbsxb7oUuJ+3CbaD/", - "beOfgsgZiWXhLCcVgcijue+xvJPif/6+zXyOjlV6idizASqdsHZ6u90njjY8zurW999SwBh+G8HcZLTh", - "KEOsjETfU3h90+e3iBfqg0R73jE4Pv4b004HRzn+4UME+uHDuReD//ak+5nY+8OH6QTESZOb+7XFwm00", - "Yuyb2sOvVMIAFqoWNgFFPj9CwgA5dkm5D44JLvxQc9atEPfppYi7ed+VjjZNn4J3797il4AH/KOPiN+Y", - "WeIGtq8Uxg97t0JmkmSK5nsU587ZV2o7lXB6d1Agnn8CFI2gZKJ5DlcyqACadNcfjBeJaNSNuoBSOSUz", - "LgoU2/N/P3h2i5/vwXYtyuLnNrdb7yLRXObrZJTwwnX8K8nonSuYWGWyzsiaSwllcjjSbf8adOCElv53", - "NXWejZAT2/Yr0NJye4trAe+CGYAKEzr0Clu6CWKsdtNmNWkZypUqGM7TFrVomeOwlHOqhGbifTMOu6mt", - "j1vFt+A+4dBSlBiGmfYbY8tMczuSQAvrnYf6Qm4cLD9uyMxAo4NmXGzwYjZ8U5WAJ/MKNF9hVyWh1x1T", - "qOHIUcUKZir3CVtiwgrFbK0lU8tltAyQVmgod3NWcWNokEduWbDFuWfPHj96lDR7IXYmrJSwGJb5Y7uU", - "x6fYhL74IktUCuAoYA/D+rGlqGM2dkg4vqbkP2owNsVT8QO9XEUvqbu1qZ5kU/v0hH2LmY8cEXdS3aO5", - "MiQR7ibUrKtS8WKOyY3ffH32ktGs1IdKyFM9yxVa67rkn3SvTE8wGjI7jWTOmT7O/lQebtXGZk35yVRu", - "QteiLZApejE3aMeLsXPCXpAJtSngT5MwTJGtN1BE1S5JiUficP+xludrtE12JKBxXjm9EGtgZ63nJnp9", - "2FQ/Qobt4Pa1WKkU65wpuwZ9LQzgi3y4gm46xCY3qLeNh/SI3eXpWkqilJMjhNGm1tGxaA/AkSQbggqS", - "kPUQf6RliuoxH1uX9gJ7pd9i9Irc9rz+IbleSLHNvvfOhZxLJUWOpRBSkjSmbpvmppxQNSLtXzQzf0IT", - "hytZWrd5C+yxOFpsNzBCj7ihyz/66jaVqIP+tLD1JddWYI3nbFDMQ6Vr7xAT0oCvZuWIKOaTSieCmpIP", - "IZoAiiPJCLMyjVg4v3HffvD2b0yKcSkkWro82rx+Ri6r0gj0TEsmLFspMH493dc85q3rc4JZGgvYvj95", - "qVYivxArHIPC6NyyKWZ0ONRZiCD1EZuu7XPX1ufOb37uhIPRpGdV5Scdr4OeFCTtVo4iOBW3FAJJIuQ2", - "48ej7SG3vaHfeJ86QoMrjFqDCu/hAWE0tbS7o3ztdEuiKGzB6EVlMoGukAkwXgoZXKjpCyJPXgm4MXhe", - "R/qZXHNLusMknvYGeDnyAAJfKJMP/rZD9SsHOJTgGsMc49vYlgEfYRxNg1bi53LHwqFw1B0JE8952YRO", - "J4p6o1TlhagCHxf1ynynGIdj3Fl4MtlB18Hne013rMZx7E00lqNwURcrsBkvilRqq6/wK8Ov4ZEYbCGv", - "myJUzevAbo7yIbX5iXIlTb3ZM1docMvporr5CWqIa/eHHcZMO4sd/puqwDS+Mz5o+uhXuSFCujguMf/w", - "lXFK6nU0nRmxyqZjAu+U26OjnfpmhN72v1NKD891/yle4/a4XLxHKf72tbs44sS9g/h0ulqavLoYC67w", - 
"e0h41GSE7HIlvMoGdcYw6gE3L7FlPeBDwyTgV7wceQkf+0rofiX/wdh7+Hw0fQO3Pj2X5WwvCxpNeUSx", - "wj3vy9CFOBYfTOHBd+e18Gvdi9Bx3913HU8dxYi1zGLUQ3czJ1q7wcd60b67GkuREOp04Pe4HoiP4pn7", - "NPBwJVQdoq9CDHRQCelXn4KnU/djZP3JlwW/tddi1MfyxtevpWV6nfy7n8kLy0Bavfsn8LgMNr1fVCYh", - "7ZJ5qm3CmtKHk0ohdm7FKTVsUuVSvGwYbGXEWjq0NCg/MyCrF1PEgQE+Ps5n58VRF2aq5M6MRkkdu5di", - "tbaYsf8vwAvQrw5UJGirEOARq5QRbQXS0g3mU8CucbiTqY8NHAGLuKLCcKwQhHoFucWys21wnQY4pr6C", - "myw4ff6oTDCuTjdvMnxBgn1VCIa1Zg/c8YPESVHyL6rTeTI95/5ZE0JNL8CuuWnTtfTeTE9+ublcQo5Z", - "kfcmqvrPNcgoCdI82GUQlmWUt0o075gwr/fxVscWoH15pPbCE9XXuTU4Y+/YL2F3z7AONSQLhzaP+G6S", - "OBgxQC6wkEN6zJDso8aEaSgDsRBCgn0q5rY4xmjO5yjt2g3nCiTpLo42FdueKdNFzyfN5boelfYRn+SM", - "5bIa1kwe1z9eYIlq4wPkeJN4ONbS2fmwcM61T1yMacUa30lIYQwm/BZyCNIspbj09QMQK+Spuua6CC3u", - "JCkU3U0iDfSymVm0DziGQQ6JUgz4FiovlRMjsrEHZd03E03A4T1DkaFtAh+EawlaQ9G4REplILMqPPjY", - "B8c+VFD4642QYEbLHxFwo6mvX7e5vbEMHMdU19xHvcYLZBo23EGnowzc43PuQ/Zz+h4e4YcyYActTA29", - "Hq5HG57uCDNAYkz1S+Zvy8OP+29ibBJSgs6C56mfjlt2M7Jh3s2izumCjg9GY5CbnDtnDytJ2mny4Sp7", - "OkL0SP4SdqekBIVCvmEHY6BJciLQo4SjvU2+U/ObScG9uhPwfts8cpVSZTbi7Dgf5hDvU/ylyC8BcwA2", - "Ie4jNdrZfbSxN97s6/Uu5MyuKpBQPDhh7EzSo6Lg2O6WF+xNLu/ZffNvcdaiprT+3qh28k6mX2dgwn19", - "S24WhtnPwww4VnfLqWiQAxmqt3Is5OYak/N3q3ieTNXKh67mfhX5lqgIipRMckEeq+d40FOGI0yBEOXq", - "QEcmZ97TxUypUrG8N0nT4IZKYyqeDAGyIKdkC2ig8IMnEZCsi544hZT6zie9U0umoXUi3zT737CEe0qj", - "78/czNLld0uloVOM3fWmTJ/NwxdMo4n/WQirud7dJEffoIT8wHoyiuWD4VhNJFa7kDYaa4jDslTXGTKr", - "rKlzkVJtXTvTvYxD0bW2nzvVC4jiurjxgtqOrXnBcqU15HGP9HtPgmqjNGSlwjCvlAd6aZ3cvcFHXpKV", - "asVUlasCqF5MmoLG5qql5Cg2QRRVk0QB0Q6+FqY+ER1PnNLdqeRHylDUWh1ROz8HerneZnWiRWfkyxyJ", - "WAbjszh5DFHjIbx7av+nefNSbJFuQKeO/JJZXcOc+Rb9Gtn+4HMNbCOMIVAaWroWZYkPx8U28rw2gQtp", - "1I6IvecYVnklMPamm0SApOHK3XlNZoWYB1zEaY+YXWtVr9ZRgukGzqDy6torxPEoP5kaw6PwBZmb4inb", - "KGO9pkkjtUtuQ87u50parcqya5QiEX3lLe3f8+1ZntuXSl0ueH75APVaqWyz0mIe3lf3gwPbmXQvtVj3", - "As6onPnhVL3UDkPlPNFOZpA9Fnd0YfcIzPeHOehhm/vZcGH9dXWZaVqNOZOMW7URefpM/b6i7UZj5FIs", - "KpmzjGorUpYJbIaHPb6smuAKZJFDNIPkyeJwZ8wzAu9kRnbj/osSeH9ctgTPaEYuyiFz8VJUlo/Kej0A", - "EFJ6+mxrTQUZY0ms4SpqRakS0EXeB3TirYKRSLeDzY1w50BZuBVQg+jHBsD7ZHyYU245iqRcqG34/qBN", - "Pncj4D/up/IO8xgL8bpoSUtTkFdIVDPCEdIprvfGQ73BZ++LqVFRTfHciTd8BMB4nFQHhknRUseCseSi", - "hCJL1V48b2xU80jT9k+z+iXRhfGcPOd1KH3oxq41+MQpJOLrrv+r4o6UVNN8aEmWBWyB3nX8AlpRTcN5", - "5H+Bkkoe9owBqspKuIJO+JjP5lKjqCmuIPQ1TWdWAFTojezbyFJxUfFd3jOc+LVnUWTNFOwmLSmEWNop", - "dsBMkjTqbGVGx8RMPUoOoitR1LyDP3OsyNE1A7qjnEDVQEfIgh45dZqfaITXYYCz0D8lygRMvJ/Gh45m", - "QWnU7WNAB+MkazN26mU6TDJOVdQ4WHC2onHEEom3fMNU/FqOGySHJN+qWxP3SSgZIfbrLeQo1Xh9Bwqv", - "8Yw4KXzWE6R2CVCQVuC6JKzta5BMqqjE5DU3jarS5lAMP9DE2EhIr03fwKncRjPefmcZDsZML5naqCKh", - "Gzq9uXn+NzmJew/i6HgpGjHgn//tsX8F6vZqBzbAUt7S7aeT/bFIo7/FPBefs0UdBipLdU01I2M99AUE", - "PyhRX3ABebFcNNdyiNqc+/SefVOHiOLVN3zHlMZ/nNb5j5qXYrlDPkPgh27MrLkjIe94pYgAHwXqJt4v", - "Xs0DYMHaosJUtG4xdcxouJ0bJQLaXeShuI9iG34J8TZgsAPxz9w6xmnqBVou3JXd284hFvziQ4qWDS9i", - "TR8TRXbLqIfUwa73/2zfwsVThfxuVcnzUCHUlyjq8hmsAhyIy65hs/+x5JCvBRJoKgu3RKvD6/riBibT", - "I1lX6gXCWPmVDtiDiquDyjO3WsZEy2+vxsaeZ6aTlnLXuzA16mYAdFyn8RD4cdnKT4P/ZA7XsWVMAf+f", - "Be8jhWpjeKkm7SfAcicDRwJWslYv1DbTsDSHAkzIXO3Ued3m7ggmViFzDdxQxM35j17xbFOUCukUYYoJ", - "bXyazSgFLIVsmaWQVW0TegxmKpW7CGGx0R/ROuJCG5MSnDB5xcsfr0BrUYxtnDsdVNIxLhERHB2+b8KE", - "0dypwwGEaXU4fJ/ZmtHjZu4CpyJUFK5pLJcF10XcXEiWg3b3PrvmO3Nzj1LjHDjkU+KRNNPNGhB5l5C0", - "CZBy553Ct/T3NADyO3T8THDYYFxwwllDph2rRvwzQxh+Fw6bDd9mpVrhK8KRA+Fz06KHj1RAJdEMTvLZ", - "tHWHeYz4BfZPg2n5PSOyCmedMsX+c/8jbiWqkT9JYfeefLJR9p91UtwtHcyAVLlqg/+JWIbnMfUS1ydf", - 
"iV/jBmEzPFUJtAfRJsKIf6hrFx/ZRQyD8M+4YyP49HJn3UiL1HtfsgxkaDEwe8L7wbSh7Dz34VlDU9rA", - "1EBImfvX0kda2sg+H+6lEfCoNr0/691pm5AZN84xNeL2v4/OKlVl+ZSYT6rcUXg3gYe0C+MIfUROgJF1", - "N+Expqll08l71Clqc2yZvNGiOoe8XVW+T+kfMxONcPSuC0ItkZdR5Xa0buFLnsaYMu+/MeuawRomwTjT", - "kNcazcTXfHe47NhIxuiLv5x9/vjJX598/gVzDVghVmDarOO9sl1tXKCQfbvPp40EHCzPpjchZB8gxAX/", - "Y3hU1WyKP2vEbU2bUnRQtOwY+3LiAkgcx0S5qBvtFY7Thvb/c21XapF3vmMpFPz6e6ZVWaarPjRyVcKB", - "ktqtyIXiNJAKtBHGOkbY9YAK20ZEmzWaBzH37xVlk1Eyh2A/9lQg7EjIVWohYwG1yM/wbbf3GjHYVqXn", - "VeTp2bcur6eRhQ6FRoyKWQCrVOVFe7FkKYjwBZGOXtZ6wydaxKMY2YbZUrRsihB95Hma9OKC2fu5fbeY", - "q01zereJCfEiHMobkOaYf2I8b8FNOElr2v+n4R+JRAx3xjWa5f4avCKpH9ysKP8k0IaP8hPkgQCMvLbt", - "vJOMHopFiYg1eQnQnxAcyH3x4/vWsXzwWQhCEjocAC9+Ptu2a14yeHB+44y+3zdIiZbyfowSOss/9CI3", - "sN7mIom2yBtNrAVDbEkNxcLoubV53rxiHtFKBo+dtVKWOc20LBOPpMmOg2cqJhynEugrXn56rvGN0Mae", - "IT6geD3+NCp+KRsjmVBpbpan7yWfNHf0Kvbuppav8GH2f4Lbo+Q954fyTvjBbYbGHaxYvwq3Ar31Ztc4", - "JgVZPf6CLXyxjUpDLkzfuX8dhJPmYShosfQBrbC1B16iHlrnz8regoyXIRKH/RC5txqfvYewPaK/MVMZ", - "OblJKk9R34AsEvhL8ai4OO+B6+KWhRlulvYlSuB2ZNqXYdnhqcuj1Cbu0qkNDNc5+bbu4DZxUbdrm5qz", - "aHJ9h3fv3trFlFRD6VoMrjvmOrqTogxHlWT4FbIcEY78GH7eFMX8PJb3lnK7juTm7u1HLcqDASudTOsf", - "57MVSDDCYC7xv/raMZ/2Lg0QUOaF4VElWG+TLoYQk1hrZ/JoqiiH+oT06b5bIuc1vmrMay3sDusGBwOa", - "+GsyH9O3TW4Pnxum8aX5u8+qS2hqt7eZQGoTbtdvFS/xPiIXn3S3kCpP2NeU4dsflD/fW/wbfPanp8Wj", - "zx7/2+JPjz5/lMPTz7989Ih/+ZQ//vKzx/DkT58/fQSPl198uXhSPHn6ZPH0ydMvPv8y/+zp48XTL778", - "t3uODzmQCdCQ2v/Z7H9nZ+VKZWevzrM3DtgWJ7wS34HbG9SVlwrrWjqk5ngSYcNFOXsWfvpf4YSd5GrT", - "Dh9+nfn6TLO1tZV5dnp6fX19Enc5XeHT/8yqOl+fhnmw2mBHXnl13sToUxwO7mhrPcZN9aRwht9ef33x", - "hp29Oj9pCWb2bPbo5NHJY1/aWvJKzJ7NPsOf8PSscd9PMb/mqfGp80+ryifPT7rtXvuKS12KC50R2Cb7", - "utttSsruU+CbuK71eYG0ZYeJ+7H+GoZlIYBPHj0Ku+JlnujqOcV3IM8+zKZVuR9Ohjvfz6mxqFevHMwh", - "lUuT3M87JzzO0H9JCGv2i1RgvjJoWdfiiluYvf84n1V1Ap1f4yMPsw9n8yghPEGjyqLB+ACjr+r/TzD6", - "cT479Xxy9uyD+2sNvMQkT+6PjSPUPHzSwIud/7+55qsV6BO/TvfT1ZPTIBGffvDZOz7u+3YaRyedfugk", - "OSkO9AzRN4eanH4I5Zv3D9gp3evjHqMOEwHd1+x0gSWbpjaFeHXjS0GaN6cfUBkc/f3UW/TSH1EpJ25/", - "GpIFjbSktBDpjx0UfrBbt5D9w7k20Xg5t/m6rk4/4H+QbKMVUZbZU7uVpxjEcPqhgwj/eYCI7u9t97jF", - "1UYVEIBTyyXVvN73+fQD/RtNBNsKtHAaEWZ28r9SBr5TLH24G/68k3nyx+E6OtnH9t0stUb7pDAhsqeb", - "tCx5ffQzoZnbMrtpeVX6+deGwt7wVt+3so/z2dM75MrdrLUJYL7iBQuP7HHux59u7nNJEcZOzCFxDCF4", - "+ukg6Gwf+w527Adl2TdoN/g4n33+KXfiXDothpcMW0alp4dH5Cd5KdW1DC2dHF9vNlzvJh+f/jXq5MCm", - "mVyRoKIo8UL3qJ0VxYDoSZ8BY79SeLuOYWxjVpX3GLZIa9U5Id0ShvagAareUAX2XipDSrgVBAmpCpjF", - "ipbVNXy8JU/oxRpxbc8T5k200+Ojg2UoFh+BmszL14/EoJGHqvghEj5/ESZtY/X/4Cl/8JSGp3z+6LNP", - "N/0F6CuRA3sDm0pprkW5Yz/J5hHIjXncWVEkk5l2j/5BHjefbbNcFbACmXkGli1UsfPVYWadCS6BLDcD", - "QeY0WDo6GsMI9ww2lJS00oYmz569Tbno/VO7ql6UImdk5UUzh9PhIytEk12yy/zm0bYO2E8igzkrRFk3", - "mQXstfIvd4cXCrsf59sw/9B48eBBFHbHroUs1PWDkwDuP2pAPu/hDdPMEgBG8abDYj2t88oBOABrbD70", - "ek3Bzp7JX/KbzV3yY6d+f8sr6+Bl2mRv+4+LH3+IXsaRpYGCU/BdFpEuBtFrhcHh1xyjE6mo33OyAZU7", - "fOFpua1Np57YyR/30B+8//a8/9smnS9VErNYImjIkqK74GSSwJvk7R86f3q7xYxCg1NJeN3vjLMVVoEc", - "XlCLHTt/MdBeqVv/Svhqh017t0KC3/dBPIrxj7CXfSKNW8hK2SZAmhb1h5D5h5B5K8V18uGZorsmLUtU", - "m5UP9LF5KLPaeYSCibQx3GgAyhT70296fO9k44e2rZQtixJ+Q8GiD5QpoY/mP1jEHyzidiziW0gcRjy1", - "nmkkiO44W9dUhoFpfYpOuF+QOkLzuuQ6epx6yIR9hiOmVcFfhWt8aoNdEldkr8MYdkHBm4kNvFsb3h8s", - "7w+W9/theWeHGU1XMLm11esSdhteNbYus65toa4jDznCQoHXQx8fKf79v0+vubDZUmlfPoYvLehhZwu8", - "PPW1onu/tuUZB1+w5mT0Y5wYLfnrKe86LbuOc8d6xzoOvOqpr95xPNIovOcPn9v4sTgeC9l+E4n19r1j", - "2Qb0VbgR2vCiZ6enmOBlrYw9nX2cf+iFHsUf3zfk8aG5RzyZfES6UFqshORl5mMj2oL3sycnj2Yf/18A", - "AAD//wm+5emgGwEA", + 
"H4sIAAAAAAAC/+y9/5PbNpIo/q+gdFflLyfO2I6T2/hTW/eZ2El2Lk7i8ji5d2f77UJkS8IOBXABcEaK", + "n//3V+gGSJAEJWpm4mzq5Sd7RHxpNBqN/obuD7NcbSolQVoze/ZhVnHNN2BB4188z1UtbSYK91cBJtei", + "skLJ2bPwjRmrhVzN5jPhfq24Xc/mM8k30LZx/eczDf+ohYZi9szqGuYzk69hw93Adle51s1I22ylMj/E", + "GQ1x/mL2cc8HXhQajBlC+aMsd0zIvKwLYFZzaXjuPhl2Leya2bUwzHdmQjIlgakls+tOY7YUUBbmJCzy", + "HzXoXbRKP/n4kj62IGZalTCE87naLISEABU0QDUbwqxiBSyx0Zpb5mZwsIaGVjEDXOdrtlT6AKgERAwv", + "yHoze/Z2ZkAWoHG3chBX+N+lBvgFMsv1Cuzs/Ty1uKUFnVmxSSzt3GNfg6lLaxi2xTWuxBVI5nqdsO9r", + "Y9kCGJfs9TfP2WefffalW8iGWwuFJ7LRVbWzx2ui7rNns4JbCJ+HtMbLldJcFlnT/vU3z3H+C7/Aqa24", + "MZA+LGfuCzt/MbaA0DFBQkJaWOE+dKjf9UgcivbnBSyVhol7Qo3vdFPi+X/TXcm5zdeVEtIm9oXhV0af", + "kzws6r6PhzUAdNpXDlPaDfr2Ufbl+w+P548fffyXt2fZ//g/P//s48TlP2/GPYCBZMO81hpkvstWGjie", + "ljWXQ3y89vRg1qouC7bmV7j5fIOs3vdlri+xzite1o5ORK7VWblShnFPRgUseV1aFiZmtSwdm3KjeWpn", + "wrBKqytRQDF33Pd6LfI1y7mhIbAduxZl6WiwNlCM0Vp6dXsO08cYJQ6uG+EDF/TPi4x2XQcwAVvkBlle", + "KgOZVQeup3DjcFmw+EJp7ypz3GXF3qyB4eTuA122iDvpaLosd8zivhaMG8ZZuJrmTCzZTtXsGjenFJfY", + "36/GYW3DHNJwczr3qDu8Y+gbICOBvIVSJXCJyAvnbogyuRSrWoNh12uwa3/naTCVkgaYWvwdcuu2/T8v", + "fvyBKc2+B2P4Cl7x/JKBzFUBxQk7XzKpbEQanpYQh67n2Do8XKlL/u9GOZrYmFXF88v0jV6KjUis6nu+", + "FZt6w2S9WYB2WxquEKuYBltrOQYQjXiAFDd8O5z0ja5ljvvfTtuR5Ry1CVOVfIcI2/Dtnx/NPTiG8bJk", + "FchCyBWzWzkqx7m5D4OXaVXLYoKYY92eRherqSAXSwEFa0bZA4mf5hA8Qh4HTyt8ReCEQUbBaWY5AI6E", + "bYJm3Ol2X1jFVxCRzAn7yTM3/GrVJciG0Nlih58qDVdC1abpNAIjTr1fApfKQlZpWIoEjV14dDgGQ208", + "B954GShX0nIhoXDMGYFWFohZjcIUTbhf3xne4gtu4IunY3d8+3Xi7i9Vf9f37vik3cZGGR3JxNXpvvoD", + "m5asOv0n6Ifx3EasMvp5sJFi9cbdNktR4k30d7d/AQ21QSbQQUS4m4xYSW5rDc/eyYfuL5axC8tlwXXh", + "ftnQT9/XpRUXYuV+Kumnl2ol8guxGkFmA2tS4cJuG/rHjZdmx3ab1CteKnVZV/GC8o7iutix8xdjm0xj", + "HkuYZ422Gyseb7ZBGTm2h902GzkC5CjuKu4aXsJOg4OW50v8Z7tEeuJL/Yv7p6pK19tWyxRqHR37KxnN", + "B96scFZVpci5Q+Jr/9l9dUwASJHgbYtTvFCffYhArLSqQFtBg/KqykqV8zIzllsc6V81LGfPZv9y2tpf", + "Tqm7OY0mf+l6XWAnJ7KSGJTxqjpijFdO9DF7mIVj0PgJ2QSxPRSahKRNdKQkHAsu4YpLe9KqLB1+0Bzg", + "t36mFt8k7RC+eyrYKMIZNVyAIQmYGt4zLEI9Q7QyRCsKpKtSLZof7p9VVYtB/H5WVYQPlB5BoGAGW2Gs", + "eYDL5+1Jiuc5f3HCvo3HRlFcyXLnLgcSNdzdsPS3lr/FGtuSX0M74j3DcDuVPnFbE9DgxPy7oDhUK9aq", + "dFLPQVpxjf/i28Zk5n6f1Pn3QWIxbseJCxUtjznScfCXSLm536OcIeF4c88JO+v3vRnZuFH2EIw5b7F4", + "18SDvwgLG3OQEiKIImry28O15ruZFxIzFPaGZPKTAaKQiq+ERGjnTn2SbMMvaT8U4t0RAphGLyJaIgmy", + "MaF6mdOj/mRgZ/kdUGtqY4Mk6iTVUhiLejU2ZmsoUXDmMhB0TCo3oowJG75nEQ3M15pXRMv+C4ldQqI+", + "T40I1ltevBPvxCTMEbuPNhqhujFbPsg6k5Ag1+jB8FWp8su/cLO+gxO+CGMNaR+nYWvgBWi25madODg9", + "2m5Hm0LfriHSLFtEU500S3ypVuYOlliqY1hXVT3nZemmHrKs3mpx4EkHuSyZa8xgI9Bg7hVHsrCT/sW+", + "5vnaiQUs52U5b01FqspKuILSKe1CStBzZtfctocfRw56DZ4jA47ZWWDRaryZCU1surFFaGAbjjfQxmkz", + "Vdnt03BQwzfQk4LwRlQ1WhEiReP8RVgdXIFEntQMjeA3a0RrTTz4iZvbf8KZpaLFkQXQBvddg7+GX3SA", + "dq3b+1S2UyhdkM3aut+EZrnSNATd8H5y9x/guu1M1Hm/0pD5ITS/Am146VbXW9SDhnzv6nQeOJkFtzw6", + "mZ4K0woYcQ7sh+Id6ISV5kf8Dy+Z++ykGEdJLfUIFEZU5E4t6GJ2qKKZXAO0tyq2IVMmq3h+eRSUz9vJ", + "02xm0sn7mqynfgv9IpoderMVhbmrbcLBxvaqe0LIdhXY0UAW2ct0ormmIOCNqhixjx4IxClwNEKI2t75", + "tfaV2qZg+kptB1ea2sKd7IQbZzKz/0ptX3jIlD6MeRx7CtLdAiXfgMHbTcaM083S+uXOFkrfTJroXTCS", + "td5Gxt2okTA17yEJm9ZV5s9mwmNBDXoDtQEe+4WA/vApjHWwcGH5r4AF40a9Cyx0B7prLKhNJUq4A9Jf", + "J4W4BTfw2RN28Zezzx8/+euTz79wJFlptdJ8wxY7C4bd92Y5ZuyuhAdJ7Qili/ToXzwNPqruuKlxjKp1", + "DhteDYci3xdpv9SMuXZDrHXRjKtuAJzEEcFdbYR2Rm5dB9oLWNSrC7DWabqvtFreOTcczJCCDhu9qrQT", + "LEzXT+ilpdPCNTmFrdX8tMKWIAuKM3DrEMbpgJvFnRDV2MYX7SwF8xgt4OChOHab2ml28Vbpna7vwrwB", + "WiudvIIrrazKVZk5OU+ohIHilW/BfIuwXVX/d4KWXXPD3NzovaxlMWKHsFs5/f6iod9sZYubvTcYrTex", + 
"Oj/vlH3pIr/VQirQmd1KhtTZMY8stdowzgrsiLLGt2BJ/hIbuLB8U/24XN6NtVPhQAk7jtiAcTMxauGk", + "HwO5khTMd8Bk40edgp4+YoKXyY4D4DFysZM5usru4tiOW7M2QqLf3uxkHpm2HIwlFKsOWd7ehDWGDprq", + "nkmA49DxEj+jrf4FlJZ/o/SbVnz9Vqu6unP23J9z6nK4X4z3BhSubzADC7kquwGkKwf7SWqNv8mCnjdG", + "BFoDQo8U+VKs1jbSF19p9SvciclZUoDiBzIWla7P0GT0gyocM7G1uQNRsh2s5XCObmO+xheqtowzqQrA", + "za9NWsgcCTnEWCcM0bKx3Ir2CWHYAhx15bx2q60rhgFIg/ui7ZjxnE5ohqgxI+EXTdwMtaLpKJyt1MCL", + "HVsASKYWPsbBR1/gIjlGT9kgpnkRN8EvOnBVWuVgDBSZN0UfBC20o6vD7sETAo4AN7Mwo9iS61sDe3l1", + "EM5L2GUY62fY/e9+Ng9+A3itsrw8gFhsk0Jv3542hHra9PsIrj95THZkqSOqdeKtYxAlWBhD4VE4Gd2/", + "PkSDXbw9Wq5AY0jJr0rxYZLbEVAD6q9M77eFtq5GIti9mu4kPLdhkksVBKvUYCU3NjvEll2jji3BrSDi", + "hClOjAOPCF4vubEUBiVkgTZNuk5wHhLC3BTjAI+qIW7kn4MGMhw7d/egNLVp1BFTV5XSForUGtAjOzrX", + "D7Bt5lLLaOxG57GK1QYOjTyGpWh8jyyvAeMf3Db+V+/RHS4Oferunt8lUdkBokXEPkAuQqsIu3EU7wgg", + "wrSIJsIRpkc5TejwfGasqirHLWxWy6bfGJouqPWZ/altOyQucnLQvV0oMOhA8e095NeEWYrfXnPDPBzB", + "xY7mHIrXGsLsDmNmhMwh20f5qOK5VvEROHhI62qleQFZASXfJYID6DOjz/sGwB1v1V1lIaNA3PSmt5Qc", + "4h73DK1wPJMSHhl+Ybk7gk4VaAnE9z4wcgE4doo5eTq61wyFcyW3KIyHy6atToyIt+GVsm7HPT0gyJ6j", + "TwF4BA/N0DdHBXbOWt2zP8V/g/ETNHLE8ZPswIwtoR3/qAWM2IL9G6fovPTYe48DJ9nmKBs7wEfGjuyI", + "YfoV11bkokJd5zvY3bnq158g6ThnBVguSihY9IHUwCruzyiEtD/mzVTBSba3IfgD41tiOSFMpwv8JexQ", + "535FbxMiU8dd6LKJUd39xCVDQEPEsxPB4yaw5bktd05Qs2vYsWvQwEy9oBCGoT/FqiqLB0j6Z/bM6L2z", + "Sd/oXnfxBQ4VLS8Va0Y6wX743vQUgw46vC5QKVVOsJANkJGEYFLsCKuU23Xhnz+FBzCBkjpAeqaNrvnm", + "+r9nOmjGFbD/VjXLuUSVq7bQyDRKo6CAAqSbwYlgzZw+OLHFEJSwAdIk8cvDh/2FP3zo91wYtoTr8GbQ", + "Neyj4+FDtOO8UsZ2Dtcd2EPdcTtPXB/ouHIXn9dC+jzlcMSTH3nKTr7qDd54u9yZMsYTrlv+rRlA72Ru", + "p6w9ppFp0V447iRfTjc+aLBu3PcLsalLbu/CawVXvMzUFWgtCjjIyf3EQsmvr3j5Y9MN30NC7mg0hyzH", + "V3wTx4I3rg89/HPjCCncAaag/6kAwTn1uqBOB1TMNlJVbDZQCG6h3LFKQw703s1JjqZZ6gmjSPh8zeUK", + "FQat6pUPbqVxkOHXhkwzupaDIZJCld3KDI3cqQvAh6mFJ49OnALuVLq+hZwUmGvezOdfuU65maM96HsM", + "kk6y+WxU43VIvWo1XkJO993mhMugI+9F+GknnuhKQdQ52WeIr3hb3GFym/vrmOzboVNQDieOIn7bj2NB", + "v07dLnd3IPTQQExDpcHgFRWbqQx9Vcv4jXYIFdwZC5uhJZ+6/nXk+L0e1ReVLIWEbKMk7JJpSYSE7/Fj", + "8jjhNTnSGQWWsb59HaQDfw+s7jxTqPG2+MXd7p/QvsfKfKP0XblEacDJ4v0ED+RBd7uf8qZ+Ul6WCdei", + "f8HZZwBm3gTrCs24MSoXKLOdF2buo4LJG+mfe3bR/6p5l3IHZ68/bs+HFicHQBsxlBXjLC8FWpCVNFbX", + "uX0nOdqooqUmgriCMj5utXwemqTNpAkrph/qneQYwNdYrpIBG0tImGm+AQjGS1OvVmBsT9dZAryTvpWQ", + "rJbC4lwbd1wyOi8VaIykOqGWG75jS0cTVrFfQCu2qG1X+scHysaKsvQOPTcNU8t3kltWAjeWfS/kmy0O", + "F5z+4chKsNdKXzZYSN/uK5BghMnSwWbf0leM6/fLX/sYfwx3p88h6LTNmDBzy+wkSfnf9//j2duz7H94", + "9suj7Mt/O33/4enHBw8HPz75+Oc//5/uT599/POD//jX1E4F2FPPZz3k5y+8Znz+AtWfKFS/D/sns/9v", + "hMySRBZHc/Roi93HVBGegB50jWN2De+k3UpHSFe8FIXjLTchh/4NMziLdDp6VNPZiJ4xLKz1SKXiFlyG", + "JZhMjzXeWIoaxmemH6qjU9K/PcfzsqwlbWWQvukdZogvU8t5k4yA8pQ9Y/hSfc1DkKf/88nnX8zm7Qvz", + "5vtsPvNf3ycoWRTbVB6BArYpXTF+JHHPsIrvDNg090DYk6F0FNsRD7uBzQK0WYvq03MKY8UizeHCkyVv", + "c9rKc0kB/u78oItz5z0navnp4bYaoIDKrlP5izqCGrZqdxOgF3ZSaXUFcs7ECZz0bT6F0xd9UF8JfBkC", + "U7VSU7Sh5hwQoQWqiLAeL2SSYSVFP73nDf7yN3euDvmBU3D150xF9N779us37NQzTHOPUlrQ0FESgoQq", + "7R9PdgKSHDeL35S9k+/kC1ii9UHJZ+9kwS0/XXAjcnNaG9Bf8ZLLHE5Wij0L7zFfcMvfyYGkNZpYMXo0", + "zap6UYqcXcYKSUuelCxrOMK7d295uVLv3r0fxGYM1Qc/VZK/0ASZE4RVbTOf6ifTcM11yvdlmlQvODLl", + "8to3KwnZqiYDaUgl5MdP8zxeVaaf8mG4/Koq3fIjMjQ+oYHbMmasat6jOQHFP+l1+/uD8heD5tfBrlIb", + "MOxvG169FdK+Z9m7+tGjz/BlX5sD4W/+ync0uatgsnVlNCVF36iCCye1EmPVs4qvUi62d+/eWuAV7j7K", + "yxu0cZQlw26dV4fhgQEO1S6geeI8ugEEx9GPg3FxF9QrpHVMLwE/4RZ2H2Dfar+i9/M33q4Db/B5bdeZ", + "O9vJVRlH4mFnmmxvKydkhWgMI1aorfrEeAtg+RryS5+xDDaV3c073UPAjxc0A+sQhnLZ0QtDzKaEDooF", + 
"sLoquBfFudz109oYelGBg76GS9i9UW0ypmPy2HTTqpixg4qUGkmXjljjY+vH6G++jyoLD019dhJ8vBnI", + "4llDF6HP+EEmkfcODnGKKDppP8YQwXUCEUT8Iyi4wULdeLci/dTyhMxBWnEFGZRiJRapNLz/NfSHBVgd", + "VfrMgz4KuRnQMLFkTpVf0MXq1XvN5Qrc9eyuVGV4SVlVk0EbqA+tgWu7AG732vllnJAiQIcq5TW+vEYL", + "39wtAbZuv4VFi52Ea6dVoKGI2vjo5ZPx+DMCHIobwhO6t5rCyaiu61GXyDgYbuUGu41a60PzYjpDuOj7", + "BjBlqbp2++KgUD7bJiV1ie6X2vAVjOgusfduYj6MjscPBzkkkSRlELXsixoDSSAJMjXO3JqTZxjcF3eI", + "Uc3sBWSGmchB7H1GmETbI2xRogDbRK7S3nPd8aJSVuAx0NKsBbRsRcEARhcj8XFccxOOI+ZLDVx2knT2", + "K6Z92Zea7jyKJYySojaJ58Jt2OegA73fJ6gLWelCKrpY6Z+QVs7pXvh8IbUdSqJoWkAJK1o4NQ6E0iZM", + "ajfIwfHjcom8JUuFJUYG6kgA8HOA01weMka+ETZ5hBQZR2Bj4AMOzH5Q8dmUq2OAlD7hEw9j4xUR/Q3p", + "h30UqO+EUVW5y1WM+BvzwAF8KopWsuhFVOMwTMg5c2zuipeOzXldvB1kkCENFYpePjQfevNgTNHY45qi", + "K/+oNZGQcJPVxNJsADotau+BeKG2Gb1QTuoii+3C0Xvy7QK+l04dTMpFd8+whdpiOBdeLRQrfwCWcTgC", + "GJHtZSsM0iv2G5OzCJh90+6Xc1NUaJBkvKG1IZcxQW/K1COy5Ri53I/Sy90IgJ4Zqq3V4M0SB80HXfFk", + "eJm3t9q8TZsanoWljv/YEUru0gj+hvaxbkK4v7SJ/8aTi4UT9Uky4Q0tS7fJUEidK8o6eEyCwj45dIDY", + "g9VXfTkwidZurFcXrxHWUqzEMd+hU3KINgMloBKcdUTT7DIVKeB0ecB7/CJ0i4x1uHtc7h5EAYQaVsJY", + "aJ1GIS7otzDHc0yfrNRyfHW20ku3vtdKNZc/uc2xY2eZn3wFGIG/FNrYDD1uySW4Rt8YNCJ945qmJdBu", + "iCIVGxBFmuPitJewywpR1ml69fN+98JN+0Nz0Zh6gbeYkBSgtcDiGMnA5T1TU2z73gW/pAW/5He23mmn", + "wTV1E2tHLt05fifnosfA9rGDBAGmiGO4a6Mo3cMgowfnQ+4YSaNRTMvJPm/D4DAVYeyDUWrh2fvYzU8j", + "JdcSpQFMvxBUqxUUIb1Z8IfJKIlcqeQqquJUVfty5p0wSl2Hmef2JK3zYfgwFoQfifuZkAVs09DHWgFC", + "3r6sw4R7OMkKJKUrSZuFkqiJQ/yxRWSr+8S+0P4DgGQQ9JueM7uNTqZdarYTN6AEXnidxEBY3/5jOdwQ", + "j7r5WPh0J/Pp/iOEAyJNCRsVNhmmIRhhwLyqRLHtOZ5o1FEjGD/KujwibSFr8YMdwEA3CDpJcJ1U2j7U", + "2hvYT1HnPXVaGcVe+8BiR9889w/wi1qjB6MT2TzM297oahPX/t3PF1ZpvgLvhcoIpFsNgcs5Bg1RVnTD", + "rKBwkkIslxB7X8xNPAcd4AY29mIC6SaILO2iqYW0XzxNkdEB6mlhPIyyNMUkaGHMJ/9m6OUKMn1kSmqu", + "hGhrbuCqSj7X/w522c+8rJ2SIbRpw3O926l7+R6x61eb72CHIx+MenWAHdgVtDy9BqTBlKW/+WSiBNb3", + "TCfFP6qXnS08YqfO0rt0R1vjizKME397y3SKFnSXcpuD0QZJOFim7MZFOjbBnR7oIr5Pyoc2QRSHZZBI", + "3o+nEiaUsBxeRU0uikO0+wZ4GYgXlzP7OJ/dLhIgdZv5EQ/g+lVzgSbxjJGm5BnuBPYciXJeVVpd8TLz", + "8RJjl79WV/7yx+YhvOITazJpyn7z9dnLVx78j/NZXgLXWWMJGF0Vtqt+N6uiMg77rxLK9u0NnWQpija/", + "ycgcx1hcY2bvnrFpUBSljZ+JjqKPuVimA94P8j4f6kNL3BPyA1UT8dP6PCngpxvkw6+4KIOzMUA7EpyO", + "i5tWWSfJFeIBbh0sFMV8ZXfKbganO306Wuo6wJNwrh8xNWVa45A+cSWyIh/8w+9cevpG6Q7z9y8Tk8FD", + "v55Y5YRswuNIrHaoX9kXpk4YCV5/W/3NncaHD+Oj9vDhnP2t9B8iAPH3hf8d9YuHD5Pew6QZyzEJtFJJ", + "voEHzSuL0Y34tAq4hOtpF/TZ1aaRLNU4GTYUSlFAAd3XHnvXWnh8Fv6XAkpwP51MUdLjTSd0x8BMOUEX", + "Yy8RmyDTDZXMNEzJfkw1PoJ1pIXM3pdkIGfs8AjJeoMOzMyUIk+HdsiFcexVUjCla8yw8Yi11o1Yi5HY", + "XFmLaCzXbErO1B6Q0RxJZJpk2tYWdwvlj3ctxT9qYKJwWs1SgMZ7rXfVBeUARx0IpGm7mB+Y/FTt8Lex", + "g+zxNwVb0D4jyF7/3YvGpxQWmir6c2QEeDzjgHHvid729OGpmV6zrbshmNP0mCml0wOj8866kTmSpdCF", + "yZZa/QJpRwj6jxKJMILjU6CZ9xeQqci9PktpnMptRfd29kPbPV03Htv4W+vCYdFN1bGbXKbpU33cRt5E", + "6TXpdM0eyWNKWBxh0H0aMMJa8HhFwbBYBiVEH3FJ54myQHRemKVPZfyW85TGb0+lh3nw/rXk1wueqhHj", + "dCEHU7S9nTgpq1joHDbANDkOaHYWRXA3bQVlkqtAtz6IYVbaG+o1NO1kjaZVYJCiYtVlTmEKpVGJYWp5", + "zSVVEXf9iF/53gbIBe96XSuNeSBNOqSrgFxskubYd+/eFvkwfKcQK0EFsmsDUQVmPxCjZJNIRb6KdZO5", + "w6PmfMkezaMy8H43CnEljFiUgC0eU4sFN3hdNu7wpotbHki7Ntj8yYTm61oWGgq7NoRYo1ije6KQ1wQm", + "LsBeA0j2CNs9/pLdx5BMI67ggcOiF4Jmzx5/iQE19Mej1C3rC5zvY9kF8uwQrJ2mY4xJpTEck/SjpqOv", + "lxrgFxi/HfacJuo65SxhS3+hHD5LGy75CtLvMzYHYKK+uJvozu/hRZI3AIzVaseETc8Pljv+NPLm27E/", + "AoPlarMRduMD94zaOHpqyyvTpGE4qvXv60UFuMJHjH+tQvhfz9b1idUYvhl5s4VRyj+gjzZG65xxSv5Z", + "ijYyPdTrZOchtzAW0GrqZhFu3Fxu6ShLYqD6klVaSIv2j9ousz85tVjz3LG/kzFws8UXTxOFqLq1WuRx", + 
"gH9yvGswoK/SqNcjZB9kFt+X3ZdKZhvHUYoHbY6F6FSOBuqmQzLH4kL3Dz1V8nWjZKPkVnfIjUec+laE", + "J/cMeEtSbNZzFD0evbJPTpm1TpMHr90O/fT6pZcyNkqnCga0x91LHBqsFnCFL+bSm+TGvOVe6HLSLtwG", + "+t82/imInJFYFs5yUhGIPJr7Hss7Kf7n79vM5+hYpZeIPRug0glrp7fbfeJow+Osbn3/LQWM4bcRzE1G", + "G44yxMpI9D2F1zd9fot4oT5ItOcdg+PjvzHtdHCU4x8+RKAfPpx7MfhvT7qfib0/fJhOQJw0ublfWyzc", + "RiPGvqk9/EolDGChamETUOTzIyQMkGOXlPvgmODCDzVn3Qpxn16KuJv3Xelo0/QpePfuLX4JeMA/+oj4", + "jZklbmD7SmH8sHcrZCZJpmi+R3HunH2ltlMJp3cHBeL5J0DRCEommudwJYMKoEl3/cF4kYhG3agLKJVT", + "MuOiQLE9//eDZ7f4+R5s16Isfm5zu/UuEs1lvk5GCS9cx7+SjN65golVJuuMrLmUUCaHI932r0EHTmjp", + "f1dT59kIObFtvwItLbe3uBbwLpgBqDChQ6+wpZsgxmo3bVaTlqFcqYLhPG1Ri5Y5Dks5p0poJt4347Cb", + "2vq4VXwL7hMOLUWJYZhpvzG2zDS3Iwm0sN55qC/kxsHy44bMDDQ6aMbFBi9mwzdVCXgyr0DzFXZVEnrd", + "MYUajhxVrGCmcp+wJSasUMzWWjK1XEbLAGmFhnI3ZxU3hgZ55JYFW5x79uzxo0dJsxdiZ8JKCYthmT+2", + "S3l8ik3oiy+yRKUAjgL2MKwfW4o6ZmOHhONrSv6jBmNTPBU/0MtV9JK6W5vqSTa1T0/Yt5j5yBFxJ9U9", + "mitDEuFuQs26KhUv5pjc+M3XZy8ZzUp9qIQ81bNcobWuS/5J98r0BKMhs9NI5pzp4+xP5eFWbWzWlJ9M", + "5SZ0LdoCmaIXc4N2vBg7J+wFmVCbAv40CcMU2XoDRVTtkpR4JA73H2t5vkbbZEcCGueV0wuxBnbWem6i", + "14dN9SNk2A5uX4uVSrHOmbJr0NfCAL7IhyvopkNscoN623hIj9hdnq6lJEo5OUIYbWodHYv2ABxJsiGo", + "IAlZD/FHWqaoHvOxdWkvsFf6LUavyG3P6x+S64UU2+x771zIuVRS5FgKISVJY+q2aW7KCVUj0v5FM/Mn", + "NHG4kqV1m7fAHoujxXYDI/SIG7r8o69uU4k66E8LW19ybQXWeM4GxTxUuvYOMSEN+GpWjohiPql0Iqgp", + "+RCiCaA4kowwK9OIhfMb9+0Hb//GpBiXQqKly6PN62fksiqNQM+0ZMKylQLj19N9zWPeuj4nmKWxgO37", + "k5dqJfILscIxKIzOLZtiRodDnYUIUh+x6do+d2197vzm5044GE16VlV+0vE66ElB0m7lKIJTcUshkCRC", + "bjN+PNoectsb+o33qSM0uMKoNajwHh4QRlNLuzvK1063JIrCFoxeVCYT6AqZAOOlkMGFmr4g8uSVgBuD", + "53Wkn8k1t6Q7TOJpb4CXIw8g8IUy+eBvO1S/coBDCa4xzDG+jW0Z8BHG0TRoJX4udywcCkfdkTDxnJdN", + "6HSiqDdKVV6IKvBxUa/Md4pxOMadhSeTHXQdfL7XdMdqHMfeRGM5Chd1sQKb8aJIpbb6Cr8y/BoeicEW", + "8ropQtW8DuzmKB9Sm58oV9LUmz1zhQa3nC6qm5+ghrh2f9hhzLSz2OG/qQpM4zvjg6aPfpUbIqSL4xLz", + "D18Zp6ReR9OZEatsOibwTrk9Otqpb0bobf87pfTwXPef4jVuj8vFe5Tib1+7iyNO3DuIT6erpcmri7Hg", + "Cr+HhEdNRsguV8KrbFBnDKMecPMSW9YDPjRMAn7Fy5GX8LGvhO5X8h+MvYfPR9M3cOvTc1nO9rKg0ZRH", + "FCvc874MXYhj8cEUHnx3Xgu/1r0IHffdfdfx1FGMWMssRj10N3OitRt8rBftu6uxFAmhTgd+j+uB+Cie", + "uU8DD1dC1SH6KsRAB5WQfvUpeDp1P0bWn3xZ8Ft7LUZ9LG98/VpaptfJv/uZvLAMpNW7fwKPy2DT+0Vl", + "EtIumafaJqwpfTipFGLnVpxSwyZVLsXLhsFWRqylQ0uD8jMDsnoxRRwY4OPjfHZeHHVhpkruzGiU1LF7", + "KVZrixn7/wK8AP3qQEWCtgoBHrFKGdFWIC3dYD4F7BqHO5n62MARsIgrKgzHCkGoV5BbLDvbBtdpgGPq", + "K7jJgtPnj8oE4+p08ybDFyTYV4VgWGv2wB0/SJwUJf+iOp0n03PunzUh1PQC7JqbNl1L78305JebyyXk", + "mBV5b6Kq/1qDjJIgzYNdBmFZRnmrRPOOCfN6H291bAHal0dqLzxRfZ1bgzP2jv0SdvcM61BDsnBo84jv", + "JomDEQPkAgs5pMcMyT5qTJiGMhALISTYp2Jui2OM5nyO0q7dcK5Aku7iaFOx7ZkyXfR80lyu61FpH/FJ", + "zlguq2HN5HH94wWWqDY+QI43iYdjLZ2dDwvnXPvExZhWrPGdhBTGYMJvIYcgzVKKS18/ALFCnqprrovQ", + "4k6SQtHdJNJAL5uZRfuAYxjkkCjFgG+h8lI5MSIbe1DWfTPRBBzeMxQZ2ibwQbiWoDUUjUukVAYyq8KD", + "j31w7EMFhb/eCAlmtPwRATea+vp1m9sby8BxTHXNfdRrvECmYcMddDrKwD0+5z5kP6fv4RF+KAN20MLU", + "0OvherTh6Y4wAyTGVL9k/rY8/Lj/JsYmISXoLHie+um4ZTcjG+bdLOqcLuj4YDQGucm5c/awkqSdJh+u", + "sqcjRI/kL2F3SkpQKOQbdjAGmiQnAj1KONrb5Ds1v5kU3Ks7Ae+3zSNXKVVmI86O82EO8T7FX4r8EjAH", + "YBPiPlKjnd1HG3vjzb5e70LO7KoCCcWDE8bOJD0qCo7tbnnB3uTynt03/xZnLWpK6++NaifvZPp1Bibc", + "17fkZmGY/TzMgGN1t5yKBjmQoXorx0JurjE5f7eK58lUrXzoau5XkW+JiqBIySQX5LF6jgc9ZTjCFAhR", + "rg50ZHLmPV3MlCoVy3uTNA1uqDSm4skQIAtySraABgo/eBIBybroiVNIqe980ju1ZBpaJ/JNs/8NS7in", + "NPr+zM0sXX63VBo6xdhdb8r02Tx8wTSa+J+FsJrr3U1y9A1KyA+sJ6NYPhiO1URitQtpo7GGOCxLdZ0h", + "s8qaOhcp1da1M93LOBRda/u5U72AKK6LGy+o7diaFyxXWkMe90i/9ySoNkpDVioM80p5oJfWyd0bfOQl", + 
"WalWTFW5KoDqxaQpaGyuWkqOYhNEUTVJFBDt4Gth6hPR8cQp3Z1KfqQMRa3VEbXzc6CX621WJ1p0Rr7M", + "kYhlMD6Lk8cQNR7Cu6f2f5o3L8UW6QZ06sgvmdU1zJlv0a+R7Q8+18A2whgCpaGla1GW+HBcbCPPaxO4", + "kEbtiNh7jmGVVwJjb7pJBEgartyd12RWiHnARZz2iNm1VvVqHSWYbuAMKq+uvUIcj/KTqTE8Cl+QuSme", + "so0y1muaNFK75Dbk7H6upNWqLLtGKRLRV97S/j3fnuW5fanU5YLnlw9Qr5XKNist5uF9dT84sJ1J91KL", + "dS/gjMqZH07VS+0wVM4T7WQG2WNxRxd2j8B8f5iDHra5nw0X1l9Xl5mm1ZgzybhVG5Gnz9TvK9puNEYu", + "xaKSOcuotiJlmcBmeNjjy6oJrkAWOUQzSJ4sDnfGPCPwTmZkN+6/KIH3x2VL8Ixm5KIcMhcvRWX5qKzX", + "AwAhpafPttZUkDGWxBquolaUKgFd5H1AJ94qGIl0O9jcCHcOlIVbATWIfmwAvE/GhznllqNIyoXahu8P", + "2uRzNwL+434q7zCPsRCvi5a0NAV5hUQ1IxwhneJ6bzzUG3z2vpgaFdUUz514w0cAjMdJdWCYFC11LBhL", + "LkooslTtxfPGRjWPNG3/NKtfEl0Yz8lzXofSh27sWoNPnEIivu76vyruSEk1zYeWZFnAFuhdxy+gFdU0", + "nEf+Fyip5GHPGKCqrIQr6ISP+WwuNYqa4gpCX9N0ZgVAhd7Ivo0sFRcV3+U9w4lfexZF1kzBbtKSQoil", + "nWIHzCRJo85WZnRMzNSj5CC6EkXNO/gzx4ocXTOgO8oJVA10hCzokVOn+YlGeB0GOAv9U6JMwMT7aXzo", + "aBaURt0+BnQwTrI2Y6depsMk41RFjYMFZysaRyyReMs3TMWv5bhBckjyrbo1cZ+EkhFiv95CjlKN13eg", + "8BrPiJPCZz1BapcABWkFrkvC2r4GyaSKSkxec9OoKm0OxfADTYyNhPTa9A2cym004+13luFgzPSSqY0q", + "Erqh05ub53+Tk7j3II6Ol6IRA/753x77V6Bur3ZgAyzlLd1+OtkfizT6W8xz8Tlb1GGgslTXVDMy1kNf", + "QPCDEvUFF5AXy0VzLYeozblP79k3dYgoXn3Dd0xp/Mdpnf+oeSmWO+QzBH7oxsyaOxLyjleKCPBRoG7i", + "/eLVPAAWrC0qTEXrFlPHjIbbuVEioN1FHor7KLbhlxBvAwY7EP/MrWOcpl6g5cJd2b3tHGLBLz6kaNnw", + "Itb0MVFkt4x6SB3sev9/7Vu4eKqQ360qeR4qhPoSRV0+g1WAA3HZNWz2P5Yc8rVAAk1l4ZZodXhdX9zA", + "ZHok60q9QBgrv9IBe1BxdVB55lbLmGj57dXY2PPMdNJS7noXpkbdDICO6zQeAj8uW/lp8J/M4Tq2jCng", + "/7PgfaRQbQwv1aT9BFjuZOBIwErW6oXaZhqW5lCACZmrnTqv29wdwcQqZK6BG4q4Of/RK55tilIhnSJM", + "MaGNT7MZpYClkC2zFLKqbUKPwUylchchLDb6I1pHXGhjUoITJq94+eMVaC2KsY1zp4NKOsYlIoKjw/dN", + "mDCaO3U4gDCtDofvM1szetzMXeBUhIrCNY3lsuC6iJsLyXLQ7t5n13xnbu5RapwDh3xKPJJmulkDIu8S", + "kjYBUu68U/iW/p4GQH6Hjp8JDhuMC044a8i0Y9WIf2YIw+/CYbPh26xUK3xFOHIgfG5a9PCRCqgkmsFJ", + "Ppu27jCPEb/A/mkwLb9nRFbhrFOm2H/uf8StRDXyJyns3pNPNsr+s06Ku6WDGZAqV23wPxHL8DymXuL6", + "5Cvxa9wgbIanKoH2INpEGPEPde3iI7uIYRD+GXdsBJ9e7qwbaZF670uWgQwtBmZPeD+YNpSd5z48a2hK", + "G5gaCClz/1r6SEsb2efDvTQCHtWm92e9O20TMuPGOaZG3P730VmlqiyfEvNJlTsK7ybwkHZhHKGPyAkw", + "su4mPMY0tWw6eY86RW2OLZM3WlTnkLeryvcp/WNmohGO3nVBqCXyMqrcjtYtfMnTGFPm/TdmXTNYwyQY", + "ZxryWqOZ+JrvDpcdG8kYffGXs88fP/nrk8+/YK4BK8QKTJt1vFe2q40LFLJv9/m0kYCD5dn0JoTsA4S4", + "4H8Mj6qaTfFnjbitaVOKDoqWHWNfTlwAieOYKBd1o73CcdrQ/n+u7Uot8s53LIWCX3/PtCrLdNWHRq5K", + "OFBSuxW5UJwGUoE2wljHCLseUGHbiGizRvMg5v69omwySuYQ7MeeCoQdCblKLWQsoBb5Gb7t9l4jBtuq", + "9LyKPD371uX1NLLQodCIUTELYJWqvGgvliwFEb4g0tHLWm/4RIt4FCPbMFuKlk0Roo88T5NeXDB7P7fv", + "FnO1aU7vNjEhXoRDeQPSHPNPjOctuAknaU37/zT8I5GI4c64RrPcX4NXJPWDmxXlnwTa8FF+gjwQgJHX", + "tp13ktFDsSgRsSYvAfoTggO5L3583zqWDz4LQUhChwPgxc9n23bNSwYPzm+c0ff7BinRUt6PUUJn+Yde", + "5AbW21wk0RZ5o4m1YIgtqaFYGD23Ns+bV8wjWsngsbNWyjKnmZZl4pE02XHwTMWE41QCfcXLT881vhHa", + "2DPEBxSvx59GxS9lYyQTKs3N8vS95JPmjl7F3t3U8hU+zP4vcHuUvOf8UN4JP7jN0LiDFetX4Vagt97s", + "GsekIKvHX7CFL7ZRaciF6Tv3r4Nw0jwMBS2WPqAVtvbAS9RD6/xZ2VuQ8TJE4rAfIvdW47P3ELZH9Ddm", + "KiMnN0nlKeobkEUCfykeFRfnPXBd3LIww83SvkQJ3I5M+zIsOzx1eZTaxF06tYHhOiff1h3cJi7qdm1T", + "cxZNru/w7t1bu5iSaihdi8F1x1xHd1KU4aiSDL9CliPCkR/Dz5uimJ/H8t5SbteR3Ny9/ahFeTBgpZNp", + "/eN8tgIJRhjMJf5XXzvm096lAQLKvDA8qgTrbdLFEGISa+1MHk0V5VCfkD7dd0vkvMZXjXmthd1h3eBg", + "QBN/TeZj+rbJ7eFzwzS+NH/3WXUJTe32NhNIbcLt+q3iJd5H5OKT7hZS5Qn7mjJ8+4Py53uLf4fP/vS0", + "ePTZ439f/OnR549yePr5l48e8S+f8sdffvYYnvzp86eP4PHyiy8XT4onT58snj55+sXnX+afPX28ePrF", + "l/9+z/EhBzIBGlL7P5v9r+ysXKns7NV59sYB2+KEV+I7cHuDuvJSYV1Lh9QcTyJsuChnz8JP/384YSe5", + 
"2rTDh19nvj7TbG1tZZ6dnl5fX5/EXU5X+PQ/s6rO16dhHqw22JFXXp03MfoUh4M72lqPcVM9KZzht9df", + "X7xhZ6/OT1qCmT2bPTp5dPLYl7aWvBKzZ7PP8Cc8PWvc91PMr3lqfOr80/atVtJv9xpD1oNwrldQsPvN", + "q5t/azy35kF4vLMUJV4ZfzdEjM0qzgskLl+jdIZV1zAYC8F68uhR2Asv6UQXzim+/nj2YdbWtu8LEwOk", + "vmkBTkLW1nwcLvoneSnVtWSYDJAOUL3ZcL2jFXSwEQ2O28RXBo3sWlxxC7P3rncf51XlCxaMoRyrXHVP", + "eeiMBNJkvHcnjBLh+7IDJoXyYbGEW2J/b3LIwWSJ3cFGrxzMIX1Ok1DRO4Q8ztBnTAhrzgiZHQaIns+q", + "OoHOr/FhjdmHs3mUhJ+gUWXRYHyA0Vf1/yMYdaTr76bZsw/urzXwEhNruT82jlDz8EkDL3b+/+aar1ag", + "T/w63U9XT06DFnL6wWdM+bjv22kcEXb6oZNYpjjQM0Q8HWpy+iGUzN4/YKdcso81jTpMBHRfs9MFlsma", + "2hTi1Y0vBWnenH5ABXz091NvRU1/REMI3bCnIUHTSEtKxZH+2EHhB7t1C9k/nGsTjZdzm6/r6vQD/gfJ", + "NloRZfY9tVt5ioEjpx86iPCfB4jo/t52j1tcbVQBATi1XFKd8X2fTz/Qv9FEsK1AC6eFYjYt/ytlPTzF", + "cpO74c87mSd/HK6jk/HtwGWO2QRNiKbqJopLXh/97HPmtsxuWi6bfs67oYA9lKT2rezjfPb0DrlyN1Nw", + "ApiveMFCYgOc+/Gnm/tcUlS3Ey1JBEYInn46CDrbx76DHftBWfYN2mo+zmeff8qdOJdOc+RlEOhuKPpN", + "Oz79a9TJ3k0zuSJBRVGyi+5ROyuKAdGTDgnGfqXwdh3D2MasKu+lbZHWqtBCuiXMp4nNw/SRlOQsCBJS", + "FTCLlVura/h4S57Qi+/i2p4nTMroG8GHHstQoD8CNZkLsR/9QiMPzR+HSPj8RZi0fR/xB0/5g6c0POXz", + "R599uukvQF+JHNgb2FRKcy3KHftJNg9vbszjzooimUC2e/QP8rj5bJvlqoAVyMwzsGyhip2vyDPrTHAJ", + "ZC0bCDKnwbrU0RhGuGewW6WklTYcfPbsbSoswj9vrOpFKXJGlnU0LVXcriPLT5PRs8v85nssE/NE1nhW", + "iLJusjnYa+VfSw8vlMjaYhUz/9B48eBBFHbHroUs1PWDkwDuP2pAPu/hDdPMEgBGMb7DAkmtw9ABOABr", + "bD70NE7Bzp7JX/KbzV3yY6d+/2vboJqMef958eMP0WtEsjRQQBC+hSPSxYcLWmFA/jXHiFAqpPicbEDl", + "Dl/VWm5r06nhdvLHPfQH77897/+2SaFM1dsslmUasqToLjiZJPAmefuHzp/ebjGjcOxU4mP3O+NshZU3", + "hxfUYsfOXwy0V+rWvxK+2mHT3q2Q4Pd9EI9i/CPsZZ9I4xayUrYJSqdF/SFk/iFk3kpxnXx4puiuScsS", + "1cPlA31sHkrbdh7+YPJyDPEagDLF/vSbHt872fihbStly6Ik61Cw6ANlp+ij+Q8W8QeLuB2L+BYShxFP", + "rWcaCaI7ztY1lWFgKqWiE2IZpI7QvC65jh4EHzJhn+GIaVXwV+Ean9pgl8QV2evw3YCggNnEBt6tDe8P", + "lvcHy/v9sLyzw4ymK5jc2up1CbsNrxpbl1nXtlDXkYccYaFg96GPjxT//t+n11zYbKm0L9nDlxb0sLMF", + "Xp76+ty9X9uSmIMvWOcz+jFORpf89ZR3nZZdx7ljvWMdB1711FfvOB5pFHIohM9tzF4cA4dsv4l+e/ve", + "sWwD+ircCG1I17PTU0yqs1bGns4+zj/0wr3ij+8b8vjQ3COeTD4iXSgtVkLyMvOxEVkbtvXk5NHs4/8N", + "AAD//1nr4yEUHQEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/generated/participating/public/routes.go b/daemon/algod/api/server/v2/generated/participating/public/routes.go index 0ada10df52..37794f0d92 100644 --- a/daemon/algod/api/server/v2/generated/participating/public/routes.go +++ b/daemon/algod/api/server/v2/generated/participating/public/routes.go @@ -383,32 +383,32 @@ var swaggerSpec = []string{ "lyfZ6eOH/zr7y+kXpyk8+eKr01P61RP68KvHD+HRX754cgoP519+NXuUPXryaPbk0ZMvv/gqffzk4ezJ", "l1/96z3DhwzIFlBf2v/p5D+Ts3whkrPX58mlAbbBCS3ZD2D2BnXlucC+lgapKZ5EKCjLJ0/9T//Hn7Dj", "VBTN8P7XievPNFlqXaqnJyfr9fo4/ORkgan/iRZVujzx82C3wZa88vq8jtG3cTi4o431GDfVkcIZPnvz", - "7cUlOXt9ftwQzOTp5PT49Piha23NackmTyeP8Sc8PUvc9xOsr3miXOn8k7K0xfM/TCcnjg7dX0ugORbR", - "MX8UoCVL/SMJNNu6/6s1XSxAHmOGhv1p9ejESxwn7111hA+7np2E0R8n71tFJLI9X/rohn2vnLz37XF3", - "D9hqjeriygziom7N70G7kkrWvhCpx4HeBDf6lCisjW9+KiUT5kxOzQWbAfr+MYRNYpFwLSueWoewnQI4", - "/vfl2X+iU/zl2X+Sr8np1CUVKFRaYtPbrOqamM4zC3Y/FlF9sz2rK5Y0DvTJ07cxQ5ILCC2rWc5SYmUR", - "PIyG0oKzUo/Y8EK0Gk5U3cO84eyGW58mX717/8VfPsQkxp78WyMpKOLR8uwK390UkVbQzddDKNu4KHMz", - "7j8qkNtmEQXdTEKA+17SSGUznwTkmzyH8YdBZOK/X7z6kQhJnIb8mqbXdQKUz3hrsvzChDfz5RDE7vIM", - "gQZeFeYecplUhVqU7SK/NZrfYUdEBBRZxqPTU88nnRYSHNATd+6DmTqmqz6hYShOYIzsp7srAhua6nxL", - "qApiITAy0Xcv7aSpiTJpBcvvNH/2Z3RbEs00ODTjPlKFXmia74HvstPpsYUOF9ZTmot0f4p7DxlRCN7F", - 
"RIVwaz2N/Lm7/z12ty95kFKYM80w9rq5cvx11gLSyZv51oM7UEzkmPxNVCgfGsm/0hDrc48zWL+Hm9PV", - "PgqC5Zr0IHxydNRd+NFRE9o3hzUyWcrxxS46jo6OzU49OZCV7bRFt0oFjzo7hwzX26yXdFNHRlPCBU84", - "LKhmKyCBUvnk9OFnu8JzbmPRjUBsBfcP08kXn/GWnXMj2NCc4Jt2NY8/29VcgFyxFMglFKWQVLJ8S37i", - "dbB/0Ea9z/5+4tdcrLlHhNFJq6KgcuuEaFrznIoHvX128p9eFaNG0EYuShcK411QRLUyra90yBeTdx+8", - "DjBS99j12skMu1yOfRVChWVYO0Hvgzp5j/bzwd9PnBM0/hD9GFZBPvH1FQfetJW04g9bWtF7vTEL2T2c", - "eScYL6U6XVblyXv8D+q6wYpsYf4TveEnGPd58r6FCPe4h4j2783n4RurQmTggRPzuUI9btfjk/f232Ai", - "2JQgmbmOsBim+9UWLT7BbtHb/s9bnkZ/7K+jVbB14OcTb2qJqdTtN9+3/mzTlFpWOhPrYBZ0UlgPWx8y", - "87BS3b9P1pRpIyS5OqF0rkH2P9ZA8xPXFKjza1OHv/cEmwsEP3bEqlLYQkFtjfYNXV+28j2lLYjxjUBD", - "xRDD3SQzxpELhVyyMT3ah30VqccbL5dgY2y99zYig2pBZlLQLKVKmz9c+6yebvzhlvpXt37HecQ3h2Ci", - "uaFfctLwk+O9Dhscd4yQGewLOX/uJ2ySzH53wawH0Tc0I76yVEJe0txsOGTkzIn/LWz83kLVp5eCPrHY", - "8tHkjG/84VOEYpm9loIo44Vxgj53Y4QKo0UaBrAAnjgWlMxEtnWtyCaSrvXG1uHoMrcT2r4x2oZIKmmh", - "hh7egZXyj22a3GeR/NMQ+Kch8E9T0Z+GwD93909D4EhD4J9msj/NZP8jzWSH2MZiYqYz/wxLm9gbnbbm", - "tXofbXpQ1Cy+XSGM6Voma6WKYrsLpo8JucTyLtTcErACSXOSUmWlK1eKqMAITqwzBtnTK560ILFxkmbi", - "+81/bYDqVXV6+hjI6YPuN0qzPA95c/9blHfxkc0h+ZpcTa4mvZEkFGIFmU14DWug26/2Dvu/6nFf9Zon", - "YKY71s/x5ciIquZzljKL8lzwBaEL0QRXY9FVLvAJSAOcbUFFmJ66ZBTmMqBdh/p2qfa25N6XAM6bLdwb", - "UtAhl3g0gSG8A0MJ/mVMHMH/aCn9phWrbstId47d46p/cpWPwVU+OV/53J20gWnxv6WY+eT0yWe7oNAQ", - "/aPQ5DtMHLidOOaKgabRTlw3FbR8MRhv7muCj8NgXrxF6zDet+/MRaBArvwF28SmPj05wepgS6H0ycRc", - "f+241fDhuxrm9/52KiVbYatntG4KyRaM0zxxgZ9JE3/66Ph08uH/BwAA///VdB8P3SEBAA==", + "7cUlOXt9ftwQzOTp5PT49Piha23NackmTyeP8Sc8PUvc9xOsr3miXOn8kzpX68O096wsbWF988jRqPtr", + "CTTHAjvmjwK0ZKl/JIFmW/d/taaLBchjzN6wP60enXhp5OS9q5zwYdezkzAy5OR9q8BEtudLH/mw75WT", + "97517u4BW21TXcyZQWrU5fk9aFduydoeIrU60NPgRp8ShXXzzU+lZMKc16m5fDPAuAAMb5NYQFzLiqfW", + "WWynAI7/fXn2n+gwf3n2n+Rrcjp1CQcKFZrY9Dbjuia088yC3Y9TVN9sz+pqJo1zffL0bczI5IJFy2qW", + "s5RYOQUPqqHC4BzVIzZ8Ei2KE1X3N2+4vuHkp8lX795/8ZcPMWmyJxvXSAoKfLS8vsJ3PkWkFXTz9RDK", + "Ni4C3Yz7jwrktllEQTeTEOC+BzVS9cwnCPkG0GFsYhC1+O8Xr34kQhKnPb+m6XWdHOWz4ZoMwDAZznw5", + "BLG7WEOggVeFuaNcllWhFmW7AHCN5nfYLREBRXby6PTU81CnoQQH9MSd+2CmjlmrT2gYphMYKvup8IrA", + "hqY63xKqgjgJjFr0nU07KWyiTFqB9DtNo/0Z3ZZEsxAOzcaPVKgXmuZ74LvsdIFsocOF/JTmkt2f/t5D", + "RhSCdzExItxaTyN/7u5/j93tSyWkFOZMM4zLbq4cf521gHSyaL714A4UGjkmfxMVyo5GK6g0xHrg4wzW", + "J+LmdHWRgkC6JnUInxwddRd+dNSE/c1hjUyWcnyxi46jo2OzU08OZGU77dStMsKjzs4hw/U26yXd1FHT", + "lHDBEw4LqtkKSKBwPjl9+Nmu8JzbOHUjLFuh/sN08sVnvGXn3Ag2NCf4pl3N4892NRcgVywFcglFKSSV", + "LN+Sn3idCBC0WO+zv5/4NRdr7hFh9NWqKKjcOiGa1jyn4kHfn538p1fhqBG0kYvShcJYGBRRrUzrqyDy", + "xeTdB68DjNQ9dr12MsMOmGNfhVBhGdZO0DOhTt6jbX3w9xPnII0/RB+HVZ5PfO3FgTdtla34w5ZW9F5v", + "zEJ2D2feCcZLqU6XVXnyHv+DenCwIlu0/0Rv+AnGhJ68byHCPe4hov1783n4xqoQGXjgxHyuUI/b9fjk", + "vf03mAg2JUhmriMslOl+tQWNT7CT9Lb/85an0R/762gVcx34+cSbYWIqdfvN960/2zSllpXOxDqYBR0Y", + "1vvWh8w8rFT375M1ZdoISa6GKJ1rkP2PNdD8xDUM6vza1OjvPcHGA8GPHbGqFLaIUFujfUPXl61cUGmL", + "ZXwj0FAxxHA3yYxx5EIhl2zMkvZhX0Xq8cbLJdj4W+/ZjcigWpCZFDRLqdLmD9daq6cbf7il/tWt7XEe", + "8dshmGhu6JejNPzkeK8zB8cdI2QG+0LOn/sJmwS0310w60H0Dc2IrzqVkJc0NxsOGTlz4n8LG7+3UPXp", + "paBPLLZ8NDnjG3/4FKFYgq+lIMp40ZygB94YocJokYYBLIAnjgUlM5FtXZuyiaRrvbE1OrrM7YS2b4y2", + "IZJKWqihh3dgpfxjmyb3WST/NAT+aQj801T0pyHwz9390xA40hD4p5nsTzPZ/0gz2SG2sZiY6cw/w9Im", + "9k2nrXmt3keb/hQ1i29XD2O6lslaaaTYCoPpY0IusfQLNbcErEDSnKRUWenKlSkqMLoTa5BB9vSKJy1I", + "bAylmfh+818bvHpVnZ4+BnL6oPuN0izPQ97c/xblXXxk80u+JleTq0lvJAmFWEFmk2HD+uj2q73D/q96", + "3Fe9xgqYBY+1dXypMqKq+ZylzKI8F3xB6EI0gddYkJULfALSAGfbUxGmpy5RhbnsaNe9vl3GvS259yWA", + 
"82YL94YUdMglHk1gCO/AUIJ/GRNH8D9aSr9pNavbMtKdY/e46p9c5WNwlU/OVz53J21gWvxvKWY+OX3y", + "2S4oNET/KDT5DpMKbieOuUKhabRL100FLV8oxpv7msDkMNAXb9E6xPftO3MRKJArf8E2catPT06wcthS", + "KH0yMddfO6Y1fPiuhvm9v51KyVbYBhqtm0KyBeM0T1zgZ9LEpj46Pp18+P8BAAD//3e/DWz5IQEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 4a97d94d07..4e233a8b20 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -2104,6 +2104,11 @@ func (v2 *Handlers) GetDebugSettingsProf(ctx echo.Context) error { return ctx.JSON(http.StatusOK, response) } +// GetConfig returns the merged (defaults + overrides) config file in json. +func (v2 *Handlers) GetConfig(ctx echo.Context) error { + return ctx.JSON(http.StatusOK, v2.Node.Config()) +} + // PutDebugSettingsProf sets the mutex and blocking rates and returns the old values. func (v2 *Handlers) PutDebugSettingsProf(ctx echo.Context) error { req := ctx.Request() diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index 1952c88e7a..c0e5c77990 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -2502,3 +2502,19 @@ func TestDebugExtraPprofEndpoint(t *testing.T) { require.Contains(t, string(body), `"block-rate":0`) } + +func TestGetConfigEndpoint(t *testing.T) { + partitiontest.PartitionTest(t) + + handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t, cannedStatusReportGolden) + defer releasefunc() + + err := handler.GetConfig(c) + require.NoError(t, err) + require.Equal(t, 200, rec.Code) + var responseConfig config.Local + + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &responseConfig)) + + require.Equal(t, handler.Node.Config(), responseConfig) +} From 046d01df5a6e25b67bb544eba1c990c0f8805d5b Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 8 Jul 2024 12:18:59 -0400 Subject: [PATCH 24/82] tests: increase ListPeersForTopic wait time in TestP2PRelay (#6056) --- network/p2pNetwork_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 7c94be98e4..3f77d55f69 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -880,7 +880,7 @@ func TestP2PRelay(t *testing.T) { // add a netC with listening address set and enable relaying on netB // ensure all messages from netB and netC are received by netA cfg.NetAddress = "127.0.0.1:0" - log.Debugf("Starting netB with phonebook addresses %v", phoneBookAddresses) + log.Debugf("Starting netC with phonebook addresses %v", phoneBookAddresses) netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) err = netC.Start() @@ -892,11 +892,11 @@ func TestP2PRelay(t *testing.T) { require.Eventually( t, func() bool { - return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) > 0 && + return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) >= 2 && len(netB.service.ListPeersForTopic(p2p.TXTopicName)) > 0 && len(netC.service.ListPeersForTopic(p2p.TXTopicName)) > 0 }, - 2*time.Second, + 10*time.Second, // wait until netC node gets actually connected to netA after starting 50*time.Millisecond, ) From a9641a3edccd102099ebeb1a4bb6c718cf25f212 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy 
<65323360+algorandskiy@users.noreply.github.com>
Date: Mon, 8 Jul 2024 14:07:45 -0400
Subject: [PATCH 25/82] ci: pin reviewdog version to v0.18.1 (#6058)

---
 .github/workflows/reviewdog.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 1eff6ac578..b02d99e7cc 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -69,7 +69,7 @@ jobs:
           cd ../../
       - name: Install reviewdog
         run: |
-          curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.17.4/install.sh | sh -s
+          curl -sfL https://raw.githubusercontent.com/reviewdog/reviewdog/v0.18.1/install.sh | sh -s -- v0.18.1
           reviewdog --version
      - name: Build custom linters
        run: |

From 292932940db138c000b2a31e994ce44e91f0fcf3 Mon Sep 17 00:00:00 2001
From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>
Date: Wed, 10 Jul 2024 12:00:37 -0400
Subject: [PATCH 26/82] algod: fix telemetry lookup if phonebook is used (#6061)

---
 cmd/algod/main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index c4d1c09bc8..311b1507e6 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -362,7 +362,7 @@ func run() int {

    // If the telemetry URI is not set, periodically check SRV records for new telemetry URI
    if remoteTelemetryEnabled && log.GetTelemetryURI() == "" {
-       toolsnet.StartTelemetryURIUpdateService(time.Minute, cfg, s.Genesis.Network, log, done)
+       toolsnet.StartTelemetryURIUpdateService(time.Minute, cfgCopy, s.Genesis.Network, log, done)
    }

    currentVersion := config.GetCurrentVersion()

From cf99017fa74daf738710165cefcb9bc95470da15 Mon Sep 17 00:00:00 2001
From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>
Date: Wed, 10 Jul 2024 12:01:07 -0400
Subject: [PATCH 27/82] tests: fix concurrent account commits in TestLedgerVerifiesOldStateProofs (#6060)

---
 ledger/ledger_test.go | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 968e6d8b21..f816ebb837 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -1683,6 +1683,15 @@ func TestLedgerVerifiesOldStateProofs(t *testing.T) {
    backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
    defer backlogPool.Shutdown()

+   // wait all pending commits to finish
+   l.trackers.accountsWriting.Wait()
+
+   // quit the commitSyncer goroutine: this test flushes manually with triggerTrackerFlush
+   l.trackers.ctxCancel()
+   l.trackers.ctxCancel = nil
+   <-l.trackers.commitSyncerClosed
+   l.trackers.commitSyncerClosed = nil
+
    triggerTrackerFlush(t, l)
    l.WaitForCommit(l.Latest())
    blk := createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
@@ -1714,7 +1723,7 @@ func TestLedgerVerifiesOldStateProofs(t *testing.T) {
    }
    l.acctsOnline.voters.votersMu.Unlock()

-   // However, we are still able to very a state proof sicne we use the tracker
+   // However, we are still able to very a state proof since we use the tracker
    blk = createBlkWithStateproof(t, maxBlocks, proto, genesisInitState, l, accounts)
    _, err = l.Validate(context.Background(), blk, backlogPool)
    require.ErrorContains(t, err, "state proof crypto error")
@@ -2934,7 +2943,7 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi
    // wait all pending commits to finish
    l.trackers.accountsWriting.Wait()

-   // quit the commitSyncer goroutine
+   // quit the commitSyncer goroutine: this test flushes manually with triggerTrackerFlush
l.trackers.ctxCancel() l.trackers.ctxCancel = nil <-l.trackers.commitSyncerClosed From b46e2c1c1009ed1ae36d1bd9c40a8b45b732e7df Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:01:41 -0400 Subject: [PATCH 28/82] tests: disable TestP2PRelay on CI (#6059) --- ledger/ledger_test.go | 5 ++++- network/p2pNetwork_test.go | 32 ++++++++++++++++++++++++++++---- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index f816ebb837..c97040f42c 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -1801,6 +1801,9 @@ func TestLedgerMemoryLeak(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Info) // prevent spamming with ledger.AddValidatedBlock debug message deadlock.Opts.Disable = true // catchpoint writing might take long + defer func() { + deadlock.Opts.Disable = false + }() l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) require.NoError(t, err) defer l.Close() @@ -2907,7 +2910,7 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi const inMem = true log := logging.TestingLog(t) - log.SetLevel(logging.Info) + log.SetLevel(logging.Debug) l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) require.NoError(t, err) defer l.Close() diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 3f77d55f69..5b3470689f 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -22,6 +22,9 @@ import ( "fmt" "io" "net/http" + "os" + "slices" + "strings" "sync" "sync/atomic" "testing" @@ -52,6 +55,13 @@ func (n *P2PNetwork) hasPeers() bool { return len(n.wsPeers) > 0 } +func (n *P2PNetwork) hasPeer(peerID peer.ID) bool { + n.wsPeersLock.RLock() + defer n.wsPeersLock.RUnlock() + _, ok := n.wsPeers[peerID] + return ok +} + func TestP2PSubmitTX(t *testing.T) { partitiontest.PartitionTest(t) @@ -794,8 +804,14 @@ func TestP2PHTTPHandler(t *testing.T) { func TestP2PRelay(t *testing.T) { partitiontest.PartitionTest(t) + if strings.ToUpper(os.Getenv("CIRCLECI")) == "TRUE" { + t.Skip("Flaky on CIRCLECI") + } + cfg := config.GetDefaultLocal() + cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses cfg.ForceFetchTransactions = true + cfg.BaseLoggerDebugLevel = 5 log := logging.TestingLog(t) log.Debugln("Starting netA") netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) @@ -883,6 +899,7 @@ func TestP2PRelay(t *testing.T) { log.Debugf("Starting netC with phonebook addresses %v", phoneBookAddresses) netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) + require.True(t, netC.relayMessages) err = netC.Start() require.NoError(t, err) defer netC.Stop() @@ -892,16 +909,23 @@ func TestP2PRelay(t *testing.T) { require.Eventually( t, func() bool { - return len(netA.service.ListPeersForTopic(p2p.TXTopicName)) >= 2 && - len(netB.service.ListPeersForTopic(p2p.TXTopicName)) > 0 && - len(netC.service.ListPeersForTopic(p2p.TXTopicName)) > 0 + netAtopicPeers := netA.service.ListPeersForTopic(p2p.TXTopicName) + netBtopicPeers := netB.service.ListPeersForTopic(p2p.TXTopicName) + netCtopicPeers := netC.service.ListPeersForTopic(p2p.TXTopicName) + netBConnected := slices.Contains(netAtopicPeers, netB.service.ID()) + netCConnected := slices.Contains(netAtopicPeers, netC.service.ID()) + return len(netAtopicPeers) 
>= 2 && + len(netBtopicPeers) > 0 && + len(netCtopicPeers) > 0 && + netBConnected && netCConnected }, 10*time.Second, // wait until netC node gets actually connected to netA after starting 50*time.Millisecond, ) require.Eventually(t, func() bool { - return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() + return netA.hasPeers() && netB.hasPeers() && netC.hasPeers() && + netA.hasPeer(netB.service.ID()) && netA.hasPeer(netC.service.ID()) }, 2*time.Second, 50*time.Millisecond) const expectedMsgs = 10 From 5869a00445c7860db42e12da10e8169e62b53c1e Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:02:45 -0400 Subject: [PATCH 29/82] logging: change transaction pool re-evaluation message from warn to info (#6047) --- data/pools/transactionPool.go | 11 ++++++++--- logging/telemetryspec/metric.go | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index afe12f2363..a2eef08bc3 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -29,6 +29,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" @@ -784,15 +785,19 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIDs map[transact case *ledgercore.LeaseInLedgerError: asmStats.LeaseErrorCount++ stats.RemovedInvalidCount++ - pool.log.Infof("Cannot re-add pending transaction to pool: %v", err) + pool.log.Infof("Pending transaction in pool no longer valid: %v", err) case *transactions.MinFeeError: asmStats.MinFeeErrorCount++ stats.RemovedInvalidCount++ - pool.log.Infof("Cannot re-add pending transaction to pool: %v", err) + pool.log.Infof("Pending transaction in pool no longer valid: %v", err) + case logic.EvalError: + asmStats.LogicErrorCount++ + stats.RemovedInvalidCount++ + pool.log.Infof("Pending transaction in pool no longer valid: %v", err) default: asmStats.InvalidCount++ stats.RemovedInvalidCount++ - pool.log.Warnf("Cannot re-add pending transaction to pool: %v", err) + pool.log.Infof("Pending transaction in pool no longer valid: %v", err) } } } diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go index 8ab269a2c2..2d43baae0d 100644 --- a/logging/telemetryspec/metric.go +++ b/logging/telemetryspec/metric.go @@ -46,6 +46,7 @@ type AssembleBlockStats struct { IncludedCount int // number of transactions that are included in a block InvalidCount int // number of transaction groups that are included in a block MinFeeErrorCount int // number of transactions excluded because the fee is too low + LogicErrorCount int // number of transactions excluded due to logic error (contract no longer valid) ExpiredCount int // number of transactions removed because of expiration ExpiredLongLivedCount int // number of expired transactions with non-super short LastValid values LeaseErrorCount int // number of transactions removed because it has an already used lease @@ -115,6 +116,7 @@ func (m AssembleBlockStats) String() string { b.WriteString(fmt.Sprintf("IncludedCount:%d, ", m.IncludedCount)) b.WriteString(fmt.Sprintf("InvalidCount:%d, ", m.InvalidCount)) b.WriteString(fmt.Sprintf("MinFeeErrorCount:%d, ", m.MinFeeErrorCount)) + 
b.WriteString(fmt.Sprintf("LogicErrorCount:%d, ", m.LogicErrorCount)) b.WriteString(fmt.Sprintf("ExpiredCount:%d, ", m.ExpiredCount)) b.WriteString(fmt.Sprintf("ExpiredLongLivedCount:%d, ", m.ExpiredLongLivedCount)) b.WriteString(fmt.Sprintf("LeaseErrorCount:%d, ", m.LeaseErrorCount)) From 1493410add4aaa4a5537037de1d88f166ccc85db Mon Sep 17 00:00:00 2001 From: Gary Malouf <982483+gmalouf@users.noreply.github.com> Date: Fri, 12 Jul 2024 10:49:26 -0400 Subject: [PATCH 30/82] P2P: Disable circuit relaying via libp2p.NoListenAddrs (#6064) --- network/p2p/p2p.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index ac0489d5e1..2877c6b2f3 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -115,12 +115,6 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. listenAddr = "/ip4/0.0.0.0/tcp/0" } - // the libp2p.NoListenAddrs builtin disables relays but this one does not - var noListenAddrs = func(cfg *libp2p.Config) error { - cfg.ListenAddrs = []multiaddr.Multiaddr{} - return nil - } - var disableMetrics = func(cfg *libp2p.Config) error { return nil } metrics.DefaultRegistry().Register(&metrics.PrometheusDefaultMetrics) @@ -130,7 +124,7 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. libp2p.Transport(tcp.NewTCPTransport), libp2p.Muxer("/yamux/1.0.0", &ymx), libp2p.Peerstore(pstore), - noListenAddrs, + libp2p.NoListenAddrs, libp2p.Security(noise.ID, noise.New), disableMetrics, ) From 8a44d87aac9e276bc84ef0d61697f777644f83bc Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 15 Jul 2024 14:43:14 -0400 Subject: [PATCH 31/82] rest api: Fix to Disable API authentication (#6067) Co-authored-by: Gary Malouf <982483+gmalouf@users.noreply.github.com> --- daemon/algod/api/server/router.go | 19 ++++++++++++++----- test/e2e-go/restAPI/other/misc_test.go | 19 ++++++++++++++----- test/framework/fixtures/libgoalFixture.go | 2 +- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/daemon/algod/api/server/router.go b/daemon/algod/api/server/router.go index cd0899c176..0b02bb8566 100644 --- a/daemon/algod/api/server/router.go +++ b/daemon/algod/api/server/router.go @@ -19,10 +19,11 @@ package server import ( "fmt" - "golang.org/x/sync/semaphore" "net" "net/http" + "golang.org/x/sync/semaphore" + "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" @@ -74,18 +75,26 @@ func registerHandlers(router *echo.Echo, prefix string, routes lib.Routes, ctx l // NewRouter builds and returns a new router with our REST handlers registered. 
func NewRouter(logger logging.Logger, node APINodeInterface, shutdown <-chan struct{}, apiToken string, adminAPIToken string, listener net.Listener, numConnectionsLimit uint64) *echo.Echo { - if err := tokens.ValidateAPIToken(apiToken); err != nil { - logger.Errorf("Invalid apiToken was passed to NewRouter ('%s'): %v", apiToken, err) - } + // check admin token and init admin middleware if err := tokens.ValidateAPIToken(adminAPIToken); err != nil { logger.Errorf("Invalid adminAPIToken was passed to NewRouter ('%s'): %v", adminAPIToken, err) } adminMiddleware := []echo.MiddlewareFunc{ middlewares.MakeAuth(TokenHeader, []string{adminAPIToken}), } + + // check public api tokens and init public middleware publicMiddleware := []echo.MiddlewareFunc{ middleware.BodyLimit(MaxRequestBodyBytes), - middlewares.MakeAuth(TokenHeader, []string{adminAPIToken, apiToken}), + } + if apiToken == "" { + logger.Warn("Running with public API authentication disabled") + } else { + if err := tokens.ValidateAPIToken(apiToken); err != nil { + logger.Errorf("Invalid apiToken was passed to NewRouter ('%s'): %v", apiToken, err) + } + publicMiddleware = append(publicMiddleware, middlewares.MakeAuth(TokenHeader, []string{adminAPIToken, apiToken})) + } e := echo.New() diff --git a/test/e2e-go/restAPI/other/misc_test.go b/test/e2e-go/restAPI/other/misc_test.go index 3f9da07c4a..eeaff9fcd1 100644 --- a/test/e2e-go/restAPI/other/misc_test.go +++ b/test/e2e-go/restAPI/other/misc_test.go @@ -39,12 +39,12 @@ func TestDisabledAPIConfig(t *testing.T) { localFixture.Setup(t, filepath.Join("nettemplates", "DisableAPIAuth.json")) defer localFixture.Shutdown() - testClient := localFixture.LibGoalClient + libgoalClient := localFixture.LibGoalClient - statusResponse, err := testClient.Status() + statusResponse, err := libgoalClient.Status() a.NoError(err) a.NotEmpty(statusResponse) - statusResponse2, err := testClient.Status() + statusResponse2, err := libgoalClient.Status() a.NoError(err) a.NotEmpty(statusResponse2) a.True(statusResponse2.LastRound >= statusResponse.LastRound) @@ -58,12 +58,21 @@ func TestDisabledAPIConfig(t *testing.T) { assert.True(t, os.IsNotExist(err)) // check public api works without a token - testClient.WaitForRound(1) + url, err := localFixture.NC.ServerURL() + a.NoError(err) + testClient := client.MakeRestClient(url, "") // empty token + + _, err = testClient.WaitForBlock(1) + assert.NoError(t, err) _, err = testClient.Block(1) assert.NoError(t, err) + _, err = testClient.Status() + a.NoError(err) + // check admin api works with the generated token - _, err = testClient.GetParticipationKeys() + _, err = libgoalClient.GetParticipationKeys() assert.NoError(t, err) + // check admin api doesn't work with an invalid token algodURL, err := nc.ServerURL() assert.NoError(t, err) diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index de1a06623d..bd4f615ae7 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -390,7 +390,7 @@ func (f *LibGoalFixture) dumpLogs(filePath string) { fmt.Fprintf(os.Stderr, "%s/%s:\n", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log scanner := bufio.NewScanner(file) for scanner.Scan() { - fmt.Fprint(os.Stderr, scanner.Text()) + fmt.Fprintln(os.Stderr, scanner.Text()) } fmt.Fprintln(os.Stderr) } From 48a539f3307ea970c7a1fd67266c15108a5fdebc Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 15 Jul 2024 15:13:26 -0400 
Subject: [PATCH 32/82] rest api: make RawTransactionAsync developer api (#6069) --- daemon/algod/api/algod.oas2.json | 4 + daemon/algod/api/algod.oas3.yml | 5 + .../v2/generated/experimental/routes.go | 451 +++++++++--------- daemon/algod/api/server/v2/handlers.go | 3 + .../algod/api/server/v2/test/handlers_test.go | 37 +- 5 files changed, 266 insertions(+), 234 deletions(-) diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index 4cc1e0ced7..9b11f26642 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -1565,6 +1565,7 @@ "/v2/transactions/async": { "post": { "tags": [ + "public", "experimental" ], "consumes": [ @@ -1603,6 +1604,9 @@ "$ref": "#/definitions/ErrorResponse" } }, + "404": { + "description": "Developer or Experimental API not enabled" + }, "500": { "description": "Internal Error", "schema": { diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index 2f57b62453..9b7ec0a37d 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -7012,6 +7012,10 @@ }, "description": "Invalid API Token" }, + "404": { + "content": {}, + "description": "Developer or Experimental API not enabled" + }, "500": { "content": { "application/json": { @@ -7039,6 +7043,7 @@ }, "summary": "Fast track for broadcasting a raw transaction or transaction group to the network through the tx handler without performing most of the checks and reporting detailed errors. Should be only used for development and performance testing.", "tags": [ + "public", "experimental" ], "x-codegen-request-body-name": "rawtxn" diff --git a/daemon/algod/api/server/v2/generated/experimental/routes.go b/daemon/algod/api/server/v2/generated/experimental/routes.go index 83312b906f..3d5c749701 100644 --- a/daemon/algod/api/server/v2/generated/experimental/routes.go +++ b/daemon/algod/api/server/v2/generated/experimental/routes.go @@ -130,231 +130,232 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9f5PbtpLgV0Fpt8qxT5yxHSf74qtXexM7yZuLnbg8TvZ2bd8LRLYkvKEAPgCckeLz", - "d79CN0CCJChRMxMnqdq/7BHxo9FoNBr988MsV5tKSZDWzJ5+mFVc8w1Y0PgXz3NVS5uJwv1VgMm1qKxQ", - "cvY0fGPGaiFXs/lMuF8rbtez+UzyDbRtXP/5TMM/a6GhmD21uob5zORr2HA3sN1VrnUz0jZbqcwPcUZD", - "nD+ffdzzgReFBmOGUP4oyx0TMi/rApjVXBqeu0+GXQu7ZnYtDPOdmZBMSWBqyey605gtBZSFOQmL/GcN", - "ehet0k8+vqSPLYiZViUM4XymNgshIUAFDVDNhjCrWAFLbLTmlrkZHKyhoVXMANf5mi2VPgAqARHDC7Le", - "zJ6+nRmQBWjcrRzEFf53qQF+hcxyvQI7ez9PLW5pQWdWbBJLO/fY12Dq0hqGbXGNK3EFkrleJ+xlbSxb", - "AOOSvf72Gfv888+/cgvZcGuh8EQ2uqp29nhN1H32dFZwC+HzkNZ4uVKayyJr2r/+9hnOf+EXOLUVNwbS", - "h+XMfWHnz8cWEDomSEhICyvchw71ux6JQ9H+vICl0jBxT6jxnW5KPP/vuis5t/m6UkLaxL4w/Mroc5KH", - "Rd338bAGgE77ymFKu0HfPsy+ev/h0fzRw4//8vYs+y//5xeff5y4/GfNuAcwkGyY11qDzHfZSgPH07Lm", - "coiP154ezFrVZcHW/Ao3n2+Q1fu+zPUl1nnFy9rRici1OitXyjDuyaiAJa9Ly8LErJalY1NuNE/tTBhW", - "aXUlCijmjvter0W+Zjk3NAS2Y9eiLB0N1gaKMVpLr27PYfoYo8TBdSN84IL+uMho13UAE7BFbpDlpTKQ", - "WXXgego3DpcFiy+U9q4yx11W7M0aGE7uPtBli7iTjqbLcscs7mvBuGGchatpzsSS7VTNrnFzSnGJ/f1q", - "HNY2zCENN6dzj7rDO4a+ATISyFsoVQKXiLxw7oYok0uxqjUYdr0Gu/Z3ngZTKWmAqcU/ILdu2//3xY8/", - "MKXZSzCGr+AVzy8ZyFwVUJyw8yWTykak4WkJceh6jq3Dw5W65P9hlKOJjVlVPL9M3+il2IjEql7yrdjU", - "GybrzQK029JwhVjFNNhayzGAaMQDpLjh2+Gkb3Qtc9z/dtqOLOeoTZiq5DtE2IZv//pw7sExjJclq0AW", - "Qq6Y3cpROc7NfRi8TKtaFhPEHOv2NLpYTQW5WAooWDPKHkj8NIfgEfI4eFrhKwInDDIKTjPLAXAkbBM0", - 
"4063+8IqvoKIZE7YT5654VerLkE2hM4WO/xUabgSqjZNpxEYcer9ErhUFrJKw1IkaOzCo8MxGGrjOfDG", - "y0C5kpYLCYVjzgi0skDMahSmaML9753hLb7gBr58MnbHt18n7v5S9Xd9745P2m1slNGRTFyd7qs/sGnJ", - "qtN/wvswntuIVUY/DzZSrN6422YpSryJ/uH2L6ChNsgEOogId5MRK8ltreHpO/nA/cUydmG5LLgu3C8b", - "+ullXVpxIVbup5J+eqFWIr8QqxFkNrAmH1zYbUP/uPHS7Nhuk++KF0pd1lW8oLzzcF3s2PnzsU2mMY8l", - "zLPmtRs/PN5sw2Pk2B5222zkCJCjuKu4a3gJOw0OWp4v8Z/tEumJL/Wv7p+qKl1vWy1TqHV07K9kVB94", - "tcJZVZUi5w6Jr/1n99UxAaCHBG9bnOKF+vRDBGKlVQXaChqUV1VWqpyXmbHc4kj/qmE5ezr7l9NW/3JK", - "3c1pNPkL1+sCOzmRlcSgjFfVEWO8cqKP2cMsHIPGT8gmiO2h0CQkbaIjJeFYcAlXXNqT9snS4QfNAX7r", - "Z2rxTdIO4bv3BBtFOKOGCzAkAVPDe4ZFqGeIVoZoRYF0VapF88NnZ1XVYhC/n1UV4QOlRxAomMFWGGvu", - "4/J5e5Liec6fn7Dv4rFRFFey3LnLgUQNdzcs/a3lb7FGt+TX0I54zzDcTqVP3NYENDgx/y4oDp8Va1U6", - "qecgrbjGf/NtYzJzv0/q/OcgsRi348SFDy2POXrj4C/R4+azHuUMCcere07YWb/vzcjGjbKHYMx5i8W7", - "Jh78RVjYmIOUEEEUUZPfHq413828kJihsDckk58MEIVUfCUkQjt3zyfJNvyS9kMh3h0hgGneRURLJEE2", - "KlQvc3rUnwz0LH8Cak1tbJBEnaRaCmPxXY2N2RpKFJy5DAQdk8qNKGPChu9ZRAPzteYV0bL/QmKXkPie", - "p0YE6y0v3ol3YhLmiN1HG41Q3ZgtH2SdSUiQa/Rg+LpU+eXfuFnfwQlfhLGGtI/TsDXwAjRbc7NOHJwe", - "bbejTaFv1xBpli2iqU6aJb5QK3MHSyzVMayrqp7xsnRTD1lWb7U48KSDXJbMNWawEagw9w9H0rDT+4t9", - "w/O1EwtYzsty3qqKVJWVcAWle7QLKUHPmV1z2x5+HDm8a/AcGXDMzgKLVuPVTKhi040uQgPbcLyBNu41", - "U5XdPg0HNXwDPSkIb0RVoxYhemicPw+rgyuQyJOaoRH8Zo2orYkHP3Fz+084s1S0ONIA2mC+a/DX8IsO", - "0K51e5/KdgqlC9JZW/eb0CxXmoagG95P7v4DXLediTo/qzRkfgjNr0AbXrrV9RZ1vyHfuzqdB05mwS2P", - "TqanwvQDjDgH9kPxDnRCS/Mj/oeXzH12UoyjpJZ6BAojKjKnFnQxO1TRTK4B6lsV25Aqk1U8vzwKymft", - "5Gk2M+nkfUPaU7+FfhHNDr3ZisLc1TbhYGN71T0hpLsK7Gggi+xlOtFcUxDwRlWM2EcPBOIUOBohRG3v", - "/Fr7Wm1TMH2ttoMrTW3hTnbCjTOZ2X+tts89ZEofxjyOPQXpboGSb8Dg7SZjxulmae1yZwulbyZN9C4Y", - "yVprI+Nu1EiYmveQhE3rKvNnM2GxoAa9gVoHj/1CQH/4FMY6WLiw/DfAgnGj3gUWugPdNRbUphIl3AHp", - "r5NC3IIb+Pwxu/jb2RePHv/98RdfOpKstFppvmGLnQXDPvNqOWbsroT7ydcRShfp0b98EmxU3XFT4xhV", - "6xw2vBoORbYvev1SM+baDbHWRTOuugFwEkcEd7UR2hmZdR1oz2FRry7AWvfSfaXV8s654WCGFHTY6FWl", - "nWBhunZCLy2dFq7JKWyt5qcVtgRZkJ+BW4cw7g24WdwJUY1tfNHOUjCP0QIOHopjt6mdZhdvld7p+i7U", - "G6C10skruNLKqlyVmZPzhEooKF75Fsy3CNtV9X8naNk1N8zNjdbLWhYjegi7ldPvLxr6zVa2uNl7g9F6", - "E6vz807Zly7y21dIBTqzW8mQOjvqkaVWG8ZZgR1R1vgOLMlfYgMXlm+qH5fLu9F2KhwooccRGzBuJkYt", - "nPRjIFeSnPkOqGz8qFPQ00dMsDLZcQA8Ri52MkdT2V0c23Ft1kZItNubncwj1ZaDsYRi1SHL26uwxtBB", - "U90zCXAcOl7gZ9TVP4fS8m+VftOKr99pVVd3zp77c05dDveL8daAwvUNamAhV2XXgXTlYD9JrfF3WdCz", - "RolAa0DokSJfiNXaRu/FV1r9BndicpYUoPiBlEWl6zNUGf2gCsdMbG3uQJRsB2s5nKPbmK/xhaot40yq", - "AnDza5MWMkdcDtHXCV20bCy3on5CGLYAR105r91q64qhA9Lgvmg7ZjynE5ohasyI+0XjN0OtaDpyZys1", - "8GLHFgCSqYX3cfDeF7hIjt5TNohpXsRN8IsOXJVWORgDReZV0QdBC+3o6rB78ISAI8DNLMwotuT61sBe", - "Xh2E8xJ2Gfr6GfbZ9z+b+78DvFZZXh5ALLZJobevTxtCPW36fQTXnzwmO9LUEdU68dYxiBIsjKHwKJyM", - "7l8fosEu3h4tV6DRpeQ3pfgwye0IqAH1N6b320JbVyMe7P6Z7iQ8t2GSSxUEq9RgJTc2O8SWXaOOLsGt", - "IOKEKU6MA48IXi+4seQGJWSBOk26TnAeEsLcFOMAjz5D3Mg/hxfIcOzc3YPS1KZ5jpi6qpS2UKTWgBbZ", - "0bl+gG0zl1pGYzdvHqtYbeDQyGNYisb3yPIvYPyD28b+6i26w8WhTd3d87skKjtAtIjYB8hFaBVhN/bi", - "HQFEmBbRRDjC9CincR2ez4xVVeW4hc1q2fQbQ9MFtT6zP7Vth8RFRg66twsFBg0ovr2H/JowS/7ba26Y", - "hyOY2FGdQ/5aQ5jdYcyMkDlk+ygfn3iuVXwEDh7SulppXkBWQMl3CecA+szo874BcMfb566ykJEjbnrT", - "W0oOfo97hlY4nkkJjwy/sNwdQfcUaAnE9z4wcgE4doo5eTq61wyFcyW3KIyHy6atToyIt+GVsm7HPT0g", - "yJ6jTwF4BA/N0DdHBXbO2rdnf4r/BOMnaOSI4yfZgRlbQjv+UQsY0QX7GKfovPTYe48DJ9nmKBs7wEfG", - "juyIYvoV11bkosK3zvewu/OnX3+CpOGcFWC5KKFg0Qd6BlZxf0YupP0xb/YUnKR7G4I/UL4llhPcdLrA", - "X8IO39yvKDYhUnXcxVs2Maq7n7hkCGjweHYieNwEtjy35c4JanYNO3YNGpipF+TCMLSnWFVl8QBJ+8ye", - 
"Gb11Nmkb3WsuvsChouWlfM3oTbAfvje9h0EHHf4tUClVTtCQDZCRhGCS7wirlNt14cOfQgBMoKQOkJ5p", - "o2m+uf7vmQ6acQXsP1XNci7xyVVbaGQapVFQQAHSzeBEsGZO75zYYghK2AC9JPHLgwf9hT944PdcGLaE", - "6xAz6Br20fHgAepxXiljO4frDvSh7ridJ64PNFy5i8+/Qvo85bDHkx95yk6+6g3eWLvcmTLGE65b/q0Z", - "QO9kbqesPaaRad5eOO4kW07XP2iwbtz3C7GpS27vwmoFV7zM1BVoLQo4yMn9xELJb654+WPTDeMhIXc0", - "mkOWYxTfxLHgjetDgX9uHCGFO8Dk9D8VIDinXhfU6cATs/VUFZsNFIJbKHes0pADxbs5ydE0Sz1h5Amf", - "r7lc4YNBq3rlnVtpHGT4tSHVjK7lYIikUGW3MkMld+oC8G5qIeTRiVPA3ZOuryGnB8w1b+bzUa5TbuZo", - "D/oWg6SRbD4bffE6pF61L15CTjduc8Jl0JH3Ivy0E080pSDqnOwzxFe8Le4wuc39bVT27dApKIcTRx6/", - "7ccxp1/33C53dyD00EBMQ6XB4BUVq6kMfVXLOEY7uArujIXNUJNPXf8+cvxej74XlSyFhGyjJOySaUmE", - "hJf4MXmc8Joc6YwCy1jf/hukA38PrO48U6jxtvjF3e6f0L7Fynyr9F2ZRGnAyeL9BAvkQXO7n/KmdlJe", - "lgnToo/g7DMAM2+cdYVm3BiVC5TZzgsz917BZI304Z5d9L9q4lLu4Oz1x+3Z0OLkAKgjhrJinOWlQA2y", - "ksbqOrfvJEcdVbTUhBNXeIyPay2fhSZpNWlCi+mHeic5OvA1mqukw8YSEmqabwGC8tLUqxUY23vrLAHe", - "Sd9KSFZLYXGujTsuGZ2XCjR6Up1Qyw3fsaWjCavYr6AVW9S2K/1jgLKxoiy9Qc9Nw9TyneSWlcCNZS+F", - "fLPF4YLRPxxZCfZa6csGC+nbfQUSjDBZ2tnsO/qKfv1++Wvv44/u7vQ5OJ22GRNmbpmdJCn/97N/f/r2", - "LPsvnv36MPvqf5y+//Dk4/0Hgx8ff/zrX/9f96fPP/71/r//a2qnAuyp8FkP+flz/zI+f47Pn8hVvw/7", - "J9P/b4TMkkQWe3P0aIt9hqkiPAHd7yrH7BreSbuVjpCueCkKx1tuQg79G2ZwFul09KimsxE9ZVhY65GP", - "iltwGZZgMj3WeGMpauifmQ5UR6Okjz3H87KsJW1lkL4pDjP4l6nlvElGQHnKnjKMVF/z4OTp/3z8xZez", - "eRth3nyfzWf+6/sEJYtim8ojUMA29VaMgyTuGVbxnQGb5h4Ie9KVjnw74mE3sFmANmtRfXpOYaxYpDlc", - "CFnyOqetPJfk4O/OD5o4d95yopafHm6rAQqo7DqVv6gjqGGrdjcBem4nlVZXIOdMnMBJX+dTuPeid+or", - "gS+DY6pWasprqDkHRGiBKiKsxwuZpFhJ0U8vvMFf/ubOn0N+4BRc/TlTHr33vvvmDTv1DNPco5QWNHSU", - "hCDxlPbBkx2HJMfN4piyd/KdfA5L1D4o+fSdLLjlpwtuRG5OawP6a15ymcPJSrGnIR7zObf8nRxIWqOJ", - "FaOgaVbVi1Lk7DJ+kLTkScmyhiO8e/eWlyv17t37gW/G8Pngp0ryF5ogc4Kwqm3mU/1kGq65Ttm+TJPq", - "BUemXF77ZiUhW9WkIA2phPz4aZ7Hq8r0Uz4Ml19VpVt+RIbGJzRwW8aMVU08mhNQfEiv298flL8YNL8O", - "epXagGG/bHj1Vkj7nmXv6ocPP8fIvjYHwi/+ync0uatgsnZlNCVFX6mCC6dnJfqqZxVfpUxs7969tcAr", - "3H2Ulzeo4yhLht06UYchwACHahfQhDiPbgDBcXRwMC7ugnqFtI7pJeAn3MJuAPat9iuKn7/xdh2Iwee1", - "XWfubCdXZRyJh51psr2tnJAVvDGMWOFr1SfGWwDL15Bf+oxlsKnsbt7pHhx+vKAZWIcwlMuOIgwxmxIa", - "KBbA6qrgXhTnctdPa2MoogIHfQ2XsHuj2mRMx+Sx6aZVMWMHFSk1ki4dscbH1o/R33zvVRYCTX12Egze", - "DGTxtKGL0Gf8IJPIeweHOEUUnbQfY4jgOoEIIv4RFNxgoW68W5F+anlC5iCtuIIMSrESi1Qa3v8Y2sMC", - "rI4qfeZB74XcDGiYWDL3lF/Qxeqf95rLFbjr2V2pyvCSsqomnTbwPbQGru0CuN2r55dxQooAHT4przHy", - "GjV8c7cE2Lr9FhY1dhKu3asCFUXUxnsvn4z7nxHgUNwQntC9fSmcjL51PeoSGQfDrdxgt3nWete8mM4Q", - "Lvq+AUxZqq7dvjgolM+2SUldovulNnwFI2+X2Ho3MR9Gx+KHgxySSJIyiFr2RY2BJJAEmRpnbs3JMwzu", - "izvE+MzsOWSGmchA7G1GmETbI2xRogDbeK7S3nPdsaJSVuAx0NKsBbRsRcEARhcj8XFccxOOI+ZLDVx2", - "knT2G6Z92Zea7jzyJYySojaJ58Jt2Oegg3e/T1AXstKFVHTxo39CWjn39sLwhdR2KImiaQElrGjh1DgQ", - "Spswqd0gB8ePyyXylizllhgpqCMBwM8B7uXygDGyjbDJI6TIOAIbHR9wYPaDis+mXB0DpPQJn3gYG6+I", - "6G9IB/aRo74TRlXlLlcxYm/MAwfwqShayaLnUY3DMCHnzLG5K146Nuff4u0ggwxp+KDo5UPzrjf3xx4a", - "e0xTdOUftSYSEm6ymliaDUCnRe09EC/UNqMI5eRbZLFdOHpPxi5gvHTqYFIuunuGLdQW3bnwaiFf+QOw", - "jMMRwIh0L1thkF6x35icRcDsm3a/nJuiQoMk4xWtDbmMCXpTph6RLcfI5bMovdyNAOipodpaDV4tcVB9", - "0BVPhpd5e6vN27SpISwsdfzHjlByl0bwN9SPdRPC/a1N/DeeXCycqE+SCW+oWbpNhkLqXFHWwWMSFPbJ", - "oQPEHqy+6suBSbR2fb26eI2wlmIljvkOjZJDtBkoAR/BWUc0zS5TngLuLQ94j1+EbpGyDnePy939yIFQ", - "w0oYC63RKPgF/R7qeI7pk5Vajq/OVnrp1vdaqebyJ7M5duws85OvAD3wl0Ibm6HFLbkE1+hbg0qkb13T", - "tATadVGkYgOiSHNcnPYSdlkhyjpNr37e75+7aX9oLhpTL/AWE5IctBZYHCPpuLxnavJt37vgF7TgF/zO", - "1jvtNLimbmLtyKU7x5/kXPQY2D52kCDAFHEMd20UpXsYZBRwPuSOkTQa+bSc7LM2DA5TEcY+6KUWwt7H", - 
"bn4aKbmWKA1gOkJQrVZQhPRmwR4moyRypZKrqIpTVe3LmXfCKHUdZp7bk7TOu+HDmBN+JO5nQhawTUMf", - "vwoQ8jayDhPu4SQrkJSuJK0WSqImdvHHFpGu7hPbQvsBAEkn6Dc9Y3brnUy71GwnbkAJvPBvEgNhffuP", - "5XBDPOrmY+7Tncyn+48QDog0JWxU2GSYhmCEAfOqEsW2Z3iiUUeVYPwo7fKItIWsxQ92AANdJ+gkwXVS", - "aXtXa69gP8U376l7lZHvtXcsdvTNcx+AX9QaLRgdz+Zh3vbmrTZx7d//fGGV5ivwVqiMQLrVELicY9AQ", - "ZUU3zApyJynEcgmx9cXcxHLQAW6gYy8mkG6CyNImmlpI++WTFBkdoJ4WxsMoS1NMghbGbPJvhlauINNH", - "qqTmSoi25gamqmS4/vewy37mZe0eGUKb1j3Xm526l+8Ru361+R52OPJBr1cH2IFdQc3Ta0AaTGn6m08m", - "SmB9z3RS/OPzsrOFR+zUWXqX7mhrfFGGceJvb5lO0YLuUm5zMFonCQfLlN24SPsmuNMDXcT3SfnQJoji", - "sAwSyfvxVMKEEpbDq6jJRXGIdt8ALwPx4nJmH+ez23kCpG4zP+IBXL9qLtAkntHTlCzDHceeI1HOq0qr", - "K15m3l9i7PLX6spf/tg8uFd84pdMmrLffHP24pUH/+N8lpfAddZoAkZXhe2qP82qqIzD/quEsn17RSdp", - "iqLNbzIyxz4W15jZu6dsGhRFaf1noqPofS6WaYf3g7zPu/rQEve4/EDVePy0Nk9y+Ok6+fArLspgbAzQ", - "jjin4+KmVdZJcoV4gFs7C0U+X9mdspvB6U6fjpa6DvAknOtHTE2ZfnFIn7gSWZF3/uF3Lj19q3SH+fvI", - "xKTz0G8nVjkhm/A44qsd6lf2hakTRoLXL6tf3Gl88CA+ag8ezNkvpf8QAYi/L/zv+L548CBpPUyqsRyT", - "QC2V5Bu430RZjG7Ep32AS7iedkGfXW0ayVKNk2FDoeQFFNB97bF3rYXHZ+F/KaAE99PJlEd6vOmE7hiY", - "KSfoYiwSsXEy3VDJTMOU7PtUYxCsIy1k9r4kAxljh0dI1hs0YGamFHnatUMujGOvkpwpXWOGjUe0tW7E", - "Woz45spaRGO5ZlNypvaAjOZIItMk07a2uFsof7xrKf5ZAxOFe9UsBWi813pXXXgc4KgDgTStF/MDk52q", - "Hf42epA99qagC9qnBNlrv3ve2JTCQlNFf470AI9nHDDuPd7bnj48NVM027rrgjntHTOldHpgdN5YNzJH", - "shS6MNlSq18hbQhB+1EiEUYwfApU8/4KMuW512cpjVG5rejezn5ou6e/jcc2/tZv4bDopurYTS7T9Kk+", - "biNv8ug16XTNHsljj7DYw6AbGjDCWvB4Rc6wWAYleB9xSeeJskB0IszSpzKO5Tyl8dtT6WEexL+W/HrB", - "UzVi3FvIwRRtb8dPyioWOocNME2OA5qdRR7cTVtBmeQq0K0NYpiV9obvGpp28oumfcAgRcVPlzm5KZRG", - "JYap5TWXVEXc9SN+5XsbIBO863WtNOaBNGmXrgJysUmqY9+9e1vkQ/edQqwEFciuDUQVmP1AjJJNIhX5", - "KtZN5g6PmvMleziPysD73SjElTBiUQK2eEQtFtzgddmYw5subnkg7dpg88cTmq9rWWgo7NoQYo1izdsT", - "hbzGMXEB9hpAsofY7tFX7DN0yTTiCu47LHohaPb00VfoUEN/PEzdsr7A+T6WXSDPDs7aaTpGn1QawzFJ", - "P2ra+3qpAX6F8dthz2mirlPOErb0F8rhs7Thkq8gHZ+xOQAT9cXdRHN+Dy+SrAFgrFY7Jmx6frDc8aeR", - "mG/H/ggMlqvNRtiNd9wzauPoqS2vTJOG4ajWv68XFeAKH9H/tQrufz1d1yd+xvDNSMwWein/gDbaGK1z", - "xin5Zylaz/RQr5Odh9zCWECrqZtFuHFzuaWjLImO6ktWaSEt6j9qu8z+4p7FmueO/Z2MgZstvnySKETV", - "rdUijwP8k+NdgwF9lUa9HiH7ILP4vuwzqWS2cRyluN/mWIhO5aijbtolc8wvdP/QUyVfN0o2Sm51h9x4", - "xKlvRXhyz4C3JMVmPUfR49Er++SUWes0efDa7dBPr194KWOjdKpgQHvcvcShwWoBVxgxl94kN+Yt90KX", - "k3bhNtD/vv5PQeSMxLJwlpMPgciiuS9Y3knxP79sM5+jYZUiEXs6QKUT2k6vt/vE3obHad369ltyGMNv", - "I5ibjDYcZYiVEe97cq9v+vwe/kJ9kGjPOwrHR78w7d7gKMc/eIBAP3gw92LwL4+7n4m9P3iQTkCcVLm5", - "X1ss3OZFjH1Te/i1SijAQtXCxqHI50dIKCDHLin3wTHBhR9qzroV4j69FHE38V1pb9P0KXj37i1+CXjA", - "P/qI+J2ZJW5gG6Uwfti7FTKTJFM03yM/d86+VtuphNO7gwLx/AFQNIKSieo5XMmgAmjSXH/QXySiUTfq", - "AkrlHplxUaBYn//nwbNb/HwPtmtRFj+3ud16F4nmMl8nvYQXruPfSUbvXMHEKpN1RtZcSiiTw9Hb9u/h", - "DZx4pf9DTZ1nI+TEtv0KtLTc3uJawLtgBqDChA69wpZughir3bRZTVqGcqUKhvO0RS1a5jgs5ZwqoZmI", - "b8ZhN7X1fqsYC+4TDi1FiW6Yabsxtsw0tyMJtLDeeagv5MbB8uOG1Aw0OmjGxQYvZsM3VQl4Mq9A8xV2", - "VRJ63TGFGo4cVaxgpnKfsCUmrFDM1loytVxGywBphYZyN2cVN4YGeeiWBVuce/b00cOHSbUXYmfCSgmL", - "YZk/tkt5dIpN6IsvskSlAI4C9jCsH1uKOmZjh4Tja0r+swZjUzwVP1DkKlpJ3a1N9SSb2qcn7DvMfOSI", - "uJPqHtWVIYlwN6FmXZWKF3NMbvzmm7MXjGalPlRCnupZrlBb1yX/pHlleoLRkNlpJHPO9HH2p/JwqzY2", - "a8pPpnITuhZtgUzR87lBPV6MnRP2nFSoTQF/moRhimy9gSKqdkmPeCQO9x9reb5G3WRHAhrnldMLsQZ2", - "1lpuoujDpvoRMmwHt6/FSqVY50zZNehrYQAj8uEKuukQm9ygXjce0iN2l6drKYlSTo4QRptaR8eiPQBH", - "kmxwKkhC1kP8kZopqsd8bF3aC+yVjsXoFbntWf1Dcr2QYpu99MaFnEslRY6lEFKSNKZum2amnFA1Im1f", - "NDN/QhOHK1lat4kF9lgcLbYbGKFH3NDkH311m0rUQX9a2PqSayuwxnM2KOah0rU3iAlpwFezckQU80ml", - 
"E05NyUCIxoHiSDLCrEwjGs5v3bcfvP4bk2JcComaLo82/z4jk1VpBFqmJROWrRQYv55uNI956/qcYJbG", - "ArbvT16olcgvxArHIDc6t2zyGR0OdRY8SL3Hpmv7zLX1ufObnzvuYDTpWVX5ScfroCcFSbuVowhO+S0F", - "R5IIuc348Wh7yG2v6zfep47Q4Aq91qDCe3hAGE0t7e4o37i3JVEUtmAUUZlMoCtkAowXQgYTavqCyJNX", - "Am4MnteRfibX3NLbYRJPewO8HAmAwAhlssHfdqh+5QCHElxjmGN8G9sy4COMo2nQSvxc7lg4FI66I2Hi", - "GS8b1+lEUW+UqrwQVWBwUa/Md4pxOMadhZDJDroOhu813bEax7E30ViOwkVdrMBmvChSqa2+xq8Mv4Yg", - "MdhCXjdFqJrowG6O8iG1+YlyJU292TNXaHDL6aK6+QlqiGv3hx3GTDuLHf6bqsA0vjPeafroqNzgIV0c", - "l5h/GGWcknodTWdGrLLpmMA75fboaKe+GaG3/e+U0kO47h8iGrfH5eI9SvG3b9zFESfuHfin09XS5NVF", - "X3CF30PCoyYjZJcr4VU2qDOGXg+4eYkt6wEfGiYBv+LlSCR8bCuh+5XsB2Px8Plo+gZufXouy9leFjSa", - "8oh8hXvWl6EJccw/mNyD785q4de6F6HjtrvvO5Y68hFrmcWohe5mRrR2g4+1on1/NZYiIdTpwO9xPRDv", - "xTP3aeDhSqg6eF8FH+jwJKRffQqeTt2PkfUnIwt+b6vFqI3lja9fS8v0b/LvfyYrLANp9e4PYHEZbHq/", - "qExC2iX1VNuENaUPJ5VC7NyKU2rYpMqleNkw6MqItXRoaVB+ZkBWz6eIAwN8fJzPzoujLsxUyZ0ZjZI6", - "di/Eam0xY//fgBegXx2oSNBWIcAjVikj2gqkpRvMp4Bd43AnU4MNHAGLuKLCcKzghHoFucWys61znQY4", - "pr6CmywYff67MsH4c7qJyfAFCfZVIRjWmj1wxw8SJ0XJv6hO58n0nPtnjQs1RYBdc9Oma+nFTE+O3Fwu", - "IcesyHsTVf3HGmSUBGke9DIIyzLKWyWaOCbM63281rEFaF8eqb3wRPV1bg3OWBz7JezuGdahhmTh0CaI", - "7yaJgxEDZAILOaTHFMnea0yYhjIQC8El2KdibotjjOZ8jtKu3XCuQJLu4mhTse2ZMl30fNJcrutRaR8x", - "JGcsl9WwZvL4++M5lqg23kGON4mH41c6Ox8Wzrn2iYsxrVhjOwkpjMGE30IOQZqlFJe+fgBihSxV11wX", - "ocWdJIWiu0mkgV42M4s2gGPo5JAoxYCxUHmpnBiRjQWUdWMmGofDe4Y8Q9sEPgjXErSGojGJlMpAZlUI", - "+NgHxz5UkPvrjZBgRssfEXCjqa9ft7m9sQwcx1TX3Hu9xgtkGjbcQaejDNzjc+5D9jP6HoLwQxmwgxqm", - "hl4P16MNoTvCDJAYU/2S+dvycHD/TZRNQkrQWbA89dNxy25GNsy7WdQ5XdDxwWgUcpNz5+xhJUk9TT5c", - "Ze+NEAXJX8LulB5BoZBv2MEYaJKcCPQo4Whvk+9U/WZScK/uBLzfN49cpVSZjRg7zoc5xPsUfynyS8Ac", - "gI2L+0iNdvYZ6tgba/b1ehdyZlcVSCjunzB2JimoKBi2u+UFe5PLe3bf/Fuctagprb9Xqp28k+noDEy4", - "r2/JzcIw+3mYAcfqbjkVDXIgQ/VWjrncXGNy/m4Vz5Opr/KhqblfRb4lKoIiJZNckMXqGR70lOIIUyBE", - "uTrQkMmZt3QxU6qUL+9N0jS4odKYiidDgCzIKdkCGij84EkEJOuiJ04hpb7zSe/Ukmlojcg3zf43LOGe", - "etH3Z25m6fK7pdLQKcbuelOmzybwBdNo4n8WwmqudzfJ0TcoIT/Qnoxi+aA7VuOJ1S6k9cYa4rAs1XWG", - "zCpr6lyknraunelexqHoWtvPneoFRH5d3HhBbcfWvGC50hryuEc63pOg2igNWanQzStlgV5aJ3dvMMhL", - "slKtmKpyVQDVi0lT0NhctZQcxSaIvGqSKCDawWhh6hPR8cQp3Z1KdqQMRa3VEbXzc6DI9TarEy06I1vm", - "iMcyGJ/FyWOIGg/h3VP7P82bl2KLdAM6deSXzOoa5sy36NfI9gefa2AbYQyB0tDStShLDBwX28jy2jgu", - "pFE7Ivaeo1vllUDfm24SAZKGK3fnNZkVYh5wEac9YnatVb1aRwmmGzjDk1fX/kEcj/KTqdE9CiPI3BRP", - "2EYZ61+aNFK75Nbl7LNcSatVWXaVUiSir7ym/SXfnuW5faHU5YLnl/fxXSuVbVZazEN8dd85sJ1J91KL", - "dS/gjMqZH07VS+3QVc4T7WQG2WNxRxd2j8B8f5iDHta5nw0X1l9Xl5mmnzFnknGrNiJPn6k/l7fdqI9c", - "ikUlc5ZRbUXKMoHN8LDHl1XjXIEscohmkDxZHO6MeUbgjczIbtx/UQLvj8uW4BnNyEU5ZC5eisryUVmv", - "BwBCSqHPttZUkDGWxBquolaUKgFN5H1AJ94q6Il0O9jcCHcOlIVbATXwfmwA/IyUD3PKLUeelAu1Dd/v", - "t8nnbgT8x/1U3mEeYy5eFy1paXLyColqRjhCOsX1Xn+oNxj2vpjqFdUUz514w0cAjPtJdWCY5C11LBhL", - "LkooslTtxfNGRzWPXto+NKtfEl0Yz8lzXofSh27sWoNPnEIivu7avyruSEk1zYeaZFnAFiiu41fQimoa", - "ziP7C5RU8rCnDFBVVsIVdNzHfDaXGkVNcQWhr2k6swKgQmtkX0eW8ouK7/Ke4sSvPYs8a6ZgN6lJIcTS", - "TrEDapKkUmcrMzomZupRchBdiaLmHfyZY0WOrhrQHeUEqgZvhCy8I6dO8xON8DoMcBb6p0SZgIn30/jQ", - "0Swojbp9DOign2Rtxk69TLtJxqmKGgMLzlY0hlgi8ZZvmIpfy3GF5JDk2+fWxH0SSkaI/WYLOUo1/r0D", - "hX/xjBgpfNYTpHYJUNCrwHVJaNvXIJlUUYnJa26ap0qbQzH8QBNjIyH9a/oGRuXWm/H2O8twMGZ6ydRG", - "HxK6odObq+d/l5O49yCOjpeiEQM+/G+P/itQt392YAMs5S3dfjrZH4s0+lvMc/E5W9RhoLJU11QzMn6H", - "PodgByXqCyYgL5aL5loOXptzn96zr+oQkb/6hu+Y0viPe3X+s+alWO6QzxD4oRsza+5IyBteySPAe4G6", - "ifeLV/MAWNC2qDAVrVtMHTMabudGiYB2F3ko7qPYhl9CvA3o7ED8M7eOcZp6gZoLd2X3tnOIBb/4kKJl", - 
"w4v4pY+JIrtl1EPqYNf7f7axcPFUIb9bVfI8VAj1JYq6fAarAAfismvY7A+WHPK1QAJNZeGWaHWIri9u", - "oDI9knWlIhDGyq90wB5UXB1UnrnVMiZqfns1NvaEmU5ayl3vwlSvmwHQcZ3GQ+DHZSs/Df6TOVzHljEF", - "/D8K3kcK1cbwUk3aT4DlTgaOBKykrV6obaZhaQ45mJC62j3ndZu7I6hYhcw1cEMeN+c/+odnm6JUSPcQ", - "Jp/QxqbZjFLAUsiWWQpZ1TbxjsFMpXIXISxW+iNaR0xoY1KCEyavePnjFWgtirGNc6eDSjrGJSKCocP3", - "Tagwmjt1OIAw7RsO4zNbNXrczF3gVISK3DWN5bLguoibC8ly0O7eZ9d8Z25uUWqMA4dsSjySZrpZAyLr", - "EpI2AVLuvFH4lvaeBkB+h4afCQYb9AtOGGtItWPViH1mCMOfwmCz4dusVCuMIhw5ED43LVr46AmoJKrB", - "ST6btu4wjxG/wv5pMC2/Z0RW4axTpth/7n/ErcRn5E9S2L0nn3SU/bBO8rulgxmQKlet8z8Ry/A8piJx", - "ffKVOBo3CJshVCXQHkSbCCP2oa5efGQX0Q3Ch3HHSvDp5c66nhapeF/SDGSoMTB73PvBtK7sPPfuWUNV", - "2kDVQEiZ+2jpIzVtpJ8P99IIeFSb3p/17rSNy4wb55gacfvjo7NKVVk+xeeTKncU3kzgIe3COEIfkRFg", - "ZN2Ne4xpatl08h51itocWyZvtKjOIWtXle979I+piUY4etcEoZbIy6hyO2q3MJKnUabM+zFmXTVYwyQY", - "ZxryWqOa+JrvDpcdG8kYffG3sy8ePf774y++ZK4BK8QKTJt1vFe2q/ULFLKv9/m0noCD5dn0JoTsA4S4", - "YH8MQVXNpvizRtzWtClFB0XLjtEvJy6AxHFMlIu60V7hOK1r/x9ru1KLvPMdS6Hgt98zrcoyXfWhkasS", - "BpTUbkUmFPcCqUAbYaxjhF0LqLCtR7RZo3oQc/9eUTYZJXMI+mNPBcKOuFylFjLmUIv8DGO7vdWIwbYq", - "Pa8iS8++dfl3GmnoUGhEr5gFsEpVXrQXS5aCCCOIdBRZ6xWfqBGPfGQbZkvesilC9J7nadKLC2bv5/bd", - "Yq42zendJibEi3Aob0CaY/aJ8bwFN+EkrWr/D8M/EokY7oxrNMv9LXhF8n1ws6L8k0AbBuUnyAMBGIm2", - "7cRJRoFiUSJiTVYCtCcEA3Jf/HjZGpYPhoUgJKHDAfDi8Nm2XRPJ4MH5nTP6vmyQEi3l/RgldJZ/KCI3", - "sN7mIom2yCtNrAVDbEkNxcIo3No8a6KYR14lg2BnrZRl7mValokgadLj4JmKCcc9CfQVLz891/hWaGPP", - "EB9QvB4PjYojZWMkEyrNzfL0veCT5o6iYu9uavkKA7P/A9weJe85P5Q3wg9uM1TuYMX6VbgVKNabXeOY", - "5GT16Eu28MU2Kg25MH3j/nUQTprAUNBi6R1aYWsPRKIeWufPyt6CjJfBE4f9EJm3Gpu9h7A9or8zUxk5", - "uUkqT1HfgCwS+EvxqLg474Hr4paFGW6W9iVK4HZk2pdh2eGpy6PUJu7SqQ0M1zn5tu7gNnFRt2ubmrNo", - "cn2Hd+/e2sWUVEPpWgyuO+Y6upOiDEeVZPgNshwRjvwYft4Uxfw8lveWcruO5Obu7UctyoMOK51M6x/n", - "sxVIMMJgLvG/+9oxn/YuDRBQ5oXhUSVYb5MuhhCTWGtn8miqKIf6hPTpvlsi5zVGNea1FnaHdYODAk38", - "PZmP6bsmt4fPDdPY0vzdZ9UlNLXb20wgtQm363eKl3gfkYlPultIlSfsG8rw7Q/KX+8t/g0+/8uT4uHn", - "j/5t8ZeHXzzM4ckXXz18yL96wh999fkjePyXL548hEfLL79aPC4eP3m8ePL4yZdffJV//uTR4smXX/3b", - "PceHHMgEaEjt/3T2f7KzcqWys1fn2RsHbIsTXonvwe0NvpWXCutaOqTmeBJhw0U5exp++l/hhJ3katMO", - "H36d+fpMs7W1lXl6enp9fX0SdzldYeh/ZlWdr0/DPFhtsCOvvDpvfPTJDwd3tNUe46Z6UjjDb6+/uXjD", - "zl6dn7QEM3s6e3jy8OSRL20teSVmT2ef4094eta476eYX/PU+NT5p02s1sf54FtVUWJ998nTqP9rDbzE", - "BDvujw1YLfLwSQMvdv7/5pqvVqBPMHqDfrp6fBqkkdMPPnPCx33fTmPPkNMPnQQTxYGejedD0ib5QqlL", - "NIkH+eie6flxnMSVuc8Lh35qic4X5rxlhKG8MtqcZ0/fpnQv3oeyqhelyBld30i/bnMi8mrShrTsAxVt", - "s7a0f8sMHYN7mH31/sMXf/mYErL6gLz0BsHWAuJdcjHKCwMUTgJc/6xB71rA0Fo/i8EYmgvT2dO2llW+", - "8IGf7YT95D0d8CvxlMYj1AeFNYnnQqcRwNwQKbgaLLzHGn/o+ofk8Pjhw3DyvVwdkdWpp9YY3V3bw8Av", - "6Jh0Bp3C1wmhyC0mQ3wMKfYnQymXHDaF5ORVj+62G35JVhd0qGPax816jHofXURyEz/ityUw99+wpNGE", - "oGyaaSiUfBxyy5ETGFxpY8VYKUjt592bUrWrP85nT46khr0Kqk7+0AT4L3npQIYipI0hCB59OgjOJXl8", - "umuHrseP89kXnxIH59IxL14ybBmV301QvLyU6lqGlk6WqTcbrncoqdgpe+yzHKEtMbQjuqeLlbsz/HZG", - "bBkLkVSghXsw8nL2/uOh6+X0Qyi7vv8y6pTc9v7KUYeJl9y+ZqcLLLU2tSmYqPH4UlAFZk4/4Akd/f3U", - "a+LTH1GZRlLaaUjyNdKS0rmkP3ZQ+MFu3UL2D+faROPl3Obrujr9gP9BgStaEWWHPrVbeYrOR6cfOojw", - "nweI6P7edo9bXG1UAQE4tVxSrfp9n08/0L/RRB3CbIWaroDyTdTo2Rryy1n67uulzo96MZJH+aKEgpjT", - "kwkdpLJxpxsd6Ncofhj24/dMLBn0pxAmzHDEuaXEoqdY0XXX4jL8vJN58sfhNneSKo78fBqeQynRttvy", - "Q+fP7pEz69oW6jqaBRWJpAUfQuY+1qb/9+k1FzZbKu1z+WEJ+GFnC7w89YU7er+2ubIHXzABePRjHKWW", - "/PWUe1TPKmUSZPuaX0fWvzNsTBICGPu1whfF2O20zRZCIgXFN1SrP6CPQ9l4cC85uQYd5YIJZpiHB5OB", - "aMWLnBssPe5r4Ayk9Y/JY/eppY2vecFCDpWMtbLHmX+ldpb235IITv/5p5v+AvSVyIG9gU2lNNei3LGf", - 
"ZBM+c2NG+i0Sp+b5JUroDcGSb6Xm192IHJ3OCdEt8RRShACzW7bmsih9FL2qsXadoyw0marIacddQKHE", - "WaU0AkC5I6EgNwZzwi4aJw90majDI6eAKyhVhTYNzIhMk3B0ACEjYHwRdPn/fLbN3CFegcw8G8kWqtj5", - "mkAzza/tlgLiB7yKhMMRRjYQ3VJfvXQy0ig4e4fPrXIxVtahFqFR0719716xWGveKxha3dPT01OM/lkr", - "Y09n7hHe1UvFH983CAslUmeVFldYygGRprRwb8sy88qbthra7PHJw9nH/x8AAP//7vEvy70JAQA=", + "H4sIAAAAAAAC/+y9f5PbtpIo+lVQ2q1y7CfO2I6TPfGrU/smdpIzL07i8jjZt2v7JRDZknCGAngAcEaK", + "r7/7LXQDJEiCEjUzsZOq+5c9In40Go1Go3++n+VqUykJ0prZ0/ezimu+AQsa/+J5rmppM1G4vwowuRaV", + "FUrOnoZvzFgt5Go2nwn3a8XtejafSb6Bto3rP59p+FctNBSzp1bXMJ+ZfA0b7ga2u8q1bkbaZiuV+SHO", + "aIjz57MPez7wotBgzBDKn2S5Y0LmZV0As5pLw3P3ybBrYdfMroVhvjMTkikJTC2ZXXcas6WAsjAnYZH/", + "qkHvolX6yceX9KEFMdOqhCGcz9RmISQEqKABqtkQZhUrYImN1twyN4ODNTS0ihngOl+zpdIHQCUgYnhB", + "1pvZ0zczA7IAjbuVg7jC/y41wO+QWa5XYGfv5qnFLS3ozIpNYmnnHvsaTF1aw7AtrnElrkAy1+uE/VAb", + "yxbAuGSvvn3GPv/886/cQjbcWig8kY2uqp09XhN1nz2dFdxC+DykNV6ulOayyJr2r759hvNf+AVObcWN", + "gfRhOXNf2PnzsQWEjgkSEtLCCvehQ/2uR+JQtD8vYKk0TNwTanynmxLP/0l3Jec2X1dKSJvYF4ZfGX1O", + "8rCo+z4e1gDQaV85TGk36JuH2Vfv3j+aP3r44d/enGX/4//84vMPE5f/rBn3AAaSDfNaa5D5Lltp4Hha", + "1lwO8fHK04NZq7os2Jpf4ebzDbJ635e5vsQ6r3hZOzoRuVZn5UoZxj0ZFbDkdWlZmJjVsnRsyo3mqZ0J", + "wyqtrkQBxdxx3+u1yNcs54aGwHbsWpSlo8HaQDFGa+nV7TlMH2KUOLhuhA9c0J8XGe26DmACtsgNsrxU", + "BjKrDlxP4cbhsmDxhdLeVea4y4q9XgPDyd0HumwRd9LRdFnumMV9LRg3jLNwNc2ZWLKdqtk1bk4pLrG/", + "X43D2oY5pOHmdO5Rd3jH0DdARgJ5C6VK4BKRF87dEGVyKVa1BsOu12DX/s7TYColDTC1+Cfk1m37/3vx", + "049MafYDGMNX8JLnlwxkrgooTtj5kkllI9LwtIQ4dD3H1uHhSl3y/zTK0cTGrCqeX6Zv9FJsRGJVP/Ct", + "2NQbJuvNArTb0nCFWMU02FrLMYBoxAOkuOHb4aSvdS1z3P922o4s56hNmKrkO0TYhm///nDuwTGMlyWr", + "QBZCrpjdylE5zs19GLxMq1oWE8Qc6/Y0ulhNBblYCihYM8oeSPw0h+AR8jh4WuErAicMMgpOM8sBcCRs", + "EzTjTrf7wiq+gohkTtjPnrnhV6suQTaEzhY7/FRpuBKqNk2nERhx6v0SuFQWskrDUiRo7MKjwzEYauM5", + "8MbLQLmSlgsJhWPOCLSyQMxqFKZowv3vneEtvuAGvnwydse3Xyfu/lL1d33vjk/abWyU0ZFMXJ3uqz+w", + "acmq03/C+zCe24hVRj8PNlKsXrvbZilKvIn+6fYvoKE2yAQ6iAh3kxEryW2t4elb+cD9xTJ2YbksuC7c", + "Lxv66Ye6tOJCrNxPJf30Qq1EfiFWI8hsYE0+uLDbhv5x46XZsd0m3xUvlLqsq3hBeefhutix8+djm0xj", + "HkuYZ81rN354vN6Gx8ixPey22cgRIEdxV3HX8BJ2Ghy0PF/iP9sl0hNf6t/dP1VVut62WqZQ6+jYX8mo", + "PvBqhbOqKkXOHRJf+c/uq2MCQA8J3rY4xQv16fsIxEqrCrQVNCivqqxUOS8zY7nFkf5dw3L2dPZvp63+", + "5ZS6m9No8heu1wV2ciIriUEZr6ojxnjpRB+zh1k4Bo2fkE0Q20OhSUjaREdKwrHgEq64tCftk6XDD5oD", + "/MbP1OKbpB3Cd+8JNopwRg0XYEgCpob3DItQzxCtDNGKAumqVIvmh8/OqqrFIH4/qyrCB0qPIFAwg60w", + "1tzH5fP2JMXznD8/Yd/FY6MormS5c5cDiRrublj6W8vfYo1uya+hHfGeYbidSp+4rQlocGL+XVAcPivW", + "qnRSz0FacY3/4dvGZOZ+n9T5r0FiMW7HiQsfWh5z9MbBX6LHzWc9yhkSjlf3nLCzft+bkY0bZQ/BmPMW", + "i3dNPPiLsLAxBykhgiiiJr89XGu+m3khMUNhb0gmPxsgCqn4SkiEdu6eT5Jt+CXth0K8O0IA07yLiJZI", + "gmxUqF7m9Kg/GehZ/gLUmtrYIIk6SbUUxuK7GhuzNZQoOHMZCDomlRtRxoQN37OIBuZrzSuiZf+FxC4h", + "8T1PjQjWW168E+/EJMwRu482GqG6MVs+yDqTkCDX6MHwdanyy39ws76DE74IYw1pH6dha+AFaLbmZp04", + "OD3abkebQt+uIdIsW0RTnTRLfKFW5g6WWKpjWFdVPeNl6aYesqzeanHgSQe5LJlrzGAjUGHuH46kYaf3", + "F/uG52snFrCcl+W8VRWpKivhCkr3aBdSgp4zu+a2Pfw4cnjX4Dky4JidBRatxquZUMWmG12EBrbheANt", + "3GumKrt9Gg5q+AZ6UhDeiKpGLUL00Dh/HlYHVyCRJzVDI/jNGlFbEw9+4ub2n3BmqWhxpAG0wXzX4K/h", + "Fx2gXev2PpXtFEoXpLO27jehWa40DUE3vJ/c/Qe4bjsTdX5Wacj8EJpfgTa8dKvrLep+Q753dToPnMyC", + "Wx6dTE+F6QcYcQ7sh+Id6ISW5if8Dy+Z++ykGEdJLfUIFEZUZE4t6GJ2qKKZXAPUtyq2IVUmq3h+eRSU", + "z9rJ02xm0sn7hrSnfgv9Ipoder0VhbmrbcLBxvaqe0JIdxXY0UAW2ct0ormmIOC1qhixjx4IxClwNEKI", + "2t75tfa12qZg+lptB1ea2sKd7IQbZzKz/1ptn3vIlD6MeRx7CtLdAiXfgMHbTcaM083S2uXOFkrfTJro", + "XTCStdZGxt2okTA17yEJm9ZV5s9mwmJBDXoDtQ4e+4WA/vApjHWwcGH5H4AF40a9Cyx0B7prLKhNJUq4", + 
"A9JfJ4W4BTfw+WN28Y+zLx49/vXxF186kqy0Wmm+YYudBcM+82o5ZuyuhPvJ1xFKF+nRv3wSbFTdcVPj", + "GFXrHDa8Gg5Fti96/VIz5toNsdZFM666AXASRwR3tRHaGZl1HWjPYVGvLsBa99J9qdXyzrnhYIYUdNjo", + "ZaWdYGG6dkIvLZ0WrskpbK3mpxW2BFmQn4FbhzDuDbhZ3AlRjW180c5SMI/RAg4eimO3qZ1mF2+V3un6", + "LtQboLXSySu40sqqXJWZk/OESigoXvoWzLcI21X1fydo2TU3zM2N1staFiN6CLuV0+8vGvr1Vra42XuD", + "0XoTq/PzTtmXLvLbV0gFOrNbyZA6O+qRpVYbxlmBHVHW+A4syV9iAxeWb6qflsu70XYqHCihxxEbMG4m", + "Ri2c9GMgV5Kc+Q6obPyoU9DTR0ywMtlxADxGLnYyR1PZXRzbcW3WRki025udzCPVloOxhGLVIcvbq7DG", + "0EFT3TMJcBw6XuBn1NU/h9Lyb5V+3Yqv32lVV3fOnvtzTl0O94vx1oDC9Q1qYCFXZdeBdOVgP0mt8ZMs", + "6FmjRKA1IPRIkS/Eam2j9+JLrf6AOzE5SwpQ/EDKotL1GaqMflSFYya2NncgSraDtRzO0W3M1/hC1ZZx", + "JlUBuPm1SQuZIy6H6OuELlo2lltRPyEMW4CjrpzXbrV1xdABaXBftB0zntMJzRA1ZsT9ovGboVY0Hbmz", + "lRp4sWMLAMnUwvs4eO8LXCRH7ykbxDQv4ib4RQeuSqscjIEi86rog6CFdnR12D14QsAR4GYWZhRbcn1r", + "YC+vDsJ5CbsMff0M++z7X8z9TwCvVZaXBxCLbVLo7evThlBPm34fwfUnj8mONHVEtU68dQyiBAtjKDwK", + "J6P714dosIu3R8sVaHQp+UMpPkxyOwJqQP2D6f220NbViAe7f6Y7Cc9tmORSBcEqNVjJjc0OsWXXqKNL", + "cCuIOGGKE+PAI4LXC24suUEJWaBOk64TnIeEMDfFOMCjzxA38i/hBTIcO3f3oDS1aZ4jpq4qpS0UqTWg", + "RXZ0rh9h28ylltHYzZvHKlYbODTyGJai8T2y/AsY/+C2sb96i+5wcWhTd/f8LonKDhAtIvYBchFaRdiN", + "vXhHABGmRTQRjjA9ymlch+czY1VVOW5hs1o2/cbQdEGtz+zPbdshcZGRg+7tQoFBA4pv7yG/JsyS//aa", + "G+bhCCZ2VOeQv9YQZncYMyNkDtk+yscnnmsVH4GDh7SuVpoXkBVQ8l3COYA+M/q8bwDc8fa5qyxk5Iib", + "3vSWkoPf456hFY5nUsIjwy8sd0fQPQVaAvG9D4xcAI6dYk6eju41Q+FcyS0K4+GyaasTI+JteKWs23FP", + "Dwiy5+hTAB7BQzP0zVGBnbP27dmf4r/B+AkaOeL4SXZgxpbQjn/UAkZ0wT7GKTovPfbe48BJtjnKxg7w", + "kbEjO6KYfsm1Fbmo8K3zPezu/OnXnyBpOGcFWC5KKFj0gZ6BVdyfkQtpf8ybPQUn6d6G4A+Ub4nlBDed", + "LvCXsMM390uKTYhUHXfxlk2M6u4nLhkCGjyenQgeN4Etz225c4KaXcOOXYMGZuoFuTAM7SlWVVk8QNI+", + "s2dGb51N2kb3mosvcKhoeSlfM3oT7Ifvde9h0EGHfwtUSpUTNGQDZCQhmOQ7wirldl348KcQABMoqQOk", + "Z9pomm+u/3umg2ZcAftvVbOcS3xy1RYamUZpFBRQgHQzOBGsmdM7J7YYghI2QC9J/PLgQX/hDx74PReG", + "LeE6xAy6hn10PHiAepyXytjO4boDfag7bueJ6wMNV+7i86+QPk857PHkR56yky97gzfWLnemjPGE65Z/", + "awbQO5nbKWuPaWSatxeOO8mW0/UPGqwb9/1CbOqS27uwWsEVLzN1BVqLAg5ycj+xUPKbK17+1HTDeEjI", + "HY3mkOUYxTdxLHjt+lDgnxtHSOEOMDn9TwUIzqnXBXU68MRsPVXFZgOF4BbKHas05EDxbk5yNM1STxh5", + "wudrLlf4YNCqXnnnVhoHGX5tSDWjazkYIilU2a3MUMmdugC8m1oIeXTiFHD3pOtryOkBc82b+XyU65Sb", + "OdqDvsUgaSSbz0ZfvA6pV+2Ll5DTjduccBl05L0IP+3EE00piDon+wzxFW+LO0xuc/8YlX07dArK4cSR", + "x2/7cczp1z23y90dCD00ENNQaTB4RcVqKkNf1TKO0Q6ugjtjYTPU5FPXX0eO36vR96KSpZCQbZSEXTIt", + "iZDwA35MHie8Jkc6o8Ay1rf/BunA3wOrO88UarwtfnG3+ye0b7Ey3yp9VyZRGnCyeD/BAnnQ3O6nvKmd", + "lJdlwrToIzj7DMDMG2ddoRk3RuUCZbbzwsy9VzBZI324Zxf9L5u4lDs4e/1xeza0ODkA6oihrBhneSlQ", + "g6yksbrO7VvJUUcVLTXhxBUe4+Nay2ehSVpNmtBi+qHeSo4OfI3mKumwsYSEmuZbgKC8NPVqBcb23jpL", + "gLfStxKS1VJYnGvjjktG56UCjZ5UJ9Ryw3ds6WjCKvY7aMUWte1K/xigbKwoS2/Qc9MwtXwruWUlcGPZ", + "D0K+3uJwwegfjqwEe630ZYOF9O2+AglGmCztbPYdfUW/fr/8tffxR3d3+hycTtuMCTO3zE6SlP//s/98", + "+uYs+x+e/f4w++r/On33/smH+w8GPz7+8Pe//6/uT59/+Pv9//z31E4F2FPhsx7y8+f+ZXz+HJ8/kat+", + "H/aPpv/fCJkliSz25ujRFvsMU0V4ArrfVY7ZNbyVdisdIV3xUhSOt9yEHPo3zOAs0unoUU1nI3rKsLDW", + "Ix8Vt+AyLMFkeqzxxlLU0D8zHaiORkkfe47nZVlL2sogfVMcZvAvU8t5k4yA8pQ9ZRipvubBydP/+fiL", + "L2fzNsK8+T6bz/zXdwlKFsU2lUeggG3qrRgHSdwzrOI7AzbNPRD2pCsd+XbEw25gswBt1qL6+JzCWLFI", + "c7gQsuR1Tlt5LsnB350fNHHuvOVELT8+3FYDFFDZdSp/UUdQw1btbgL03E4qra5Azpk4gZO+zqdw70Xv", + "1FcCXwbHVK3UlNdQcw6I0AJVRFiPFzJJsZKin154g7/8zZ0/h/zAKbj6c6Y8eu99981rduoZprlHKS1o", + "6CgJQeIp7YMnOw5JjpvFMWVv5Vv5HJaofVDy6VtZcMtPF9yI3JzWBvTXvOQyh5OVYk9DPOZzbvlbOZC0", + "RhMrRkHTrKoXpcjZZfwgacmTkmUNR3j79g0vV+rt23cD34zh88FPleQvNEHmBGFV28yn+sk0XHOdsn2Z", + 
"JtULjky5vPbNSkK2qklBGlIJ+fHTPI9XlemnfBguv6pKt/yIDI1PaOC2jBmrmng0J6D4kF63vz8qfzFo", + "fh30KrUBw37b8OqNkPYdy97WDx9+jpF9bQ6E3/yV72hyV8Fk7cpoSoq+UgUXTs9K9FXPKr5Kmdjevn1j", + "gVe4+ygvb1DHUZYMu3WiDkOAAQ7VLqAJcR7dAILj6OBgXNwF9QppHdNLwE+4hd0A7FvtVxQ/f+PtOhCD", + "z2u7ztzZTq7KOBIPO9Nke1s5ISt4YxixwteqT4y3AJavIb/0GctgU9ndvNM9OPx4QTOwDmEolx1FGGI2", + "JTRQLIDVVcG9KM7lrp/WxlBEBQ76Ci5h91q1yZiOyWPTTatixg4qUmokXTpijY+tH6O/+d6rLASa+uwk", + "GLwZyOJpQxehz/hBJpH3Dg5xiig6aT/GEMF1AhFE/CMouMFC3Xi3Iv3U8oTMQVpxBRmUYiUWqTS8/zW0", + "hwVYHVX6zIPeC7kZ0DCxZO4pv6CL1T/vNZcrcNezu1KV4SVlVU06beB7aA1c2wVwu1fPL+OEFAE6fFJe", + "Y+Q1avjmbgmwdfstLGrsJFy7VwUqiqiN914+Gfc/I8ChuCE8oXv7UjgZfet61CUyDoZbucFu86z1rnkx", + "nSFc9H0DmLJUXbt9cVAon22TkrpE90tt+ApG3i6x9W5iPoyOxQ8HOSSRJGUQteyLGgNJIAkyNc7cmpNn", + "GNwXd4jxmdlzyAwzkYHY24wwibZH2KJEAbbxXKW957pjRaWswGOgpVkLaNmKggGMLkbi47jmJhxHzJca", + "uOwk6ewPTPuyLzXdeeRLGCVFbRLPhduwz0EH736foC5kpQup6OJH/4S0cu7theELqe1QEkXTAkpY0cKp", + "cSCUNmFSu0EOjp+WS+QtWcotMVJQRwKAnwPcy+UBY2QbYZNHSJFxBDY6PuDA7EcVn025OgZI6RM+8TA2", + "XhHR35AO7CNHfSeMqspdrmLE3pgHDuBTUbSSRc+jGodhQs6ZY3NXvHRszr/F20EGGdLwQdHLh+Zdb+6P", + "PTT2mKboyj9qTSQk3GQ1sTQbgE6L2nsgXqhtRhHKybfIYrtw9J6MXcB46dTBpFx09wxbqC26c+HVQr7y", + "B2AZhyOAEeletsIgvWK/MTmLgNk37X45N0WFBknGK1obchkT9KZMPSJbjpHLZ1F6uRsB0FNDtbUavFri", + "oPqgK54ML/P2Vpu3aVNDWFjq+I8doeQujeBvqB/rJoT7R5v4bzy5WDhRHyUT3lCzdJsMhdS5oqyDxyQo", + "7JNDB4g9WH3ZlwOTaO36enXxGmEtxUoc8x0aJYdoM1ACPoKzjmiaXaY8BdxbHvAevwjdImUd7h6Xu/uR", + "A6GGlTAWWqNR8Av6FOp4jumTlVqOr85WeunW90qp5vInszl27Czzo68APfCXQhubocUtuQTX6FuDSqRv", + "XdO0BNp1UaRiA6JIc1yc9hJ2WSHKOk2vft7vn7tpf2wuGlMv8BYTkhy0FlgcI+m4vGdq8m3fu+AXtOAX", + "/M7WO+00uKZuYu3IpTvHX+Rc9BjYPnaQIMAUcQx3bRSlexhkFHA+5I6RNBr5tJzsszYMDlMRxj7opRbC", + "3sdufhopuZYoDWA6QlCtVlCE9GbBHiajJHKlkquoilNV7cuZd8IodR1mntuTtM674cOYE34k7mdCFrBN", + "Qx+/ChDyNrIOE+7hJCuQlK4krRZKoiZ28ccWka7uI9tC+wEASSfo1z1jduudTLvUbCduQAm88G8SA2F9", + "+4/lcEM86uZj7tOdzKf7jxAOiDQlbFTYZJiGYIQB86oSxbZneKJRR5Vg/Cjt8oi0hazFD3YAA10n6CTB", + "dVJpe1drr2A/xTfvqXuVke+1dyx29M1zH4Bf1BotGB3P5mHe9uatNnHt3/9yYZXmK/BWqIxAutUQuJxj", + "0BBlRTfMCnInKcRyCbH1xdzEctABbqBjLyaQboLI0iaaWkj75ZMUGR2gnhbGwyhLU0yCFsZs8q+HVq4g", + "00eqpOZKiLbmBqaqZLj+97DLfuFl7R4ZQpvWPdebnbqX7xG7frX5HnY48kGvVwfYgV1BzdMrQBpMafqb", + "TyZKYH3PdFL84/Oys4VH7NRZepfuaGt8UYZx4m9vmU7Rgu5SbnMwWicJB8uU3bhI+ya40wNdxPdJ+dAm", + "iOKwDBLJ+/FUwoQSlsOrqMlFcYh2XwMvA/HicmYf5rPbeQKkbjM/4gFcv2wu0CSe0dOULMMdx54jUc6r", + "SqsrXmbeX2Ls8tfqyl/+2Dy4V3zkl0yasl9/c/bipQf/w3yWl8B11mgCRleF7aq/zKqojMP+q4SyfXtF", + "J2mKos1vMjLHPhbXmNm7p2waFEVp/Weio+h9LpZph/eDvM+7+tAS97j8QNV4/LQ2T3L46Tr58CsuymBs", + "DNCOOKfj4qZV1klyhXiAWzsLRT5f2Z2ym8HpTp+OlroO8CSc6ydMTZl+cUifuBJZkXf+4XcuPX2rdIf5", + "+8jEpPPQHydWOSGb8Djiqx3qV/aFqRNGgtdvq9/caXzwID5qDx7M2W+l/xABiL8v/O/4vnjwIGk9TKqx", + "HJNALZXkG7jfRFmMbsTHfYBLuJ52QZ9dbRrJUo2TYUOh5AUU0H3tsXethcdn4X8poAT308mUR3q86YTu", + "GJgpJ+hiLBKxcTLdUMlMw5Ts+1RjEKwjLWT2viQDGWOHR0jWGzRgZqYUedq1Qy6MY6+SnCldY4aNR7S1", + "bsRajPjmylpEY7lmU3Km9oCM5kgi0yTTtra4Wyh/vGsp/lUDE4V71SwFaLzXelddeBzgqAOBNK0X8wOT", + "naod/jZ6kD32pqAL2qcE2Wu/e97YlMJCU0V/jvQAj2ccMO493tuePjw1UzTbuuuCOe0dM6V0emB03lg3", + "MkeyFLow2VKr3yFtCEH7USIRRjB8ClTz/g4y5bnXZymNUbmt6N7Ofmi7p7+Nxzb+1m/hsOim6thNLtP0", + "qT5uI2/y6DXpdM0eyWOPsNjDoBsaMMJa8HhFzrBYBiV4H3FJ54myQHQizNKnMo7lPKXx21PpYR7Ev5b8", + "esFTNWLcW8jBFG1vx0/KKhY6hw0wTY4Dmp1FHtxNW0GZ5CrQrQ1imJX2hu8amnbyi6Z9wCBFxU+XObkp", + "lEYlhqnlNZdURdz1I37lexsgE7zrda005oE0aZeuAnKxSapj3759U+RD951CrAQVyK4NRBWY/UCMkk0i", + "Ffkq1k3mDo+a8yV7OI/KwPvdKMSVMGJRArZ4RC0W3OB12ZjDmy5ueSDt2mDzxxOar2tZaCjs2hBijWLN", + 
"2xOFvMYxcQH2GkCyh9ju0VfsM3TJNOIK7jsseiFo9vTRV+hQQ388TN2yvsD5PpZdIM8OztppOkafVBrD", + "MUk/atr7eqkBfofx22HPaaKuU84StvQXyuGztOGSryAdn7E5ABP1xd1Ec34PL5KsAWCsVjsmbHp+sNzx", + "p5GYb8f+CAyWq81G2I133DNq4+ipLa9Mk4bhqNa/rxcV4Aof0f+1Cu5/PV3XR37G8M1IzBZ6Kf+INtoY", + "rXPGKflnKVrP9FCvk52H3MJYQKupm0W4cXO5paMsiY7qS1ZpIS3qP2q7zP7mnsWa5479nYyBmy2+fJIo", + "RNWt1SKPA/yj412DAX2VRr0eIfsgs/i+7DOpZLZxHKW43+ZYiE7lqKNu2iVzzC90/9BTJV83SjZKbnWH", + "3HjEqW9FeHLPgLckxWY9R9Hj0Sv76JRZ6zR58Nrt0M+vXngpY6N0qmBAe9y9xKHBagFXGDGX3iQ35i33", + "QpeTduE20H9a/6cgckZiWTjLyYdAZNHcFyzvpPhffmgzn6NhlSIRezpApRPaTq+3+8jehsdp3fr2W3IY", + "w28jmJuMNhxliJUR73tyr2/6fAp/oT5ItOcdheOj35h2b3CU4x88QKAfPJh7Mfi3x93PxN4fPEgnIE6q", + "3NyvLRZu8yLGvqk9/FolFGChamHjUOTzIyQUkGOXlPvgmODCDzVn3QpxH1+KuJv4rrS3afoUvH37Br8E", + "POAffUR8YmaJG9hGKYwf9m6FzCTJFM33yM+ds6/Vdirh9O6gQDx/AhSNoGSieg5XMqgAmjTXH/QXiWjU", + "jbqAUrlHZlwUKNbn/3Xw7BY/34PtWpTFL21ut95FornM10kv4YXr+CvJ6J0rmFhlss7ImksJZXI4etv+", + "Gt7AiVf6P9XUeTZCTmzbr0BLy+0trgW8C2YAKkzo0Cts6SaIsdpNm9WkZShXqmA4T1vUomWOw1LOqRKa", + "ifhmHHZTW++3irHgPuHQUpTohpm2G2PLTHM7kkAL652H+kJuHCw/bkjNQKODZlxs8GI2fFOVgCfzCjRf", + "YVclodcdU6jhyFHFCmYq9wlbYsIKxWytJVPLZbQMkFZoKHdzVnFjaJCHblmwxblnTx89fJhUeyF2JqyU", + "sBiW+VO7lEen2IS++CJLVArgKGAPw/qhpahjNnZIOL6m5L9qMDbFU/EDRa6ildTd2lRPsql9esK+w8xH", + "jog7qe5RXRmSCHcTatZVqXgxx+TGr785e8FoVupDJeSpnuUKtXVd8k+aV6YnGA2ZnUYy50wfZ38qD7dq", + "Y7Om/GQqN6Fr0RbIFD2fG9Tjxdg5Yc9JhdoU8KdJGKbI1hsoomqX9IhH4nD/sZbna9RNdiSgcV45vRBr", + "YGet5SaKPmyqHyHDdnD7WqxUinXOlF2DvhYGMCIfrqCbDrHJDep14yE9Ynd5upaSKOXkCGG0qXV0LNoD", + "cCTJBqeCJGQ9xB+pmaJ6zMfWpb3AXulYjF6R257VPyTXCym22Q/euJBzqaTIsRRCSpLG1G3TzJQTqkak", + "7Ytm5k9o4nAlS+s2scAei6PFdgMj9Igbmvyjr25TiTroTwtbX3JtBdZ4zgbFPFS69gYxIQ34alaOiGI+", + "qXTCqSkZCNE4UBxJRpiVaUTD+a379qPXf2NSjEshUdPl0ebfZ2SyKo1Ay7RkwrKVAuPX043mMW9cnxPM", + "0ljA9t3JC7US+YVY4RjkRueWTT6jw6HOggep99h0bZ+5tj53fvNzxx2MJj2rKj/peB30pCBpt3IUwSm/", + "peBIEiG3GT8ebQ+57XX9xvvUERpcodcaVHgPDwijqaXdHeUb97YkisIWjCIqkwl0hUyA8ULIYEJNXxB5", + "8krAjcHzOtLP5JpbejtM4mmvgZcjARAYoUw2+NsO1a8c4FCCawxzjG9jWwZ8hHE0DVqJn8sdC4fCUXck", + "TDzjZeM6nSjqjVKVF6IKDC7qlflOMQ7HuLMQMtlB18HwvaY7VuM49iYay1G4qIsV2IwXRSq11df4leHX", + "ECQGW8jrpghVEx3YzVE+pDY/Ua6kqTd75goNbjldVDc/QQ1x7f6ww5hpZ7HDf1MVmMZ3xjtNHx2VGzyk", + "i+MS8w+jjFNSr6PpzIhVNh0TeKfcHh3t1Dcj9Lb/nVJ6CNf9U0Tj9rhcvEcp/vaNuzjixL0D/3S6Wpq8", + "uugLrvB7SHjUZITsciW8ygZ1xtDrATcvsWU94EPDJOBXvByJhI9tJXS/kv1gLB4+H03fwK1Pz2U528uC", + "RlMeka9wz/oyNCGO+QeTe/DdWS38WvcidNx2933HUkc+Yi2zGLXQ3cyI1m7wsVa076/GUiSEOh34Pa4H", + "4r145j4NPFwJVQfvq+ADHZ6E9KtPwdOp+zGy/mRkwae2WozaWF77+rW0TP8m//4XssIykFbv/gQWl8Gm", + "94vKJKRdUk+1TVhT+nBSKcTOrTilhk2qXIqXDYOujFhLh5YG5WcGZPV8ijgwwMeH+ey8OOrCTJXcmdEo", + "qWP3QqzWFjP2/wN4AfrlgYoEbRUCPGKVMqKtQFq6wXwK2DUOdzI12MARsIgrKgzHCk6oV5BbLDvbOtdp", + "gGPqK7jJgtHn/1QmGH9ONzEZviDBvioEw1qzB+74QeKkKPkX1ek8mZ5z/6xxoaYIsGtu2nQtvZjpyZGb", + "yyXkmBV5b6Kq/1qDjJIgzYNeBmFZRnmrRBPHhHm9j9c6tgDtyyO1F56ovs6twRmLY7+E3T3DOtSQLBza", + "BPHdJHEwYoBMYCGH9Jgi2XuNCdNQBmIhuAT7VMxtcYzRnM9R2rUbzhVI0l0cbSq2PVOmi55Pmst1PSrt", + "I4bkjOWyGtZMHn9/PMcS1cY7yPEm8XD8Smfnw8I51z5xMaYVa2wnIYUxmPBbyCFIs5Ti0tcPQKyQpeqa", + "6yK0uJOkUHQ3iTTQy2Zm0QZwDJ0cEqUYMBYqL5UTI7KxgLJuzETjcHjPkGdom8AH4VqC1lA0JpFSGcis", + "CgEf++DYhwpyf70REsxo+SMCbjT19as2tzeWgeOY6pp7r9d4gUzDhjvodJSBe3zOfch+Rt9DEH4oA3ZQ", + "w9TQ6+F6tCF0R5gBEmOqXzJ/Wx4O7r+JsklICToLlqd+Om7ZzciGeTeLOqcLOj4YjUJucu6cPawkqafJ", + "h6vsvRGiIPlL2J3SIygU8g07GANNkhOBHiUc7W3ynarfTAru1Z2A92nzyFVKldmIseN8mEO8T/GXIr8E", + "zAHYuLiP1Ghnn6GOvbFmX693IWd2VYGE4v4JY2eSgoqCYbtbXrA3ubxn982/xVmLmtL6e6XayVuZjs7A", + 
"hPv6ltwsDLOfhxlwrO6WU9EgBzJUb+WYy801JufvVvE8mfoqH5qa+1XkW6IiKFIyyQVZrJ7hQU8pjjAF", + "QpSrAw2ZnHlLFzOlSvny3iRNgxsqjal4MgTIgpySLaCBwg+eRECyLnriFFLqO5/0Ti2ZhtaIfNPsf8MS", + "7qkXfX/mZpYuv1sqDZ1i7K43ZfpsAl8wjSb+ZyGs5np3kxx9gxLyA+3JKJYPumM1nljtQlpvrCEOy1Jd", + "Z8issqbORepp69qZ7mUciq61/dypXkDk18WNF9R2bM0LliutIY97pOM9CaqN0pCVCt28UhbopXVy9waD", + "vCQr1YqpKlcFUL2YNAWNzVVLyVFsgsirJokCoh2MFqY+ER1PnNLdqWRHylDUWh1ROz8HilxvszrRojOy", + "ZY54LIPxWZw8hqjxEN49tf/TvHkptkg3oFNHfsmsrmHOfIt+jWx/8LkGthHGECgNLV2LssTAcbGNLK+N", + "40IatSNi7zm6VV4J9L3pJhEgabhyd16TWSHmARdx2iNm11rVq3WUYLqBMzx5de0fxPEoP5sa3aMwgsxN", + "8YRtlLH+pUkjtUtuXc4+y5W0WpVlVylFIvrKa9p/4NuzPLcvlLpc8PzyPr5rpbLNSot5iK/uOwe2M+le", + "arHuBZxROfPDqXqpHbrKeaKdzCB7LO7owu4RmO8Oc9DDOvez4cL66+oy0/Qz5kwybtVG5Okz9dfythv1", + "kUuxqGTOMqqtSFkmsBke9viyapwrkEUO0QySJ4vDnTHPCLyRGdmN+y9K4P1x2RI8oxm5KIfMxUtRWT4q", + "6/UAQEgp9NnWmgoyxpJYw1XUilIloIm8D+jEWwU9kW4HmxvhzoGycCugBt6PDYCfkfJhTrnlyJNyobbh", + "+/02+dyNgP+wn8o7zGPMxeuiJS1NTl4hUc0IR0inuN7rD/Uaw94XU72imuK5E2/4CIBxP6kODJO8pY4F", + "Y8lFCUWWqr143uio5tFL24dm9UuiC+M5ec7rUPrQjV1r8IlTSMTXXftXxR0pqab5UJMsC9gCxXX8DlpR", + "TcN5ZH+Bkkoe9pQBqspKuIKO+5jP5lKjqCmuIPQ1TWdWAFRojezryFJ+UfFd3lOc+LVnkWfNFOwmNSmE", + "WNopdkBNklTqbGVGx8RMPUoOoitR1LyDP3OsyNFVA7qjnEDV4I2QhXfk1Gl+phFehQHOQv+UKBMw8W4a", + "HzqaBaVRt48BHfSTrM3YqZdpN8k4VVFjYMHZisYQSyTe8g1T8Ws5rpAcknz73Jq4T0LJCLHfbCFHqca/", + "d6DwL54RI4XPeoLULgEKehW4Lglt+xokkyoqMXnNTfNUaXMohh9oYmwkpH9N38Co3Hoz3n5nGQ7GTC+Z", + "2uhDQjd0enP1/Cc5iXsP4uh4KRox4MP/9ui/AnX7Zwc2wFLe0u2nk/2xSKO/xTwXn7NFHQYqS3VNNSPj", + "d+hzCHZQor5gAvJiuWiu5eC1OffpPfuqDhH5q2/4jimN/7hX579qXorlDvkMgR+6MbPmjoS84ZU8ArwX", + "qJt4v3g1D4AFbYsKU9G6xdQxo+F2bpQIaHeRh+I+im34JcTbgM4OxD9z6xinqReouXBXdm87h1jwiw8p", + "Wja8iF/6mCiyW0Y9pA52vf/vNhYunirkd6tKnocKob5EUZfPYBXgQFx2DZv9wZJDvhZIoKks3BKtDtH1", + "xQ1UpkeyrlQEwlj5lQ7Yg4qrg8ozt1rGRM1vr8bGnjDTSUu5612Y6nUzADqu03gI/Lhs5cfBfzKH69gy", + "poD/Z8H7SKHaGF6qSfsRsNzJwJGAlbTVC7XNNCzNIQcTUle757xuc3cEFauQuQZuyOPm/Cf/8GxTlArp", + "HsLkE9rYNJtRClgK2TJLIavaJt4xmKlU7iKExUp/ROuICW1MSnDC5BUvf7oCrUUxtnHudFBJx7hERDB0", + "+L4JFUZzpw4HEKZ9w2F8ZqtGj5u5C5yKUJG7prFcFlwXcXMhWQ7a3fvsmu/MzS1KjXHgkE2JR9JMN2tA", + "ZF1C0iZAyp03Ct/S3tMAyO/Q8DPBYIN+wQljDal2rBqxzwxh+EsYbDZ8m5VqhVGEIwfC56ZFCx89AZVE", + "NTjJZ9PWHeYx4nfYPw2m5feMyCqcdcoU+8/9T7iV+Iz8WQq79+STjrIf1kl+t3QwA1LlqnX+J2IZnsdU", + "JK5PvhJH4wZhM4SqBNqDaBNhxD7U1YuP7CK6Qfgw7lgJPr3cWdfTIhXvS5qBDDUGZo97P5jWlZ3n3j1r", + "qEobqBoIKXMfLX2kpo308+FeGgGPatP7s96dtnGZceMcUyNuf3x0Vqkqy6f4fFLljsKbCTykXRhH6CMy", + "Aoysu3GPMU0tm07eo05Rm2PL5I0W1Tlk7aryfY/+MTXRCEfvmiDUEnkZVW5H7RZG8jTKlHk/xqyrBmuY", + "BONMQ15rVBNf893hsmMjGaMv/nH2xaPHvz7+4kvmGrBCrMC0Wcd7Zbtav0Ah+3qfj+sJOFieTW9CyD5A", + "iAv2xxBU1WyKP2vEbU2bUnRQtOwY/XLiAkgcx0S5qBvtFY7Tuvb/ubYrtcg737EUCv74PdOqLNNVHxq5", + "KmFASe1WZEJxL5AKtBHGOkbYtYAK23pEmzWqBzH37xVlk1Eyh6A/9lQg7IjLVWohYw61yM8wtttbjRhs", + "q9LzKrL07FuXf6eRhg6FRvSKWQCrVOVFe7FkKYgwgkhHkbVe8Yka8chHtmG25C2bIkTveZ4mvbhg9n5u", + "3y3matOc3m1iQrwIh/IGpDlmnxjPW3ATTtKq9v80/CORiOHOuEaz3D+CVyTfBzcryj8JtGFQfoI8EICR", + "aNtOnGQUKBYlItZkJUB7QjAg98WPH1rD8sGwEIQkdDgAXhw+27ZrIhk8OJ84o+8PDVKipbwbo4TO8g9F", + "5AbW21wk0RZ5pYm1YIgtqaFYGIVbm2dNFPPIq2QQ7KyVssy9TMsyESRNehw8UzHhuCeBvuLlx+ca3wpt", + "7BniA4pX46FRcaRsjGRCpblZnr4XfNLcUVTs3U0tX2Jg9n+B26PkPeeH8kb4wW2Gyh2sWL8KtwLFerNr", + "HJOcrB59yRa+2EalIRemb9y/DsJJExgKWiy9Qyts7YFI1EPr/EXZW5DxMnjisB8j81Zjs/cQtkf0EzOV", + "kZObpPIU9Q3IIoG/FI+Ki/MeuC5uWZjhZmlfogRuR6Z9GZYdnro8Sm3iLp3awHCdk2/rDm4TF3W7tqk5", + "iybXd3j79o1dTEk1lK7F4LpjrqM7KcpwVEmGPyDLEeHIj+HnTVHML2N5bym360hu7t5+1KI86LDSybT+", + 
"YT5bgQQjDOYS/9XXjvm4d2mAgDIvDI8qwXqbdDGEmMRaO5NHU0U51CekT/fdEjmvMaoxr7WwO6wbHBRo", + "4tdkPqbvmtwePjdMY0vzd59Vl9DUbm8zgdQm3K7fKV7ifUQmPuluIVWesG8ow7c/KH+/t/gP+PxvT4qH", + "nz/6j8XfHn7xMIcnX3z18CH/6gl/9NXnj+Dx37548hAeLb/8avG4ePzk8eLJ4ydffvFV/vmTR4snX371", + "H/ccH3IgE6Ahtf/T2f+XnZUrlZ29PM9eO2BbnPBKfA9ub/CtvFRY19IhNceTCBsuytnT8NP/E07YSa42", + "7fDh15mvzzRbW1uZp6en19fXJ3GX0xWG/mdW1fn6NMyD1QY78srL88ZHn/xwcEdb7TFuqieFM/z26puL", + "1+zs5flJSzCzp7OHJw9PHvnS1pJXYvZ09jn+hKdnjft+ivk1T41PnX/axGp9mA++VRUl1nefPI36v9bA", + "S0yw4/7YgNUiD5808GLn/2+u+WoF+gSjN+inq8enQRo5fe8zJ3zY9+009gw5fd9JMFEc6Nl4PiRtki+U", + "ukSTeJCP7pmeH8dJXJn7vHDop5bofGHOW0YYyiujzXn29E1K9+J9KKt6UYqc0fWN9Os2JyKvJm1Iyz5Q", + "0TZrS/u3zNAxuIfZV+/ef/G3Dykhqw/ID94g2FpAvEsuRnlhgMJJgOtfNehdCxha62cxGENzYTp72tay", + "yhc+8LOdsJ+9pwN+JZ7SeIT6oLAm8VzoNAKYGyIFV4OFd1jjD13/kBweP3wYTr6XqyOyOvXUGqO7a3sY", + "+AUdk86gU/g6IRS5xWSIjyHF/mwo5ZLDppCcvOrR3XbDL8nqgg51TPu4WY9R76OLSG7iR/y2BOb+B5Y0", + "mhCUTTMNhZIPQ245cgKDK22sGCsFqf28e1OqdvWH+ezJkdSwV0HVyR+aAP8HXjqQoQhpYwiCRx8PgnNJ", + "Hp/u2qHr8cN89sXHxMG5dMyLlwxbRuV3ExQvL6W6lqGlk2XqzYbrHUoqdsoe+yxHaEsM7Yju6WLl7gy/", + "mRFbxkIkFWjhHoy8nL37cOh6OX0fyq7vv4w6Jbe9v3LUYeIlt6/Z6QJLrU1tCiZqPL4UVIGZ0/d4Qkd/", + "P/Wa+PRHVKaRlHYaknyNtKR0LumPHRS+t1u3kP3DuTbReDm3+bquTt/jf1DgilZE2aFP7VaeovPR6fsO", + "IvznASK6v7fd4xZXG1VAAE4tl1Srft/n0/f0bzRRhzBboaYroHwTNXq2hvxylr77eqnzo16M5FG+KKEg", + "5vRkQgepbNzpRgf6FYofhv30PRNLBv0phAkzHHFuKbHoKVZ03bW4DD/vZJ78cbjNnaSKIz+fhudQSrTt", + "tnzf+bN75My6toW6jmZBRSJpwYeQuY+16f99es2FzZZK+1x+WAJ+2NkCL0994Y7er22u7MEXTAAe/RhH", + "qSV/PeUe1bNKmQTZvuLXkfXvDBuThADGfq3wRTF2O22zhZBIQfEN1eoP6ONQNh7cS06uQUe5YIIZ5uHB", + "ZCBa8SLnBkuP+xo4A2n9Q/LYfWxp42tesJBDJWOt7HHmX6mdpf05JJEku3kOV1A6imFKs0O85xPLMl88", + "/PzjTX8B+krkwF7DplKaa1Hu2M+yCcC5MSv+Fslb8/wSZfyG5Mk7U/PrbkyPTmeV6BaJCklGgNktW3NZ", + "lD4OX9VY/c7RJhpdVeT2466wUCStUhoBoOyTUJAjhDlhF42bCDpd1OGZVBDZoFUEcyrTJBxdSMiMOOEq", + "mc+2meMHK5CZ50jZQhU7X15opvm13VJs/YDtkZw5whMHUmDqqxd0RhoFv/HwudVTxno/VEg0Gr8379yD", + "GMvWe11Fq8Z6enqKgURrZezpzL3nuyqu+OO7BnOh2uqs0uIKq0Ig0pQW7plaZl4P1BZWmz0+eTj78L8D", + "AAD//6UnopQICgEA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 4e233a8b20..b638825ec4 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -1102,6 +1102,9 @@ func (v2 *Handlers) RawTransactionAsync(ctx echo.Context) error { if !v2.Node.Config().EnableExperimentalAPI { return ctx.String(http.StatusNotFound, "/transactions/async was not enabled in the configuration file by setting the EnableExperimentalAPI to true") } + if !v2.Node.Config().EnableDeveloperAPI { + return ctx.String(http.StatusNotFound, "/transactions/async was not enabled in the configuration file by setting the EnableDeveloperAPI to true") + } txgroup, err := decodeTxGroup(ctx.Request().Body, config.MaxTxGroupSize) if err != nil { return badRequest(ctx, err, err.Error(), v2.Log) diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index c0e5c77990..e830c4892c 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -853,12 +853,29 @@ func prepareTransactionTest(t *testing.T, txnToUse int, txnPrep func(transaction return } -func postTransactionTest(t *testing.T, txnToUse int, expectedCode int, method string, enableExperimental bool) { +type postTransactionOpt func(cfg *config.Local) + +func 
enableExperimentalAPI() postTransactionOpt { + return func(cfg *config.Local) { + cfg.EnableExperimentalAPI = true + } +} + +func enableDeveloperAPI() postTransactionOpt { + return func(cfg *config.Local) { + cfg.EnableDeveloperAPI = true + } +} + +func postTransactionTest(t *testing.T, txnToUse int, expectedCode int, method string, opts ...postTransactionOpt) { + cfg := config.GetDefaultLocal() + for _, opt := range opts { + opt(&cfg) + } + txnPrep := func(stxn transactions.SignedTxn) []byte { return protocol.Encode(&stxn) } - cfg := config.GetDefaultLocal() - cfg.EnableExperimentalAPI = enableExperimental handler, c, rec, releasefunc := prepareTransactionTest(t, txnToUse, txnPrep, cfg) defer releasefunc() results := reflect.ValueOf(&handler).MethodByName(method).Call([]reflect.Value{reflect.ValueOf(c)}) @@ -873,18 +890,20 @@ func TestPostTransaction(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - postTransactionTest(t, -1, 400, "RawTransaction", false) - postTransactionTest(t, 0, 200, "RawTransaction", false) + postTransactionTest(t, -1, 400, "RawTransaction") + postTransactionTest(t, 0, 200, "RawTransaction") } func TestPostTransactionAsync(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - postTransactionTest(t, -1, 404, "RawTransactionAsync", false) - postTransactionTest(t, 0, 404, "RawTransactionAsync", false) - postTransactionTest(t, -1, 400, "RawTransactionAsync", true) - postTransactionTest(t, 0, 200, "RawTransactionAsync", true) + postTransactionTest(t, -1, 404, "RawTransactionAsync") + postTransactionTest(t, 0, 404, "RawTransactionAsync") + postTransactionTest(t, -1, 404, "RawTransactionAsync", enableDeveloperAPI()) + postTransactionTest(t, -1, 404, "RawTransactionAsync", enableExperimentalAPI()) + postTransactionTest(t, -1, 400, "RawTransactionAsync", enableExperimentalAPI(), enableDeveloperAPI()) + postTransactionTest(t, 0, 200, "RawTransactionAsync", enableExperimentalAPI(), enableDeveloperAPI()) } func simulateTransactionTest(t *testing.T, txnToUse int, format string, expectedCode int) { From 1fa0ef7e0c0169d857dedb3747362a08f2355b63 Mon Sep 17 00:00:00 2001 From: Gary Malouf <982483+gmalouf@users.noreply.github.com> Date: Thu, 18 Jul 2024 18:00:01 -0400 Subject: [PATCH 33/82] P2P: Introduce profiles for hybridRelay, hybridArchival, and hybridClient. 
(#6062) Co-authored-by: chris erway Co-authored-by: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> --- cmd/algocfg/profileCommand.go | 70 ++++++++++++++++++++++++++---- cmd/algocfg/profileCommand_test.go | 62 ++++++++++++++++++++++++++ config/config.go | 8 ++++ network/p2p/p2p.go | 8 +++- network/p2pNetwork.go | 8 +++- network/p2pNetwork_test.go | 16 ++++++- 6 files changed, 159 insertions(+), 13 deletions(-) diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go index 4cd9bf5c9e..076cb65a64 100644 --- a/cmd/algocfg/profileCommand.go +++ b/cmd/algocfg/profileCommand.go @@ -66,8 +66,8 @@ var ( }, } - relay = configUpdater{ - description: "Relay consensus messages across the network and support catchup.", + wsRelay = configUpdater{ + description: "Relay consensus messages across the ws network and support recent catchup.", updateFunc: func(cfg config.Local) config.Local { cfg.MaxBlockHistoryLookback = 22000 // Enough to support 2 catchpoints with some wiggle room for nodes to catch up from the older one cfg.CatchpointFileHistoryLength = 3 @@ -80,7 +80,7 @@ var ( } archival = configUpdater{ - description: "Store the full chain history and support catchup.", + description: "Store the full chain history and support full catchup.", updateFunc: func(cfg config.Local) config.Local { cfg.Archival = true cfg.EnableLedgerService = true @@ -91,13 +91,67 @@ var ( }, } + hybridRelay = configUpdater{ + description: "Relay consensus messages across both ws and p2p networks, also support recent catchup.", + updateFunc: func(cfg config.Local) config.Local { + // WS relay config defaults + cfg.MaxBlockHistoryLookback = 22000 // Enough to support 2 catchpoints with some wiggle room for nodes to catch up from the older one + cfg.CatchpointFileHistoryLength = 3 + cfg.CatchpointTracking = 2 + cfg.EnableLedgerService = true + cfg.EnableBlockService = true + cfg.NetAddress = ":4160" + // This should be set to the public address of the node if public access is desired + cfg.PublicAddress = config.PlaceholderPublicAddress + + // P2P config defaults + cfg.EnableP2PHybridMode = true + cfg.P2PNetAddress = ":4190" + cfg.EnableDHTProviders = true + return cfg + }, + } + + hybridArchival = configUpdater{ + description: "Store the full chain history, support full catchup, P2P enabled, discoverable via DHT.", + updateFunc: func(cfg config.Local) config.Local { + cfg.Archival = true + cfg.EnableLedgerService = true + cfg.EnableBlockService = true + cfg.NetAddress = ":4160" + cfg.EnableGossipService = false + // This should be set to the public address of the node + cfg.PublicAddress = config.PlaceholderPublicAddress + + // P2P config defaults + cfg.EnableP2PHybridMode = true + cfg.P2PNetAddress = ":4190" + cfg.EnableDHTProviders = true + return cfg + }, + } + + hybridClient = configUpdater{ + description: "Participate in consensus or simply ensure chain health by validating blocks and supporting P2P traffic propagation.", + updateFunc: func(cfg config.Local) config.Local { + + // P2P config defaults + cfg.EnableP2PHybridMode = true + cfg.EnableDHTProviders = true + return cfg + }, + } + // profileNames are the supported pre-configurations of config values profileNames = map[string]configUpdater{ - "participation": participation, - "conduit": conduit, - "relay": relay, - "archival": archival, - "development": development, + "participation": participation, + "conduit": conduit, + "wsRelay": wsRelay, + "archival": archival, + "development": development, + "hybridRelay": hybridRelay, + 
"hybridArchival": hybridArchival, + "hybridClient": hybridClient, } forceUpdate bool diff --git a/cmd/algocfg/profileCommand_test.go b/cmd/algocfg/profileCommand_test.go index d8bf715534..8d7d95a26e 100644 --- a/cmd/algocfg/profileCommand_test.go +++ b/cmd/algocfg/profileCommand_test.go @@ -17,6 +17,7 @@ package main import ( + "github.com/algorand/go-algorand/config" "testing" "github.com/stretchr/testify/require" @@ -62,4 +63,65 @@ func Test_getConfigForArg(t *testing.T) { require.Equal(t, ":4160", cfg.NetAddress) require.False(t, cfg.EnableGossipService) }) + + t.Run("valid config test hybrid relay", func(t *testing.T) { + t.Parallel() + cfg, err := getConfigForArg("hybridRelay") + require.NoError(t, err) + + require.False(t, cfg.Archival) + require.Equal(t, uint64(22000), cfg.MaxBlockHistoryLookback) + require.Equal(t, 3, cfg.CatchpointFileHistoryLength) + require.Equal(t, int64(2), cfg.CatchpointTracking) + require.True(t, cfg.EnableLedgerService) + require.True(t, cfg.EnableBlockService) + require.Equal(t, ":4160", cfg.NetAddress) + require.True(t, cfg.EnableGossipService) + require.Equal(t, config.PlaceholderPublicAddress, cfg.PublicAddress) + + require.True(t, cfg.EnableP2PHybridMode) + require.Equal(t, ":4190", cfg.P2PNetAddress) + require.True(t, cfg.EnableDHTProviders) + }) + + t.Run("valid config test hybrid archival", func(t *testing.T) { + t.Parallel() + cfg, err := getConfigForArg("hybridArchival") + require.NoError(t, err) + + require.True(t, cfg.Archival) + require.Equal(t, uint64(0), cfg.MaxBlockHistoryLookback) + require.Equal(t, 365, cfg.CatchpointFileHistoryLength) + require.Equal(t, int64(0), cfg.CatchpointTracking) + require.True(t, cfg.EnableLedgerService) + require.True(t, cfg.EnableBlockService) + require.Equal(t, ":4160", cfg.NetAddress) + require.False(t, cfg.EnableGossipService) + require.Equal(t, config.PlaceholderPublicAddress, cfg.PublicAddress) + + require.True(t, cfg.EnableP2PHybridMode) + require.Equal(t, ":4190", cfg.P2PNetAddress) + require.True(t, cfg.EnableDHTProviders) + }) + + t.Run("valid config test hybrid client", func(t *testing.T) { + t.Parallel() + cfg, err := getConfigForArg("hybridClient") + require.NoError(t, err) + + require.False(t, cfg.Archival) + require.Equal(t, uint64(0), cfg.MaxBlockHistoryLookback) + require.Equal(t, 365, cfg.CatchpointFileHistoryLength) + require.Equal(t, int64(0), cfg.CatchpointTracking) + require.False(t, cfg.EnableLedgerService) + require.False(t, cfg.EnableBlockService) + require.Empty(t, cfg.NetAddress) + // True because it is the default value, net address is blank so has no effect in practice + require.True(t, cfg.EnableGossipService) + require.Equal(t, "", cfg.PublicAddress) + + require.True(t, cfg.EnableP2PHybridMode) + require.Equal(t, "", cfg.P2PNetAddress) + require.True(t, cfg.EnableDHTProviders) + }) } diff --git a/config/config.go b/config/config.go index 2d5d0bdbfe..87440cc58a 100644 --- a/config/config.go +++ b/config/config.go @@ -104,6 +104,9 @@ const CatchpointTrackingModeTracked = 1 // as long as CatchpointInterval > 0 const CatchpointTrackingModeStored = 2 +// PlaceholderPublicAddress is a placeholder for the public address generated in certain profiles +const PlaceholderPublicAddress = "PLEASE_SET_ME" + // LoadConfigFromDisk returns a Local config structure based on merging the defaults // with settings loaded from the config file from the custom dir. 
If the custom file // cannot be loaded, the default config is returned (with the error from loading the @@ -145,6 +148,11 @@ func mergeConfigFromFile(configpath string, source Local) (Local, error) { err = loadConfig(f, &source) + // If the PublicAddress in config file has the PlaceholderPublicAddress, treat it as if it were empty + if source.PublicAddress == PlaceholderPublicAddress { + source.PublicAddress = "" + } + if source.NetAddress != "" { source.EnableLedgerService = true source.EnableBlockService = true diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 2877c6b2f3..f67f79f427 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -112,7 +112,8 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. listenAddr = parsedListenAddr } } else { - listenAddr = "/ip4/0.0.0.0/tcp/0" + // don't listen if NetAddress is not set. + listenAddr = "" } var disableMetrics = func(cfg *libp2p.Config) error { return nil } @@ -163,6 +164,11 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho // Start starts the P2P service func (s *serviceImpl) Start() error { + if s.listenAddr == "" { + // don't listen if no listen address configured + return nil + } + listenAddr, err := multiaddr.NewMultiaddr(s.listenAddr) if err != nil { s.log.Errorf("failed to create multiaddress: %s", err) diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 1ad49bd045..4d6efdac83 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -346,8 +346,11 @@ func (n *P2PNetwork) Start() error { go n.handler.messageHandlerThread(&n.wg, n.wsPeersConnectivityCheckTicker.C, n, "network", "P2PNetwork") } - n.wg.Add(1) - go n.httpdThread() + // start the HTTP server if configured to listen + if n.config.NetAddress != "" { + n.wg.Add(1) + go n.httpdThread() + } n.wg.Add(1) go n.broadcaster.broadcastThread(&n.wg, n, "network", "P2PNetwork") @@ -471,6 +474,7 @@ func (n *P2PNetwork) meshThread() { func (n *P2PNetwork) httpdThread() { defer n.wg.Done() + err := n.httpServer.Serve() if err != nil { n.log.Errorf("Error serving libp2phttp: %v", err) diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 5b3470689f..ff1f40a63c 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -67,6 +67,7 @@ func TestP2PSubmitTX(t *testing.T) { cfg := config.GetDefaultLocal() cfg.ForceFetchTransactions = true + cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) @@ -159,6 +160,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { cfg := config.GetDefaultLocal() cfg.ForceFetchTransactions = true + cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) @@ -189,6 +191,8 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { // run netC in NPN mode (no relay => no gossip sup => no TX receiving) cfg.ForceFetchTransactions = false + // Have to unset NetAddress to get IsGossipServer to return false + cfg.NetAddress = "" netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) netC.Start() @@ -253,6 +257,7 @@ func TestP2PSubmitWS(t *testing.T) { partitiontest.PartitionTest(t) cfg := config.GetDefaultLocal() + cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, 
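// Editor's note: the listen-address handling above ultimately feeds a libp2p
// multiaddr. The helper below is a hypothetical, simplified sketch of the
// "host:port" -> "/ip4/<host>/tcp/<port>" translation (IPv4 literals only);
// go-algorand's real conversion in the p2p package covers more cases.
package main

import (
	"fmt"
	"net"

	"github.com/multiformats/go-multiaddr"
)

// listenMultiaddr converts a "host:port" NetAddress into a TCP multiaddr.
// An empty host (e.g. ":4190") means "listen on all interfaces".
func listenMultiaddr(netAddress string) (multiaddr.Multiaddr, error) {
	host, port, err := net.SplitHostPort(netAddress)
	if err != nil {
		return nil, err
	}
	if host == "" {
		host = "0.0.0.0"
	}
	return multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%s", host, port))
}

func main() {
	ma, err := listenMultiaddr(":4190")
	if err != nil {
		panic(err)
	}
	fmt.Println(ma) // /ip4/0.0.0.0/tcp/4190
}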
config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) @@ -584,6 +589,7 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { partitiontest.PartitionTest(t) cfg := config.GetDefaultLocal() + cfg.NetAddress = "127.0.0.1:0" cfg.EnableDHTProviders = true log := logging.TestingLog(t) @@ -744,6 +750,7 @@ func TestP2PHTTPHandler(t *testing.T) { cfg := config.GetDefaultLocal() cfg.EnableDHTProviders = true cfg.GossipFanout = 1 + cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) @@ -812,6 +819,7 @@ func TestP2PRelay(t *testing.T) { cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses cfg.ForceFetchTransactions = true cfg.BaseLoggerDebugLevel = 5 + cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) log.Debugln("Starting netA") netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) @@ -829,6 +837,8 @@ func TestP2PRelay(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} + // Explicitly unset NetAddress for netB + cfg.NetAddress = "" log.Debugf("Starting netB with phonebook addresses %v", phoneBookAddresses) netB, err := NewP2PNetwork(log.With("net", "netB"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) require.NoError(t, err) @@ -880,8 +890,8 @@ func TestP2PRelay(t *testing.T) { counterHandler, counterDone := makeCounterHandler(1, &counter, nil) netA.RegisterProcessors(counterHandler) - // send 5 messages from both netB to netA - // since there is no node with listening address set => no messages should be received + // send 5 messages from netB to netA + // since relaying is disabled on net B => no messages should be received by net A for i := 0; i < 5; i++ { err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3, byte(i)}, true, nil) require.NoError(t, err) @@ -1031,6 +1041,7 @@ func TestP2PWantTXGossip(t *testing.T) { net.wantTXGossip.Store(true) net.nodeInfo = &nopeNodeInfo{} net.config.ForceFetchTransactions = false + net.config.NetAddress = "" net.relayMessages = false net.OnNetworkAdvance() require.Eventually(t, func() bool { net.wg.Wait(); return true }, 1*time.Second, 50*time.Millisecond) @@ -1048,6 +1059,7 @@ func TestP2PWantTXGossip(t *testing.T) { net.wantTXGossip.Store(false) net.nodeInfo = &nopeNodeInfo{} net.config.ForceFetchTransactions = false + net.config.NetAddress = "" net.relayMessages = true net.OnNetworkAdvance() require.Eventually(t, func() bool { return mockService.count.Load() == 3 }, 1*time.Second, 50*time.Millisecond) From 9c9367018eaea37653e70f368af0aafff18527e7 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 19 Jul 2024 12:06:23 -0400 Subject: [PATCH 34/82] p2p: hybrid node net identity for connection deduplication (#6035) --- config/config.go | 20 +- config/config_test.go | 185 +++++++++++++++--- config/localTemplate.go | 42 +++- config/local_defaults.go | 1 + daemon/algod/server.go | 24 ++- installer/config.json.example | 1 + netdeploy/remote/deployedNetwork.go | 10 + netdeploy/remote/nodeConfig.go | 2 + netdeploy/remote/nodecfg/nodeConfigurator.go | 22 ++- netdeploy/remote/nodecfg/nodeDir.go | 48 ++++- network/hybridNetwork.go | 10 +- network/hybridNetwork_test.go | 183 +++++++++++++++++ network/netidentity.go | 89 +++++++-- network/netidentity_test.go | 103 ++++++++-- network/p2p/logger.go | 23 ++- 
network/p2p/p2p.go | 29 ++- network/p2p/peerID_test.go | 16 ++ network/p2pNetwork.go | 90 +++++---- network/p2pNetwork_test.go | 54 ++--- network/requestLogger_test.go | 13 +- network/requestTracker_test.go | 13 +- network/wsNetwork.go | 34 ++-- network/wsNetwork_test.go | 80 ++++---- node/follower_node.go | 2 +- node/node.go | 4 +- node/node_test.go | 1 + test/testdata/configs/config-v34.json | 1 + .../recipes/scenario1s-p2p/Makefile | 10 +- .../recipes/scenario1s-p2p/README.md | 9 +- .../scenario1s-p2p/copy-node-configs.py | 136 +++++++++++-- 30 files changed, 994 insertions(+), 261 deletions(-) create mode 100644 network/hybridNetwork_test.go diff --git a/config/config.go b/config/config.go index 87440cc58a..65d711cacc 100644 --- a/config/config.go +++ b/config/config.go @@ -23,6 +23,7 @@ import ( "os" "os/user" "path/filepath" + "strings" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/codecs" @@ -147,7 +148,17 @@ func mergeConfigFromFile(configpath string, source Local) (Local, error) { defer f.Close() err = loadConfig(f, &source) + if err != nil { + return source, err + } + source, err = enrichNetworkingConfig(source) + return source, err +} +// enrichNetworkingConfig makes the following tweaks to the config: +// - If NetAddress is set, enable the ledger and block services +// - If EnableP2PHybridMode is set, require PublicAddress to be set +func enrichNetworkingConfig(source Local) (Local, error) { // If the PublicAddress in config file has the PlaceholderPublicAddress, treat it as if it were empty if source.PublicAddress == PlaceholderPublicAddress { source.PublicAddress = "" @@ -163,8 +174,13 @@ func mergeConfigFromFile(configpath string, source Local) (Local, error) { source.GossipFanout = defaultRelayGossipFanout } } - - return source, err + // In hybrid mode we want to prevent connections from the same node over both P2P and WS. + // The only way it is supported at the moment is to use net identity challenge that is based on PublicAddress. 
+ if (source.NetAddress != "" || source.P2PNetAddress != "") && source.EnableP2PHybridMode && source.PublicAddress == "" { + return source, errors.New("PublicAddress must be specified when EnableP2PHybridMode is set") + } + source.PublicAddress = strings.ToLower(source.PublicAddress) + return source, nil } func loadConfig(reader io.Reader, config *Local) error { diff --git a/config/config_test.go b/config/config_test.go index 432c0f9281..20338766c3 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -121,6 +121,62 @@ func TestLocal_MergeConfig(t *testing.T) { require.Equal(t, c1.GossipFanout, c2.GossipFanout) } +func TestLocal_EnrichNetworkingConfig(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + c1 := Local{ + NetAddress: "test1", + GossipFanout: defaultLocal.GossipFanout, + } + c2, err := enrichNetworkingConfig(c1) + require.NoError(t, err) + require.NotEqual(t, c1, c2) + require.False(t, c1.EnableLedgerService) + require.False(t, c1.EnableBlockService) + require.Equal(t, c1.GossipFanout, defaultLocal.GossipFanout) + require.True(t, c2.EnableLedgerService) + require.True(t, c2.EnableBlockService) + require.Equal(t, c2.GossipFanout, defaultRelayGossipFanout) + + c1 = Local{ + EnableP2PHybridMode: true, + } + c2, err = enrichNetworkingConfig(c1) + require.NoError(t, err) + + c1 = Local{ + NetAddress: "test1", + EnableP2PHybridMode: true, + } + c2, err = enrichNetworkingConfig(c1) + require.ErrorContains(t, err, "PublicAddress must be specified when EnableP2PHybridMode is set") + + c1 = Local{ + P2PNetAddress: "test1", + EnableP2PHybridMode: true, + } + c2, err = enrichNetworkingConfig(c1) + require.ErrorContains(t, err, "PublicAddress must be specified when EnableP2PHybridMode is set") + + c1 = Local{ + EnableP2PHybridMode: true, + PublicAddress: "test2", + } + c2, err = enrichNetworkingConfig(c1) + require.NoError(t, err) + require.Equal(t, c1, c2) + require.True(t, c2.EnableP2PHybridMode) + require.NotEmpty(t, c2.PublicAddress) + + c1 = Local{ + PublicAddress: "R1.test3.my-domain.tld", + } + c2, err = enrichNetworkingConfig(c1) + require.NoError(t, err) + require.Equal(t, "r1.test3.my-domain.tld", c2.PublicAddress) +} + func saveFullPhonebook(phonebook phonebookBlackWhiteList, saveToDir string) error { filename := filepath.Join(saveToDir, PhonebookFilename) f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) @@ -559,9 +615,68 @@ func TestLocal_IsGossipServer(t *testing.T) { cfg := GetDefaultLocal() require.False(t, cfg.IsGossipServer()) + require.False(t, cfg.IsWsGossipServer()) + require.False(t, cfg.IsP2PGossipServer()) cfg.NetAddress = ":4160" require.True(t, cfg.IsGossipServer()) + require.True(t, cfg.IsWsGossipServer()) + require.False(t, cfg.IsP2PGossipServer()) + + cfg.EnableGossipService = false + // EnableGossipService does not matter + require.True(t, cfg.IsGossipServer()) + require.True(t, cfg.IsWsGossipServer()) + require.False(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2P = true + cfg.NetAddress = ":4160" + require.True(t, cfg.IsGossipServer()) + require.False(t, cfg.IsWsGossipServer()) + require.True(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2P = false + + cfg.EnableP2PHybridMode = true + // with net address set it is ws net gossip server + require.True(t, cfg.IsGossipServer()) + require.True(t, cfg.IsWsGossipServer()) + require.False(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2PHybridMode = true + cfg.NetAddress = "" + require.False(t, cfg.IsGossipServer()) + require.False(t, cfg.IsWsGossipServer()) + 
require.False(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2PHybridMode = true + cfg.P2PNetAddress = ":4190" + require.True(t, cfg.IsGossipServer()) + require.False(t, cfg.IsWsGossipServer()) + require.True(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2PHybridMode = true + cfg.NetAddress = ":4160" + cfg.P2PNetAddress = ":4190" + require.True(t, cfg.IsGossipServer()) + require.True(t, cfg.IsWsGossipServer()) + require.True(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2PHybridMode = true + cfg.EnableP2P = true + cfg.NetAddress = ":4160" + cfg.P2PNetAddress = ":4190" + require.True(t, cfg.IsGossipServer()) + require.True(t, cfg.IsWsGossipServer()) + require.True(t, cfg.IsP2PGossipServer()) + + cfg.EnableP2PHybridMode = true + cfg.EnableP2P = true + cfg.NetAddress = ":4160" + cfg.P2PNetAddress = "" + require.True(t, cfg.IsGossipServer()) + require.True(t, cfg.IsWsGossipServer()) + require.False(t, cfg.IsP2PGossipServer()) } func TestLocal_RecalculateConnectionLimits(t *testing.T) { @@ -569,45 +684,59 @@ func TestLocal_RecalculateConnectionLimits(t *testing.T) { t.Parallel() var tests = []struct { - maxFDs uint64 - reservedIn uint64 - restSoftIn uint64 - restHardIn uint64 - incomingIn int - - updated bool - restSoftExp uint64 - restHardExp uint64 - incomingExp int + maxFDs uint64 + reservedIn uint64 + restSoftIn uint64 + restHardIn uint64 + incomingIn int + p2pIncomingIn int + + updated bool + restSoftExp uint64 + restHardExp uint64 + incomingExp int + p2pIncomingExp int }{ - {100, 10, 20, 40, 50, false, 20, 40, 50}, // no change - {100, 10, 20, 50, 50, true, 20, 40, 50}, // borrow from rest - {100, 10, 25, 50, 50, true, 25, 40, 50}, // borrow from rest - {100, 10, 50, 50, 50, true, 40, 40, 50}, // borrow from rest, update soft - {100, 10, 9, 19, 81, true, 9, 10, 80}, // borrow from both rest and incoming - {100, 10, 10, 20, 80, true, 10, 10, 80}, // borrow from both rest and incoming - {100, 50, 10, 30, 40, true, 10, 10, 40}, // borrow from both rest and incoming - {100, 90, 10, 30, 40, true, 10, 10, 0}, // borrow from both rest and incoming, clear incoming - {4096, 256, 1024, 2048, 2400, true, 1024, 1440, 2400}, // real numbers - {5000, 256, 1024, 2048, 2400, false, 1024, 2048, 2400}, // real numbers + {100, 10, 20, 40, 50, 0, false, 20, 40, 50, 0}, // no change + {100, 10, 20, 50, 50, 0, true, 20, 40, 50, 0}, // borrow from rest + {100, 10, 25, 50, 50, 0, true, 25, 40, 50, 0}, // borrow from rest + {100, 10, 25, 50, 50, 50, true, 10, 10, 40, 40}, // borrow from rest for incoming and p2p incoming + {100, 10, 50, 50, 50, 0, true, 40, 40, 50, 0}, // borrow from rest, update soft + {100, 10, 50, 50, 40, 10, true, 40, 40, 40, 10}, // borrow from rest, update soft for incoming and p2p incoming + {100, 10, 9, 19, 81, 0, true, 9, 10, 80, 0}, // borrow from both rest and incoming + {100, 10, 9, 19, 41, 41, true, 9, 10, 40, 40}, // borrow from both rest and incoming for incoming and p2p incoming + {100, 90, 10, 30, 40, 0, true, 10, 10, 0, 0}, // borrow from both rest and incoming, clear incoming + {100, 90, 10, 30, 40, 40, true, 10, 10, 0, 0}, // borrow from both rest and incoming, clear incoming + {100, 90, 10, 30, 50, 40, true, 10, 10, 0, 0}, // borrow from both rest and incoming, clear incoming + {4096, 256, 1024, 2048, 2400, 0, true, 1024, 1440, 2400, 0}, // real numbers + {5000, 256, 1024, 2048, 2400, 0, false, 1024, 2048, 2400, 0}, // real numbers + {4096, 256, 1024, 2048, 2400, 1200, true, 240, 240, 2400, 1200}, // real numbers + {6000, 256, 1024, 2048, 2400, 1200, false, 1024, 2048, 2400, 1200}, // 
real numbers } for i, test := range tests { test := test - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(fmt.Sprintf("test=%d", i), func(t *testing.T) { t.Parallel() c := Local{ - RestConnectionsSoftLimit: test.restSoftIn, - RestConnectionsHardLimit: test.restHardIn, - IncomingConnectionsLimit: test.incomingIn, + NetAddress: ":4160", + RestConnectionsSoftLimit: test.restSoftIn, + RestConnectionsHardLimit: test.restHardIn, + IncomingConnectionsLimit: test.incomingIn, + P2PIncomingConnectionsLimit: test.p2pIncomingIn, + } + if test.p2pIncomingIn > 0 { + c.EnableP2PHybridMode = true + c.P2PNetAddress = ":4190" } - requireFDs := test.reservedIn + test.restHardIn + uint64(test.incomingIn) + requireFDs := test.reservedIn + test.restHardIn + uint64(test.incomingIn) + uint64(test.p2pIncomingIn) res := c.AdjustConnectionLimits(requireFDs, test.maxFDs) require.Equal(t, test.updated, res) - require.Equal(t, test.restSoftExp, c.RestConnectionsSoftLimit) - require.Equal(t, test.restHardExp, c.RestConnectionsHardLimit) - require.Equal(t, test.incomingExp, c.IncomingConnectionsLimit) + require.Equal(t, int(test.restSoftExp), int(c.RestConnectionsSoftLimit)) + require.Equal(t, int(test.restHardExp), int(c.RestConnectionsHardLimit)) + require.Equal(t, int(test.incomingExp), int(c.IncomingConnectionsLimit)) + require.Equal(t, int(test.p2pIncomingExp), int(c.P2PIncomingConnectionsLimit)) }) } } diff --git a/config/localTemplate.go b/config/localTemplate.go index 314b83a78b..9583a194cd 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -134,6 +134,8 @@ type Local struct { // Estimating 1.5MB per incoming connection, 1.5MB*2400 = 3.6GB IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800" version[27]:"2400"` + P2PIncomingConnectionsLimit int `version[34]:"1200"` + // BroadcastConnectionsLimit specifies the number of connections that // will receive broadcast (gossip) messages from this node. If the // node has more connections than this number, it will send broadcasts @@ -602,6 +604,7 @@ type Local struct { EnableP2P bool `version[31]:"false"` // EnableP2PHybridMode turns on both websockets and P2P networking. + // Enabling this setting also requires PublicAddress to be set. EnableP2PHybridMode bool `version[34]:"false"` // P2PNetAddress sets the listen address used for P2P networking, if hybrid mode is set. @@ -734,10 +737,21 @@ func (cfg Local) TxFilterCanonicalEnabled() bool { return cfg.TxIncomingFilteringFlags&txFilterCanonical != 0 } -// IsGossipServer returns true if NetAddress is set and this node supposed -// to start websocket server +// IsGossipServer returns true if this node supposed to start websocket or p2p server func (cfg Local) IsGossipServer() bool { - return cfg.NetAddress != "" + return cfg.IsWsGossipServer() || cfg.IsP2PGossipServer() +} + +// IsWsGossipServer returns true if a node configured to run a listening ws net +func (cfg Local) IsWsGossipServer() bool { + // 1. NetAddress is set and EnableP2P is not set + // 2. 
NetAddress is set and EnableP2PHybridMode is set then EnableP2P is overridden by EnableP2PHybridMode + return cfg.NetAddress != "" && (!cfg.EnableP2P || cfg.EnableP2PHybridMode) +} + +// IsP2PGossipServer returns true if a node configured to run a listening p2p net +func (cfg Local) IsP2PGossipServer() bool { + return (cfg.EnableP2P && !cfg.EnableP2PHybridMode && cfg.NetAddress != "") || (cfg.EnableP2PHybridMode && cfg.P2PNetAddress != "") } // ensureAbsGenesisDir will convert a path to absolute, and will attempt to make a genesis directory there @@ -935,10 +949,24 @@ func (cfg *Local) AdjustConnectionLimits(requiredFDs, maxFDs uint64) bool { if cfg.RestConnectionsHardLimit <= diff+reservedRESTConns { restDelta := diff + reservedRESTConns - cfg.RestConnectionsHardLimit cfg.RestConnectionsHardLimit = reservedRESTConns - if cfg.IncomingConnectionsLimit > int(restDelta) { - cfg.IncomingConnectionsLimit -= int(restDelta) - } else { - cfg.IncomingConnectionsLimit = 0 + splitRatio := 1 + if cfg.IsWsGossipServer() && cfg.IsP2PGossipServer() { + // split the rest of the delta between ws and p2p evenly + splitRatio = 2 + } + if cfg.IsWsGossipServer() { + if cfg.IncomingConnectionsLimit > int(restDelta) { + cfg.IncomingConnectionsLimit -= int(restDelta) / splitRatio + } else { + cfg.IncomingConnectionsLimit = 0 + } + } + if cfg.IsP2PGossipServer() { + if cfg.P2PIncomingConnectionsLimit > int(restDelta) { + cfg.P2PIncomingConnectionsLimit -= int(restDelta) / splitRatio + } else { + cfg.P2PIncomingConnectionsLimit = 0 + } } } else { cfg.RestConnectionsHardLimit -= diff diff --git a/config/local_defaults.go b/config/local_defaults.go index ae2ed22ebf..57457531be 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -119,6 +119,7 @@ var defaultLocal = Local{ OptimizeAccountsDatabaseOnStartup: false, OutgoingMessageFilterBucketCount: 3, OutgoingMessageFilterBucketSize: 128, + P2PIncomingConnectionsLimit: 1200, P2PNetAddress: "", P2PPersistPeerID: false, P2PPrivateKeyLocation: "", diff --git a/daemon/algod/server.go b/daemon/algod/server.go index c43b0b0693..309fdc5799 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -149,9 +149,21 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes if cfg.IsGossipServer() { var ot basics.OverflowTracker - fdRequired = ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit)+network.ReservedHealthServiceConnections) + fdRequired = ot.Add(fdRequired, network.ReservedHealthServiceConnections) if ot.Overflowed { - return errors.New("Initialize() overflowed when adding up IncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease RestConnectionsHardLimit or IncomingConnectionsLimit") + return errors.New("Initialize() overflowed when adding up ReservedHealthServiceConnections to the existing RLIMIT_NOFILE value; decrease RestConnectionsHardLimit") + } + if cfg.IsWsGossipServer() { + fdRequired = ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit)) + if ot.Overflowed { + return errors.New("Initialize() overflowed when adding up IncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease IncomingConnectionsLimit") + } + } + if cfg.IsP2PGossipServer() { + fdRequired = ot.Add(fdRequired, uint64(cfg.P2PIncomingConnectionsLimit)) + if ot.Overflowed { + return errors.New("Initialize() overflowed when adding up P2PIncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease P2PIncomingConnectionsLimit") + } } _, hard, fdErr := util.GetFdLimits() if fdErr != nil { @@ -164,14 +176,18 
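// Editor's note: a rough, worked sketch of the descriptor budget behind the
// accounting above, assuming the shipped defaults (ReservedFDs=256,
// RestConnectionsHardLimit=2048, IncomingConnectionsLimit=2400,
// P2PIncomingConnectionsLimit=1200). It reproduces only the additions, not the
// overflow tracking or the AdjustConnectionLimits fallback.
package main

import (
	"fmt"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/network"
)

func main() {
	cfg := config.GetDefaultLocal()
	cfg.EnableP2PHybridMode = true
	cfg.NetAddress = ":4160"
	cfg.P2PNetAddress = ":4190"
	cfg.PublicAddress = "r1.testnet.example.net:4160" // required in hybrid mode

	fdRequired := cfg.ReservedFDs + cfg.RestConnectionsHardLimit +
		uint64(network.ReservedHealthServiceConnections)
	if cfg.IsWsGossipServer() {
		fdRequired += uint64(cfg.IncomingConnectionsLimit) // ws incoming: 2400
	}
	if cfg.IsP2PGossipServer() {
		fdRequired += uint64(cfg.P2PIncomingConnectionsLimit) // p2p incoming: 1200
	}
	// 256 + 2048 + 2400 + 1200 + ReservedHealthServiceConnections
	fmt.Println(fdRequired)
}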
@@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes // but try to keep cfg.ReservedFDs untouched by decreasing other limits if cfg.AdjustConnectionLimits(fdRequired, hard) { s.log.Warnf( - "Updated connection limits: RestConnectionsSoftLimit=%d, RestConnectionsHardLimit=%d, IncomingConnectionsLimit=%d", + "Updated connection limits: RestConnectionsSoftLimit=%d, RestConnectionsHardLimit=%d, IncomingConnectionsLimit=%d, P2PIncomingConnectionsLimit=%d", cfg.RestConnectionsSoftLimit, cfg.RestConnectionsHardLimit, cfg.IncomingConnectionsLimit, + cfg.P2PIncomingConnectionsLimit, ) - if cfg.IncomingConnectionsLimit == 0 { + if cfg.IsWsGossipServer() && cfg.IncomingConnectionsLimit == 0 { return errors.New("Initialize() failed to adjust connection limits") } + if cfg.IsP2PGossipServer() && cfg.P2PIncomingConnectionsLimit == 0 { + return errors.New("Initialize() failed to adjust p2p connection limits") + } } } fdErr = util.SetFdSoftLimit(maxFDs) diff --git a/installer/config.json.example b/installer/config.json.example index 7f16155303..3a9714bbfb 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -98,6 +98,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "P2PIncomingConnectionsLimit": 1200, "P2PNetAddress": "", "P2PPersistPeerID": false, "P2PPrivateKeyLocation": "", diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index 25de422026..ce72071ff0 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -1005,6 +1005,16 @@ func createHostSpec(host HostConfig, template cloudHost) (hostSpec cloudHostSpec portList = append(portList, strconv.Itoa(port)) } } + if node.P2PNetAddress != "" { + port, err = extractPublicPort(node.P2PNetAddress) + if err != nil { + return + } + if !ports[port] { + ports[port] = true + portList = append(portList, strconv.Itoa(port)) + } + } // See if the APIEndpoint is open to the public, and if so add it // Error means it's not valid/specified as public port diff --git a/netdeploy/remote/nodeConfig.go b/netdeploy/remote/nodeConfig.go index 4880d76eb9..bd4b63dac8 100644 --- a/netdeploy/remote/nodeConfig.go +++ b/netdeploy/remote/nodeConfig.go @@ -35,6 +35,8 @@ type NodeConfig struct { DeadlockOverride int `json:",omitempty"` // -1 = Disable deadlock detection, 0 = Use Default for build, 1 = Enable ConfigJSONOverride string `json:",omitempty"` // Raw json to merge into config.json after other modifications are complete P2PBootstrap bool // True if this node should be a p2p bootstrap node and registered in DNS + P2PNetAddress string `json:",omitempty"` + PublicAddress bool // NodeNameMatchRegex is tested against Name in generated configs and if matched the rest of the configs in this record are applied as a template NodeNameMatchRegex string `json:",omitempty"` diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go index 842570bfc8..8e6bea9718 100644 --- a/netdeploy/remote/nodecfg/nodeConfigurator.go +++ b/netdeploy/remote/nodecfg/nodeConfigurator.go @@ -93,6 +93,10 @@ func (nc *nodeConfigurator) apply(rootConfigDir, rootNodeDir string) (err error) nc.genesisFile = filepath.Join(rootConfigDir, "genesisdata", config.GenesisJSONFile) nc.genesisData, err = bookkeeping.LoadGenesisFromFile(nc.genesisFile) + if err != nil { + return fmt.Errorf("error loading genesis from '%s': %v", nc.genesisFile, err) + + } nodeDirs, err 
:= nc.prepareNodeDirs(nc.config.Nodes, rootConfigDir, rootNodeDir) if err != nil { return fmt.Errorf("error preparing node directories: %v", err) @@ -198,6 +202,11 @@ func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootCon return } +// getHostName creates a DNS name for a host +func (nc *nodeConfigurator) getNetworkHostName() string { + return nc.config.Name + "." + string(nc.genesisData.Network) + ".algodev.network" +} + func (nc *nodeConfigurator) registerDNSRecords() (err error) { cfZoneID, cfToken, err := getClouldflareCredentials() if err != nil { @@ -210,12 +219,13 @@ func (nc *nodeConfigurator) registerDNSRecords() (err error) { const weight = 1 const relayBootstrap = "_algobootstrap" const metricsSrv = "_metrics" + const tcpProto = "_tcp" const proxied = false // If we need to register anything, first register a DNS entry // to map our network DNS name to our public name (or IP) provided to nodecfg // Network HostName = eg r1.testnet.algodev.network - networkHostName := nc.config.Name + "." + string(nc.genesisData.Network) + ".algodev.network" + networkHostName := nc.getNetworkHostName() isIP := net.ParseIP(nc.dnsName) != nil var recordType string if isIP { @@ -232,9 +242,10 @@ func (nc *nodeConfigurator) registerDNSRecords() (err error) { if parseErr != nil { return parseErr } - fmt.Fprintf(os.Stdout, "...... Adding Relay SRV Record '%s' -> '%s' .\n", entry.srvName, networkHostName) + fmt.Fprintf(os.Stdout, "...... Adding Relay SRV Record [%s.%s] '%s' [%d %d] -> '%s' .\n", + relayBootstrap, tcpProto, entry.srvName, priority, port, networkHostName) err = cloudflareDNS.SetSRVRecord(context.Background(), entry.srvName, networkHostName, - cloudflare.AutomaticTTL, priority, uint(port), relayBootstrap, "_tcp", weight) + cloudflare.AutomaticTTL, priority, uint(port), relayBootstrap, tcpProto, weight) if err != nil { return } @@ -246,9 +257,10 @@ func (nc *nodeConfigurator) registerDNSRecords() (err error) { fmt.Fprintf(os.Stdout, "Error parsing port for srv record: %s (port %v)\n", parseErr, entry) return parseErr } - fmt.Fprintf(os.Stdout, "...... Adding Metrics SRV Record '%s' -> '%s' .\n", entry.srvName, networkHostName) + fmt.Fprintf(os.Stdout, "...... 
Adding Metrics SRV Record [%s.%s] '%s' [%d %d] -> '%s' .\n", + metricsSrv, tcpProto, entry.srvName, priority, port, networkHostName) err = cloudflareDNS.SetSRVRecord(context.Background(), entry.srvName, networkHostName, - cloudflare.AutomaticTTL, priority, uint(port), metricsSrv, "_tcp", weight) + cloudflare.AutomaticTTL, priority, uint(port), metricsSrv, tcpProto, weight) if err != nil { fmt.Fprintf(os.Stdout, "Error creating srv record: %s (%v)\n", err, entry) return diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go index bdfc037438..304fa4c636 100644 --- a/netdeploy/remote/nodecfg/nodeDir.go +++ b/netdeploy/remote/nodecfg/nodeDir.go @@ -104,6 +104,11 @@ func (nd *nodeDir) configure() (err error) { return } + if err = nd.configurePublicAddress(nd.PublicAddress); err != nil { + fmt.Fprintf(os.Stdout, "Error during configurePublicAddress: %s\n", err) + return + } + if err = nd.configureP2PDNSBootstrap(nd.P2PBootstrap); err != nil { fmt.Fprintf(os.Stdout, "Error during configureP2PDNSBootstrap: %s\n", err) return @@ -155,15 +160,46 @@ func (nd *nodeDir) configureNetAddress() (err error) { fmt.Fprintf(os.Stdout, " - Assigning NetAddress: %s\n", nd.NetAddress) nd.config.NetAddress = nd.NetAddress if nd.IsRelay() && nd.NetAddress[0] == ':' { - fmt.Fprintf(os.Stdout, " - adding to relay addresses\n") - for _, bootstrapRecord := range nd.config.DNSBootstrapArray(nd.configurator.genesisData.Network) { - nd.configurator.addRelaySrv(bootstrapRecord.PrimarySRVBootstrap, nd.NetAddress) + if nd.config.EnableP2P && !nd.config.EnableP2PHybridMode { + fmt.Fprintf(os.Stdout, " - skipping relay addresses - p2p mode\n") + } else { + fmt.Fprintf(os.Stdout, " - adding to relay addresses\n") + for _, bootstrapRecord := range nd.config.DNSBootstrapArray(nd.configurator.genesisData.Network) { + nd.configurator.addRelaySrv(bootstrapRecord.PrimarySRVBootstrap, nd.NetAddress) + } } } + if nd.P2PNetAddress != "" { + fmt.Fprintf(os.Stdout, " - Assigning P2PNetAddress: %s\n", nd.P2PNetAddress) + nd.config.P2PNetAddress = nd.P2PNetAddress + } err = nd.saveConfig() return } +func (nd *nodeDir) configurePublicAddress(publicAddress bool) error { + if !publicAddress { + return nil + } + if !nd.IsRelay() { + return errors.New("publicAddress is only valid for relay nodes") + } + if nd.config.EnableP2P && !nd.config.EnableP2PHybridMode { + return errors.New("publicAddress is only valid websocket gossip node or a hybrid mode node") + } + + if err := nd.ensureConfig(); err != nil { + return err + } + + if nd.NetAddress[0] == ':' { + networkHostName := nd.configurator.getNetworkHostName() + nd.NetAddress + nd.config.PublicAddress = strings.ToLower(networkHostName) + fmt.Fprintf(os.Stdout, " - Assigning PublicAddress: %s\n", networkHostName) + } + return nd.saveConfig() +} + func (nd *nodeDir) configureP2PDNSBootstrap(p2pBootstrap bool) error { if !p2pBootstrap { return nil @@ -179,7 +215,7 @@ func (nd *nodeDir) configureP2PDNSBootstrap(p2pBootstrap bool) error { if !nd.config.EnableP2P && !nd.config.EnableP2PHybridMode { return errors.New("p2p bootstrap requires EnableP2P or EnableP2PHybridMode to be set") } - if nd.NetAddress == "" && nd.config.P2PNetAddress == "" { + if nd.NetAddress == "" && nd.P2PNetAddress == "" { return errors.New("p2p bootstrap requires NetAddress or P2PNetAddress to be set") } if !nd.config.EnableGossipService { @@ -187,8 +223,8 @@ func (nd *nodeDir) configureP2PDNSBootstrap(p2pBootstrap bool) error { } netAddress := nd.NetAddress - if nd.config.P2PNetAddress != "" { 
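// Editor's note: the records registered above follow the standard DNS SRV
// naming scheme (_service._proto.name). The sketch below shows how such relay
// bootstrap records can be resolved with the Go standard library; the network
// name used here is an illustrative placeholder.
package main

import (
	"fmt"
	"net"
)

func main() {
	// net.LookupSRV builds the _algobootstrap._tcp.testnet.algodev.network query.
	cname, srvs, err := net.LookupSRV("algobootstrap", "tcp", "testnet.algodev.network")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("cname:", cname)
	for _, srv := range srvs {
		// Priority and Weight mirror the values passed to SetSRVRecord above.
		fmt.Printf("%s:%d (priority %d, weight %d)\n", srv.Target, srv.Port, srv.Priority, srv.Weight)
	}
}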
- netAddress = nd.config.P2PNetAddress + if nd.P2PNetAddress != "" { + netAddress = nd.P2PNetAddress } key, err := p2p.GetPrivKey(config.Local{P2PPersistPeerID: true}, nd.dataDir) diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index 27fc6edbb0..d30e03cee2 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -42,11 +42,17 @@ func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, p // supply alternate NetAddress for P2P network p2pcfg := cfg p2pcfg.NetAddress = cfg.P2PNetAddress - p2pnet, err := NewP2PNetwork(log, p2pcfg, datadir, phonebookAddresses, genesisID, networkID, nodeInfo) + identityTracker := NewIdentityTracker() + p2pnet, err := NewP2PNetwork(log, p2pcfg, datadir, phonebookAddresses, genesisID, networkID, nodeInfo, &identityOpts{tracker: identityTracker}) if err != nil { return nil, err } - wsnet, err := NewWebsocketNetwork(log, cfg, phonebookAddresses, genesisID, networkID, nodeInfo, p2pnet.PeerID(), p2pnet.PeerIDSigner()) + + identOpts := identityOpts{ + tracker: identityTracker, + scheme: NewIdentityChallengeScheme(NetIdentityDedupNames(cfg.PublicAddress, p2pnet.PeerID().String()), NetIdentitySigner(p2pnet.PeerIDSigner())), + } + wsnet, err := NewWebsocketNetwork(log, cfg, phonebookAddresses, genesisID, networkID, nodeInfo, &identOpts) if err != nil { return nil, err } diff --git a/network/hybridNetwork_test.go b/network/hybridNetwork_test.go new file mode 100644 index 0000000000..7c76c1e38e --- /dev/null +++ b/network/hybridNetwork_test.go @@ -0,0 +1,183 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package network + +import ( + "net/url" + "testing" + "time" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +// TestHybridNetwork_DuplicateConn checks the same nodes do not connect over ws and p2p. +// Scenario: +// 1. Create a hybrid network: relay and two nodes +// 2. Let them connect to the relay +// 3. Ensure relay has only two connections +// 4. 
Ensure extra connection attempts were rejected by nodes rather than relay +func TestHybridNetwork_DuplicateConn(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := config.GetDefaultLocal() + cfg.EnableP2PHybridMode = true + log := logging.TestingLog(t) + const p2pKeyDir = "" + + identDiscValue := networkPeerIdentityDisconnect.GetUint64Value() + + relayCfg := cfg + relayCfg.ForceRelayMessages = true + netA, err := NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisID, "net", &nopeNodeInfo{}) + require.NoError(t, err) + + err = netA.Start() + require.NoError(t, err) + + // collect ws address + addr, portListen := netA.wsNetwork.Address() + require.True(t, portListen) + require.NotZero(t, addr) + parsed, err := url.Parse(addr) + require.NoError(t, err) + addr = parsed.Host + netA.Stop() + + // make it net address and restart the node + relayCfg.NetAddress = addr + relayCfg.PublicAddress = addr + relayCfg.P2PNetAddress = ":0" + netA, err = NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisID, "net", &nopeNodeInfo{}) + require.NoError(t, err) + + err = netA.Start() + require.NoError(t, err) + defer netA.Stop() + + // collect relay address and prepare nodes phonebook + peerInfoA := netA.p2pNetwork.service.AddrInfo() + addrsAp2p, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsAp2p[0]) + multiAddrStr := addrsAp2p[0].String() + + fullAddr, portListen := netA.wsNetwork.Address() + require.True(t, portListen) + require.NotZero(t, addr) + require.Contains(t, fullAddr, addr) + + phoneBookAddresses := []string{multiAddrStr, addr} + + netB, err := NewHybridP2PNetwork(log.With("node", "netB"), cfg, "", phoneBookAddresses, genesisID, "net", &nopeNodeInfo{}) + require.NoError(t, err) + // for netB start the p2p network first + err = netB.p2pNetwork.Start() + require.NoError(t, err) + defer netB.Stop() + + netC, err := NewHybridP2PNetwork(log.With("node", "netC"), cfg, "", phoneBookAddresses, genesisID, "net", &nopeNodeInfo{}) + require.NoError(t, err) + // for netC start the ws network first + err = netC.wsNetwork.Start() + require.NoError(t, err) + defer netC.Stop() + + // ensure initial connections are done + require.Eventually(t, func() bool { + return len(netA.GetPeers(PeersConnectedIn)) == 2 + }, 3*time.Second, 50*time.Millisecond) + + // start the second half of the hybrid net + err = netB.wsNetwork.Start() + require.NoError(t, err) + err = netC.p2pNetwork.Start() + require.NoError(t, err) + + // wait for connection attempts. nodes need some time to make connections, + // and instead of `time.Sleep(1 * time.Second)` the networkPeerIdentityDisconnect net identity counter is used. + // Since this test is not parallel the networkPeerIdentityDisconnect should not be modified from outside. + // Both netB and netC are attempting to connect but netA could also open an outgoing stream in netB or netC connection. + // So, the counter should be at least 2+identDiscValue. 
+ const waitFor = 3 * time.Second + const checkEvery = 50 * time.Millisecond + const maxTicks = int(waitFor / checkEvery) + const debugThreshold = maxTicks - maxTicks/20 // log last 5% of ticks + require.Greater(t, debugThreshold, 1) + require.Less(t, debugThreshold, maxTicks) + tickCounter := 0 + require.Eventually(t, func() bool { + if tickCounter >= debugThreshold { + log.Infof("networkPeerIdentityDisconnect: %d\n", networkPeerIdentityDisconnect.GetUint64Value()) + } + tickCounter++ + return networkPeerIdentityDisconnect.GetUint64Value() >= 2+identDiscValue + }, waitFor, checkEvery) + + // now count connections + // netA should have 2 connections, not 4 + // netB should have 1 connection (via p2p) + // netC should have 1 connection (via ws) + + tickCounter = 0 + require.Eventually(t, func() bool { + if tickCounter >= debugThreshold { + netAIn := len(netA.GetPeers(PeersConnectedIn)) + netAOut := len(netA.GetPeers(PeersConnectedOut)) + netBIn := len(netB.GetPeers(PeersConnectedIn)) + netBOut := len(netB.GetPeers(PeersConnectedOut)) + netCIn := len(netC.GetPeers(PeersConnectedIn)) + netCOut := len(netC.GetPeers(PeersConnectedOut)) + log.Infof("netA in/out: %d/%d, netB in/out: %d/%d, netC in/out: %d/%d\n", netAIn, netAOut, netBIn, netBOut, netCIn, netCOut) + } + tickCounter++ + return len(netB.GetPeers(PeersConnectedOut)) == 1 + }, waitFor, checkEvery) + + tickCounter = 0 + require.Eventually(t, func() bool { + if tickCounter >= debugThreshold { + netAIn := len(netA.GetPeers(PeersConnectedIn)) + netAOut := len(netA.GetPeers(PeersConnectedOut)) + netBIn := len(netB.GetPeers(PeersConnectedIn)) + netBOut := len(netB.GetPeers(PeersConnectedOut)) + netCIn := len(netC.GetPeers(PeersConnectedIn)) + netCOut := len(netC.GetPeers(PeersConnectedOut)) + log.Infof("netA in/out: %d/%d, netB in/out: %d/%d, netC in/out: %d/%d\n", netAIn, netAOut, netBIn, netBOut, netCIn, netCOut) + } + tickCounter++ + return len(netC.GetPeers(PeersConnectedOut)) == 1 + }, waitFor, checkEvery) + + tickCounter = 0 + require.Eventually(t, func() bool { + if tickCounter >= debugThreshold { + netAIn := len(netA.GetPeers(PeersConnectedIn)) + netAOut := len(netA.GetPeers(PeersConnectedOut)) + netBIn := len(netB.GetPeers(PeersConnectedIn)) + netBOut := len(netB.GetPeers(PeersConnectedOut)) + netCIn := len(netC.GetPeers(PeersConnectedIn)) + netCOut := len(netC.GetPeers(PeersConnectedOut)) + log.Infof("netA in/out: %d/%d, netB in/out: %d/%d, netC in/out: %d/%d\n", netAIn, netAOut, netBIn, netBOut, netCIn, netCOut) + } + tickCounter++ + return len(netA.GetPeers(PeersConnectedIn)) == 2 + }, 3*time.Second, 50*time.Millisecond) +} diff --git a/network/netidentity.go b/network/netidentity.go index 4d797a1a5b..30755f0648 100644 --- a/network/netidentity.go +++ b/network/netidentity.go @@ -100,6 +100,11 @@ type identityChallengeSigner interface { PublicKey() crypto.PublicKey } +type identityOpts struct { + scheme identityChallengeScheme + tracker identityTracker +} + type identityChallengeLegacySigner struct { keys *crypto.SignatureSecrets } @@ -120,37 +125,81 @@ func (s *identityChallengeLegacySigner) PublicKey() crypto.PublicKey { // exchanging and verifying public key challenges and attaching them to headers, // or returning the message payload to be sent type identityChallengePublicKeyScheme struct { - dedupName string + dedupNames map[string]struct{} identityKeys identityChallengeSigner } +type identityChallengeSchemeConfig struct { + dedupNames []string + signer identityChallengeSigner +} + +// IdentityChallengeSchemeOption is a 
function that can be passed to NewIdentityChallengeScheme +type IdentityChallengeSchemeOption func(*identityChallengeSchemeConfig) + +// NetIdentityDedupNames is an option to set the deduplication names for the identity challenge scheme +func NetIdentityDedupNames(dn ...string) IdentityChallengeSchemeOption { + return func(c *identityChallengeSchemeConfig) { + c.dedupNames = append(c.dedupNames, dn...) + } +} + +// NetIdentitySigner is an option to set the signer for the identity challenge scheme +func NetIdentitySigner(s identityChallengeSigner) IdentityChallengeSchemeOption { + return func(c *identityChallengeSchemeConfig) { + c.signer = s + } +} + // NewIdentityChallengeScheme will create a default Identification Scheme -func NewIdentityChallengeScheme(dn string) *identityChallengePublicKeyScheme { - // without an deduplication name, there is no identityto manage, so just return an empty scheme - if dn == "" { +func NewIdentityChallengeScheme(opts ...IdentityChallengeSchemeOption) *identityChallengePublicKeyScheme { + // without an deduplication name, there is no identity to manage, so just return an empty scheme + if len(opts) == 0 { + return &identityChallengePublicKeyScheme{} + } + + config := identityChallengeSchemeConfig{} + for _, opt := range opts { + opt(&config) + } + + if len(config.dedupNames) == 0 { return &identityChallengePublicKeyScheme{} } + hasNonEmpty := false + dedupNames := make(map[string]struct{}, len(config.dedupNames)) + for _, name := range config.dedupNames { + if len(name) > 0 { + dedupNames[name] = struct{}{} + hasNonEmpty = true + } + } + if !hasNonEmpty { + return &identityChallengePublicKeyScheme{} + } + + if config.signer != nil { + return &identityChallengePublicKeyScheme{ + dedupNames: dedupNames, + identityKeys: config.signer, + } + } + var seed crypto.Seed crypto.RandBytes(seed[:]) - return &identityChallengePublicKeyScheme{ - dedupName: dn, + dedupNames: dedupNames, identityKeys: &identityChallengeLegacySigner{keys: crypto.GenerateSignatureSecrets(seed)}, } } -// NewIdentityChallengeSchemeWithSigner will create an identification Scheme with a given signer -func NewIdentityChallengeSchemeWithSigner(dn string, signer identityChallengeSigner) *identityChallengePublicKeyScheme { - return &identityChallengePublicKeyScheme{dedupName: dn, identityKeys: signer} -} - // AttachChallenge will generate a new identity challenge and will encode and attach the challenge // as a header. It returns the identityChallengeValue used for this challenge, so the network can // confirm it later (by passing it to VerifyResponse), or returns an empty challenge if dedupName is // not set. 
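// Editor's note: a short sketch, written as if it lived inside package network
// next to the options above, of how the two deployment modes assemble their
// schemes. The parameter names are placeholders; a hybrid node passes its
// configured PublicAddress, its libp2p peer ID string, and the signer returned
// by P2PNetwork.PeerIDSigner(), as shown in hybridNetwork.go above.

// newWsOnlyScheme deduplicates on the public address only and lets the scheme
// generate an ephemeral signing key.
func newWsOnlyScheme(publicAddress string) *identityChallengePublicKeyScheme {
	return NewIdentityChallengeScheme(NetIdentityDedupNames(publicAddress))
}

// newHybridScheme deduplicates on both names and signs challenges with the
// node's persistent p2p identity, so ws and p2p connections present one identity.
func newHybridScheme(publicAddress, peerID string, signer identityChallengeSigner) *identityChallengePublicKeyScheme {
	return NewIdentityChallengeScheme(
		NetIdentityDedupNames(publicAddress, peerID),
		NetIdentitySigner(signer),
	)
}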
func (i identityChallengePublicKeyScheme) AttachChallenge(attachTo http.Header, addr string) identityChallengeValue { - if i.dedupName == "" || addr == "" { + if len(i.dedupNames) == 0 || addr == "" { return identityChallengeValue{} } c := identityChallenge{ @@ -172,7 +221,7 @@ func (i identityChallengePublicKeyScheme) AttachChallenge(attachTo http.Header, // or returns empty values if the header did not end up getting set func (i identityChallengePublicKeyScheme) VerifyRequestAndAttachResponse(attachTo http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) { // if dedupName is not set, this scheme is not configured to exchange identity - if i.dedupName == "" { + if len(i.dedupNames) == 0 { return identityChallengeValue{}, crypto.PublicKey{}, nil } // if the headerString is not populated, the peer isn't participating in identity exchange @@ -193,10 +242,11 @@ func (i identityChallengePublicKeyScheme) VerifyRequestAndAttachResponse(attachT if !idChal.Verify() { return identityChallengeValue{}, crypto.PublicKey{}, fmt.Errorf("identity challenge incorrectly signed") } + // if the address is not meant for this host, return without attaching headers, // but also do not emit an error. This is because if an operator were to incorrectly // specify their dedupName, it could result in inappropriate disconnections from valid peers - if string(idChal.Msg.PublicAddress) != i.dedupName { + if _, ok := i.dedupNames[string(idChal.Msg.PublicAddress)]; !ok { return identityChallengeValue{}, crypto.PublicKey{}, nil } // make the response object, encode it and attach it to the header @@ -216,7 +266,7 @@ func (i identityChallengePublicKeyScheme) VerifyRequestAndAttachResponse(attachT // encoded identityVerificationMessage to send to the peer. Otherwise, it returns empty values. func (i identityChallengePublicKeyScheme) VerifyResponse(h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) { // if we are not participating in identity challenge exchange, do nothing (no error and no value) - if i.dedupName == "" { + if len(i.dedupNames) == 0 { return crypto.PublicKey{}, []byte{}, nil } headerString := h.Get(IdentityChallengeHeader) @@ -400,9 +450,16 @@ type identityTracker interface { setIdentity(p *wsPeer) bool } +// noopIdentityTracker implements identityTracker by doing nothing. +// Intended for pure p2p mode when libp2p is handling identities itself. +type noopIdentityTracker struct{} + +func (noopIdentityTracker) setIdentity(p *wsPeer) bool { return true } +func (noopIdentityTracker) removeIdentity(p *wsPeer) {} + // publicKeyIdentTracker implements identityTracker by // mapping from PublicKeys exchanged in identity challenges to a peer -// this structure is not thread-safe; it is protected by wn.peersLock. 
+// this structure is not thread-safe; it is protected by wn.peersLock or p2p.wsPeersLock type publicKeyIdentTracker struct { peersByID map[crypto.PublicKey]*wsPeer } diff --git a/network/netidentity_test.go b/network/netidentity_test.go index f87480c1b1..a54628a6fe 100644 --- a/network/netidentity_test.go +++ b/network/netidentity_test.go @@ -32,12 +32,15 @@ func TestIdentityChallengeSchemeAttachIfEnabled(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("") + i0 := NewIdentityChallengeScheme() + i := NewIdentityChallengeScheme(NetIdentityDedupNames("")) + require.Equal(t, i0, i) + require.Zero(t, *i) chal := i.AttachChallenge(h, "other") - require.Empty(t, h.Get(IdentityChallengeHeader)) - require.Empty(t, chal) + require.Zero(t, h.Get(IdentityChallengeHeader)) + require.Zero(t, chal) - j := NewIdentityChallengeScheme("yes") + j := NewIdentityChallengeScheme(NetIdentityDedupNames("yes")) chal = j.AttachChallenge(h, "other") require.NotEmpty(t, h.Get(IdentityChallengeHeader)) require.NotEmpty(t, chal) @@ -48,7 +51,7 @@ func TestIdentityChallengeSchemeAttachIfEnabled(t *testing.T) { func TestIdentityChallengeSchemeVerifyRequestAndAttachResponse(t *testing.T) { partitiontest.PartitionTest(t) - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) // author a challenge to the other scheme h := http.Header{} i.AttachChallenge(h, "i2") @@ -58,7 +61,7 @@ func TestIdentityChallengeSchemeVerifyRequestAndAttachResponse(t *testing.T) { h = http.Header{} i.AttachChallenge(h, "i2") r := http.Header{} - i2 := NewIdentityChallengeScheme("") + i2 := NewIdentityChallengeScheme() chal, key, err := i2.VerifyRequestAndAttachResponse(r, h) require.Empty(t, r.Get(IdentityChallengeHeader)) require.Empty(t, chal) @@ -69,7 +72,7 @@ func TestIdentityChallengeSchemeVerifyRequestAndAttachResponse(t *testing.T) { h = http.Header{} i.AttachChallenge(h, "i2") r = http.Header{} - i2 = NewIdentityChallengeScheme("not i2") + i2 = NewIdentityChallengeScheme(NetIdentityDedupNames("not i2")) chal, key, err = i2.VerifyRequestAndAttachResponse(r, h) require.Empty(t, r.Get(IdentityChallengeHeader)) require.Empty(t, chal) @@ -80,7 +83,7 @@ func TestIdentityChallengeSchemeVerifyRequestAndAttachResponse(t *testing.T) { h = http.Header{} h.Add(IdentityChallengeHeader, "garbage") r = http.Header{} - i2 = NewIdentityChallengeScheme("i2") + i2 = NewIdentityChallengeScheme(NetIdentityDedupNames("i2")) chal, key, err = i2.VerifyRequestAndAttachResponse(r, h) require.Empty(t, r.Get(IdentityChallengeHeader)) require.Empty(t, chal) @@ -91,7 +94,7 @@ func TestIdentityChallengeSchemeVerifyRequestAndAttachResponse(t *testing.T) { h = http.Header{} i.AttachChallenge(h, "i2") r = http.Header{} - i2 = NewIdentityChallengeScheme("i2") + i2 = NewIdentityChallengeScheme(NetIdentityDedupNames("i2")) chal, key, err = i2.VerifyRequestAndAttachResponse(r, h) require.NotEmpty(t, r.Get(IdentityChallengeHeader)) require.NotEmpty(t, chal) @@ -103,11 +106,11 @@ func TestIdentityChallengeNoErrorWhenNotParticipating(t *testing.T) { partitiontest.PartitionTest(t) // blank deduplication name will make the scheme a no-op - iNotParticipate := NewIdentityChallengeScheme("") + iNotParticipate := NewIdentityChallengeScheme() // create a request header first h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) origChal := i.AttachChallenge(h, "i1") require.NotEmpty(t, h.Get(IdentityChallengeHeader)) 
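// Editor's note: a simplified, self-contained sketch of the deduplication
// contract behind identityTracker: the first peer to claim a public key wins
// and a later peer presenting the same key is rejected, which is what lets the
// hybrid test above end up with exactly one connection per node. It is an
// illustration of the idea, not the package's publicKeyIdentTracker, and it
// ignores the locking mentioned above.
package main

import "fmt"

type publicKey [32]byte

type peer struct{ name string }

type identTracker struct {
	peersByID map[publicKey]*peer
}

// setIdentity records the peer for its key and reports whether the key was
// previously unclaimed; a false return is what triggers a duplicate disconnect.
func (t *identTracker) setIdentity(key publicKey, p *peer) bool {
	if _, ok := t.peersByID[key]; ok {
		return false
	}
	t.peersByID[key] = p
	return true
}

// removeIdentity frees the key when the owning peer goes away.
func (t *identTracker) removeIdentity(key publicKey, p *peer) {
	if existing, ok := t.peersByID[key]; ok && existing == p {
		delete(t.peersByID, key)
	}
}

func main() {
	tr := identTracker{peersByID: make(map[publicKey]*peer)}
	var k publicKey
	fmt.Println(tr.setIdentity(k, &peer{name: "ws conn"}))  // true: first claim
	fmt.Println(tr.setIdentity(k, &peer{name: "p2p conn"})) // false: duplicate
}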
require.NotEmpty(t, origChal) @@ -120,7 +123,7 @@ func TestIdentityChallengeNoErrorWhenNotParticipating(t *testing.T) { // create a response h2 := http.Header{} - i2 := NewIdentityChallengeScheme("i2") + i2 := NewIdentityChallengeScheme(NetIdentityDedupNames("i2")) i2.VerifyRequestAndAttachResponse(h2, h) // confirm a nil scheme will not return values or error @@ -148,7 +151,7 @@ func TestIdentityChallengeSchemeVerifyResponse(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) // author a challenge to ourselves origChal := i.AttachChallenge(h, "i1") require.NotEmpty(t, h.Get(IdentityChallengeHeader)) @@ -176,7 +179,7 @@ func TestIdentityChallengeSchemeBadSignature(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) // Copy the logic of attaching the header and signing so we can sign it wrong c := identityChallengeSigned{ Msg: identityChallenge{ @@ -204,7 +207,7 @@ func TestIdentityChallengeSchemeBadPayload(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) h.Add(IdentityChallengeHeader, "NOT VALID BASE 64! :)") // observe that VerifyRequestAndAttachResponse won't do anything on bad signature @@ -222,7 +225,7 @@ func TestIdentityChallengeSchemeBadResponseSignature(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) // author a challenge to ourselves origChal := i.AttachChallenge(h, "i1") require.NotEmpty(t, h.Get(IdentityChallengeHeader)) @@ -253,7 +256,7 @@ func TestIdentityChallengeSchemeBadResponsePayload(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) // author a challenge to ourselves origChal := i.AttachChallenge(h, "i1") require.NotEmpty(t, h.Get(IdentityChallengeHeader)) @@ -275,7 +278,7 @@ func TestIdentityChallengeSchemeWrongChallenge(t *testing.T) { partitiontest.PartitionTest(t) h := http.Header{} - i := NewIdentityChallengeScheme("i1") + i := NewIdentityChallengeScheme(NetIdentityDedupNames("i1")) // author a challenge to ourselves origChal := i.AttachChallenge(h, "i1") require.NotEmpty(t, h.Get(IdentityChallengeHeader)) @@ -366,3 +369,67 @@ func TestIdentityTrackerHandlerGuard(t *testing.T) { } require.Equal(t, OutgoingMessage{}, identityVerificationHandler(msg)) } + +// TestNewIdentityChallengeScheme ensures NewIdentityChallengeScheme returns +// a correct identityChallengePublicKeyScheme for the following inputs: +// DedupNames(a, b) vs DedupNames(a), DedupNames(b) +// Empty vs non-empty PeerID, PublicAddress +// Empty vs non-empty Signer +func TestNewIdentityChallengeScheme(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + s1 := NewIdentityChallengeScheme() + s2 := NewIdentityChallengeScheme(NetIdentityDedupNames("")) + s3 := NewIdentityChallengeScheme(NetIdentityDedupNames("", "")) + s4 := NewIdentityChallengeScheme(NetIdentityDedupNames(""), NetIdentityDedupNames("")) + require.Equal(t, s1, s2) + require.Equal(t, s2, s3) + require.Equal(t, s3, s4) + require.Zero(t, *s1) + + s1 = NewIdentityChallengeScheme(NetIdentityDedupNames("a", "a")) + s2 = 
NewIdentityChallengeScheme(NetIdentityDedupNames("a"), NetIdentityDedupNames("a")) + require.Equal(t, s1.dedupNames, s2.dedupNames) + require.Len(t, s1.dedupNames, 1) + require.IsType(t, &identityChallengeLegacySigner{}, s1.identityKeys) + require.IsType(t, &identityChallengeLegacySigner{}, s2.identityKeys) + require.NotEqual(t, s1.identityKeys, s2.identityKeys) + + s1 = NewIdentityChallengeScheme(NetIdentityDedupNames("a", "b")) + s2 = NewIdentityChallengeScheme(NetIdentityDedupNames("a"), NetIdentityDedupNames("b")) + require.Equal(t, s1.dedupNames, s2.dedupNames) + require.Len(t, s1.dedupNames, 2) + require.IsType(t, &identityChallengeLegacySigner{}, s1.identityKeys) + require.IsType(t, &identityChallengeLegacySigner{}, s2.identityKeys) + require.NotEqual(t, s1.identityKeys, s2.identityKeys) + + s1 = NewIdentityChallengeScheme(NetIdentityDedupNames("", "a")) + s2 = NewIdentityChallengeScheme(NetIdentityDedupNames("a"), NetIdentityDedupNames("")) + s3 = NewIdentityChallengeScheme(NetIdentityDedupNames("a", "")) + s4 = NewIdentityChallengeScheme(NetIdentityDedupNames(""), NetIdentityDedupNames("a")) + require.Equal(t, s1.dedupNames, s2.dedupNames) + require.Equal(t, s2.dedupNames, s3.dedupNames) + require.Equal(t, s3.dedupNames, s4.dedupNames) + require.Len(t, s1.dedupNames, 1) + require.IsType(t, &identityChallengeLegacySigner{}, s1.identityKeys) + require.IsType(t, &identityChallengeLegacySigner{}, s2.identityKeys) + require.NotEqual(t, s1.identityKeys, s2.identityKeys) + + s1 = NewIdentityChallengeScheme(NetIdentityDedupNames("a"), NetIdentitySigner(&identityChallengeLegacySigner{})) + require.Len(t, s1.dedupNames, 1) + require.IsType(t, &identityChallengeLegacySigner{}, s1.identityKeys) + + var seed crypto.Seed + crypto.RandBytes(seed[:]) + signer := &identityChallengeLegacySigner{keys: crypto.GenerateSignatureSecrets(seed)} + s1 = NewIdentityChallengeScheme(NetIdentityDedupNames("a"), NetIdentitySigner(signer)) + require.Len(t, s1.dedupNames, 1) + require.IsType(t, &identityChallengeLegacySigner{}, s1.identityKeys) + require.Equal(t, signer, s1.identityKeys) + + s1 = NewIdentityChallengeScheme(NetIdentityDedupNames(""), NetIdentitySigner(signer)) + require.Empty(t, s1) + s1 = NewIdentityChallengeScheme(NetIdentitySigner(signer)) + require.Empty(t, s1) +} diff --git a/network/p2p/logger.go b/network/p2p/logger.go index 26c738e1e1..741755745b 100644 --- a/network/p2p/logger.go +++ b/network/p2p/logger.go @@ -19,6 +19,7 @@ package p2p import ( + "errors" "runtime" "strings" @@ -55,19 +56,37 @@ type loggingCore struct { zapcore.Core } +// ErrInvalidLogLevel is returned when an invalid log level is provided. +var ErrInvalidLogLevel = errors.New("invalid log level") + // EnableP2PLogging enables libp2p logging into the provided logger with the provided level. -func EnableP2PLogging(log logging.Logger, l logging.Level) { +func EnableP2PLogging(log logging.Logger, l logging.Level) error { core := loggingCore{ log: log, level: l, } + err := SetP2PLogLevel(l) + if err != nil { + return err + } + p2plogging.SetPrimaryCore(&core) + return nil +} + +// SetP2PLogLevel sets the log level for libp2p logging. 
+func SetP2PLogLevel(l logging.Level) error { + var seen bool for p2pLevel, logLevel := range levelsMap { if logLevel == l { p2plogging.SetAllLoggers(p2plogging.LogLevel(p2pLevel)) + seen = true break } } - p2plogging.SetPrimaryCore(&core) + if !seen { + return ErrInvalidLogLevel + } + return nil } func (c *loggingCore) Enabled(l zapcore.Level) bool { diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index f67f79f427..2c64b63eab 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -38,6 +38,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" "github.com/libp2p/go-libp2p/p2p/muxer/yamux" "github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/libp2p/go-libp2p/p2p/transport/tcp" @@ -116,9 +117,14 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. listenAddr = "" } - var disableMetrics = func(cfg *libp2p.Config) error { return nil } + var enableMetrics = func(cfg *libp2p.Config) error { cfg.DisableMetrics = false; return nil } metrics.DefaultRegistry().Register(&metrics.PrometheusDefaultMetrics) + rm, err := configureResourceManager(cfg) + if err != nil { + return nil, "", err + } + host, err := libp2p.New( libp2p.Identity(privKey), libp2p.UserAgent(ua), @@ -127,11 +133,29 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. libp2p.Peerstore(pstore), libp2p.NoListenAddrs, libp2p.Security(noise.ID, noise.New), - disableMetrics, + enableMetrics, + libp2p.ResourceManager(rm), ) return host, listenAddr, err } +func configureResourceManager(cfg config.Local) (network.ResourceManager, error) { + // see https://github.com/libp2p/go-libp2p/tree/master/p2p/host/resource-manager for more details + scalingLimits := rcmgr.DefaultLimits + libp2p.SetDefaultServiceLimits(&scalingLimits) + scaledDefaultLimits := scalingLimits.AutoScale() + + limitConfig := rcmgr.PartialLimitConfig{ + System: rcmgr.ResourceLimits{ + Conns: rcmgr.LimitVal(cfg.P2PIncomingConnectionsLimit), + }, + // Everything else is default. The exact values will come from `scaledDefaultLimits` above. 
+ } + limiter := rcmgr.NewFixedLimiter(limitConfig.Build(scaledDefaultLimits)) + rm, err := rcmgr.NewResourceManager(limiter) + return rm, err +} + // MakeService creates a P2P service instance func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler, bootstrapPeers []*peer.AddrInfo) (*serviceImpl, error) { @@ -150,7 +174,6 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho return nil, err } return &serviceImpl{ - log: log, listenAddr: listenAddr, host: h, diff --git a/network/p2p/peerID_test.go b/network/p2p/peerID_test.go index 9d7729d593..beed18868c 100644 --- a/network/p2p/peerID_test.go +++ b/network/p2p/peerID_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -103,3 +104,18 @@ func TestGetPrivKeyUserGeneratedEphemeral(t *testing.T) { _, err = loadPrivateKeyFromFile(path.Join(tempdir, DefaultPrivKeyPath)) assert.True(t, os.IsNotExist(err)) } + +func TestPeerIDChallengeSigner(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + privKey, err := generatePrivKey() + require.NoError(t, err) + + data := make([]byte, 111) + crypto.RandBytes(data) + signer := PeerIDChallengeSigner{key: privKey} + pubKey := privKey.GetPublic() + pubKeyRaw, err := pubKey.Raw() + require.NoError(t, err) + require.Equal(t, crypto.PublicKey(pubKeyRaw), signer.PublicKey()) +} diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 4d6efdac83..9f5448c0ea 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -26,6 +26,7 @@ import ( "time" "github.com/algorand/go-algorand/config" + algocrypto "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/network/limitcaller" @@ -81,6 +82,8 @@ type P2PNetwork struct { nodeInfo NodeInfo pstore *peerstore.PeerStore httpServer *p2p.HTTPServer + + identityTracker identityTracker } type bootstrapper struct { @@ -191,30 +194,8 @@ type p2pPeerStats struct { txReceived atomic.Uint64 } -// gossipSubPeer implements the DeadlineSettableConn, IPAddressable, and ErlClient interfaces. 
-type gossipSubPeer struct { - peerID peer.ID - net GossipNode - routingAddr [8]byte -} - -func (p gossipSubPeer) GetNetwork() GossipNode { return p.net } - -func (p gossipSubPeer) OnClose(f func()) { - net := p.GetNetwork().(*P2PNetwork) - net.wsPeersLock.Lock() - defer net.wsPeersLock.Unlock() - if wsp, ok := net.wsPeers[p.peerID]; ok { - wsp.OnClose(f) - } -} - -func (p gossipSubPeer) RoutingAddr() []byte { - return p.routingAddr[:] -} - // NewP2PNetwork returns an instance of GossipNode that uses the p2p.Service -func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, node NodeInfo) (*P2PNetwork, error) { +func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, node NodeInfo, identityOpts *identityOpts) (*P2PNetwork, error) { const readBufferLen = 2048 // create Peerstore and add phonebook addresses @@ -262,7 +243,17 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo broadcastQueueBulk: make(chan broadcastRequest, 100), } - p2p.EnableP2PLogging(log, logging.Level(cfg.BaseLoggerDebugLevel)) + if identityOpts != nil { + net.identityTracker = identityOpts.tracker + } + if net.identityTracker == nil { + net.identityTracker = noopIdentityTracker{} + } + + err = p2p.EnableP2PLogging(log, logging.Level(cfg.BaseLoggerDebugLevel)) + if err != nil { + return nil, err + } h, la, err := p2p.MakeHost(cfg, datadir, pstore) if err != nil { @@ -380,7 +371,16 @@ func (n *P2PNetwork) Stop() { n.wsPeersConnectivityCheckTicker = nil } n.innerStop() + + // This is a workaround for a race between PubSub.processLoop (triggered by context cancellation below) termination + // and this function returning that causes main goroutine to exit before + // PubSub.processLoop goroutine finishes logging its termination message + // to already closed logger. Not seen in wild, only in tests. + if n.log.GetLevel() >= logging.Warn { + _ = p2p.SetP2PLogLevel(logging.Warn) + } n.ctxCancel() + n.service.Close() n.bootstrapperStop() n.httpServer.Close() @@ -541,8 +541,6 @@ func (n *P2PNetwork) Disconnect(badpeer DisconnectablePeer) { n.wsPeersLock.Lock() defer n.wsPeersLock.Unlock() switch p := badpeer.(type) { - case gossipSubPeer: // Disconnect came from a message received via GossipSub - peerID, wsp = p.peerID, n.wsPeers[p.peerID] case *wsPeer: // Disconnect came from a message received via wsPeer peerID, wsp = n.wsPeersToIDs[p], p default: @@ -755,21 +753,32 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea ma := stream.Conn().RemoteMultiaddr() addr := ma.String() if addr == "" { - n.log.Warnf("Could not get address for peer %s", p2pPeer) + n.log.Warnf("Cannot get address for peer %s", p2pPeer) } - // create a wsPeer for this stream and added it to the peers map. + // create a wsPeer for this stream and added it to the peers map. 
addrInfo := &peer.AddrInfo{ID: p2pPeer, Addrs: []multiaddr.Multiaddr{ma}} maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) if err != nil { client = nil } + var netIdentPeerID algocrypto.PublicKey + if p2pPeerPubKey, err0 := p2pPeer.ExtractPublicKey(); err0 == nil { + if b, err0 := p2pPeerPubKey.Raw(); err0 == nil { + netIdentPeerID = algocrypto.PublicKey(b) + } else { + n.log.Warnf("Cannot get raw pubkey for peer %s", p2pPeer) + } + } else { + n.log.Warnf("Cannot get pubkey for peer %s", p2pPeer) + } peerCore := makePeerCore(ctx, n, n.log, n.handler.readBuffer, addr, client, addr) wsp := &wsPeer{ wsPeerCore: peerCore, conn: &wsPeerConnP2PImpl{stream: stream}, outgoing: !incoming, + identity: netIdentPeerID, } protos, err := n.pstore.GetProtocols(p2pPeer) if err != nil { @@ -777,6 +786,19 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea } wsp.TelemetryGUID, wsp.InstanceName = p2p.GetPeerTelemetryInfo(protos) + localAddr, has := n.Address() + if !has { + n.log.Warn("Could not get local address") + } + n.wsPeersLock.Lock() + ok := n.identityTracker.setIdentity(wsp) + n.wsPeersLock.Unlock() + if !ok { + networkPeerIdentityDisconnect.Inc(nil) + n.log.With("remote", addr).With("local", localAddr).Warn("peer deduplicated before adding because the identity is already known") + stream.Close() + } + wsp.init(n.config, outgoingMessagesBufferSize) n.wsPeersLock.Lock() n.wsPeers[p2pPeer] = wsp @@ -790,10 +812,6 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea event = "ConnectedIn" msg = "Accepted incoming connection from peer %s" } - localAddr, has := n.Address() - if !has { - n.log.Warn("Could not get local address") - } n.log.With("event", event).With("remote", addr).With("local", localAddr).Infof(msg, p2pPeer.String()) if n.log.GetLevel() >= logging.Debug { @@ -815,6 +833,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea func (n *P2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) { remotePeerID := peer.conn.(*wsPeerConnP2PImpl).stream.Conn().RemotePeer() n.wsPeersLock.Lock() + n.identityTracker.removeIdentity(peer) delete(n.wsPeers, remotePeerID) delete(n.wsPeersToIDs, peer) n.wsPeersLock.Unlock() @@ -913,7 +932,9 @@ func (n *P2PNetwork) txTopicHandleLoop() { func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg *pubsub.Message) pubsub.ValidationResult { var routingAddr [8]byte n.wsPeersLock.Lock() - if wsp, ok := n.wsPeers[peerID]; ok { + var wsp *wsPeer + var ok bool + if wsp, ok = n.wsPeers[peerID]; ok { copy(routingAddr[:], wsp.RoutingAddr()) } else { // well, otherwise use last 8 bytes of peerID @@ -922,7 +943,8 @@ func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg * n.wsPeersLock.Unlock() inmsg := IncomingMessage{ - Sender: gossipSubPeer{peerID: msg.ReceivedFrom, net: n, routingAddr: routingAddr}, + // Sender: gossipSubPeer{peerID: msg.ReceivedFrom, net: n, routingAddr: routingAddr}, + Sender: wsp, Tag: protocol.TxnTag, Data: msg.Data, Net: n, diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index ff1f40a63c..0eac398431 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -39,7 +39,6 @@ import ( "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/protocol" 
"github.com/algorand/go-algorand/test/partitiontest" - "github.com/algorand/go-algorand/util" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/crypto" @@ -69,7 +68,7 @@ func TestP2PSubmitTX(t *testing.T) { cfg.ForceFetchTransactions = true cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) netA.Start() defer netA.Stop() @@ -81,12 +80,12 @@ func TestP2PSubmitTX(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) netB.Start() defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) netC.Start() defer netC.Stop() @@ -162,7 +161,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { cfg.ForceFetchTransactions = true cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) netA.Start() defer netA.Stop() @@ -174,7 +173,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) netB.Start() defer netB.Stop() @@ -193,7 +192,7 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { cfg.ForceFetchTransactions = false // Have to unset NetAddress to get IsGossipServer to return false cfg.NetAddress = "" - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) netC.Start() defer netC.Stop() @@ -259,7 +258,7 @@ func TestP2PSubmitWS(t *testing.T) { cfg := config.GetDefaultLocal() cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) err = netA.Start() @@ -273,13 +272,13 @@ func TestP2PSubmitWS(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netC, err := 
NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) err = netC.Start() require.NoError(t, err) @@ -393,7 +392,7 @@ func TestP2PNetworkAddress(t *testing.T) { cfg := config.GetDefaultLocal() log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) defer netA.Stop() require.NoError(t, err) addrInfo := netA.service.AddrInfo() @@ -605,7 +604,7 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, test.nis[0]) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, test.nis[0], nil) require.NoError(t, err) err = netA.Start() @@ -619,13 +618,13 @@ func TestP2PNetworkDHTCapabilities(t *testing.T) { multiAddrStr := addrsA[0].String() phoneBookAddresses := []string{multiAddrStr} - netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[1]) + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[1], nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) defer netB.Stop() - netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[2]) + netC, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, test.nis[2], nil) require.NoError(t, err) err = netC.Start() require.NoError(t, err) @@ -753,7 +752,7 @@ func TestP2PHTTPHandler(t *testing.T) { cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) - netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) h := &p2phttpHandler{t, "hello", nil} @@ -822,7 +821,7 @@ func TestP2PRelay(t *testing.T) { cfg.NetAddress = "127.0.0.1:0" log := logging.TestingLog(t) log.Debugln("Starting netA") - netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netA, err := NewP2PNetwork(log.With("net", "netA"), cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) err = netA.Start() @@ -840,7 +839,7 @@ func TestP2PRelay(t *testing.T) { // Explicitly unset NetAddress for netB cfg.NetAddress = "" log.Debugf("Starting netB with phonebook addresses %v", phoneBookAddresses) - netB, err := NewP2PNetwork(log.With("net", "netB"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netB, err := NewP2PNetwork(log.With("net", "netB"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) err = netB.Start() require.NoError(t, err) @@ -907,7 +906,7 @@ func TestP2PRelay(t *testing.T) { // ensure all messages from netB and netC are received by netA cfg.NetAddress = "127.0.0.1:0" log.Debugf("Starting netC with phonebook addresses %v", phoneBookAddresses) - netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}) + netC, err := NewP2PNetwork(log.With("net", "netC"), cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) require.NoError(t, err) require.True(t, netC.relayMessages) err = 
netC.Start() @@ -1155,22 +1154,3 @@ func TestMergeP2PAddrInfoResolvedAddresses(t *testing.T) { }) } } - -// TestP2PGossipSubPeerCasts checks that gossipSubPeer implements the ErlClient and IPAddressable interfaces -// needed by TxHandler -func TestP2PGossipSubPeerCasts(t *testing.T) { - partitiontest.PartitionTest(t) - t.Parallel() - - var g interface{} = gossipSubPeer{} - _, ok := g.(util.ErlClient) - require.True(t, ok) - - _, ok = g.(IPAddressable) - require.True(t, ok) - - // check that gossipSubPeer is hashable as ERL wants - var m map[util.ErlClient]struct{} - require.Equal(t, m[gossipSubPeer{}], struct{}{}) - require.Equal(t, m[g.(util.ErlClient)], struct{}{}) -} diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go index 0de6a41c73..c6bde8956e 100644 --- a/network/requestLogger_test.go +++ b/network/requestLogger_test.go @@ -50,12 +50,13 @@ func TestRequestLogger(t *testing.T) { dl := eventsDetailsLogger{Logger: log, eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.HTTPRequestEvent} log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) netA := &WebsocketNetwork{ - log: dl, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - GenesisID: "go-test-network-genesis", - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: log}, + log: dl, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: "go-test-network-genesis", + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, + identityTracker: noopIdentityTracker{}, } netA.config.EnableRequestLogger = true netA.setup() diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index d814507c78..46f003e0f8 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -87,12 +87,13 @@ func TestRateLimiting(t *testing.T) { // This test is conducted locally, so we want to treat all hosts the same for counting incoming requests. testConfig.DisableLocalhostConnectionRateLimit = false wn := &WebsocketNetwork{ - log: log, - config: testConfig, - phonebook: phonebook.MakePhonebook(1, 1), - GenesisID: "go-test-network-genesis", - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: log}, + log: log, + config: testConfig, + phonebook: phonebook.MakePhonebook(1, 1), + GenesisID: "go-test-network-genesis", + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, + identityTracker: noopIdentityTracker{}, } // increase the IncomingConnectionsLimit/MaxConnectionsPerIP limits, since we don't want to test these. 
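The p2pNetwork.go changes above replace the gossipSubPeer wrapper with identity-tracked wsPeers: on each incoming stream the peer's public key is extracted, setIdentity is called under wsPeersLock, a duplicate identity closes the stream, and noopIdentityTracker keeps the whole mechanism inert for pure p2p mode where libp2p already deduplicates. Below is a minimal sketch of that dedup contract, assuming simplified stand-ins for wsPeer, crypto.PublicKey and the lock-protected call sites; it mirrors the interface in the diff but is not the patch's code verbatim.

```go
package main

import (
	"fmt"
	"sync"
)

// publicKey stands in for crypto.PublicKey in the real code.
type publicKey [32]byte

// peer is a simplified stand-in for wsPeer: only the identity matters here.
type peer struct {
	identity publicKey
}

// identityTracker mirrors the interface shared by both network implementations.
type identityTracker interface {
	setIdentity(p *peer) bool
	removeIdentity(p *peer)
}

// noopIdentityTracker accepts every peer; used when libp2p handles identities itself.
type noopIdentityTracker struct{}

func (noopIdentityTracker) setIdentity(p *peer) bool { return true }
func (noopIdentityTracker) removeIdentity(p *peer)   {}

// pubKeyTracker refuses a second peer that presents an already-known identity.
type pubKeyTracker struct {
	mu        sync.Mutex // the real tracker relies on the caller's wsPeersLock instead
	peersByID map[publicKey]*peer
}

func (t *pubKeyTracker) setIdentity(p *peer) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, seen := t.peersByID[p.identity]; seen {
		return false // duplicate connection: caller should close the stream
	}
	t.peersByID[p.identity] = p
	return true
}

func (t *pubKeyTracker) removeIdentity(p *peer) {
	t.mu.Lock()
	defer t.mu.Unlock()
	delete(t.peersByID, p.identity)
}

func main() {
	var _ identityTracker = noopIdentityTracker{} // both types satisfy the interface
	tracker := &pubKeyTracker{peersByID: make(map[publicKey]*peer)}

	a := &peer{identity: publicKey{1}}
	b := &peer{identity: publicKey{1}} // same identity, second connection

	fmt.Println(tracker.setIdentity(a)) // true: first time this identity is seen
	fmt.Println(tracker.setIdentity(b)) // false: would be dropped as a duplicate
	tracker.removeIdentity(a)
	fmt.Println(tracker.setIdentity(b)) // true again once the first peer has left
}
```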
diff --git a/network/wsNetwork.go b/network/wsNetwork.go index f222d2ff27..1c0f3e8676 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -214,9 +214,6 @@ type WebsocketNetwork struct { NetworkID protocol.NetworkID RandomID string - peerID p2p.PeerID - peerIDSigner identityChallengeSigner - ready atomic.Int32 readyChan chan struct{} @@ -631,8 +628,6 @@ func (wn *WebsocketNetwork) setup() { wn.outgoingMessagesBufferSize = outgoingMessagesBufferSize wn.wsMaxHeaderBytes = wsMaxHeaderBytes - wn.identityTracker = NewIdentityTracker() - wn.broadcaster = msgBroadcaster{ ctx: wn.ctx, log: wn.log, @@ -699,7 +694,7 @@ func (wn *WebsocketNetwork) Start() error { wn.messagesOfInterestEnc = MarshallMessageOfInterestMap(wn.messagesOfInterest) } - if wn.config.IsGossipServer() { + if wn.config.IsGossipServer() || wn.config.ForceRelayMessages { listener, err := net.Listen("tcp", wn.config.NetAddress) if err != nil { wn.log.Errorf("network could not listen %v: %s", wn.config.NetAddress, err) @@ -736,16 +731,11 @@ func (wn *WebsocketNetwork) Start() error { } } // if the network has a public address or a libp2p peer ID, use that as the name for connection deduplication - if wn.config.PublicAddress != "" || (wn.peerID != "" && wn.peerIDSigner != nil) { + if wn.config.PublicAddress != "" || wn.identityScheme != nil { wn.RegisterHandlers(identityHandlers) } if wn.identityScheme == nil { - if wn.peerID != "" && wn.peerIDSigner != nil { - wn.identityScheme = NewIdentityChallengeSchemeWithSigner(string(wn.peerID), wn.peerIDSigner) - } - if wn.config.PublicAddress != "" { - wn.identityScheme = NewIdentityChallengeScheme(wn.config.PublicAddress) - } + wn.identityScheme = NewIdentityChallengeScheme(NetIdentityDedupNames(wn.config.PublicAddress)) } wn.meshUpdateRequests <- meshRequest{false, nil} @@ -2115,7 +2105,8 @@ func (wn *WebsocketNetwork) tryConnect(netAddr, gossipAddr string) { var idChallenge identityChallengeValue if wn.identityScheme != nil { - idChallenge = wn.identityScheme.AttachChallenge(requestHeader, netAddr) + theirAddr := strings.ToLower(netAddr) + idChallenge = wn.identityScheme.AttachChallenge(requestHeader, theirAddr) } // for backward compatibility, include the ProtocolVersion header as well. 
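The Start() hunk above now builds the identity scheme as NewIdentityChallengeScheme(NetIdentityDedupNames(wn.config.PublicAddress)), replacing the removed peerID/peerIDSigner pair with variadic options. The option constructors themselves are not shown in this hunk, so the sketch below only illustrates the general functional-options shape they appear to follow in the tests (blank names are ignored, repeated names collapse, and a scheme with no usable names stays zero-valued, matching the require.Zero assertions). The names withDedupNames and newScheme are hypothetical stand-ins, not the real API.

```go
package main

import "fmt"

// scheme is a simplified stand-in for identityChallengePublicKeyScheme.
type scheme struct {
	dedupNames map[string]bool
}

// option mirrors the variadic-option shape suggested by the tests above.
type option func(*scheme)

// withDedupNames is a hypothetical analogue of NetIdentityDedupNames:
// it records every non-empty name and silently drops blanks.
func withDedupNames(names ...string) option {
	return func(s *scheme) {
		for _, n := range names {
			if n == "" {
				continue
			}
			if s.dedupNames == nil {
				s.dedupNames = map[string]bool{}
			}
			s.dedupNames[n] = true
		}
	}
}

// newScheme applies the options; with no usable names the scheme stays disabled.
func newScheme(opts ...option) *scheme {
	s := &scheme{}
	for _, o := range opts {
		o(s)
	}
	return s
}

func main() {
	disabled := newScheme(withDedupNames(""))
	enabled := newScheme(withDedupNames("example.com:4160", "example.com:4160"))
	fmt.Println(len(disabled.dedupNames)) // 0: behaves like a no-op scheme
	fmt.Println(len(enabled.dedupNames))  // 1: duplicate names collapse into one entry
}
```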
@@ -2305,7 +2296,7 @@ func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{} } // NewWebsocketNetwork constructor for websockets based gossip network -func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo, peerID p2p.PeerID, idSigner identityChallengeSigner) (wn *WebsocketNetwork, err error) { +func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo, identityOpts *identityOpts) (wn *WebsocketNetwork, err error) { pb := phonebook.MakePhonebook(config.ConnectionsRateLimitingCount, time.Duration(config.ConnectionsRateLimitingWindowSeconds)*time.Second) @@ -2324,8 +2315,6 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddre GenesisID: genesisID, NetworkID: networkID, nodeInfo: nodeInfo, - peerID: peerID, - peerIDSigner: idSigner, resolveSRVRecords: tools_network.ReadFromSRV, peerStater: peerConnectionStater{ log: log, @@ -2334,13 +2323,22 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddre }, } + // initialize net identity tracker either from the provided options or with a new one + if identityOpts != nil { + wn.identityScheme = identityOpts.scheme + wn.identityTracker = identityOpts.tracker + } + if wn.identityTracker == nil { + wn.identityTracker = NewIdentityTracker() + } + wn.setup() return wn, nil } // NewWebsocketGossipNode constructs a websocket network node and returns it as a GossipNode interface implementation func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) { - return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID, nil, "", nil) + return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID, nil, nil) } // SetPrioScheme specifies the network priority scheme for a network node diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 038a9d6e2d..6af3a697fc 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -128,12 +128,13 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local, opts ...te log := logging.TestingLog(t) log.SetLevel(logging.Warn) wn := &WebsocketNetwork{ - log: log, - config: conf, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: log}, + log: log, + config: conf, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, + identityTracker: NewIdentityTracker(), } // apply options to newly-created WebsocketNetwork, if provided for _, opt := range opts { @@ -1055,12 +1056,13 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor dc.OutgoingMessageFilterBucketCount = 3 dc.OutgoingMessageFilterBucketSize = 128 wn := &WebsocketNetwork{ - log: logging.TestingLog(t).With("node", nodename), - config: dc, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: logging.TestingLog(t).With("node", nodename)}, + log: logging.TestingLog(t).With("node", nodename), + config: dc, + phonebook: phonebook.MakePhonebook(1, 
1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: logging.TestingLog(t).With("node", nodename)}, + identityTracker: noopIdentityTracker{}, } require.True(t, wn.config.EnableIncomingMessageFilter) wn.setup() @@ -1696,7 +1698,7 @@ type mockIdentityScheme struct { } func newMockIdentityScheme(t *testing.T) *mockIdentityScheme { - return &mockIdentityScheme{t: t, realScheme: NewIdentityChallengeScheme("any")} + return &mockIdentityScheme{t: t, realScheme: NewIdentityChallengeScheme(NetIdentityDedupNames("any"))} } func (i mockIdentityScheme) AttachChallenge(attach http.Header, addr string) identityChallengeValue { if i.attachChallenge != nil { @@ -1768,7 +1770,7 @@ func TestPeeringWithBadIdentityChallenge(t *testing.T) { { name: "incorrect address", attachChallenge: func(attach http.Header, addr string) identityChallengeValue { - s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + s := NewIdentityChallengeScheme(NetIdentityDedupNames("does not matter")) // make a scheme to use its keys c := identityChallenge{ Key: s.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), @@ -1786,7 +1788,7 @@ func TestPeeringWithBadIdentityChallenge(t *testing.T) { { name: "bad signature", attachChallenge: func(attach http.Header, addr string) identityChallengeValue { - s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + s := NewIdentityChallengeScheme(NetIdentityDedupNames("does not matter")) // make a scheme to use its keys c := identityChallenge{ Key: s.identityKeys.PublicKey(), Challenge: newIdentityChallengeValue(), @@ -1901,7 +1903,7 @@ func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) { { name: "incorrect original challenge", verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) { - s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + s := NewIdentityChallengeScheme(NetIdentityDedupNames("does not matter")) // make a scheme to use its keys // decode the header to an identityChallenge msg, _ := base64.StdEncoding.DecodeString(h.Get(IdentityChallengeHeader)) idChal := identityChallenge{} @@ -1924,7 +1926,7 @@ func TestPeeringWithBadIdentityChallengeResponse(t *testing.T) { { name: "bad signature", verifyAndAttachResponse: func(attach http.Header, h http.Header) (identityChallengeValue, crypto.PublicKey, error) { - s := NewIdentityChallengeScheme("does not matter") // make a scheme to use its keys + s := NewIdentityChallengeScheme(NetIdentityDedupNames("does not matter")) // make a scheme to use its keys // decode the header to an identityChallenge msg, _ := base64.StdEncoding.DecodeString(h.Get(IdentityChallengeHeader)) idChal := identityChallenge{} @@ -2056,7 +2058,7 @@ func TestPeeringWithBadIdentityVerification(t *testing.T) { resp := identityChallengeResponseSigned{} err = protocol.Decode(msg, &resp) require.NoError(t, err) - s := NewIdentityChallengeScheme("does not matter") // make a throwaway key + s := NewIdentityChallengeScheme(NetIdentityDedupNames("does not matter")) // make a throwaway key ver := identityVerificationMessageSigned{ // fill in correct ResponseChallenge field Msg: identityVerificationMessage{ResponseChallenge: resp.Msg.ResponseChallenge}, @@ -2074,7 +2076,7 @@ func TestPeeringWithBadIdentityVerification(t *testing.T) { // when the verification signature doesn't match the peer's expectation (the previously exchanged identity), peer 
is disconnected name: "bad signature", verifyResponse: func(t *testing.T, h http.Header, c identityChallengeValue) (crypto.PublicKey, []byte, error) { - s := NewIdentityChallengeScheme("does not matter") // make a throwaway key + s := NewIdentityChallengeScheme(NetIdentityDedupNames("does not matter")) // make a throwaway key ver := identityVerificationMessageSigned{ // fill in wrong ResponseChallenge field Msg: identityVerificationMessage{ResponseChallenge: newIdentityChallengeValue()}, @@ -2566,12 +2568,13 @@ func TestSlowPeerDisconnection(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Info) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: log}, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, + identityTracker: noopIdentityTracker{}, } wn.setup() wn.broadcaster.slowWritingPeerMonitorInterval = time.Millisecond * 50 @@ -2642,12 +2645,13 @@ func TestForceMessageRelaying(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: log}, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, + identityTracker: noopIdentityTracker{}, } wn.setup() wn.eventualReadyDelay = time.Second @@ -2737,12 +2741,13 @@ func TestCheckProtocolVersionMatch(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) wn := &WebsocketNetwork{ - log: log, - config: defaultConfig, - phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), - GenesisID: genesisID, - NetworkID: config.Devtestnet, - peerStater: peerConnectionStater{log: log}, + log: log, + config: defaultConfig, + phonebook: phonebook.MakePhonebook(1, 1*time.Millisecond), + GenesisID: genesisID, + NetworkID: config.Devtestnet, + peerStater: peerConnectionStater{log: log}, + identityTracker: noopIdentityTracker{}, } wn.setup() wn.supportedProtocolVersions = []string{"2", "1"} @@ -4560,7 +4565,6 @@ func TestWsNetworkPhonebookMix(t *testing.T) { "test", "net", nil, - "", nil, ) require.NoError(t, err) diff --git a/node/follower_node.go b/node/follower_node.go index 117cc56e86..7d8fc64388 100644 --- a/node/follower_node.go +++ b/node/follower_node.go @@ -94,7 +94,7 @@ func MakeFollower(log logging.Logger, rootDir string, cfg config.Local, phoneboo node.config = cfg // tie network, block fetcher, and agreement services together - p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, nil, "", nil) + p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, nil, nil) if err != nil { log.Errorf("could not create websocket node: %v", err) return nil, err diff --git a/node/node.go b/node/node.go index 5f1baa56be..b6118aadc0 100644 --- a/node/node.go +++ b/node/node.go @@ -206,14 +206,14 @@ func MakeFull(log logging.Logger, rootDir string, cfg 
config.Local, phonebookAdd return nil, err } } else if cfg.EnableP2P { - p2pNode, err = network.NewP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesis.ID(), genesis.Network, node) + p2pNode, err = network.NewP2PNetwork(node.log, node.config, rootDir, phonebookAddresses, genesis.ID(), genesis.Network, node, nil) if err != nil { log.Errorf("could not create p2p node: %v", err) return nil, err } } else { var wsNode *network.WebsocketNetwork - wsNode, err = network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, node, "", nil) + wsNode, err = network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network, node, nil) if err != nil { log.Errorf("could not create websocket node: %v", err) return nil, err diff --git a/node/node_test.go b/node/node_test.go index 3ea6d4a33d..e17e3e8d3f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -875,6 +875,7 @@ func TestNodeHybridTopology(t *testing.T) { cfg.NetAddress = ni.wsNetAddr() cfg.EnableP2PHybridMode = true + cfg.PublicAddress = ni.wsNetAddr() cfg.EnableDHTProviders = true cfg.P2PPersistPeerID = true privKey, err := p2p.GetPrivKey(cfg, ni.rootDir) diff --git a/test/testdata/configs/config-v34.json b/test/testdata/configs/config-v34.json index 7f16155303..3a9714bbfb 100644 --- a/test/testdata/configs/config-v34.json +++ b/test/testdata/configs/config-v34.json @@ -98,6 +98,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "P2PIncomingConnectionsLimit": 1200, "P2PNetAddress": "", "P2PPersistPeerID": false, "P2PPrivateKeyLocation": "", diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile index f4ec4b3c1f..7222fd3882 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/Makefile @@ -3,15 +3,17 @@ PARAMS=-w 20 -R 8 -N 20 -n 20 --npn-algod-nodes 10 --node-template node.json --r .PHONY: clean all +HYBRID ?= no + all: net.json genesis.json topology.json -node.json nonPartNode.json relay.json: - python3 copy-node-configs.py +node.json nonPartNode.json relay.json: copy-node-configs.py + python3 copy-node-configs.py --hybrid=${HYBRID} -net.json: node.json nonPartNode.json relay.json ${GOPATH}/bin/netgoal Makefile +net.json: node.json nonPartNode.json relay.json Makefile netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS} -genesis.json: ${GOPATH}/bin/netgoal Makefile +genesis.json: Makefile netgoal generate -t genesis -r /tmp/wat -o genesis.l.json ${PARAMS} jq '.LastPartKeyRound=5000|.NetworkName="s1s-p2p"|.ConsensusProtocol="future"' < genesis.l.json > genesis.json rm genesis.l.json diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md index 1cad95bc2d..04e8b986c7 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/README.md @@ -7,10 +7,17 @@ This is a copy of scenario1s with the following changes in nodes configuration: ## Build ```sh -export GOPATH=~/go make ``` +If want to configure a hybrid net, set the `HYBRID` mode parameter to: + - `p2p` meaning all nodes are p2pnet and 50% of them are hybrid + - `ws` meaning all nodes are wsnet and 50% of them are hybrid + +```sh +make -D HYBRID=p2p +``` + ## 
Run Run as usual cluster test scenario with algonet. diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py index 6ffbc01d8d..12da86f348 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py @@ -5,14 +5,120 @@ 3. Set DNSSecurityFlags: 0 to all configs """ +import argparse +import copy import json import os CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) SCENARIO1S_DIR = os.path.join(CURRENT_DIR, "..", "scenario1s") +def make_p2p_net(*args): + """convert config to a pure p2p network""" + for config in args: + override_json = json.loads(config.get("ConfigJSONOverride", "{}")) + override_json["EnableP2P"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + config["ConfigJSONOverride"] = json.dumps(override_json) + + net_address = config.get("NetAddress") + if net_address: + config["P2PBootstrap"] = True + altconfigs = config.get("AltConfigs", []) + if altconfigs: + for i, altconfig in enumerate(altconfigs): + override_json = json.loads(altconfig.get("ConfigJSONOverride", "{}")) + override_json["EnableP2P"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + altconfigs[i]["ConfigJSONOverride"] = json.dumps(override_json) + config["AltConfigs"] = altconfigs + + +def make_hybrid_p2p_net(*args): + """convert config to a hybrid p2p network: + - half of relays become hybrid and receive public address + - half of non-relay nodes become hybrid + - AltConfigs are used for hybrid nodes with FractionApply=0.5 + - Only one AltConfigs is supported and its FractionApply is forced to 0.5 + """ + for config in args: + override_json = json.loads(config.get("ConfigJSONOverride", "{}")) + override_json["EnableP2P"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + config["ConfigJSONOverride"] = json.dumps(override_json) + + net_address = config.get("NetAddress") + if net_address: + # in p2p-only mode all relays are P2PBootstrap-able + config["P2PBootstrap"] = True + + altconfigs = config.get("AltConfigs") + altconfig = None + if altconfigs: + altconfig = altconfigs[0] + else: + altconfig = copy.deepcopy(config) + + override_json = json.loads(altconfig.get("ConfigJSONOverride", "{}")) + override_json["EnableP2PHybridMode"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + altconfig["ConfigJSONOverride"] = json.dumps(override_json) + if net_address: # relay, set public address + altconfig["P2PBootstrap"] = True + altconfig["P2PNetAddress"] = "{{NetworkPort2}}" + altconfig["PublicAddress"] = True + altconfig['FractionApply'] = 0.5 + + altconfigs = [altconfig] + config["AltConfigs"] = altconfigs + + +def make_hybrid_ws_net(*args): + """convert config to a hybrid ws network: + - half of relays become hybrid and receive public address + - half of non-relay nodes become hybrid + - AltConfigs are used for hybrid nodes with FractionApply=0.5 + - Only one AltConfigs is supported and its FractionApply is forced to 0.5 + """ + for config in args: + override_json = json.loads(config.get("ConfigJSONOverride", "{}")) + 
override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + config["ConfigJSONOverride"] = json.dumps(override_json) + + net_address = config.get("NetAddress") + altconfigs = config.get("AltConfigs") + altconfig = None + if altconfigs: + altconfig = altconfigs[0] + else: + altconfig = copy.deepcopy(config) + + override_json = json.loads(altconfig.get("ConfigJSONOverride", "{}")) + override_json["EnableP2PHybridMode"] = True + override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC + altconfig["ConfigJSONOverride"] = json.dumps(override_json) + if net_address: # relay, set public address + altconfig["P2PBootstrap"] = True + altconfig["P2PNetAddress"] = "{{NetworkPort2}}" + altconfig["PublicAddress"] = True + altconfig['FractionApply'] = 0.5 + + altconfigs = [altconfig] + config["AltConfigs"] = altconfigs + + def main(): """main""" + ap = argparse.ArgumentParser() + ap.add_argument('--hybrid', type=str, help='Hybrid mode: p2p, ws') + args = ap.parse_args() + + hybrid_mode = args.hybrid + if hybrid_mode not in ("p2p", "ws"): + hybrid_mode = None + + print('Hybrid mode:', hybrid_mode) + with open(os.path.join(SCENARIO1S_DIR, "node.json"), "r") as f: node = json.load(f) with open(os.path.join(SCENARIO1S_DIR, "relay.json"), "r") as f: @@ -20,27 +126,15 @@ def main(): with open(os.path.join(SCENARIO1S_DIR, "nonPartNode.json"), "r") as f: non_part_node = json.load(f) - # make all relays P2PBootstrap'able - relay["P2PBootstrap"] = True - - # enable P2P for all configs - for config in (node, relay, non_part_node): - override = config.get("ConfigJSONOverride") - if override: - override_json = json.loads(override) - override_json["EnableP2P"] = True - override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC - config["ConfigJSONOverride"] = json.dumps(override_json) - altconfigs = config.get("AltConfigs", []) - if altconfigs: - for i, altconfig in enumerate(altconfigs): - override = altconfig.get("ConfigJSONOverride") - if override: - override_json = json.loads(override) - override_json["EnableP2P"] = True - override_json["DNSSecurityFlags"] = 0x8000 # set to some unused value otherwise 0 would be migrated to default that enables DNSSEC - altconfigs[i]["ConfigJSONOverride"] = json.dumps(override_json) - config["AltConfigs"] = altconfigs + if hybrid_mode == 'p2p': + print('making hybrid p2p network...') + make_hybrid_p2p_net(node, relay, non_part_node) + elif hybrid_mode == 'ws': + print('making hybrid ws network...') + make_hybrid_ws_net(node, relay, non_part_node) + else: + print('making pure p2p network...') + make_p2p_net(node, relay, non_part_node) with open("node.json", "w") as f: json.dump(node, f, indent=4) From 2d385ffeab6438ca2765fb8e56a8ee1abb54ebb7 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Fri, 19 Jul 2024 12:06:37 -0400 Subject: [PATCH 35/82] build: quiet duplicate libraries and ATOMIC_VAR_INT messages (#6072) --- Makefile | 1 + go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 7ad7f219f8..bf544242c0 100644 --- a/Makefile +++ b/Makefile @@ -52,6 +52,7 @@ endif # M1 Mac--homebrew install location in /opt/homebrew ifeq ($(OS_TYPE), darwin) ifeq ($(ARCH), arm64) +EXTLDFLAGS := -Wl,-no_warn_duplicate_libraries export CPATH=/opt/homebrew/include export 
LIBRARY_PATH=/opt/homebrew/lib endif diff --git a/go.mod b/go.mod index e4d8c5b8d5..e078ec73fb 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/jmoiron/sqlx v1.2.0 - github.com/karalabe/usb v0.0.2 + github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c github.com/labstack/echo/v4 v4.9.1 github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-libp2p-kad-dht v0.24.3 diff --git a/go.sum b/go.sum index 1a52e04711..66924d7486 100644 --- a/go.sum +++ b/go.sum @@ -313,8 +313,8 @@ github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPci github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= -github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs= +github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= From 75bb6a90320c3eb924c482378663cb8dd451d123 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Pier=C5=9Bcionek?= Date: Fri, 19 Jul 2024 20:08:07 +0200 Subject: [PATCH 36/82] catchup: skip logging err for catchup cancellation (#6053) --- catchup/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/catchup/service.go b/catchup/service.go index b00a787ee3..a89f6bcecc 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -351,7 +351,7 @@ func (s *Service) fetchAndWrite(ctx context.Context, r basics.Round, prevFetchCo // for no reason. select { case <-ctx.Done(): - s.log.Infof("fetchAndWrite(%d): Aborted while waiting for lookback block to ledger after failing once : %v", r, err) + s.log.Infof("fetchAndWrite(%v): Aborted while waiting for lookback block to ledger", r) return false case <-lookbackComplete: } From 47fd1c93f5c05c53c39ad60832f6f6f9748dae5d Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Mon, 22 Jul 2024 15:03:38 -0400 Subject: [PATCH 37/82] build: make -no_warn_duplicate_libraries conditional on Xcode >= 15 (#6074) --- Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bf544242c0..5204f24345 100644 --- a/Makefile +++ b/Makefile @@ -49,10 +49,14 @@ else export GOTESTCOMMAND=gotestsum --format pkgname --jsonfile testresults.json -- endif -# M1 Mac--homebrew install location in /opt/homebrew ifeq ($(OS_TYPE), darwin) -ifeq ($(ARCH), arm64) +# For Xcode >= 15, set -no_warn_duplicate_libraries linker option +CLANG_MAJOR_VERSION := $(shell clang --version | grep '^Apple clang version ' | awk '{print $$4}' | cut -d. 
-f1) +ifeq ($(shell [ $(CLANG_MAJOR_VERSION) -ge 15 ] && echo true), true) EXTLDFLAGS := -Wl,-no_warn_duplicate_libraries +endif +# M1 Mac--homebrew install location in /opt/homebrew +ifeq ($(ARCH), arm64) export CPATH=/opt/homebrew/include export LIBRARY_PATH=/opt/homebrew/lib endif From f6fa5906384f62f5cc0b47a5f30a55311a3c7dff Mon Sep 17 00:00:00 2001 From: John Lee Date: Thu, 25 Jul 2024 13:53:44 -0400 Subject: [PATCH 38/82] CI: update nightly builds to use universal mac update (#6071) --- .circleci/config.yml | 2 +- Makefile | 11 ++++++--- cmd/updater/update.sh | 4 ---- package-upload.yaml | 23 ------------------ scripts/release/mule/Makefile.mule | 17 +++++++++---- scripts/release/mule/README.md | 19 --------------- scripts/travis/deploy_packages.sh | 13 ++++++++-- test/muleCI/mule.yaml | 38 ++++++++---------------------- util/s3/s3Helper.go | 12 +++++++++- 9 files changed, 53 insertions(+), 86 deletions(-) delete mode 100644 package-upload.yaml diff --git a/.circleci/config.yml b/.circleci/config.yml index 4057049dac..13d4ea996f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -715,7 +715,7 @@ commands: command: | if [ "${CIRCLE_BRANCH}" = "rel/nightly" ] then - export NO_BUILD="true" + export NIGHTLY_BUILD="true" fi export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g') export GOPATH="<< parameters.build_dir >>/go" diff --git a/Makefile b/Makefile index 5204f24345..52c505b301 100644 --- a/Makefile +++ b/Makefile @@ -179,11 +179,16 @@ ifeq ($(OS_TYPE),darwin) mkdir -p $(GOPATH1)/bin-darwin-arm64 CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=12.0" --host=aarch64-apple-darwin' $(MAKE) + # same for buildsrc-special + cd tools/block-generator && \ + CROSS_COMPILE_ARCH=amd64 GOBIN=$(GOPATH1)/bin-darwin-amd64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch x86_64 -mmacos-version-min=12.0" --host=x86_64-apple-darwin' $(MAKE) + CROSS_COMPILE_ARCH=arm64 GOBIN=$(GOPATH1)/bin-darwin-arm64 MACOSX_DEPLOYMENT_TARGET=12.0 EXTRA_CONFIGURE_FLAGS='CFLAGS="-arch arm64 -mmacos-version-min=12.0" --host=aarch64-apple-darwin' $(MAKE) + # lipo together - mkdir -p $(GOPATH1)/bin-darwin-universal + mkdir -p $(GOPATH1)/bin for binary in $$(ls $(GOPATH1)/bin-darwin-arm64); do \ if [ -f $(GOPATH1)/bin-darwin-amd64/$$binary ]; then \ - lipo -create -output $(GOPATH1)/bin-darwin-universal/$$binary \ + lipo -create -output $(GOPATH1)/bin/$$binary \ $(GOPATH1)/bin-darwin-arm64/$$binary \ $(GOPATH1)/bin-darwin-amd64/$$binary; \ else \ @@ -191,7 +196,7 @@ ifeq ($(OS_TYPE),darwin) fi \ done else - $(error OS_TYPE must be darwin for universal builds) + echo "OS_TYPE must be darwin for universal builds, skipping" endif deps: diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh index 6c20d55c2d..c7f049eec9 100755 --- a/cmd/updater/update.sh +++ b/cmd/updater/update.sh @@ -185,10 +185,6 @@ function get_updater_url() { UNAME=$(uname -m) if [[ "${UNAME}" = "x86_64" ]]; then ARCH="amd64" - elif [[ "${UNAME}" = "armv6l" ]]; then - ARCH="arm" - elif [[ "${UNAME}" = "armv7l" ]]; then - ARCH="arm" elif [[ "${UNAME}" = "aarch64" ]]; then ARCH="arm64" else diff --git a/package-upload.yaml b/package-upload.yaml deleted file mode 100644 index 9d13c458ef..0000000000 --- a/package-upload.yaml +++ /dev/null @@ -1,23 +0,0 @@ -tasks: - - task: s3.BucketCopy - name: amd64 - src: $HOME/projects/go-algorand/tmp/node_pkgs/linux/amd64 - dest: 
s3://$STAGING/$CHANNEL/$VERSION/ - - - task: s3.BucketCopy - name: arm - src: $HOME/projects/go-algorand/tmp/node_pkgs/linux/arm - dest: s3://$STAGING/$CHANNEL/$VERSION/ - - - task: s3.BucketCopy - name: arm64 - src: $HOME/projects/go-algorand/tmp/node_pkgs/linux/arm64 - dest: s3://$STAGING/$CHANNEL/$VERSION/ - -jobs: - package-upload: - tasks: - - s3.BucketCopy.amd64 - - s3.BucketCopy.arm - - s3.BucketCopy.arm64 - diff --git a/scripts/release/mule/Makefile.mule b/scripts/release/mule/Makefile.mule index 0c73cccf89..06fe5d09b7 100644 --- a/scripts/release/mule/Makefile.mule +++ b/scripts/release/mule/Makefile.mule @@ -1,15 +1,13 @@ # This file is imported into go-algorand/Makefile. PKG_DIR = $(SRCPATH)/tmp/node_pkgs/$(OS_TYPE)/$(ARCH) +PKG_DIR_UNIVERSAL = $(SRCPATH)/tmp/node_pkgs/$(OS_TYPE)/universal -.PHONY: ci-clean ci-setup ci-build +.PHONY: ci-clean ci-build ci-clean: clean rm -rf tmp -ci-setup: - mkdir -p $(PKG_DIR) - ci-test: ifeq ($(ARCH), amd64) RACE=-race @@ -28,7 +26,16 @@ ci-integration: SRCROOT=$(SRCPATH) \ test/scripts/e2e.sh -c $(CHANNEL) -n -ci-build: ci-clean build ci-setup +ci-build-universal: ci-clean universal + echo $(PKG_DIR_UNIVERSAL) + mkdir -p $(PKG_DIR_UNIVERSAL) + CHANNEL=$(CHANNEL) PKG_ROOT=$(PKG_DIR_UNIVERSAL) NO_BUILD=True VARIATIONS=$(OS_TYPE)-universal \ + scripts/build_packages.sh $(OS_TYPE)/universal && \ + mkdir -p $(PKG_DIR_UNIVERSAL)/data && \ + cp installer/genesis/devnet/genesis.json $(PKG_DIR_UNIVERSAL)/data + +ci-build: ci-clean + mkdir -p $(PKG_DIR) CHANNEL=$(CHANNEL) PKG_ROOT=$(PKG_DIR) NO_BUILD=True VARIATIONS=$(OS_TYPE)-$(ARCH) \ scripts/build_packages.sh $(OS_TYPE)/$(ARCH) && \ mkdir -p $(PKG_DIR)/data && \ diff --git a/scripts/release/mule/README.md b/scripts/release/mule/README.md index 8439767725..030728ef70 100644 --- a/scripts/release/mule/README.md +++ b/scripts/release/mule/README.md @@ -25,7 +25,6 @@ In addition, make sure that the following AWS credentials are set in environment # Build Stages - [package](#package) -- [upload](#upload) - [test](#test) - [sign](#sign) - [deploy](#deploy) @@ -49,20 +48,6 @@ In addition, make sure that the following AWS credentials are set in environment - package-docker + packages docker image -## upload - -- see `./go-algorand/package-upload.yaml` - -- customizable environment variables: - - + `CHANNEL` - + `STAGING` - + `VERSION` - -#### `mule` jobs - - - package-upload - ## test - see `./go-algorand/package-test.yaml` @@ -180,10 +165,6 @@ Let's look at some examples. mule -f package.yaml package -### Uploading - - STAGING=the-staging-area CHANNEL=beta VERSION=latest mule -f package-upload.yaml package-upload - ### Testing 1. As part of the test suite, the `verify_package_string.sh` test needs the `BRANCH` as well as the `SHA`: diff --git a/scripts/travis/deploy_packages.sh b/scripts/travis/deploy_packages.sh index e7e517394c..5879835b01 100755 --- a/scripts/travis/deploy_packages.sh +++ b/scripts/travis/deploy_packages.sh @@ -11,7 +11,7 @@ set -e SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" - +OSARCH=$("${SCRIPTPATH}/../osarchtype.sh") # Get the go build version. 
GOLANG_VERSION=$("${SCRIPTPATH}/../get_golang_version.sh") @@ -24,6 +24,15 @@ then exit 1 fi +if [ "${NIGHTLY_BUILD}" == "true" ]; then + # we want to rebuild universal binaries for nightly builds + NO_BUILD=true + if [ "${OSARCH}" == "darwin/arm64" ]; then + make universal + OSARCH="darwin/universal" + fi +fi + if [ -z "${NO_BUILD}" ] || [ "${NO_BUILD}" != "true" ]; then scripts/travis/build.sh fi @@ -31,4 +40,4 @@ fi export RELEASE_GENESIS_PROCESS=true export NO_BUILD=true export SkipCleanCheck=1 -scripts/deploy_version.sh "${TRAVIS_BRANCH}" "$(./scripts/osarchtype.sh)" +scripts/deploy_version.sh "${TRAVIS_BRANCH}" "${OSARCH}" diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml index afa73ae2c1..5022cba792 100644 --- a/test/muleCI/mule.yaml +++ b/test/muleCI/mule.yaml @@ -93,11 +93,8 @@ agents: tasks: - task: shell.Make - name: build.darwin-arm64 - target: ci-build - - task: shell.Make - name: build.darwin-amd64 - target: ci-build + name: build.darwin-universal + target: ci-build-universal - task: docker.Make name: build.amd64 agent: cicd.ubuntu.amd64 @@ -124,9 +121,9 @@ tasks: # Stash tasks - task: stash.Stash - name: darwin-arm64 + name: darwin-universal bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/darwin-arm64 + stashId: ${JENKINS_JOB_CACHE_ID}/darwin-universal globSpecs: - tmp/node_pkgs/**/* - task: stash.Stash @@ -135,12 +132,6 @@ tasks: stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64 globSpecs: - tmp/node_pkgs/**/* - - task: stash.Stash - name: darwin-amd64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64 - globSpecs: - - tmp/node_pkgs/**/* - task: stash.Stash name: linux-arm64 bucketName: go-algorand-ci-cache @@ -164,13 +155,9 @@ tasks: bucketName: go-algorand-ci-cache stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64 - task: stash.Unstash - name: darwin-amd64 + name: darwin-universal bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64 - - task: stash.Unstash - name: darwin-arm64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/darwin-arm64 + stashId: ${JENKINS_JOB_CACHE_ID}/darwin-universal - task: stash.Unstash name: packages bucketName: go-algorand-ci-cache @@ -187,14 +174,10 @@ tasks: target: mule-sign jobs: - build-darwin-arm64: - tasks: - - shell.Make.build.darwin-arm64 - - stash.Stash.darwin-arm64 - build-darwin-amd64: + build-darwin-universal: tasks: - - shell.Make.build.darwin-amd64 - - stash.Stash.darwin-amd64 + - shell.Make.build.darwin-universal + - stash.Stash.darwin-universal build-linux-amd64: tasks: - docker.Make.build.amd64 @@ -207,8 +190,7 @@ jobs: tasks: - stash.Unstash.linux-amd64 - stash.Unstash.linux-arm64 - - stash.Unstash.darwin-arm64 - - stash.Unstash.darwin-amd64 + - stash.Unstash.darwin-universal - docker.Make.deb.amd64 - docker.Make.rpm.amd64 - stash.Stash.packages diff --git a/util/s3/s3Helper.go b/util/s3/s3Helper.go index efad67d924..79396e7ced 100644 --- a/util/s3/s3Helper.go +++ b/util/s3/s3Helper.go @@ -189,7 +189,17 @@ func (helper *Helper) GetPackageVersion(channel string, pkg string, specificVers osName := runtime.GOOS arch := runtime.GOARCH prefix := fmt.Sprintf("%s_%s_%s-%s_", pkg, channel, osName, arch) - return helper.GetPackageFilesVersion(channel, prefix, specificVersion) + + maxVersion, maxVersionName, err = helper.GetPackageFilesVersion(channel, prefix, specificVersion) + // For darwin, we want to also look at universal binaries + if osName == "darwin" { + universalPrefix := fmt.Sprintf("%s_%s_%s-%s_", pkg, channel, osName, 
"universal") + universalMaxVersion, universalMaxVersionName, universalErr := helper.GetPackageFilesVersion(channel, universalPrefix, specificVersion) + if universalMaxVersion > maxVersion { + return universalMaxVersion, universalMaxVersionName, universalErr + } + } + return maxVersion, maxVersionName, err } // GetPackageFilesVersion return the package version From 2b34eda7849a99cedbfac55d8ae026cf886f7715 Mon Sep 17 00:00:00 2001 From: John Lee Date: Fri, 26 Jul 2024 10:37:30 -0400 Subject: [PATCH 39/82] CI: update to CentOS Stream 9 and start integrating universal target (#6080) --- docker/build/cicd.centos.Dockerfile | 28 ------------------- ...os8.Dockerfile => cicd.centos9.Dockerfile} | 11 ++------ package-deploy.yaml | 4 +-- package-test.yaml | 4 +-- package.yaml | 4 +-- scripts/release/README.md | 8 +++--- scripts/release/build/stage/build/task.sh | 4 +-- .../release/common/docker/centos.Dockerfile | 8 ------ ...{centos8.Dockerfile => centos9.Dockerfile} | 5 ++-- scripts/release/common/setup.sh | 3 +- ...ntos8_image.sh => ensure_centos9_image.sh} | 6 ++-- scripts/release/mule/package/rpm/package.sh | 2 +- scripts/release/prod/rpm/run_centos.sh | 4 +-- scripts/release/test/rpm/run_centos.sh | 4 +-- scripts/release/test/util/test_package.sh | 6 ++-- test/muleCI/mule.yaml | 24 ++++------------ .../test_linux_amd64_compatibility.sh | 6 ++-- 17 files changed, 38 insertions(+), 93 deletions(-) delete mode 100644 docker/build/cicd.centos.Dockerfile rename docker/build/{cicd.centos8.Dockerfile => cicd.centos9.Dockerfile} (79%) delete mode 100644 scripts/release/common/docker/centos.Dockerfile rename scripts/release/common/docker/{centos8.Dockerfile => centos9.Dockerfile} (77%) rename scripts/release/mule/common/{ensure_centos8_image.sh => ensure_centos9_image.sh} (69%) diff --git a/docker/build/cicd.centos.Dockerfile b/docker/build/cicd.centos.Dockerfile deleted file mode 100644 index f292e3d220..0000000000 --- a/docker/build/cicd.centos.Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -ARG ARCH="amd64" - -FROM ${ARCH}/centos:7 -ARG GOLANG_VERSION -ARG ARCH="amd64" -RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ - yum update -y && \ - yum install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel expect jq \ - libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck \ - libffi-devel openssl-devel -WORKDIR /root -RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz \ - && tar -xvf go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz && \ - mv go /usr/local -ENV GOROOT=/usr/local/go \ - GOPATH=$HOME/go \ - ARCH_TYPE=${ARCH} -RUN mkdir -p $GOPATH/src/github.com/algorand -COPY . 
$GOPATH/src/github.com/algorand/go-algorand -ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ - GOPROXY=https://proxy.golang.org,https://pkg.go.dev,https://goproxy.io,direct -WORKDIR $GOPATH/src/github.com/algorand/go-algorand -RUN git config --global --add safe.directory '*' -RUN make clean -RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ - mkdir -p $GOPATH/src/github.com/algorand/go-algorand -RUN echo "vm.max_map_count = 262144" >> /etc/sysctl.conf -CMD ["/bin/bash"] diff --git a/docker/build/cicd.centos8.Dockerfile b/docker/build/cicd.centos9.Dockerfile similarity index 79% rename from docker/build/cicd.centos8.Dockerfile rename to docker/build/cicd.centos9.Dockerfile index 76ec3e9cc5..e0d53e467e 100644 --- a/docker/build/cicd.centos8.Dockerfile +++ b/docker/build/cicd.centos9.Dockerfile @@ -1,18 +1,13 @@ ARG ARCH="amd64" -FROM quay.io/centos/centos:stream8 +FROM quay.io/centos/centos:stream9 ARG GOLANG_VERSION ARG ARCH="amd64" -RUN dnf update rpm -y && \ - dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ +RUN dnf install -y epel-release epel-next-release && dnf config-manager --set-enabled crb && \ dnf update -y && \ dnf install -y autoconf wget awscli git gnupg2 nfs-utils python3-devel expect jq \ libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which \ - libffi-devel openssl-devel -RUN dnf install -y epel-release && \ - dnf update && \ - dnf -y --enablerepo=powertools install libstdc++-static && \ - dnf -y install make + libffi-devel openssl-devel libstdc++-static RUN echo "${BOLD}Downloading and installing binaries...${RESET}" && \ curl -Of https://shellcheck.storage.googleapis.com/shellcheck-v0.7.0.linux.x86_64.tar.xz && \ tar -C /usr/local/bin/ -xf shellcheck-v0.7.0.linux.x86_64.tar.xz --no-anchored 'shellcheck' --strip=1 diff --git a/package-deploy.yaml b/package-deploy.yaml index 9b67a2fe6c..b4da6c2eec 100644 --- a/package-deploy.yaml +++ b/package-deploy.yaml @@ -45,8 +45,8 @@ agents: workDir: $HOME/projects/go-algorand - name: rpm - dockerFilePath: docker/build/cicd.centos8.Dockerfile - image: algorand/go-algorand-ci-linux-centos8 + dockerFilePath: docker/build/cicd.centos9.Dockerfile + image: algorand/go-algorand-ci-linux-centos9 version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` diff --git a/package-test.yaml b/package-test.yaml index e338197ff1..cd526dda68 100644 --- a/package-test.yaml +++ b/package-test.yaml @@ -16,8 +16,8 @@ agents: workDir: $HOME/projects/go-algorand - name: rpm - dockerFilePath: docker/build/cicd.centos.Dockerfile - image: algorand/mule-linux-centos + dockerFilePath: docker/build/cicd.centos9.Dockerfile + image: algorand/mule-linux-centos9 version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` diff --git a/package.yaml b/package.yaml index d5d8de0d97..0cc1d588eb 100644 --- a/package.yaml +++ b/package.yaml @@ -11,8 +11,8 @@ agents: workDir: $HOME/projects/go-algorand - name: rpm - dockerFilePath: docker/build/cicd.centos.Dockerfile - image: algorand/go-algorand-ci-linux-centos + dockerFilePath: docker/build/cicd.centos9.Dockerfile + image: algorand/go-algorand-ci-linux-centos9 version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` diff --git a/scripts/release/README.md b/scripts/release/README.md index f3fa543f13..534bc70faa 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -50,7 +50,7 @@ This section 
briefly describes the expected outcomes of the current build pipeli 1. build - 1. Build (compile) the binaries in a Centos 7 & 8 docker container that will then be used by both `deb` and `rpm` packaging. + 1. Build (compile) the binaries in a Centos 9 docker container that will then be used by both `deb` and `rpm` packaging. 1. Docker containers will package `deb` and `rpm` artifacts inside of Ubuntu 20.04 and Centos 7 & 8, respectively. @@ -69,9 +69,9 @@ This section briefly describes the expected outcomes of the current build pipeli - The signatures are correct. - The packages are built from the correct branch and channel and are the correct version. This done by running `algod -v`. + This is done for the following docker containers: - - centos:7 - - quay.io/centos/centos:stream8 - - fedora:38 + - quay.io/centos/centos:stream9 + - fedora:39 + - fedora:40 - ubuntu:20.04 - ubuntu:22.04 diff --git a/scripts/release/build/stage/build/task.sh b/scripts/release/build/stage/build/task.sh index fdb96d6b27..944aeb7b34 100755 --- a/scripts/release/build/stage/build/task.sh +++ b/scripts/release/build/stage/build/task.sh @@ -30,8 +30,8 @@ else echo ${BUILD_NUMBER} > "${REPO_ROOT}"/buildnumber.dat fi -# Run RPM build in Centos 7 & 8 Docker container -sg docker "docker build -t algocentosbuild - < $HOME/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos.Dockerfile" +# Run RPM build in Centos 9 Docker container +sg docker "docker build -t algocentosbuild - < $HOME/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos9.Dockerfile" sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=${HOME},dst=/root/subhome algocentosbuild /root/subhome/go/src/github.com/algorand/go-algorand/scripts/release/build/rpm/build.sh" echo diff --git a/scripts/release/common/docker/centos.Dockerfile b/scripts/release/common/docker/centos.Dockerfile deleted file mode 100644 index a23b446ca1..0000000000 --- a/scripts/release/common/docker/centos.Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM centos:7 - -WORKDIR /root -RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -RUN yum install -y autoconf awscli curl git gnupg2 nfs-utils python36 expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck - -ENTRYPOINT ["/bin/bash"] - diff --git a/scripts/release/common/docker/centos8.Dockerfile b/scripts/release/common/docker/centos9.Dockerfile similarity index 77% rename from scripts/release/common/docker/centos8.Dockerfile rename to scripts/release/common/docker/centos9.Dockerfile index cf5474cfe7..1151201edb 100644 --- a/scripts/release/common/docker/centos8.Dockerfile +++ b/scripts/release/common/docker/centos9.Dockerfile @@ -1,7 +1,8 @@ -FROM quay.io/centos/centos:stream8 +FROM quay.io/centos/centos:stream9 WORKDIR /root -RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ +RUN dnf install -y epel-release epel-next-release && dnf config-manager --set-enabled crb && \ + dnf update -y && \ dnf install -y autoconf awscli curl git gnupg2 nfs-utils python36 expect jq libtool gcc-c++ libstdc++-devel rpmdevtools createrepo rpm-sign bzip2 which && \ dnf -y --enablerepo=powertools install libstdc++-static diff --git a/scripts/release/common/setup.sh b/scripts/release/common/setup.sh index 75683262d5..6124e0b95f 100755 --- a/scripts/release/common/setup.sh +++ b/scripts/release/common/setup.sh @@ -104,8 +104,7 @@ else 
fi sudo usermod -a -G docker ubuntu -sg docker "docker pull centos:7" -sg docker "docker pull quay.io/centos/centos:stream8" +sg docker "docker pull quay.io/centos/centos:stream9" sg docker "docker pull ubuntu:22.04" cat << EOF >> "${HOME}/.bashrc" diff --git a/scripts/release/mule/common/ensure_centos8_image.sh b/scripts/release/mule/common/ensure_centos9_image.sh similarity index 69% rename from scripts/release/mule/common/ensure_centos8_image.sh rename to scripts/release/mule/common/ensure_centos9_image.sh index 1ebd3475fe..bb03624c1b 100755 --- a/scripts/release/mule/common/ensure_centos8_image.sh +++ b/scripts/release/mule/common/ensure_centos9_image.sh @@ -2,9 +2,9 @@ set -exo pipefail -# Ensure the centos8 docker image is built and available +# Ensure the centos docker image is built and available -DOCKER_IMAGE="algorand/go-algorand-ci-linux-centos8:amd64-$(sha1sum scripts/configure_dev-deps.sh | cut -f1 -d' ')" +DOCKER_IMAGE="algorand/go-algorand-ci-linux-centos9:amd64-$(sha1sum scripts/configure_dev-deps.sh | cut -f1 -d' ')" MATCH=${DOCKER_IMAGE/:*/} echo "Checking for RPM image" @@ -13,5 +13,5 @@ if docker images $DOCKER_IMAGE | grep -qs $MATCH > /dev/null 2>&1; then else echo "RPM image doesn't exist, building" docker build --platform=linux/amd64 --build-arg ARCH=amd64 \ - --build-arg GOLANG_VERSION=$(./scripts/get_golang_version.sh) -t $DOCKER_IMAGE -f docker/build/cicd.centos8.Dockerfile . + --build-arg GOLANG_VERSION=$(./scripts/get_golang_version.sh) -t $DOCKER_IMAGE -f docker/build/cicd.centos9.Dockerfile . fi diff --git a/scripts/release/mule/package/rpm/package.sh b/scripts/release/mule/package/rpm/package.sh index f3e3dde17f..7d63872f5c 100755 --- a/scripts/release/mule/package/rpm/package.sh +++ b/scripts/release/mule/package/rpm/package.sh @@ -50,7 +50,7 @@ find tmp/node_pkgs -name "*${CHANNEL}*linux*${VERSION}*.tar.gz" | cut -d '/' -f3 -e "s,@REQUIRED_ALGORAND_PKG@,$REQUIRED_ALGORAND_PACKAGE," \ > "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" - rpmbuild --buildroot "$HOME/foo" --define "_rpmdir $RPMTMP" --define "RELEASE_GENESIS_PROCESS xtrue" --define "LICENSE_FILE ./COPYING" -bb "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" --target $ARCH_UNAME + rpmbuild --buildroot "$HOME/foo" --define "_rpmdir $RPMTMP" --define "RELEASE_GENESIS_PROCESS \"xtrue\"" --define "LICENSE_FILE ./COPYING" -bb "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" --target $ARCH_UNAME cp -p "$RPMTMP"/*/*.rpm "./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" echo "${RPMTMP}" diff --git a/scripts/release/prod/rpm/run_centos.sh b/scripts/release/prod/rpm/run_centos.sh index 9426da5ea1..abb2b73f9e 100755 --- a/scripts/release/prod/rpm/run_centos.sh +++ b/scripts/release/prod/rpm/run_centos.sh @@ -5,8 +5,8 @@ set -ex . 
"${HOME}"/build_env -# Run RPM build in Centos 7 & 8 Docker container -sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos.Dockerfile" +# Run RPM build in Centos 9 Docker container +sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos9.Dockerfile" sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=/run/user/1000/gnupg/S.gpg-agent,dst=/root/S.gpg-agent --mount type=bind,src=${HOME}/prodrepo,dst=/root/prodrepo --mount type=bind,src=${HOME}/keys,dst=/root/keys --mount type=bind,src=${HOME},dst=/root/subhome algocentosbuild /root/subhome/go/src/github.com/algorand/go-algorand/scripts/release/prod/rpm/snapshot.sh" diff --git a/scripts/release/test/rpm/run_centos.sh b/scripts/release/test/rpm/run_centos.sh index 88e0a6be4c..d206a085d7 100755 --- a/scripts/release/test/rpm/run_centos.sh +++ b/scripts/release/test/rpm/run_centos.sh @@ -14,8 +14,8 @@ if [ "$CHANNEL" = beta ]; then exit 0 fi -# Run RPM build in Centos 7 & 8 Docker container -sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos.Dockerfile" +# Run RPM build in Centos 9 Docker container +sg docker "docker build -t algocentosbuild - < ${HOME}/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos9.Dockerfile" cat <"${HOME}"/dummyrepo/algodummy.repo [algodummy] diff --git a/scripts/release/test/util/test_package.sh b/scripts/release/test/util/test_package.sh index 61c93b84b4..c8dd206c8c 100755 --- a/scripts/release/test/util/test_package.sh +++ b/scripts/release/test/util/test_package.sh @@ -8,9 +8,9 @@ set -ex . 
"${HOME}"/build_env OS_LIST=( - centos:7 - quay.io/centos/centos:stream8 - fedora:38 + quay.io/centos/centos:stream9 + fedora:39 + fedora:40 ubuntu:20.04 ubuntu:22.04 ) diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml index 5022cba792..e1ce86b9f8 100644 --- a/test/muleCI/mule.yaml +++ b/test/muleCI/mule.yaml @@ -15,23 +15,9 @@ agents: - GOLANG_VERSION=`./scripts/get_golang_version.sh` - ARCH=amd64 - GOARCH=amd64 - - name: cicd.centos.amd64 - dockerFilePath: docker/build/cicd.centos.Dockerfile - image: algorand/go-algorand-ci-linux-centos - version: scripts/configure_dev-deps.sh - arch: amd64 - env: - - TRAVIS_BRANCH=${GIT_BRANCH} - - NETWORK=$NETWORK - - VERSION=$VERSION - - BUILD_NUMBER=$BUILD_NUMBER - - GOHOSTARCH=amd64 - buildArgs: - - GOLANG_VERSION=`./scripts/get_golang_version.sh` - - ARCH=amd64 - - name: cicd.centos8.amd64 - dockerFilePath: docker/build/cicd.centos8.Dockerfile - image: algorand/go-algorand-ci-linux-centos8 + - name: cicd.centos9.amd64 + dockerFilePath: docker/build/cicd.centos9.Dockerfile + image: algorand/go-algorand-ci-linux-centos9 version: scripts/configure_dev-deps.sh arch: amd64 env: @@ -106,12 +92,12 @@ tasks: - task: docker.Make name: archive - agent: cicd.centos8.amd64 + agent: cicd.centos9.amd64 target: archive - task: docker.Make name: rpm.amd64 - agent: cicd.centos.amd64 + agent: cicd.centos9.amd64 target: mule-package-rpm - task: docker.Make diff --git a/test/platform/test_linux_amd64_compatibility.sh b/test/platform/test_linux_amd64_compatibility.sh index 7e91c8728e..2ab8d4c990 100755 --- a/test/platform/test_linux_amd64_compatibility.sh +++ b/test/platform/test_linux_amd64_compatibility.sh @@ -7,9 +7,9 @@ BLUE_FG=$(tput setaf 4 2>/dev/null) END_FG_COLOR=$(tput sgr0 2>/dev/null) OS_LIST=( - centos:7 - quay.io/centos/centos:stream8 - fedora:38 + quay.io/centos/centos:stream9 + fedora:39 + fedora:40 ubuntu:20.04 ubuntu:22.04 ) From adaecdeb786c32a495d0d4a755749db3968c5113 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 26 Jul 2024 15:17:56 -0400 Subject: [PATCH 40/82] p2p: fix connection deduplication in hybrid mode (#6082) --- network/p2pNetwork.go | 1 + network/p2pNetwork_test.go | 55 +++++++++++++++++++++++++++++++++++++- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 9f5448c0ea..9417b641ae 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -797,6 +797,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea networkPeerIdentityDisconnect.Inc(nil) n.log.With("remote", addr).With("local", localAddr).Warn("peer deduplicated before adding because the identity is already known") stream.Close() + return } wsp.init(n.config, outgoingMessagesBufferSize) diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 0eac398431..9fc00774fb 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -31,6 +31,7 @@ import ( "time" "github.com/algorand/go-algorand/config" + algocrypto "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network/limitcaller" "github.com/algorand/go-algorand/network/p2p" @@ -1081,7 +1082,7 @@ func TestP2PWantTXGossip(t *testing.T) { require.True(t, net.wantTXGossip.Load()) } -func TestMergeP2PAddrInfoResolvedAddresses(t *testing.T) { +func TestP2PMergeAddrInfoResolvedAddresses(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -1154,3 
+1155,55 @@ func TestMergeP2PAddrInfoResolvedAddresses(t *testing.T) { }) } } + +// TestP2PwsStreamHandlerDedup checks that the wsStreamHandler detects duplicate connections +// and does not add a new wePeer for it. +func TestP2PwsStreamHandlerDedup(t *testing.T) { + partitiontest.PartitionTest(t) + + cfg := config.GetDefaultLocal() + cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses + cfg.NetAddress = "127.0.0.1:0" + log := logging.TestingLog(t) + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, &identityOpts{tracker: NewIdentityTracker()}) + require.NoError(t, err) + err = netA.Start() + require.NoError(t, err) + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + + multiAddrStr := addrsA[0].String() + phoneBookAddresses := []string{multiAddrStr} + netB, err := NewP2PNetwork(log, cfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, &identityOpts{tracker: NewIdentityTracker()}) + require.NoError(t, err) + + // now say netA's identity tracker knows about netB's peerID + var netIdentPeerID algocrypto.PublicKey + p2pPeerPubKey, err := netB.service.ID().ExtractPublicKey() + require.NoError(t, err) + + b, err := p2pPeerPubKey.Raw() + require.NoError(t, err) + netIdentPeerID = algocrypto.PublicKey(b) + wsp := &wsPeer{ + identity: netIdentPeerID, + } + netA.identityTracker.setIdentity(wsp) + networkPeerIdentityDisconnectInitial := networkPeerIdentityDisconnect.GetUint64Value() + + // start network and ensure dedup happens + err = netB.Start() + require.NoError(t, err) + defer netB.Stop() + + require.Eventually(t, func() bool { + return networkPeerIdentityDisconnect.GetUint64Value() == networkPeerIdentityDisconnectInitial+1 + }, 2*time.Second, 50*time.Millisecond) + + require.False(t, netA.hasPeers()) + require.False(t, netB.hasPeers()) +} From e52f91a4b1025f78ffe858f77e15d32b3157bf43 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 26 Jul 2024 15:18:32 -0400 Subject: [PATCH 41/82] txHandler: fix TestTxHandlerAppRateLimiter (#6075) --- data/appRateLimiter.go | 4 ++-- data/appRateLimiter_test.go | 2 +- data/txHandler_test.go | 47 ++++++++++++++++++++----------------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/data/appRateLimiter.go b/data/appRateLimiter.go index 1f4472e68a..9bb2fd1254 100644 --- a/data/appRateLimiter.go +++ b/data/appRateLimiter.go @@ -81,8 +81,8 @@ func makeAppRateLimiter(maxCacheSize int, maxAppPeerRate uint64, serviceRateWind serviceRatePerWindow := maxAppPeerRate * uint64(serviceRateWindow/time.Second) maxBucketSize := maxCacheSize / numBuckets if maxBucketSize == 0 { - // got the max size less then buckets, use maps of 1 - maxBucketSize = 1 + // got the max size less then buckets, use maps of 2 to avoid eviction on each insert + maxBucketSize = 2 } r := &appRateLimiter{ maxBucketSize: maxBucketSize, diff --git a/data/appRateLimiter_test.go b/data/appRateLimiter_test.go index f5e63dfb36..5a7a872133 100644 --- a/data/appRateLimiter_test.go +++ b/data/appRateLimiter_test.go @@ -40,7 +40,7 @@ func TestAppRateLimiter_Make(t *testing.T) { window := 1 * time.Second rm := makeAppRateLimiter(10, rate, window) - require.Equal(t, 1, rm.maxBucketSize) + require.Equal(t, 2, rm.maxBucketSize) require.NotEmpty(t, rm.seed) require.NotEmpty(t, rm.salt) for i := 0; i < len(rm.buckets); i++ { diff 
--git a/data/txHandler_test.go b/data/txHandler_test.go index 9237865037..23235f15fd 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -2515,8 +2515,14 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + // technically we don't need any users for this test + // but we need to create the genesis accounts to prevent this warning: + // "cannot start evaluator: overflowed subtracting rewards for block 1" + _, _, genesis := makeTestGenesisAccounts(t, 0) + genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr) ledgerName := fmt.Sprintf("%s-mem", t.Name()) const inMem = true + log := logging.TestingLog(t) log.SetLevel(logging.Panic) @@ -2525,11 +2531,9 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) { cfg.TxBacklogServiceRateWindowSeconds = 1 cfg.TxBacklogAppTxPerSecondRate = 3 cfg.TxBacklogSize = 3 - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, bookkeeping.GenesisBalances{}, genesisID, genesisHash, cfg) + l, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) - defer ledger.Close() - - l := ledger + defer l.Close() func() { cfg.EnableTxBacklogRateLimiting = false @@ -2618,9 +2622,10 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) { require.Equal(t, 1, handler.appLimiter.len()) } +// TestTxHandlerAppRateLimiter submits few app txns to make the app rate limit to filter one the last txn +// to ensure it is propely integrated with the txHandler func TestTxHandlerAppRateLimiter(t *testing.T) { partitiontest.PartitionTest(t) - t.Parallel() const numUsers = 10 log := logging.TestingLog(t) @@ -2637,16 +2642,16 @@ func TestTxHandlerAppRateLimiter(t *testing.T) { cfg.TxBacklogAppTxRateLimiterMaxSize = 100 cfg.TxBacklogServiceRateWindowSeconds = 1 cfg.TxBacklogAppTxPerSecondRate = 3 - ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) + l, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) require.NoError(t, err) - defer ledger.Close() + defer l.Close() - l := ledger handler, err := makeTestTxHandler(l, cfg) require.NoError(t, err) defer handler.txVerificationPool.Shutdown() defer close(handler.streamVerifierDropped) + handler.appLimiterBacklogThreshold = -1 // force the rate limiter to start checking transactions tx := transactions.Transaction{ Type: protocol.ApplicationCallTx, Header: transactions.Header{ @@ -2667,21 +2672,21 @@ func TestTxHandlerAppRateLimiter(t *testing.T) { require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action) require.Equal(t, 1, len(handler.backlogQueue)) + counterBefore := transactionMessagesAppLimiterDrop.GetUint64Value() // trigger the rate limiter and ensure the txn is ignored - tx2 := tx - for i := 0; i < cfg.TxBacklogAppTxPerSecondRate*cfg.TxBacklogServiceRateWindowSeconds; i++ { - tx2.ForeignApps = append(tx2.ForeignApps, 1) + numTxnToTriggerARL := cfg.TxBacklogAppTxPerSecondRate * cfg.TxBacklogServiceRateWindowSeconds + for i := 0; i < numTxnToTriggerARL; i++ { + tx2 := tx + tx2.Header.Sender = addresses[i+1] + signedTx2 := tx2.Sign(secrets[i+1]) + blob2 := protocol.Encode(&signedTx2) + + action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}}) + require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action) } - signedTx2 := tx.Sign(secrets[1]) 
- blob2 := protocol.Encode(&signedTx2) - - action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}}) - require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action) - require.Equal(t, 1, len(handler.backlogQueue)) - - // backlogQueue has the first txn, but the second one is dropped - msg := <-handler.backlogQueue - require.Equal(t, msg.rawmsg.Data, blob, blob) + // last txn should be dropped + require.Equal(t, 1+numTxnToTriggerARL-1, len(handler.backlogQueue)) + require.Equal(t, counterBefore+1, transactionMessagesAppLimiterDrop.GetUint64Value()) } // TestTxHandlerCapGuard checks there is no cap guard leak in case of invalid input. From 8ed60e2c88e8c0296e7424a4c40ec39ab88eb6c5 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 26 Jul 2024 15:50:59 -0400 Subject: [PATCH 42/82] tests: fix TestNodeHybridTopology (#6079) --- node/node_test.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/node/node_test.go b/node/node_test.go index e17e3e8d3f..df72a699bb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -841,16 +841,13 @@ func TestNodeHybridTopology(t *testing.T) { testParams0.AgreementFilterTimeoutPeriod0 = 500 * time.Millisecond configurableConsensus[consensusTest0] = testParams0 - minMoneyAtStart := 1_000_000 - maxMoneyAtStart := 100_000_000_000 - gen := rand.New(rand.NewSource(2)) - + // configure the stake to have R and A producing and confirming blocks + const totalStake = 100_000_000_000 const numAccounts = 3 acctStake := make([]basics.MicroAlgos, numAccounts) - for i := range acctStake { - acctStake[i] = basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))} - } acctStake[0] = basics.MicroAlgos{} // no stake at node 0 + acctStake[1] = basics.MicroAlgos{Raw: uint64(totalStake / 2)} + acctStake[2] = basics.MicroAlgos{Raw: uint64(totalStake / 2)} configHook := func(ni nodeInfo, cfg config.Local) (nodeInfo, config.Local) { cfg = config.GetDefaultLocal() @@ -918,9 +915,18 @@ func TestNodeHybridTopology(t *testing.T) { startAndConnectNodes(nodes, 10*time.Second) + // ensure the initial connectivity topology + require.Eventually(t, func() bool { + node0Conn := len(nodes[0].net.GetPeers(network.PeersConnectedIn)) > 0 // has connection from 1 + node1Conn := len(nodes[1].net.GetPeers(network.PeersConnectedOut, network.PeersConnectedIn)) == 2 // connected to 0 and 2 + node2Conn := len(nodes[2].net.GetPeers(network.PeersConnectedOut, network.PeersConnectedIn)) >= 1 // connected to 1 + return node0Conn && node1Conn && node2Conn + }, 60*time.Second, 500*time.Millisecond) + initialRound := nodes[0].ledger.NextRound() targetRound := initialRound + 10 + // ensure discovery of archival node by tracking its ledger select { case <-nodes[0].ledger.Wait(targetRound): e0, err := nodes[0].ledger.Block(targetRound) @@ -928,7 +934,7 @@ func TestNodeHybridTopology(t *testing.T) { e1, err := nodes[1].ledger.Block(targetRound) require.NoError(t, err) require.Equal(t, e1.Hash(), e0.Hash()) - case <-time.After(120 * time.Second): + case <-time.After(3 * time.Minute): // set it to 1.5x of the dht.periodicBootstrapInterval to give DHT code to rebuild routing table one more time require.Fail(t, fmt.Sprintf("no block notification for wallet: %v.", wallets[0])) } } From 04ec5f9bb188229a790998e8c2f3bec552eaaaf5 Mon Sep 17 00:00:00 2001 From: John Lee Date: Fri, 26 Jul 2024 17:49:49 -0400 Subject: [PATCH 43/82] CI: fix mac 
universal rebuild for nightlies (#6084) --- scripts/travis/build.sh | 14 +++++++++++--- scripts/travis/deploy_packages.sh | 4 ++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh index 3087f63d75..244ff399ef 100755 --- a/scripts/travis/build.sh +++ b/scripts/travis/build.sh @@ -9,12 +9,18 @@ # Examples: scripts/travis/build.sh MAKE_DEBUG_OPTION="" +MAKE_UNIVERSAL_OPTION="" + while [ "$1" != "" ]; do case "$1" in --make_debug) shift MAKE_DEBUG_OPTION="1" ;; + --make_universal) + shift + MAKE_UNIVERSAL_OPTION="1" + ;; *) echo "Unknown option" "$1" exit 1 @@ -75,13 +81,15 @@ set -e scripts/travis/before_build.sh duration "before_build.sh" -if [ "${OS}-${ARCH}" = "linux-arm" ] || [ "${OS}-${ARCH}" = "windows-amd64" ]; then - # for arm, build just the basic distro +if [ "${OS}-${ARCH}" = "windows-amd64" ]; then # for windows, we still have some issues with the enlistment checking, so we'll make it simple for now. MAKE_DEBUG_OPTION="" fi -if [ "${MAKE_DEBUG_OPTION}" != "" ]; then +if [ "${MAKE_UNIVERSAL_OPTION}" != "" ]; then + make universal + duration "make universal" +elif [ "${MAKE_DEBUG_OPTION}" != "" ]; then make build build-race duration "make build build-race" else diff --git a/scripts/travis/deploy_packages.sh b/scripts/travis/deploy_packages.sh index 5879835b01..8eae9307b4 100755 --- a/scripts/travis/deploy_packages.sh +++ b/scripts/travis/deploy_packages.sh @@ -26,11 +26,11 @@ fi if [ "${NIGHTLY_BUILD}" == "true" ]; then # we want to rebuild universal binaries for nightly builds - NO_BUILD=true if [ "${OSARCH}" == "darwin/arm64" ]; then - make universal + ./scripts/travis/build.sh --make_universal OSARCH="darwin/universal" fi + NO_BUILD=true fi if [ -z "${NO_BUILD}" ] || [ "${NO_BUILD}" != "true" ]; then From edda2ee09c1c89c54dafd9e0e898f82760095bf0 Mon Sep 17 00:00:00 2001 From: Nickolai Zeldovich Date: Fri, 26 Jul 2024 17:50:36 -0400 Subject: [PATCH 44/82] rpcs: simplify API for BlockService to handle multiple HTTP paths (#5718) Co-authored-by: Pavel Zbitskiy --- catchup/fetcher_test.go | 7 +++++++ catchup/pref_test.go | 2 +- catchup/service_test.go | 22 +++++++++++----------- catchup/universalFetcher_test.go | 2 +- components/mocks/mockNetwork.go | 4 ++++ network/gossipNode.go | 3 ++- network/hybridNetwork.go | 6 ++++++ network/p2p/http.go | 8 ++++++++ network/p2pNetwork.go | 6 ++++++ network/wsNetwork.go | 5 +++++ rpcs/blockService.go | 17 +++++++++++------ rpcs/blockService_test.go | 19 +++++++++---------- rpcs/registrar.go | 2 ++ rpcs/txService_test.go | 7 +++++++ 14 files changed, 80 insertions(+), 30 deletions(-) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index 52b0b32a8f..85dcaba70b 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -141,6 +141,13 @@ func (b *basicRPCNode) RegisterHTTPHandler(path string, handler http.Handler) { b.rmux.Handle(path, handler) } +func (b *basicRPCNode) RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + if b.rmux == nil { + b.rmux = mux.NewRouter() + } + b.rmux.HandleFunc(path, handler) +} + func (b *basicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler) { } diff --git a/catchup/pref_test.go b/catchup/pref_test.go index a72ed855ec..7c849630e1 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -50,7 +50,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { net := &httpTestPeerSource{} ls := rpcs.MakeBlockService(logging.TestingLog(b), config.GetDefaultLocal(), remote, net, 
"test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() diff --git a/catchup/service_test.go b/catchup/service_test.go index 045a0438f2..d20305ce82 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -151,7 +151,7 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -223,7 +223,7 @@ func TestSyncRound(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -313,7 +313,7 @@ func TestPeriodicSync(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -379,7 +379,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -443,7 +443,7 @@ func TestAbruptWrites(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -501,7 +501,7 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -555,7 +555,7 @@ func TestServiceFetchBlocksMalformed(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -709,7 +709,7 @@ func helperTestOnSwitchToUnSupportedProtocol( ls := rpcs.MakeBlockService(logging.Base(), config, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -932,7 +932,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -1064,7 +1064,7 @@ func TestServiceLedgerUnavailable(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} 
- nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() @@ -1110,7 +1110,7 @@ func TestServiceNoBlockForRound(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 59c5d69b7a..164360d43d 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -101,7 +101,7 @@ func TestUGetBlockHTTP(t *testing.T) { ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID") nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + ls.RegisterHandlers(&nodeA) nodeA.start() defer nodeA.stop() rootURL := nodeA.rootURL() diff --git a/components/mocks/mockNetwork.go b/components/mocks/mockNetwork.go index 47b1a5b5e4..4f145b1841 100644 --- a/components/mocks/mockNetwork.go +++ b/components/mocks/mockNetwork.go @@ -103,6 +103,10 @@ func (network *MockNetwork) ClearProcessors() { func (network *MockNetwork) RegisterHTTPHandler(path string, handler http.Handler) { } +// RegisterHTTPHandlerFunc - empty implementation +func (network *MockNetwork) RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) { +} + // OnNetworkAdvance - empty implementation func (network *MockNetwork) OnNetworkAdvance() {} diff --git a/network/gossipNode.go b/network/gossipNode.go index 1592641f70..8b108b5fde 100644 --- a/network/gossipNode.go +++ b/network/gossipNode.go @@ -57,8 +57,9 @@ type GossipNode interface { Disconnect(badnode DisconnectablePeer) DisconnectPeers() // only used by testing - // RegisterHTTPHandler path accepts gorilla/mux path annotations + // RegisterHTTPHandler and RegisterHTTPHandlerFunc: path accepts gorilla/mux path annotations RegisterHTTPHandler(path string, handler http.Handler) + RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) // RequestConnectOutgoing asks the system to actually connect to peers. // `replace` optionally drops existing connections before making new ones. diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index d30e03cee2..c955dfff1a 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -146,6 +146,12 @@ func (n *HybridP2PNetwork) RegisterHTTPHandler(path string, handler http.Handler n.wsNetwork.RegisterHTTPHandler(path, handler) } +// RegisterHTTPHandlerFunc implements GossipNode +func (n *HybridP2PNetwork) RegisterHTTPHandlerFunc(path string, handlerFunc func(http.ResponseWriter, *http.Request)) { + n.p2pNetwork.RegisterHTTPHandlerFunc(path, handlerFunc) + n.wsNetwork.RegisterHTTPHandlerFunc(path, handlerFunc) +} + // RequestConnectOutgoing implements GossipNode func (n *HybridP2PNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) {} diff --git a/network/p2p/http.go b/network/p2p/http.go index 9f2622d015..07f27afff1 100644 --- a/network/p2p/http.go +++ b/network/p2p/http.go @@ -57,6 +57,14 @@ func (s *HTTPServer) RegisterHTTPHandler(path string, handler http.Handler) { }) } +// RegisterHTTPHandlerFunc registers a http handler with a given path. 
+func (s *HTTPServer) RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + s.p2phttpMux.HandleFunc(path, handler) + s.p2phttpMuxRegistrarOnce.Do(func() { + s.Host.SetHTTPHandlerAtPath(algorandP2pHTTPProtocol, "/", s.p2phttpMux) + }) +} + // MakeHTTPClient creates a http.Client that uses libp2p transport for a given protocol and peer address. func MakeHTTPClient(addrInfo *peer.AddrInfo) (*http.Client, error) { clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs) diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 9417b641ae..4357970388 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -577,6 +577,12 @@ func (n *P2PNetwork) RegisterHTTPHandler(path string, handler http.Handler) { n.httpServer.RegisterHTTPHandler(path, handler) } +// RegisterHTTPHandlerFunc is like RegisterHTTPHandler but accepts +// a callback handler function instead of a method receiver. +func (n *P2PNetwork) RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + n.httpServer.RegisterHTTPHandlerFunc(path, handler) +} + // RequestConnectOutgoing asks the system to actually connect to peers. // `replace` optionally drops existing connections before making new ones. // `quit` chan allows cancellation. diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 1c0f3e8676..fc42296652 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -523,6 +523,11 @@ func (wn *WebsocketNetwork) RegisterHTTPHandler(path string, handler http.Handle wn.router.Handle(path, handler) } +// RegisterHTTPHandlerFunc path accepts gorilla/mux path annotations +func (wn *WebsocketNetwork) RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + wn.router.HandleFunc(path, handler) +} + // RequestConnectOutgoing tries to actually do the connect to new peers. // `replace` drop all connections first and find new peers. func (wn *WebsocketNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) { diff --git a/rpcs/blockService.go b/rpcs/blockService.go index 1a9893b70a..410376a20f 100644 --- a/rpcs/blockService.go +++ b/rpcs/blockService.go @@ -58,7 +58,7 @@ const blockServerCatchupRequestBufferSize = 10 const BlockResponseLatestRoundHeader = "X-Latest-Round" // BlockServiceBlockPath is the path to register BlockService as a handler for when using gorilla/mux -// e.g. .Handle(BlockServiceBlockPath, &ls) +// e.g. .HandleFunc(BlockServiceBlockPath, ls.ServeBlockPath) const BlockServiceBlockPath = "/v{version:[0-9.]+}/{genesisID}/block/{round:[0-9a-z]+}" // Constant strings used as keys for topics @@ -147,11 +147,16 @@ func MakeBlockService(log logging.Logger, config config.Local, ledger LedgerForB memoryCap: config.BlockServiceMemCap, } if service.enableService { - net.RegisterHTTPHandler(BlockServiceBlockPath, service) + service.RegisterHandlers(net) } return service } +// RegisterHandlers registers the request handlers for BlockService's paths with the registrar. +func (bs *BlockService) RegisterHandlers(registrar Registrar) { + registrar.RegisterHTTPHandlerFunc(BlockServiceBlockPath, bs.ServeBlockPath) +} + // Start listening to catchup requests over ws func (bs *BlockService) Start() { bs.mu.Lock() @@ -179,10 +184,10 @@ func (bs *BlockService) Stop() { bs.closeWaitGroup.Wait() } -// ServerHTTP returns blocks +// ServeBlockPath returns blocks // Either /v{version}/{genesisID}/block/{round} or ?b={round}&v={version} // Uses gorilla/mux for path argument parsing. 
-func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Request) { +func (bs *BlockService) ServeBlockPath(response http.ResponseWriter, request *http.Request) { pathVars := mux.Vars(request) versionStr, hasVersionStr := pathVars["version"] roundStr, hasRoundStr := pathVars["round"] @@ -260,13 +265,13 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re if !ok { response.Header().Set("Retry-After", blockResponseRetryAfter) response.WriteHeader(http.StatusServiceUnavailable) - bs.log.Debugf("ServeHTTP: returned retry-after: %v", err) + bs.log.Debugf("ServeBlockPath: returned retry-after: %v", err) } httpBlockMessagesDroppedCounter.Inc(nil) return default: // unexpected error. - bs.log.Warnf("ServeHTTP : failed to retrieve block %d %v", round, err) + bs.log.Warnf("ServeBlockPath: failed to retrieve block %d %v", round, err) response.WriteHeader(http.StatusInternalServerError) return } diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go index e637796adf..83cfd94ef9 100644 --- a/rpcs/blockService_test.go +++ b/rpcs/blockService_test.go @@ -166,8 +166,8 @@ func TestRedirectFallbackEndpoints(t *testing.T) { bs1 := MakeBlockService(log, config, ledger1, net1, "test-genesis-ID") bs2 := MakeBlockService(log, config, ledger2, net2, "test-genesis-ID") - nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) - nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) + bs1.RegisterHandlers(nodeA) + bs2.RegisterHandlers(nodeB) parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) @@ -210,7 +210,7 @@ func TestBlockServiceShutdown(t *testing.T) { nodeA := &basicRPCNode{} - nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) + bs1.RegisterHandlers(nodeA) nodeA.start() defer nodeA.stop() @@ -292,9 +292,8 @@ func TestRedirectOnFullCapacity(t *testing.T) { bs1.memoryCap = 250 bs2.memoryCap = 250 - nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) - - nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) + bs1.RegisterHandlers(nodeA) + bs2.RegisterHandlers(nodeB) parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) @@ -371,11 +370,11 @@ forloop: // First node redirects, does not return retry require.True(t, strings.Contains(logBuffer1.String(), "redirectRequest: redirected block request to")) - require.False(t, strings.Contains(logBuffer1.String(), "ServeHTTP: returned retry-after: block service memory over capacity")) + require.False(t, strings.Contains(logBuffer1.String(), "ServeBlockPath: returned retry-after: block service memory over capacity")) // Second node cannot redirect, it returns retry-after when over capacity require.False(t, strings.Contains(logBuffer2.String(), "redirectRequest: redirected block request to")) - require.True(t, strings.Contains(logBuffer2.String(), "ServeHTTP: returned retry-after: block service memory over capacity")) + require.True(t, strings.Contains(logBuffer2.String(), "ServeBlockPath: returned retry-after: block service memory over capacity")) } // TestWsBlockLimiting ensures that limits are applied correctly on the websocket side of the service @@ -474,8 +473,8 @@ func TestRedirectExceptions(t *testing.T) { bs1 := MakeBlockService(log1, configInvalidRedirects, ledger1, net1, "{genesisID}") bs2 := MakeBlockService(log2, configWithRedirectToSelf, ledger2, net2, "{genesisID}") - nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) - nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) + bs1.RegisterHandlers(nodeA) + bs2.RegisterHandlers(nodeB) 
parsedURL, err := addr.ParseHostOrURL(nodeA.rootURL()) require.NoError(t, err) diff --git a/rpcs/registrar.go b/rpcs/registrar.go index f488aebf26..f0122b552c 100644 --- a/rpcs/registrar.go +++ b/rpcs/registrar.go @@ -26,6 +26,8 @@ import ( type Registrar interface { // RegisterHTTPHandler path accepts gorilla/mux path annotations RegisterHTTPHandler(path string, handler http.Handler) + // RegisterHTTPHandlerFunc path accepts gorilla/mux path annotations and a HandlerFunc + RegisterHTTPHandlerFunc(path string, handler func(response http.ResponseWriter, request *http.Request)) // RegisterHandlers exposes global websocket handler registration RegisterHandlers(dispatch []network.TaggedMessageHandler) } diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go index fcfae1044d..0b12d2b413 100644 --- a/rpcs/txService_test.go +++ b/rpcs/txService_test.go @@ -89,6 +89,13 @@ func (b *basicRPCNode) RegisterHTTPHandler(path string, handler http.Handler) { b.rmux.Handle(path, handler) } +func (b *basicRPCNode) RegisterHTTPHandlerFunc(path string, handler func(http.ResponseWriter, *http.Request)) { + if b.rmux == nil { + b.rmux = mux.NewRouter() + } + b.rmux.HandleFunc(path, handler) +} + func (b *basicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler) { } From a9c2b7e6ec8e867e21f357e3edbec08d69090a8e Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:43:38 -0400 Subject: [PATCH 45/82] p2p: do not register closing peer (#6086) Co-authored-by: cce <51567+cce@users.noreply.github.com> --- catchup/universalFetcher_test.go | 15 +++++++-------- network/p2pNetwork.go | 6 ++++++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 164360d43d..bd7f25b77e 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -204,7 +204,7 @@ func TestRequestBlockBytesErrors(t *testing.T) { cancel() _, _, _, err = fetcher.fetchBlock(ctx, next, up) var wrfe errWsFetcherRequestFailed - require.True(t, errors.As(err, &wrfe), "unexpected err: %w", wrfe) + require.ErrorAs(t, err, &wrfe) require.Equal(t, "context canceled", err.(errWsFetcherRequestFailed).cause) ctx = context.Background() @@ -213,14 +213,14 @@ func TestRequestBlockBytesErrors(t *testing.T) { up = makeTestUnicastPeerWithResponseOverride(net, t, &responseOverride) _, _, _, err = fetcher.fetchBlock(ctx, next, up) - require.True(t, errors.As(err, &wrfe)) + require.ErrorAs(t, err, &wrfe) require.Equal(t, "Cert data not found", err.(errWsFetcherRequestFailed).cause) responseOverride = network.Response{Topics: network.Topics{network.MakeTopic(rpcs.CertDataKey, make([]byte, 0))}} up = makeTestUnicastPeerWithResponseOverride(net, t, &responseOverride) _, _, _, err = fetcher.fetchBlock(ctx, next, up) - require.True(t, errors.As(err, &wrfe)) + require.ErrorAs(t, err, &wrfe) require.Equal(t, "Block data not found", err.(errWsFetcherRequestFailed).cause) } @@ -240,7 +240,6 @@ func (thh *TestHTTPHandler) ServeHTTP(response http.ResponseWriter, request *htt bytes = make([]byte, fetcherMaxBlockBytes+1) } response.Write(bytes) - return } // TestGetBlockBytesHTTPErrors tests the errors reported from getblockBytes for http peer @@ -264,25 +263,25 @@ func TestGetBlockBytesHTTPErrors(t *testing.T) { ls.status = http.StatusBadRequest _, _, _, err := fetcher.fetchBlock(context.Background(), 1, net.GetPeers()[0]) var hre errHTTPResponse - require.True(t, errors.As(err, &hre)) + 
require.ErrorAs(t, err, &hre) require.Equal(t, "Response body '\x00'", err.(errHTTPResponse).cause) ls.exceedLimit = true _, _, _, err = fetcher.fetchBlock(context.Background(), 1, net.GetPeers()[0]) - require.True(t, errors.As(err, &hre)) + require.ErrorAs(t, err, &hre) require.Equal(t, "read limit exceeded", err.(errHTTPResponse).cause) ls.status = http.StatusOK ls.content = append(ls.content, "undefined") _, _, _, err = fetcher.fetchBlock(context.Background(), 1, net.GetPeers()[0]) var cte errHTTPResponseContentType - require.True(t, errors.As(err, &cte)) + require.ErrorAs(t, err, &cte) require.Equal(t, "undefined", err.(errHTTPResponseContentType).contentType) ls.status = http.StatusOK ls.content = append(ls.content, "undefined2") _, _, _, err = fetcher.fetchBlock(context.Background(), 1, net.GetPeers()[0]) - require.True(t, errors.As(err, &cte)) + require.ErrorAs(t, err, &cte) require.Equal(t, 2, err.(errHTTPResponseContentType).contentTypeCount) } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 4357970388..a186426b15 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -808,6 +808,12 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea wsp.init(n.config, outgoingMessagesBufferSize) n.wsPeersLock.Lock() + if wsp.didSignalClose.Load() == 1 { + networkPeerAlreadyClosed.Inc(nil) + n.log.Debugf("peer closing %s", addr) + n.wsPeersLock.Unlock() + return + } n.wsPeers[p2pPeer] = wsp n.wsPeersToIDs[wsp] = p2pPeer n.wsPeersLock.Unlock() From e697ae8903475e7fc0dcef59e57ef0ebb8b5360b Mon Sep 17 00:00:00 2001 From: John Lee Date: Mon, 29 Jul 2024 16:56:09 -0400 Subject: [PATCH 46/82] CICD: fix broken ci-build target (#6087) --- scripts/release/mule/Makefile.mule | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/mule/Makefile.mule b/scripts/release/mule/Makefile.mule index 06fe5d09b7..6bc0cdb65e 100644 --- a/scripts/release/mule/Makefile.mule +++ b/scripts/release/mule/Makefile.mule @@ -34,7 +34,7 @@ ci-build-universal: ci-clean universal mkdir -p $(PKG_DIR_UNIVERSAL)/data && \ cp installer/genesis/devnet/genesis.json $(PKG_DIR_UNIVERSAL)/data -ci-build: ci-clean +ci-build: ci-clean build mkdir -p $(PKG_DIR) CHANNEL=$(CHANNEL) PKG_ROOT=$(PKG_DIR) NO_BUILD=True VARIATIONS=$(OS_TYPE)-$(ARCH) \ scripts/build_packages.sh $(OS_TYPE)/$(ARCH) && \ From 578684e28266a0ea47c356c811fd689c51070bf3 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 29 Jul 2024 16:56:43 -0400 Subject: [PATCH 47/82] p2p: support EnableGossipService in p2p streams (#6073) --- network/p2p/p2p.go | 2 +- network/p2p/streams.go | 58 +++++++--------- network/p2pNetwork_test.go | 138 +++++++++++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+), 35 deletions(-) diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 2c64b63eab..21782dce44 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -159,7 +159,7 @@ func configureResourceManager(cfg config.Local) (network.ResourceManager, error) // MakeService creates a P2P service instance func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler, bootstrapPeers []*peer.AddrInfo) (*serviceImpl, error) { - sm := makeStreamManager(ctx, log, h, wsStreamHandler) + sm := makeStreamManager(ctx, log, h, wsStreamHandler, cfg.EnableGossipService) h.Network().Notify(sm) h.SetStreamHandler(AlgorandWsProtocol, sm.streamHandler) diff --git 
a/network/p2p/streams.go b/network/p2p/streams.go index e7277f4871..0b7838ffdc 100644 --- a/network/p2p/streams.go +++ b/network/p2p/streams.go @@ -30,10 +30,11 @@ import ( // streamManager implements network.Notifiee to create and manage streams for use with non-gossipsub protocols. type streamManager struct { - ctx context.Context - log logging.Logger - host host.Host - handler StreamHandler + ctx context.Context + log logging.Logger + host host.Host + handler StreamHandler + allowIncomingGossip bool streams map[peer.ID]network.Stream streamsLock deadlock.Mutex @@ -42,18 +43,25 @@ type streamManager struct { // StreamHandler is called when a new bidirectional stream for a given protocol and peer is opened. type StreamHandler func(ctx context.Context, pid peer.ID, s network.Stream, incoming bool) -func makeStreamManager(ctx context.Context, log logging.Logger, h host.Host, handler StreamHandler) *streamManager { +func makeStreamManager(ctx context.Context, log logging.Logger, h host.Host, handler StreamHandler, allowIncomingGossip bool) *streamManager { return &streamManager{ - ctx: ctx, - log: log, - host: h, - handler: handler, - streams: make(map[peer.ID]network.Stream), + ctx: ctx, + log: log, + host: h, + handler: handler, + allowIncomingGossip: allowIncomingGossip, + streams: make(map[peer.ID]network.Stream), } } // streamHandler is called by libp2p when a new stream is accepted func (n *streamManager) streamHandler(stream network.Stream) { + if stream.Conn().Stat().Direction == network.DirInbound && !n.allowIncomingGossip { + n.log.Debugf("rejecting stream from incoming connection from %s", stream.Conn().RemotePeer().String()) + stream.Close() + return + } + n.streamsLock.Lock() defer n.streamsLock.Unlock() @@ -74,15 +82,7 @@ func (n *streamManager) streamHandler(stream network.Stream) { } n.streams[stream.Conn().RemotePeer()] = stream - // streamHandler is supposed to be called for accepted streams, so we expect incoming here incoming := stream.Conn().Stat().Direction == network.DirInbound - if !incoming { - if stream.Stat().Direction == network.DirUnknown { - n.log.Warnf("Unknown direction for a steam %s to/from %s", stream.ID(), remotePeer) - } else { - n.log.Warnf("Unexpected outgoing stream in streamHandler for connection %s (%s): %s vs %s stream", stream.Conn().ID(), remotePeer, stream.Conn().Stat().Direction, stream.Stat().Direction.String()) - } - } n.handler(n.ctx, remotePeer, stream, incoming) return } @@ -92,20 +92,18 @@ func (n *streamManager) streamHandler(stream network.Stream) { } // no old stream n.streams[stream.Conn().RemotePeer()] = stream - // streamHandler is supposed to be called for accepted streams, so we expect incoming here incoming := stream.Conn().Stat().Direction == network.DirInbound - if !incoming { - if stream.Stat().Direction == network.DirUnknown { - n.log.Warnf("streamHandler: unknown direction for a steam %s to/from %s", stream.ID(), remotePeer) - } else { - n.log.Warnf("Unexpected outgoing stream in streamHandler for connection %s (%s): %s vs %s stream", stream.Conn().ID(), remotePeer, stream.Conn().Stat().Direction, stream.Stat().Direction.String()) - } - } n.handler(n.ctx, remotePeer, stream, incoming) } // Connected is called when a connection is opened +// for both incoming (listener -> addConn) and outgoing (dialer -> addConn) connections. 
func (n *streamManager) Connected(net network.Network, conn network.Conn) { + if conn.Stat().Direction == network.DirInbound && !n.allowIncomingGossip { + n.log.Debugf("ignoring incoming connection from %s", conn.RemotePeer().String()) + return + } + remotePeer := conn.RemotePeer() localPeer := n.host.ID() @@ -138,15 +136,7 @@ func (n *streamManager) Connected(net network.Network, conn network.Conn) { needUnlock = false n.streamsLock.Unlock() - // a new stream created above, expected direction is outbound incoming := stream.Conn().Stat().Direction == network.DirInbound - if incoming { - n.log.Warnf("Unexpected incoming stream in streamHandler for connection %s (%s): %s vs %s stream", stream.Conn().ID(), remotePeer, stream.Conn().Stat().Direction, stream.Stat().Direction.String()) - } else { - if stream.Stat().Direction == network.DirUnknown { - n.log.Warnf("Connected: unknown direction for a steam %s to/from %s", stream.ID(), remotePeer) - } - } n.handler(n.ctx, remotePeer, stream, incoming) } diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 9fc00774fb..f6eea2ab69 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -1207,3 +1207,141 @@ func TestP2PwsStreamHandlerDedup(t *testing.T) { require.False(t, netA.hasPeers()) require.False(t, netB.hasPeers()) } + +// TestP2PEnableGossipService_NodeDisable ensures that a node with EnableGossipService=false +// still can participate in the network by sending and receiving messages. +func TestP2PEnableGossipService_NodeDisable(t *testing.T) { + partitiontest.PartitionTest(t) + + log := logging.TestingLog(t) + + // prepare configs + cfg := config.GetDefaultLocal() + cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses + + relayCfg := cfg + relayCfg.NetAddress = "127.0.0.1:0" + + nodeCfg := cfg + nodeCfg.EnableGossipService = false + nodeCfg2 := nodeCfg + nodeCfg2.NetAddress = "127.0.0.1:0" + + tests := []struct { + name string + relayCfg config.Local + nodeCfg config.Local + }{ + {"non-listening-node", relayCfg, nodeCfg}, + {"listening-node", relayCfg, nodeCfg2}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + relayCfg := test.relayCfg + netA, err := NewP2PNetwork(log, relayCfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + require.NoError(t, err) + netA.Start() + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + multiAddrStr := addrsA[0].String() + phoneBookAddresses := []string{multiAddrStr} + + // start netB with gossip service disabled + nodeCfg := test.nodeCfg + netB, err := NewP2PNetwork(log, nodeCfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + require.NoError(t, err) + netB.Start() + defer netB.Stop() + + require.Eventually(t, func() bool { + return netA.hasPeers() && netB.hasPeers() + }, 1*time.Second, 50*time.Millisecond) + + testTag := protocol.AgreementVoteTag + + var handlerCountA atomic.Uint32 + passThroughHandlerA := []TaggedMessageHandler{ + {Tag: testTag, MessageHandler: HandlerFunc(func(msg IncomingMessage) OutgoingMessage { + handlerCountA.Add(1) + return OutgoingMessage{Action: Broadcast} + })}, + } + var handlerCountB atomic.Uint32 + passThroughHandlerB := []TaggedMessageHandler{ + {Tag: testTag, MessageHandler: HandlerFunc(func(msg IncomingMessage) OutgoingMessage { + handlerCountB.Add(1) + return OutgoingMessage{Action: Broadcast} + })}, + } + 
netA.RegisterHandlers(passThroughHandlerA) + netB.RegisterHandlers(passThroughHandlerB) + + // send messages from both nodes to each other and confirm they are received. + for i := 0; i < 10; i++ { + err = netA.Broadcast(context.Background(), testTag, []byte(fmt.Sprintf("hello from A %d", i)), false, nil) + require.NoError(t, err) + err = netB.Broadcast(context.Background(), testTag, []byte(fmt.Sprintf("hello from B %d", i)), false, nil) + require.NoError(t, err) + } + + require.Eventually( + t, + func() bool { + return handlerCountA.Load() == 10 && handlerCountB.Load() == 10 + }, + 2*time.Second, + 50*time.Millisecond, + ) + }) + } +} + +// TestP2PEnableGossipService_BothDisable checks if both relay and node have EnableGossipService=false +// they do not gossip to each other. +// +// Note, this test checks a configuration where node A (relay) does not know about node B, +// and node B is configured to connect to A, and this scenario rejecting logic is guaranteed to work. +func TestP2PEnableGossipService_BothDisable(t *testing.T) { + partitiontest.PartitionTest(t) + + log := logging.TestingLog(t) + + // prepare configs + cfg := config.GetDefaultLocal() + cfg.DNSBootstrapID = "" // disable DNS lookups since the test uses phonebook addresses + cfg.EnableGossipService = false // disable gossip service by default + + relayCfg := cfg + relayCfg.NetAddress = "127.0.0.1:0" + + netA, err := NewP2PNetwork(log.With("net", "netA"), relayCfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + require.NoError(t, err) + netA.Start() + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsA, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsA[0]) + multiAddrStr := addrsA[0].String() + phoneBookAddresses := []string{multiAddrStr} + + nodeCfg := cfg + nodeCfg.NetAddress = "" + + netB, err := NewP2PNetwork(log.With("net", "netB"), nodeCfg, "", phoneBookAddresses, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + require.NoError(t, err) + netB.Start() + defer netB.Stop() + + require.Eventually(t, func() bool { + return len(netA.service.Conns()) > 0 && len(netB.service.Conns()) > 0 + }, 1*time.Second, 50*time.Millisecond) + + require.False(t, netA.hasPeers()) + require.False(t, netB.hasPeers()) +} From 8eca278ffef1eee17a89e2d3d49ad9373e10bbac Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 31 Jul 2024 10:26:20 -0400 Subject: [PATCH 48/82] tests: debug logging for TestVotersReloadFromDiskAfterOneStateProofCommitted (#6088) --- ledger/acctupdates_test.go | 14 +++++++++++++- ledger/blockqueue_test.go | 1 + ledger/ledger.go | 7 +++++-- ledger/ledger_test.go | 1 + ledger/tracker.go | 9 +++++++++ 5 files changed, 29 insertions(+), 3 deletions(-) diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 283fbcdc2e..a27a2be795 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -130,7 +130,18 @@ func makeMockLedgerForTrackerWithLogger(t testing.TB, inMemory bool, initialBloc Totals: totals, } } - return &mockLedgerForTracker{dbs: dbs, log: l, filename: fileName, inMemory: inMemory, blocks: blocks, deltas: deltas, consensusParams: config.Consensus[consensusVersion], consensusVersion: consensusVersion, accts: accts[0]} + ml := &mockLedgerForTracker{ + dbs: dbs, + log: l, + filename: fileName, + inMemory: inMemory, + blocks: blocks, + deltas: deltas, consensusParams: config.Consensus[consensusVersion], + consensusVersion: consensusVersion, + accts: 
accts[0], + trackers: trackerRegistry{log: l}, + } + return ml } @@ -160,6 +171,7 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker { filename: fn, consensusParams: ml.consensusParams, consensusVersion: ml.consensusVersion, + trackers: trackerRegistry{log: dblogger}, } for k, v := range ml.accts { newLedgerTracker.accts[k] = v diff --git a/ledger/blockqueue_test.go b/ledger/blockqueue_test.go index e74fbc0b3b..e72523be71 100644 --- a/ledger/blockqueue_test.go +++ b/ledger/blockqueue_test.go @@ -207,6 +207,7 @@ func TestBlockQueueSyncerDeletion(t *testing.T) { l := &Ledger{ log: log, blockDBs: blockDBs, + trackers: trackerRegistry{log: log}, } if test.tracker != nil { l.trackers.trackers = append(l.trackers.trackers, test.tracker) diff --git a/ledger/ledger.go b/ledger/ledger.go index 7459c23037..2f10724fee 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -214,8 +214,11 @@ func (l *Ledger) reloadLedger() error { blockListeners := make([]ledgercore.BlockListener, 0, len(l.notifier.listeners)) blockListeners = append(blockListeners, l.notifier.listeners...) - // close the trackers. - l.trackers.close() + // close the trackers if the registry was already initialized: opening a new ledger calls reloadLedger + // and there is nothing to close. Registry's logger is not initialized yet so close cannot log. + if l.trackers.trackers != nil { + l.trackers.close() + } // init block queue var err error diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index c97040f42c..f1ee8898c9 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -3415,6 +3415,7 @@ func TestLedgerRetainMinOffCatchpointInterval(t *testing.T) { l := &Ledger{} l.cfg = cfg l.archival = cfg.Archival + l.trackers.log = logging.TestingLog(t) for i := 1; i <= blocksToMake; i++ { minBlockToKeep := l.notifyCommit(basics.Round(i)) diff --git a/ledger/tracker.go b/ledger/tracker.go index 97098a572f..96e42e949f 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -467,6 +467,7 @@ func (tr *trackerRegistry) scheduleCommit(blockqRound, maxLookback basics.Round) // Dropping this dcc allows the blockqueue syncer to continue persisting other blocks // and ledger reads to proceed without being blocked by trackerMu lock. tr.accountsWriting.Done() + tr.log.Debugf("trackerRegistry.scheduleCommit: deferredCommits channel is full, skipping commit for (%d-%d)", dcc.oldBase, dcc.oldBase+basics.Round(dcc.offset)) } } } @@ -491,22 +492,27 @@ func (tr *trackerRegistry) isBehindCommittingDeltas(latest basics.Round) bool { } func (tr *trackerRegistry) close() { + tr.log.Debugf("trackerRegistry is closing") if tr.ctxCancel != nil { tr.ctxCancel() } // close() is called from reloadLedger() when and trackerRegistry is not initialized yet if tr.commitSyncerClosed != nil { + tr.log.Debugf("trackerRegistry is waiting for accounts writing to complete") tr.waitAccountsWriting() // this would block until the commitSyncerClosed channel get closed. <-tr.commitSyncerClosed + tr.log.Debugf("trackerRegistry done waiting for accounts writing") } + tr.log.Debugf("trackerRegistry is closing trackers") for _, lt := range tr.trackers { lt.close() } tr.trackers = nil tr.accts = nil + tr.log.Debugf("trackerRegistry has closed") } // commitSyncer is the syncer go-routine function which perform the database updates. 
Internally, it dequeues deferredCommits and
@@ -525,11 +531,13 @@ func (tr *trackerRegistry) commitSyncer(deferredCommits chan *deferredCommitCont
 		}
 	case <-tr.ctx.Done():
 		// drain the pending commits queue:
+		tr.log.Debugf("commitSyncer is closing, draining the pending commits queue")
 		drained := false
 		for !drained {
 			select {
 			case <-deferredCommits:
 				tr.accountsWriting.Done()
+				tr.log.Debugf("commitSyncer drained a pending commit")
 			default:
 				drained = true
 			}
@@ -648,6 +656,7 @@ func (tr *trackerRegistry) commitRound(dcc *deferredCommitContext) error {
 		lt.postCommitUnlocked(tr.ctx, dcc)
 	}
 
+	tr.log.Debugf("commitRound completed for (%d-%d)", dbRound, dbRound+basics.Round(offset))
 	return nil
 }
 
From 05a52e26685f7dd94fd7ec852cb08589aed8b8d5 Mon Sep 17 00:00:00 2001
From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>
Date: Wed, 31 Jul 2024 14:29:19 -0400
Subject: [PATCH 49/82] tests: fix flushing in TestVotersReloadFromDiskAfterOneStateProofCommitted (#6090)

---
 ledger/ledger_test.go | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index f1ee8898c9..c4bb74fcb5 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -2952,6 +2952,19 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi
 	<-l.trackers.commitSyncerClosed
 	l.trackers.commitSyncerClosed = nil
 
+	// it is possible a commit was scheduled while commitSyncer was closing so that there is one pending task
+	// that needs to be done before the ledger can be closed, so drain the queue
+outer:
+	for {
+		select {
+		case <-l.trackers.deferredCommits:
+			log.Info("drained deferred commit")
+			l.trackers.accountsWriting.Done()
+		default:
+			break outer
+		}
+	}
+
 	// flush one final time
 	triggerTrackerFlush(t, l)
 
From 94f1355bdabd34744bb30942fa9158d09ff742b4 Mon Sep 17 00:00:00 2001
From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>
Date: Thu, 1 Aug 2024 10:46:23 -0400
Subject: [PATCH 50/82] netgoal: allow unknown template tokens (#6091)

---
 cmd/netgoal/network.go              |  6 ++++--
 netdeploy/remote/deployedNetwork.go | 20 ++++++++++++--------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go
index d4ed3c277c..fef3850868 100644
--- a/cmd/netgoal/network.go
+++ b/cmd/netgoal/network.go
@@ -35,6 +35,7 @@ var networkRecipeFile string
 var networkName string
 var networkGenesisVersionModifier string
 var miscStringStringTokens []string
+var ignoreUnknownTokens bool
 
 var cpuprofilePath string
 
@@ -56,7 +57,8 @@ func init() {
 	networkBuildCmd.Flags().BoolVarP(&networkUseGenesisFiles, "use-existing-files", "e", false, "Use existing genesis files.")
 	networkBuildCmd.Flags().BoolVarP(&bootstrapLoadingFile, "gen-db-files", "b", false, "Generate database files.")
 	networkBuildCmd.Flags().BoolVarP(&networkIgnoreExistingDir, "force", "f", false, "Force generation into existing directory.")
-	networkBuildCmd.Flags().StringSliceVarP(&miscStringStringTokens, "val", "v", nil, "name=value, may be reapeated")
+	networkBuildCmd.Flags().StringSliceVarP(&miscStringStringTokens, "val", "v", nil, "name=value, may be repeated")
+	networkBuildCmd.Flags().BoolVarP(&ignoreUnknownTokens, "ignore", "i", false, "Ignore unknown tokens in network template file")
 	networkBuildCmd.Flags().StringVar(&cpuprofilePath, "cpuprofile", "", "write cpu profile to path")
 
 	rootCmd.PersistentFlags().StringVarP(&networkGenesisVersionModifier, "modifier", "m", "", "Override Genesis Version Modifier (eg 'v1')")
@@
-136,7 +138,7 @@ func runBuildNetwork() error { return fmt.Errorf("error resolving network template file '%s' to full path: %v", networkTemplateFile, err) } - netCfg, err := remote.InitDeployedNetworkConfig(networkTemplateFile, buildConfig) + netCfg, err := remote.InitDeployedNetworkConfig(networkTemplateFile, buildConfig, ignoreUnknownTokens) if err != nil { return fmt.Errorf("error loading Network Config file '%s': %v", networkTemplateFile, err) } diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index ce72071ff0..26f25a0da6 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -19,6 +19,7 @@ package remote import ( "encoding/binary" "encoding/json" + "errors" "fmt" "io/fs" "math/rand" @@ -58,14 +59,14 @@ var ErrDeployedNetworkInsufficientHosts = fmt.Errorf("target network requires mo // ErrDeployedNetworkNameCantIncludeWildcard is returned by Validate if network name contains '*' var ErrDeployedNetworkNameCantIncludeWildcard = fmt.Errorf("network name cannont include wild-cards") -// ErrDeployedNetworkTemplate A template file contained {{Field}} sections that were not handled by a corresponding Field value in configuration. -type ErrDeployedNetworkTemplate struct { - UnhandledTemplate string +// deployedNetworkTemplateError A template file contained {{Field}} sections that were not handled by a corresponding Field value in configuration. +type deployedNetworkTemplateError struct { + unhandledTemplate string } // Error satisfies error interface -func (ednt ErrDeployedNetworkTemplate) Error() string { - return fmt.Sprintf("config file contains unrecognized token: %s", ednt.UnhandledTemplate) +func (dnte deployedNetworkTemplateError) Error() string { + return fmt.Sprintf("config file contains unrecognized token: %s", dnte.unhandledTemplate) } // DeployedNetworkConfig represents the complete configuration specification for a deployed network @@ -123,10 +124,13 @@ int 1 ` // InitDeployedNetworkConfig loads the DeployedNetworkConfig from a file -func InitDeployedNetworkConfig(file string, buildConfig BuildConfig) (cfg DeployedNetworkConfig, err error) { +func InitDeployedNetworkConfig(file string, buildConfig BuildConfig, ignoreUnkTokens bool) (cfg DeployedNetworkConfig, err error) { processedFile, err := loadAndProcessConfig(file, buildConfig) if err != nil { - return + var dnte deployedNetworkTemplateError + if !errors.As(err, &dnte) || !ignoreUnkTokens { + return + } } err = json.Unmarshal([]byte(processedFile), &cfg) @@ -178,7 +182,7 @@ func replaceTokens(original string, buildConfig BuildConfig) (expanded string, e if closeIndex < 0 { closeIndex = len(expanded) - 2 } - return "", ErrDeployedNetworkTemplate{expanded[openIndex : closeIndex+2]} + return expanded, deployedNetworkTemplateError{expanded[openIndex : closeIndex+2]} } return From d2c4ca75c267e56a3b3d47a4cd97e4a75eb1fa7b Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 1 Aug 2024 13:01:44 -0400 Subject: [PATCH 51/82] cmd: Add goal node subcommand to generate peer private key (#6078) --- cmd/goal/node.go | 1 + cmd/goal/p2pid.go | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 cmd/goal/p2pid.go diff --git a/cmd/goal/node.go b/cmd/goal/node.go index 2db08fd4e5..8bb103115b 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -79,6 +79,7 @@ func init() { nodeCmd.AddCommand(catchupCmd) // Once the server-side implementation of the shutdown 
command is ready, we should enable this one. //nodeCmd.AddCommand(shutdownCmd) + nodeCmd.AddCommand(p2pID) startCmd.Flags().StringVarP(&peerDial, "peer", "p", "", "Peer address to dial for initial connection") startCmd.Flags().StringVarP(&listenIP, "listen", "l", "", "Endpoint / REST address to listen on") diff --git a/cmd/goal/p2pid.go b/cmd/goal/p2pid.go new file mode 100644 index 0000000000..b35a946432 --- /dev/null +++ b/cmd/goal/p2pid.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// generate a new p2p private key and print out peerID to stdout + +package main + +import ( + "fmt" + "os" + "path" + + "github.com/algorand/go-algorand/cmd/util/datadir" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/network/p2p" + "github.com/algorand/go-algorand/util" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/spf13/cobra" +) + +var p2pID = &cobra.Command{ + Use: "generate-p2pid", + Short: "Generate a new p2p private key", + Long: "Generate a new p2p private key (saved to " + p2p.DefaultPrivKeyPath + ") and print out peerID to stdout", + Args: validateNoPosArgsFn, + Run: func(cmd *cobra.Command, args []string) { + anyError := false + datadir.OnDataDirs(func(dataDir string) { + exist := false + privKeyPath := path.Join(dataDir, p2p.DefaultPrivKeyPath) + if util.FileExists(privKeyPath) { + exist = true + } + + peerKey, err := p2p.GetPrivKey(config.Local{P2PPersistPeerID: true}, dataDir) + if err != nil { + fmt.Fprintf(os.Stderr, "Error obtaining private key: %v\n", err) + anyError = true + return + } + peerID, err := peer.IDFromPublicKey(peerKey.GetPublic()) + if err != nil { + fmt.Fprintf(os.Stderr, "Error obtaining peerID from a key: %v\n", err) + anyError = true + return + } + + fmt.Printf("PeerID: %s\n", peerID.String()) + if !exist { + fmt.Printf("Private key saved to %s\n", privKeyPath) + } else { + fmt.Printf("Used existing key %s\n", privKeyPath) + } + }) + if anyError { + os.Exit(1) + } + }, +} From a0815c1435d522a880f0c28210df323e7d611bff Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 7 Aug 2024 14:33:40 -0400 Subject: [PATCH 52/82] tests: fixes to e2e test logs parsers (#6093) --- test/heapwatch/agreement-log.py | 22 ++- test/heapwatch/topology-extract-ws-e2e.py | 181 ++++++++++++++++++++++ test/heapwatch/topology-extract-ws.py | 6 +- 3 files changed, 204 insertions(+), 5 deletions(-) create mode 100644 test/heapwatch/topology-extract-ws-e2e.py diff --git a/test/heapwatch/agreement-log.py b/test/heapwatch/agreement-log.py index 4109b37a71..873374806d 100644 --- a/test/heapwatch/agreement-log.py +++ b/test/heapwatch/agreement-log.py @@ -12,6 +12,7 @@ import json import logging import os +import re import time from termcolor import COLORS, colored @@ -80,6 +81,14 @@ def 
process_json_line(line: str, node_name: str, by_node: dict, events: list): return result return None +def node_name_from_line(line: str): + """Extracts node name from the line like "libgoalFixture.go:376: Relay0/node.log:""" + pattern = r'([^:]+?)/node\.log' + match = re.search(pattern, line) + if match: + return match.group(1).strip() + return None + def main(): os.environ['TZ'] = 'UTC' time.tzset() @@ -134,16 +143,21 @@ def main(): libgoalFixture.go:374: ===================... libgoalFixture.go:376: Relay0/node.log: libgoalFixture.go:379: {"file":"server.go"... + + OR without libgoalFixture prefix (depends on the test) + ================================= + Relay0/node.log: + {"file":"server.go","function":"gi... """ node_name = None if line0.endswith('node.log:'): - node_name = line0.split(' ')[1].split('/')[0] - logger.info('found node name: %s', node_name) + node_name = node_name_from_line(line0) + logger.info('found node name: \'%s\'', node_name) for line in file: line = line.strip() if line.endswith('node.log:'): - node_name = line.split(' ')[1].split('/')[0] - logger.info('found node name: %s', node_name) + node_name = node_name_from_line(line) + logger.info('found node name: \'%s\'', node_name) if node_name: for line in file: json_start = line.find('{') diff --git a/test/heapwatch/topology-extract-ws-e2e.py b/test/heapwatch/topology-extract-ws-e2e.py new file mode 100644 index 0000000000..63e3bf8a6c --- /dev/null +++ b/test/heapwatch/topology-extract-ws-e2e.py @@ -0,0 +1,181 @@ +""" +WSNet network topology extraction script from e2e test output single log file. + +1. Save the e2e test output to a file +It starts with a line like this: +libgoalFixture.go:374: ===================... +libgoalFixture.go:376: Relay0/node.log: +libgoalFixture.go:379: {"file":"server.go"... + +OR like this: +================================= +Relay0/node.log: +{"file":"server.go","function":"gi... + +2. Run this script `python3 topology-extract-ws-e2e.py -o top.json e2e-test.log +3. 
Run the visualizer `topology-viz.py top.json` +""" +import argparse +from datetime import datetime +import json +import logging +import re +import sys +from typing import Dict, List + +logger = logging.getLogger(__name__) + + +def node_name_from_line(line: str): + """Extracts node name from the line like "libgoalFixture.go:376: Relay0/node.log:""" + pattern = r'([^:]+?)/node\.log' + match = re.search(pattern, line) + if match: + return match.group(1).strip() + return None + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('log_file', help='log file path') + ap.add_argument('-o', '--output', type=argparse.FileType('wt', encoding='utf-8'), help=f'save topology to the file specified instead of showing it') + ap.add_argument('-t', '--timestamp', action='store_true', help=f'store connection timestamp for each edge') + args = ap.parse_args() + + log_file = args.log_file + + nodes: List[str] = [] + edges: List[List[str]] = [] + mapping: Dict[str, str] = {} + + addr_to_name = {} + with open(log_file, 'rt', encoding='utf-8') as file: + line0 = None + while not line0: + line0 = file.readline() + line0 = line0.strip() + + node_name = None + if line0.endswith('node.log:'): + node_name = node_name_from_line(line0) + logger.info('found node name: \'%s\'', node_name) + for line in file: + line = line.strip() + if line.endswith('node.log:'): + node_name = node_name_from_line(line) + logger.info('found node name: \'%s\'', node_name) + if node_name: + nodes.append(node_name) + for line in file: + json_start = line.find('{') + if json_start == -1: + # end of continuous json block + node_name = None + break + line = line[json_start:] + + if "serving genesisID" in line: + data = json.loads(line.strip()) + match = re.search(r'(?:http://)?(\d+\.\d+\.\d+\.\d+:\d+)', data['msg']) + if match: + addr = match.group(1) + addr_to_name[addr] = node_name + + # Check if line contains relevant substrings before parsing as JSON + if "Accepted incoming connection from peer" in line or "Made outgoing connection to peer" in line: + data = json.loads(line.strip()) + + # Check for incoming connections + if "Accepted incoming connection from peer" in data.get("msg", ""): + remote = data['remote'] + match = re.search(r'(?:http://)?(\d+\.\d+\.\d+\.\d+:\d+)', remote) + remote_addr = match.group(1) + remote_name = remote_addr + if resolved := addr_to_name.get(remote_addr): + remote_name = resolved + source = remote_name + target = node_name + if args.timestamp: + # datetime is not serializable, so we store it as string for now + edge = (source, target, {'dt': data["time"]}) + else: + edge = (source, target) + + # Check for outgoing connections + elif "Made outgoing connection to peer" in data.get('msg', ""): + remote_addr = data['remote'] + remote_name = remote_addr + if resolved := addr_to_name.get(remote_addr): + remote_name = resolved + target = remote_name + source = node_name + + if args.timestamp: + # datetime is not serializable, so we store it as string for now + edge = (source, target, {'dt': data["time"]}) + else: + edge = (source, target) + + edges.append(edge) + + # apply names that were learned from the logs + for i, edge in enumerate(edges): + e0 = edge[0] + e0 = addr_to_name.get(e0, e0) + e1 = edge[1] + e1 = addr_to_name.get(e1, e1) + if len(edge) == 3: + edge = (e0, e1, edge[2]) + else: + edge = (e0, e1) + edges[i] = edge + + orig_nodes = set(nodes) + # remap non-canonical names (like poorNode) and non-resolved ip addresses to some nodes + for i, node in enumerate(nodes): + if not node.startswith(('N', 
'R', 'NPN')): + nodes[i] = 'N-' + node + + # remove non-resolved ip addresses from edges - most likely these N, NPN already counted + # because both nodes and relays logs are processed + trimmed_edges = [] + for i, edge in enumerate(edges): + e0 = edge[0] + e1 = edge[1] + if e0 not in orig_nodes or e1 not in orig_nodes: + # some non-resolved ip address, skip + continue + + if not e0.startswith(('N', 'R', 'NPN')): + e0 = 'N-' + e0 + if not e1.startswith(('N', 'R', 'NPN')): + e1 = 'N-' + e1 + + if len(edge) == 3: + edge = (e0, e1, edge[2]) + else: + edge = (e0, e1) + trimmed_edges.append(edge) + + result = { + "mapping": mapping, + "nodes": nodes, + "edges": trimmed_edges + } + + if args.timestamp and not args.output: + edges = sorted(edges, key=lambda x: x[2]['dt']) + for edge in edges: + ts = datetime.strptime(edge[2]['dt'], "%Y-%m-%dT%H:%M:%S.%f%z") + print('%15s %5s -> %-5s' % (ts.strftime('%H:%M:%S.%f'), edge[0], edge[1])) + return + + if args.output: + json.dump(result, args.output, indent=2) + else: + json.dump(result, sys.stdout, indent=2) + print(file=sys.stdout) + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/test/heapwatch/topology-extract-ws.py b/test/heapwatch/topology-extract-ws.py index 75f1d99f57..0b0765b007 100644 --- a/test/heapwatch/topology-extract-ws.py +++ b/test/heapwatch/topology-extract-ws.py @@ -68,7 +68,11 @@ def main(): remote_name = ip_to_name[remote_ip] source = remote_name target = mapped - edges.append((source, target)) + if args.timestamp: + # datetime is not serializable, so we store it as string for now + edge = (source, target, {'dt': data["time"]}) + else: + edge = (source, target) # Check for outgoing connections elif "Made outgoing connection to peer" in data.get('msg', ""): From 9924574bd7c48e559e5ffd2060df11801a7920ea Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 7 Aug 2024 16:11:33 -0400 Subject: [PATCH 53/82] tests: fix e2e subs goal-partkey-commands (#6095) --- test/scripts/e2e.sh | 11 +++- .../scripts/e2e_subs/goal-partkey-commands.sh | 56 +++++++++++++++---- 2 files changed, 54 insertions(+), 13 deletions(-) diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index a3b2c756fa..6f6e4a1f03 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -181,7 +181,16 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then clientrunner="${TEMPDIR}/ve/bin/python3 e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT}" - $clientrunner ${KEEP_TEMPS_CMD_STR} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py} + if [ -n "$TESTFILTER" ]; then + echo "Running test: $TESTFILTER" + $clientrunner ${KEEP_TEMPS_CMD_STR} "$SRCROOT"/test/scripts/e2e_subs/${TESTFILTER} + echo -n "deactivating..." + deactivate + echo "done" + exit + else + $clientrunner ${KEEP_TEMPS_CMD_STR} "$SRCROOT"/test/scripts/e2e_subs/*.{sh,py} + fi # If the temporary artifact directory exists, then the test artifact needs to be created if [ -d "${TEMPDIR}/net" ]; then diff --git a/test/scripts/e2e_subs/goal-partkey-commands.sh b/test/scripts/e2e_subs/goal-partkey-commands.sh index 94c831c86e..dd60d44016 100755 --- a/test/scripts/e2e_subs/goal-partkey-commands.sh +++ b/test/scripts/e2e_subs/goal-partkey-commands.sh @@ -6,9 +6,14 @@ set -x date "+$0 start %Y%m%d_%H%M%S" +WALLET=$1 + +gcmd="goal -w ${WALLET}" +INITIAL_ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') + # Registered Account ParticipationID Last Used First round Last round # yes LFMT...RHJQ 4UPT6AQC... 
4 0 3000000
-OUTPUT=$(goal account listpartkeys)
+OUTPUT=$(${gcmd} account listpartkeys)
 # In case there are multiple keys, make sure we are checking the correct one.
 OUTPUT=$(echo "$OUTPUT"|grep "yes.*3000"|tr -s ' ')
 if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi
@@ -29,7 +34,7 @@ if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round sh
 #Key dilution: 10000
 #Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo=
 #Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4=
-OUTPUT=$(goal account partkeyinfo)
+OUTPUT=$(${gcmd} account partkeyinfo)
 if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi
 if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi
 if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi
@@ -39,18 +44,31 @@ if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}';
 
 # Test multiple data directory supported
 NUM_OUTPUT_1=$(echo "$OUTPUT"|grep -c 'Participation ID')
-OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2")
+OUTPUT=$(${gcmd} account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2")
 NUM_OUTPUT_2=$(echo "$OUTPUT"|grep -c 'Participation ID')
 if (( "$NUM_OUTPUT_2" <= "$NUM_OUTPUT_1" )); then echo "Should have found more participation keys when checking both data directories."; exit 1; fi
 
 # get stderr from this one
-OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
+OUTPUT=$(${gcmd} account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1)
 EXPECTED_ERR="Only one data directory can be specified for this command."
 if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi
 
+# done with manual error checking
+set -exo pipefail
+
+fail_test () {
+    echo "test_fail: $1"
+    exit 1
+}
+
 create_and_fund_account () {
+    set +x # disable command echoing to hide the account funding output
     local TEMP_ACCT=$(${gcmd} account new|awk '{ print $6 }')
-    ${gcmd} clerk send -f "$INITIAL_ACCOUNT" -t "$TEMP_ACCT" -a 1000000 > /dev/null
+    SEND_OUTPUT=$(${gcmd} clerk send -f "$INITIAL_ACCOUNT" -t "$TEMP_ACCT" -a 1000000 2>&1)
+    if [[ $SEND_OUTPUT == *"Couldn't broadcast tx"* ]]; then
+        fail_test "Failed to fund account: $SEND_OUTPUT"
+    fi
+    set -x
     echo "$TEMP_ACCT"
 }
 
@@ -60,14 +78,16 @@ create_and_fund_account () {
 # $3 - error message
 verify_registered_state () {
     # look for participation ID anywhere in the partkeyinfo output
-    if ! goal account partkeyinfo | grep -q "$2"; then
-        fail_test "Key was not installed properly: $3"
+    PARTKEY_OUTPUT=$(${gcmd} account partkeyinfo)
+    if ! echo "$PARTKEY_OUTPUT" | grep -q "$2"; then
+        fail_test "Key $2 was not installed properly for cmd '$3':\n$PARTKEY_OUTPUT"
     fi
 
     # looking for yes/no, and the 8 character head of participation id in this line:
     # yes LFMT...RHJQ 4UPT6AQC... 4 0 3000
-    if ! goal account listpartkeys | grep -q "$1.*$(echo "$2" | cut -c1-8)\.\.\."; then
-        fail_test "Unexpected key state: $3"
+    LISTKEY_OUTPUT=$(${gcmd} account listpartkeys)
+    if !
echo "$LISTKEY_OUTPUT" | grep -q "$1.*$(echo "$2" | cut -c1-8)"; then + fail_test "Unexpected key $2 state ($1) for cmd '$3':\n$LISTKEY_OUTPUT" fi } @@ -75,19 +95,31 @@ verify_registered_state () { # install manually generated participation keys (do not register) NEW_ACCOUNT_1=$(create_and_fund_account) algokey part generate --keyfile test_partkey --first 0 --last 3000 --parent "$NEW_ACCOUNT_1" -PARTICIPATION_ID_1=$(goal account installpartkey --delete-input --partkey test_partkey|awk '{ print $7 }') +OUTPUT=$(${gcmd} account installpartkey --delete-input --partkey test_partkey) +PARTICIPATION_ID_1=$(echo "$OUTPUT" |awk '{ print $7 }') verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey" # goal account addpartkey # generate and install participation keys (do not register) +# ============= Example output ============= +# Please stand by while generating keys. This might take a few minutes... +# Participation key generation successful +# Transaction id for status change transaction: U3SWNVPUODOUHHUM3W3QP3DTQPO6GLYVOB2A6UYBKAWKJP5T4GEQ +# Transaction U3SWNVPUODOUHHUM3W3QP3DTQPO6GLYVOB2A6UYBKAWKJP5T4GEQ still pending as of round 11 +# Transaction U3SWNVPUODOUHHUM3W3QP3DTQPO6GLYVOB2A6UYBKAWKJP5T4GEQ still pending as of round 12 +# Transaction U3SWNVPUODOUHHUM3W3QP3DTQPO6GLYVOB2A6UYBKAWKJP5T4GEQ committed in round 13 +# Participation key installed successfully, Participation ID: NTJG7MGXZ5SCLIJXW7T2VZGLTVY47QBFTQSOC7JMIF7LWXVMQOPQ +# Generated with goal v3.26.226519 NEW_ACCOUNT_2=$(create_and_fund_account) -PARTICIPATION_ID_2=$(goal account addpartkey -a "$NEW_ACCOUNT_2" --roundFirstValid 0 --roundLastValid 3000|awk '{ print $7 }') +OUTPUT=$(${gcmd} account addpartkey -a "$NEW_ACCOUNT_2" --roundFirstValid 0 --roundLastValid 3000) +PARTICIPATION_ID_2=$(echo "$OUTPUT" | grep "Participation ID" | awk '{ print $7 }') verify_registered_state "no" "$PARTICIPATION_ID_2" "goal account addpartkey" # goal account renewpartkeys # generate, install, and register NEW_ACCOUNT_3=$(create_and_fund_account) -PARTICIPATION_ID_3=$(${gcmd} account renewpartkey --roundLastValid 3000 -a "$NEW_ACCOUNT_3"|tail -n 1|awk '{ print $7 }') +OUTPUT=$(${gcmd} account renewpartkey --roundLastValid 3000 -a "$NEW_ACCOUNT_3") +PARTICIPATION_ID_3=$(echo "$OUTPUT" | grep "Participation ID" | awk '{ print $7 }') verify_registered_state "yes" "$PARTICIPATION_ID_3" "goal account renewpartkey" # goal account changeonlinstatus (--account) From c6a433b5035377246340710a91b295553bb6ac1f Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 7 Aug 2024 16:52:38 -0400 Subject: [PATCH 54/82] p2p: do not advertise private and non-routable addresses (#6092) --- network/hybridNetwork_test.go | 2 +- network/p2p/p2p.go | 94 +++++++++++++++++++- network/p2p/p2p_test.go | 160 ++++++++++++++++++++++++++++++++++ 3 files changed, 254 insertions(+), 2 deletions(-) diff --git a/network/hybridNetwork_test.go b/network/hybridNetwork_test.go index 7c76c1e38e..842bb10b15 100644 --- a/network/hybridNetwork_test.go +++ b/network/hybridNetwork_test.go @@ -64,7 +64,7 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { // make it net address and restart the node relayCfg.NetAddress = addr relayCfg.PublicAddress = addr - relayCfg.P2PNetAddress = ":0" + relayCfg.P2PNetAddress = "127.0.0.1:0" netA, err = NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisID, "net", &nopeNodeInfo{}) require.NoError(t, err) diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go 
index 21782dce44..e908f148d8 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -20,6 +20,7 @@ import ( "context" "encoding/base32" "fmt" + "net" "runtime" "strings" "time" @@ -43,6 +44,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/libp2p/go-libp2p/p2p/transport/tcp" "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" ) // SubNextCancellable is an abstraction for pubsub.Subscription @@ -108,18 +110,35 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. ua := fmt.Sprintf("algod/%d.%d (%s; commit=%s; %d) %s(%s)", version.Major, version.Minor, version.Channel, version.CommitHash, version.BuildNumber, runtime.GOOS, runtime.GOARCH) var listenAddr string + var needAddressFilter bool if cfg.NetAddress != "" { if parsedListenAddr, perr := netAddressToListenAddress(cfg.NetAddress); perr == nil { listenAddr = parsedListenAddr + + // check if the listen address is a specific address or a "all interfaces" address (0.0.0.0 or ::) + // in this case enable the address filter. + // this also means the address filter is not enabled for NetAddress set to + // a specific address including loopback and private addresses. + if manet.IsIPUnspecified(multiaddr.StringCast(listenAddr)) { + needAddressFilter = true + } + } else { + logging.Base().Warnf("failed to parse NetAddress %s: %v", cfg.NetAddress, perr) } } else { - // don't listen if NetAddress is not set. + logging.Base().Debug("p2p NetAddress is not set, not listening") listenAddr = "" } var enableMetrics = func(cfg *libp2p.Config) error { cfg.DisableMetrics = false; return nil } metrics.DefaultRegistry().Register(&metrics.PrometheusDefaultMetrics) + var addrFactory func(addrs []multiaddr.Multiaddr) []multiaddr.Multiaddr + if needAddressFilter { + logging.Base().Debug("private addresses filter is enabled") + addrFactory = addressFilter + } + rm, err := configureResourceManager(cfg) if err != nil { return nil, "", err @@ -135,6 +154,7 @@ func MakeHost(cfg config.Local, datadir string, pstore *pstore.PeerStore) (host. libp2p.Security(noise.ID, noise.New), enableMetrics, libp2p.ResourceManager(rm), + libp2p.AddrsFactory(addrFactory), ) return host, listenAddr, err } @@ -321,3 +341,75 @@ func formatPeerTelemetryInfoProtocolName(telemetryID string, telemetryInstance s base32.StdEncoding.EncodeToString([]byte(telemetryInstance)), ) } + +var private6 = parseCIDR([]string{ + "100::/64", + "2001:2::/48", + "2001:db8::/32", // multiaddr v0.13 has it +}) + +// parseCIDR converts string CIDRs to net.IPNet. +// function panics on errors so that it is only called during initialization. 
+func parseCIDR(cidrs []string) []*net.IPNet { + result := make([]*net.IPNet, 0, len(cidrs)) + var ipnet *net.IPNet + var err error + for _, cidr := range cidrs { + if _, ipnet, err = net.ParseCIDR(cidr); err != nil { + panic(err) + } + result = append(result, ipnet) + } + return result +} + +// addressFilter filters out private and unroutable addresses +func addressFilter(addrs []multiaddr.Multiaddr) []multiaddr.Multiaddr { + if logging.Base().IsLevelEnabled(logging.Debug) { + var b strings.Builder + for _, addr := range addrs { + b.WriteRune(' ') + b.WriteString(addr.String()) + b.WriteRune(' ') + } + logging.Base().Debugf("addressFilter input: %s", b.String()) + } + + res := make([]multiaddr.Multiaddr, 0, len(addrs)) + for _, addr := range addrs { + if manet.IsPublicAddr(addr) { + if _, err := addr.ValueForProtocol(multiaddr.P_IP4); err == nil { + // no rules for IPv4 at the moment, accept + res = append(res, addr) + continue + } + + isPrivate := false + a, err := addr.ValueForProtocol(multiaddr.P_IP6) + if err != nil { + logging.Base().Warnf("failed to get IPv6 addr from %s: %v", addr, err) + continue + } + addrIP := net.ParseIP(a) + for _, ipnet := range private6 { + if ipnet.Contains(addrIP) { + isPrivate = true + break + } + } + if !isPrivate { + res = append(res, addr) + } + } + } + if logging.Base().IsLevelEnabled(logging.Debug) { + var b strings.Builder + for _, addr := range res { + b.WriteRune(' ') + b.WriteString(addr.String()) + b.WriteRune(' ') + } + logging.Base().Debugf("addressFilter output: %s", b.String()) + } + return res +} diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index dab6aa5456..2da5782afc 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -19,14 +19,19 @@ package p2p import ( "context" "fmt" + "net" "testing" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/network/p2p/peerstore" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -169,3 +174,158 @@ func TestP2PProtocolAsMeta(t *testing.T) { require.Equal(t, h1TID, tid) require.Equal(t, h1Inst, inst) } + +func TestP2PPrivateAddresses(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + privAddrList := []string{ + "/ip4/10.0.0.0/ipcidr/8", + "/ip4/100.64.0.0/ipcidr/10", + "/ip4/169.254.0.0/ipcidr/16", + "/ip4/172.16.0.0/ipcidr/12", + "/ip4/192.0.0.0/ipcidr/24", + "/ip4/192.0.2.0/ipcidr/24", + "/ip4/192.88.99.0/ipcidr/24", + "/ip4/192.168.0.0/ipcidr/16", + "/ip4/198.18.0.0/ipcidr/15", + "/ip4/198.51.100.0/ipcidr/24", + "/ip4/203.0.113.0/ipcidr/24", + "/ip4/224.0.0.0/ipcidr/4", + "/ip4/224.0.0.0/ipcidr/4", + "/ip4/233.252.0.0/ipcidr/4", + "/ip4/255.255.255.255/ipcidr/32", + "/ip6/fc00::/ipcidr/7", + "/ip6/fe80::/ipcidr/10", + } + + // these are handled by addrFilter explicitly as a custom filter + extra := []string{ + "/ip6/100::/ipcidr/64", + "/ip6/2001:2::/ipcidr/48", + "/ip6/2001:db8::/ipcidr/32", // multiaddr v0.13 has it + } + + for _, addr := range privAddrList { + ma := multiaddr.StringCast(addr) + require.False(t, manet.IsPublicAddr(ma), "public check failed on %s", addr) + require.Empty(t, addressFilter([]multiaddr.Multiaddr{ma}), "addrFilter failed on %s", addr) + } + + for _, addr := range extra 
{ + ma := multiaddr.StringCast(addr) + require.Empty(t, addressFilter([]multiaddr.Multiaddr{ma}), "addrFilter failed on %s", addr) + } + + // ensure addrFilter allows normal addresses + valid := []string{ + "/ip4/3.4.5.6/tcp/1234", + "/ip6/200:11::/tcp/1234", + } + + for _, addr := range valid { + ma := multiaddr.StringCast(addr) + require.Equal(t, []multiaddr.Multiaddr{ma}, addressFilter([]multiaddr.Multiaddr{ma}), "addrFilter failed on %s", addr) + } +} + +func TestP2PMaNetIsIPUnspecified(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + unspecified := []string{ + ":0", + ":1234", + "0.0.0.0:2345", + "0.0.0.0:0", + } + for _, addr := range unspecified { + parsed, err := netAddressToListenAddress(addr) + require.NoError(t, err) + require.True(t, manet.IsIPUnspecified(multiaddr.StringCast(parsed)), "expected %s to be unspecified", addr) + } + + specified := []string{ + "127.0.0.1:0", + "127.0.0.1:1234", + "1.2.3.4:5678", + "1.2.3.4:0", + "192.168.0.111:0", + "10.0.0.1:101", + } + for _, addr := range specified { + parsed, err := netAddressToListenAddress(addr) + require.NoError(t, err) + require.False(t, manet.IsIPUnspecified(multiaddr.StringCast(parsed)), "expected %s to be specified", addr) + } + + // also make sure IsIPUnspecified supports IPv6 + unspecified6 := []string{ + "/ip6/::/tcp/1234", + } + for _, addr := range unspecified6 { + require.True(t, manet.IsIPUnspecified(multiaddr.StringCast(addr)), "expected %s to be unspecified", addr) + } +} + +// TestP2PMakeHostAddressFilter ensures that the host address filter is enabled only when the +// NetAddress is set to "all interfaces" value (0.0.0.0:P or :P) +func TestP2PMakeHostAddressFilter(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + td := t.TempDir() + pstore, err := peerstore.NewPeerStore(nil, "test") + require.NoError(t, err) + + // check "all interfaces" addr + for _, addr := range []string{":0", "0.0.0.0:0"} { + cfg := config.GetDefaultLocal() + cfg.NetAddress = addr + host, la, err := MakeHost(cfg, td, pstore) + require.NoError(t, err) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", la) + require.Empty(t, host.Addrs()) + + mala, err := multiaddr.NewMultiaddr(la) + require.NoError(t, err) + host.Network().Listen(mala) + require.Empty(t, host.Addrs()) + host.Close() + } + + // check specific addresses IPv4 retrieved from the system + addresses := []string{} + ifaces, err := net.Interfaces() + require.NoError(t, err) + for _, i := range ifaces { + addrs, err := i.Addrs() + require.NoError(t, err) + for _, a := range addrs { + switch v := a.(type) { + case *net.IPAddr: + if v.IP.To4() != nil { + addresses = append(addresses, v.IP.String()) + } + case *net.IPNet: + if v.IP.To4() != nil { + addresses = append(addresses, v.IP.String()) + } + } + } + } + for _, addr := range addresses { + cfg := config.GetDefaultLocal() + cfg.NetAddress = addr + ":0" + host, la, err := MakeHost(cfg, td, pstore) + require.NoError(t, err) + require.Equal(t, "/ip4/"+addr+"/tcp/0", la) + require.Empty(t, host.Addrs()) + mala, err := multiaddr.NewMultiaddr(la) + require.NoError(t, err) + err = host.Network().Listen(mala) + require.NoError(t, err) + require.NotEmpty(t, host.Addrs()) + host.Close() + } +} From 602d9507643a42891d724682dcadc9630b7ef8e6 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 8 Aug 2024 13:30:10 -0400 Subject: [PATCH 55/82] p2p: handle txns in pubsub validator (#6070) Co-authored-by: cce <51567+cce@users.noreply.github.com> Co-authored-by: Jason 
Paulos
---
 components/mocks/mockNetwork.go            |   4 +-
 data/transactions/verify/txnBatch.go       |  54 +++++--
 data/transactions/verify/txnBatch_test.go  |  48 +++++-
 data/txHandler.go                          | 160 +++++++++++-------
 data/txHandler_test.go                     | 178 +++++++++++++++------
 network/gossipNode.go                      |  45 ++----
 network/hybridNetwork.go                   |   8 +-
 network/multiplexer.go                     |  42 ++---
 network/p2p/pubsub.go                      |   1 +
 network/p2p/pubsubTracer.go                |  98 ++++++++++++
 network/p2pNetwork.go                      |  20 +--
 network/p2pNetwork_test.go                 |  54 +++----
 network/wsNetwork.go                       |   6 +-
 test/e2e-go/features/p2p/p2p_basic_test.go |  79 ++++++++-
 util/metrics/metrics.go                    |  11 ++
 15 files changed, 558 insertions(+), 250 deletions(-)
 create mode 100644 network/p2p/pubsubTracer.go

diff --git a/components/mocks/mockNetwork.go b/components/mocks/mockNetwork.go
index 4f145b1841..3a1795f057 100644
--- a/components/mocks/mockNetwork.go
+++ b/components/mocks/mockNetwork.go
@@ -91,8 +91,8 @@ func (network *MockNetwork) RegisterHandlers(dispatch []network.TaggedMessageHan
 func (network *MockNetwork) ClearHandlers() {
 }
 
-// RegisterProcessors - empty implementation.
-func (network *MockNetwork) RegisterProcessors(dispatch []network.TaggedMessageProcessor) {
+// RegisterValidatorHandlers - empty implementation.
+func (network *MockNetwork) RegisterValidatorHandlers(dispatch []network.TaggedMessageValidatorHandler) {
 }
 
 // ClearProcessors - empty implementation
diff --git a/data/transactions/verify/txnBatch.go b/data/transactions/verify/txnBatch.go
index 8619208da8..e2bd95d4ea 100644
--- a/data/transactions/verify/txnBatch.go
+++ b/data/transactions/verify/txnBatch.go
@@ -17,7 +17,7 @@
 package verify
 
 import (
-	"errors"
+	"fmt"
 	"sync/atomic"
 
 	"github.com/algorand/go-algorand/crypto"
@@ -98,10 +98,16 @@ func (bl *batchLoad) addLoad(txngrp []transactions.SignedTxn, gctx *GroupContext
 }
 
+// TxnGroupBatchSigVerifier provides Verify method to synchronously verify a group of transactions
+// It starts a new block listener to receive latest block headers for the sig verification
+type TxnGroupBatchSigVerifier struct {
+	cache  VerifiedTransactionCache
+	nbw    *NewBlockWatcher
+	ledger logic.LedgerForSignature
+}
+
 type txnSigBatchProcessor struct {
-	cache       VerifiedTransactionCache
-	nbw         *NewBlockWatcher
-	ledger      logic.LedgerForSignature
+	TxnGroupBatchSigVerifier
 	resultChan  chan<- *VerificationResult
 	droppedChan chan<- *UnverifiedTxnSigJob
 }
 
@@ -142,27 +148,49 @@ func (tbp txnSigBatchProcessor) sendResult(veTxnGroup []transactions.SignedTxn,
 	}
 }
 
-// MakeSigVerifyJobProcessor returns the object implementing the stream verifier Helper interface
-func MakeSigVerifyJobProcessor(ledger LedgerForStreamVerifier, cache VerifiedTransactionCache,
-	resultChan chan<- *VerificationResult, droppedChan chan<- *UnverifiedTxnSigJob) (svp execpool.BatchProcessor, err error) {
+// MakeSigVerifier creates a new TxnGroupBatchSigVerifier for synchronous verification of transactions
+func MakeSigVerifier(ledger LedgerForStreamVerifier, cache VerifiedTransactionCache) (TxnGroupBatchSigVerifier, error) {
 	latest := ledger.Latest()
 	latestHdr, err := ledger.BlockHdr(latest)
 	if err != nil {
-		return nil, errors.New("MakeStreamVerifier: Could not get header for previous block")
+		return TxnGroupBatchSigVerifier{}, fmt.Errorf("MakeSigVerifier: Could not get header for previous block: %w", err)
 	}
 	nbw := MakeNewBlockWatcher(latestHdr)
 	ledger.RegisterBlockListeners([]ledgercore.BlockListener{nbw})
+	verifier := TxnGroupBatchSigVerifier{
+		cache:  cache,
+		nbw:    nbw,
+		ledger: ledger,
+	}
+
+	return verifier, nil
+}
+
+// MakeSigVerifyJobProcessor returns the object implementing the stream verifier Helper interface +func MakeSigVerifyJobProcessor( + ledger LedgerForStreamVerifier, cache VerifiedTransactionCache, + resultChan chan<- *VerificationResult, droppedChan chan<- *UnverifiedTxnSigJob, +) (svp execpool.BatchProcessor, err error) { + sigVerifier, err := MakeSigVerifier(ledger, cache) + if err != nil { + return nil, err + } return &txnSigBatchProcessor{ - cache: cache, - nbw: nbw, - ledger: ledger, - droppedChan: droppedChan, - resultChan: resultChan, + TxnGroupBatchSigVerifier: sigVerifier, + droppedChan: droppedChan, + resultChan: resultChan, }, nil } +// Verify synchronously verifies the signatures of the transactions in the group +func (sv *TxnGroupBatchSigVerifier) Verify(stxs []transactions.SignedTxn) error { + blockHeader := sv.nbw.getBlockHeader() + _, err := txnGroup(stxs, blockHeader, sv.cache, sv.ledger, nil) + return err +} + func (tbp *txnSigBatchProcessor) ProcessBatch(txns []execpool.InputJob) { batchVerifier, ctx := tbp.preProcessUnverifiedTxns(txns) failed, err := batchVerifier.VerifyWithFeedback() diff --git a/data/transactions/verify/txnBatch_test.go b/data/transactions/verify/txnBatch_test.go index 45693f58a1..27dcc56343 100644 --- a/data/transactions/verify/txnBatch_test.go +++ b/data/transactions/verify/txnBatch_test.go @@ -139,9 +139,7 @@ func verifyResults(txnGroups [][]transactions.SignedTxn, badTxnGroups map[uint64 require.GreaterOrEqual(t, len(unverifiedGroups), badSigResultCounter) for _, txn := range unverifiedGroups { u, _ := binary.Uvarint(txn[0].Txn.Note) - if _, has := badTxnGroups[u]; has { - delete(badTxnGroups, u) - } + delete(badTxnGroups, u) } require.Empty(t, badTxnGroups, "unverifiedGroups should have all the transactions with invalid sigs") } @@ -301,6 +299,7 @@ func TestGetNumberOfBatchableSigsInGroup(t *testing.T) { txnGroups[mod][0].Sig = crypto.Signature{} batchSigs, err := UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.ErrorIs(t, err, errTxnSigHasNoSig) + require.Equal(t, uint64(0), batchSigs) mod++ _, signedTxns, secrets, addrs := generateTestObjects(numOfTxns, 20, 0, 50) @@ -353,6 +352,7 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= txnGroups[mod][0].Msig = mSigTxn[0].Msig batchSigs, err = UnverifiedTxnSigJob{TxnGroup: txnGroups[mod]}.GetNumberOfBatchableItems() require.ErrorIs(t, err, errTxnSigNotWellFormed) + require.Equal(t, uint64(0), batchSigs) } // TestStreamToBatchPoolShutdown tests what happens when the exec pool shuts down @@ -437,10 +437,11 @@ func TestStreamToBatchPoolShutdown(t *testing.T) { //nolint:paralleltest // Not // send txn groups to be verified go func() { defer wg.Done() + outer: for _, tg := range txnGroups { select { case <-ctx.Done(): - break + break outer case inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil}: } } @@ -493,6 +494,7 @@ func TestStreamToBatchRestart(t *testing.T) { // send txn groups to be verified go func() { defer wg.Done() + outer: for i, tg := range txnGroups { if (i+1)%10 == 0 { cancel() @@ -502,7 +504,7 @@ func TestStreamToBatchRestart(t *testing.T) { } select { case <-ctx2.Done(): - break + break outer case inputChan <- &UnverifiedTxnSigJob{TxnGroup: tg, BacklogMessage: nil}: } } @@ -798,7 +800,10 @@ func TestStreamToBatchPostVBlocked(t *testing.T) { func TestStreamToBatchMakeStreamToBatchErr(t *testing.T) { partitiontest.PartitionTest(t) - _, err := MakeSigVerifyJobProcessor(&DummyLedgerForSignature{badHdr: true}, nil, nil, nil) + _, err 
:= MakeSigVerifier(&DummyLedgerForSignature{badHdr: true}, nil) + require.Error(t, err) + + _, err = MakeSigVerifyJobProcessor(&DummyLedgerForSignature{badHdr: true}, nil, nil, nil) require.Error(t, err) } @@ -863,7 +868,7 @@ func TestGetErredUnprocessed(t *testing.T) { droppedChan := make(chan *UnverifiedTxnSigJob, 1) svh := txnSigBatchProcessor{ - resultChan: make(chan<- *VerificationResult, 0), + resultChan: make(chan<- *VerificationResult), droppedChan: droppedChan, } @@ -871,3 +876,32 @@ func TestGetErredUnprocessed(t *testing.T) { dropped := <-droppedChan require.Equal(t, *dropped, UnverifiedTxnSigJob{}) } + +func TestSigVerifier(t *testing.T) { + partitiontest.PartitionTest(t) + + numOfTxns := 16 + txnGroups, badTxnGroups := getSignedTransactions(numOfTxns, numOfTxns, 0, 0) + require.GreaterOrEqual(t, len(txnGroups), 1) + require.Equal(t, len(badTxnGroups), 0) + txnGroup := txnGroups[0] + + verificationPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, t) + defer verificationPool.Shutdown() + + cache := MakeVerifiedTransactionCache(50000) + + verifier, err := MakeSigVerifier(&DummyLedgerForSignature{}, cache) + require.NoError(t, err) + + err = verifier.Verify(txnGroup) + require.NoError(t, err) + + txnGroups, badTxnGroups = getSignedTransactions(numOfTxns, numOfTxns, 0, 1) + require.GreaterOrEqual(t, len(txnGroups), 1) + require.Greater(t, len(badTxnGroups), 0) + txnGroup = txnGroups[0] + + err = verifier.Verify(txnGroup) + require.Error(t, err) +} diff --git a/data/txHandler.go b/data/txHandler.go index eae9586c47..7ee5764137 100644 --- a/data/txHandler.go +++ b/data/txHandler.go @@ -132,6 +132,9 @@ type TxHandler struct { erl *util.ElasticRateLimiter appLimiter *appRateLimiter appLimiterBacklogThreshold int + + // batchVerifier provides synchronous verification of transaction groups, used only by pubsub validation in validateIncomingTxMessage. 
+ batchVerifier verify.TxnGroupBatchSigVerifier } // TxHandlerOpts is TxHandler configuration options @@ -209,6 +212,13 @@ func MakeTxHandler(opts TxHandlerOpts) (*TxHandler, error) { } } + // prepare the batch processor for pubsub synchronous verification + var err0 error + handler.batchVerifier, err0 = verify.MakeSigVerifier(handler.ledger, handler.ledger.VerifiedTransactionCache()) + if err0 != nil { + return nil, err0 + } + // prepare the transaction stream verifier var err error txnElementProcessor, err := verify.MakeSigVerifyJobProcessor(handler.ledger, handler.ledger.VerifiedTransactionCache(), @@ -246,16 +256,14 @@ func (handler *TxHandler) Start() { }) // libp2p pubsub validator and handler abstracted as TaggedMessageProcessor - handler.net.RegisterProcessors([]network.TaggedMessageProcessor{ + handler.net.RegisterValidatorHandlers([]network.TaggedMessageValidatorHandler{ { Tag: protocol.TxnTag, // create anonymous struct to hold the two functions and satisfy the network.MessageProcessor interface MessageHandler: struct { - network.ProcessorValidateFunc - network.ProcessorHandleFunc + network.ValidateHandleFunc }{ - network.ProcessorValidateFunc(handler.validateIncomingTxMessage), - network.ProcessorHandleFunc(handler.processIncomingTxMessage), + network.ValidateHandleFunc(handler.validateIncomingTxMessage), }, }, }) @@ -348,7 +356,7 @@ func (handler *TxHandler) backlogWorker() { } continue } - // handler.streamVerifierChan does not receive if ctx is cancled + // handler.streamVerifierChan does not receive if ctx is cancelled select { case handler.streamVerifierChan <- &verify.UnverifiedTxnSigJob{TxnGroup: wi.unverifiedTxGroup, BacklogMessage: wi}: case <-handler.ctx.Done(): @@ -550,7 +558,7 @@ func (handler *TxHandler) deleteFromCaches(msgKey *crypto.Digest, canonicalKey * // dedupCanonical checks if the transaction group has been seen before after reencoding to canonical representation. // returns a key used for insertion if the group was not found. -func (handler *TxHandler) dedupCanonical(unverifiedTxGroup []transactions.SignedTxn, consumed int) (key *crypto.Digest, isDup bool) { +func (handler *TxHandler) dedupCanonical(unverifiedTxGroup []transactions.SignedTxn, consumed int) (key *crypto.Digest, reencoded []byte, isDup bool) { // consider situations where someone want to censor transactions A // 1. 
Txn A is not part of a group => txn A with a valid signature is OK // Censorship attempts are: @@ -567,14 +575,16 @@ func (handler *TxHandler) dedupCanonical(unverifiedTxGroup []transactions.Signed // - using individual txn from a group: {A, Z} could be poisoned by {A, B}, where B is invalid var d crypto.Digest + var reencodedBuf []byte ntx := len(unverifiedTxGroup) if ntx == 1 { // a single transaction => cache/dedup canonical txn with its signature enc := unverifiedTxGroup[0].MarshalMsg(nil) d = crypto.Hash(enc) if handler.txCanonicalCache.CheckAndPut(&d) { - return nil, true + return nil, nil, true } + reencodedBuf = enc } else { // a transaction group => cache/dedup the entire group canonical group encodeBuf := make([]byte, 0, unverifiedTxGroup[0].Msgsize()*ntx) @@ -585,14 +595,15 @@ func (handler *TxHandler) dedupCanonical(unverifiedTxGroup []transactions.Signed // reallocated, some assumption on size was wrong // log and skip logging.Base().Warnf("Decoded size %d does not match to encoded %d", consumed, len(encodeBuf)) - return nil, false + return nil, nil, false } d = crypto.Hash(encodeBuf) if handler.txCanonicalCache.CheckAndPut(&d) { - return nil, true + return nil, nil, true } + reencodedBuf = encodeBuf } - return &d, false + return &d, reencodedBuf, false } // incomingMsgDupCheck runs the duplicate check on a raw incoming message. @@ -687,28 +698,32 @@ func decodeMsg(data []byte) (unverifiedTxGroup []transactions.SignedTxn, consume return unverifiedTxGroup, consumed, false } -// incomingTxGroupDupRateLimit checks -// - if the incoming transaction group has been seen before after reencoding to canonical representation, and -// - if the sender is rate limited by the per-application rate limiter. -func (handler *TxHandler) incomingTxGroupDupRateLimit(unverifiedTxGroup []transactions.SignedTxn, encodedExpectedSize int, sender network.DisconnectablePeer) (*crypto.Digest, bool) { +// incomingTxGroupCanonicalDedup checks if the incoming transaction group has been seen before after reencoding to canonical representation. +// It also return canonical representation of the transaction group allowing the caller to compare it with the input. +func (handler *TxHandler) incomingTxGroupCanonicalDedup(unverifiedTxGroup []transactions.SignedTxn, encodedExpectedSize int) (*crypto.Digest, []byte, bool) { var canonicalKey *crypto.Digest + var reencoded []byte if handler.txCanonicalCache != nil { var isDup bool - if canonicalKey, isDup = handler.dedupCanonical(unverifiedTxGroup, encodedExpectedSize); isDup { + if canonicalKey, reencoded, isDup = handler.dedupCanonical(unverifiedTxGroup, encodedExpectedSize); isDup { transactionMessagesDupCanonical.Inc(nil) - return canonicalKey, true + return nil, nil, true } } + return canonicalKey, reencoded, false +} +// incomingTxGroupAppRateLimit checks if the sender is rate limited by the per-application rate limiter. +func (handler *TxHandler) incomingTxGroupAppRateLimit(unverifiedTxGroup []transactions.SignedTxn, sender network.DisconnectablePeer) bool { // rate limit per application in a group. Limiting any app in a group drops the entire message. 
if handler.appLimiter != nil { congestedARL := len(handler.backlogQueue) > handler.appLimiterBacklogThreshold if congestedARL && handler.appLimiter.shouldDrop(unverifiedTxGroup, sender.(network.IPAddressable).RoutingAddr()) { transactionMessagesAppLimiterDrop.Inc(nil) - return canonicalKey, true + return true } } - return canonicalKey, false + return false } // processIncomingTxn decodes a transaction group from incoming message and enqueues into the back log for processing. @@ -744,13 +759,17 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net return network.OutgoingMessage{Action: network.Disconnect} } - canonicalKey, drop := handler.incomingTxGroupDupRateLimit(unverifiedTxGroup, consumed, rawmsg.Sender) + canonicalKey, _, drop := handler.incomingTxGroupCanonicalDedup(unverifiedTxGroup, consumed) if drop { // this re-serialized txgroup was detected as a duplicate by the canonical message cache, // or it was rate-limited by the per-app rate limiter return network.OutgoingMessage{Action: network.Ignore} } + if handler.incomingTxGroupAppRateLimit(unverifiedTxGroup, rawmsg.Sender) { + return network.OutgoingMessage{Action: network.Ignore} + } + select { case handler.backlogQueue <- &txBacklogMsg{ rawmsg: &rawmsg, @@ -772,65 +791,84 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net return network.OutgoingMessage{Action: network.Ignore} } -type validatedIncomingTxMessage struct { - rawmsg network.IncomingMessage - unverifiedTxGroup []transactions.SignedTxn - msgKey *crypto.Digest - canonicalKey *crypto.Digest -} - // validateIncomingTxMessage is the validator for the MessageProcessor implementation used by P2PNetwork. -func (handler *TxHandler) validateIncomingTxMessage(rawmsg network.IncomingMessage) network.ValidatedMessage { +func (handler *TxHandler) validateIncomingTxMessage(rawmsg network.IncomingMessage) network.OutgoingMessage { msgKey, isDup := handler.incomingMsgDupCheck(rawmsg.Data) if isDup { - return network.ValidatedMessage{Action: network.Ignore, ValidatedMessage: nil} + return network.OutgoingMessage{Action: network.Ignore} } unverifiedTxGroup, consumed, invalid := decodeMsg(rawmsg.Data) if invalid { // invalid encoding or exceeding txgroup, disconnect from this peer - return network.ValidatedMessage{Action: network.Disconnect, ValidatedMessage: nil} + return network.OutgoingMessage{Action: network.Disconnect} } - canonicalKey, drop := handler.incomingTxGroupDupRateLimit(unverifiedTxGroup, consumed, rawmsg.Sender) + canonicalKey, reencoded, drop := handler.incomingTxGroupCanonicalDedup(unverifiedTxGroup, consumed) if drop { - // this re-serialized txgroup was detected as a duplicate by the canonical message cache, - // or it was rate-limited by the per-app rate limiter - return network.ValidatedMessage{Action: network.Ignore, ValidatedMessage: nil} + return network.OutgoingMessage{Action: network.Ignore} } - return network.ValidatedMessage{ - Action: network.Accept, - Tag: rawmsg.Tag, - ValidatedMessage: &validatedIncomingTxMessage{ - rawmsg: rawmsg, - unverifiedTxGroup: unverifiedTxGroup, - msgKey: msgKey, - canonicalKey: canonicalKey, - }, + if handler.incomingTxGroupAppRateLimit(unverifiedTxGroup, rawmsg.Sender) { + return network.OutgoingMessage{Action: network.Ignore} } -} -// processIncomingTxMessage is the handler for the MessageProcessor implementation used by P2PNetwork. 
-func (handler *TxHandler) processIncomingTxMessage(validatedMessage network.ValidatedMessage) network.OutgoingMessage { - msg := validatedMessage.ValidatedMessage.(*validatedIncomingTxMessage) - select { - case handler.backlogQueue <- &txBacklogMsg{ - rawmsg: &msg.rawmsg, - unverifiedTxGroup: msg.unverifiedTxGroup, - rawmsgDataHash: msg.msgKey, - unverifiedTxGroupHash: msg.canonicalKey, + if reencoded == nil { + reencoded = reencode(unverifiedTxGroup) + } + + if !bytes.Equal(rawmsg.Data, reencoded) { + // reject non-canonically encoded messages + return network.OutgoingMessage{Action: network.Disconnect} + } + + // apply backlog worker logic + + wi := &txBacklogMsg{ + rawmsg: &rawmsg, + unverifiedTxGroup: unverifiedTxGroup, + rawmsgDataHash: msgKey, + unverifiedTxGroupHash: canonicalKey, capguard: nil, - }: - default: - // if we failed here we want to increase the corresponding metric. It might suggest that we - // want to increase the queue size. - transactionMessagesDroppedFromBacklog.Inc(nil) + } - // additionally, remove the txn from duplicate caches to ensure it can be re-submitted - handler.deleteFromCaches(msg.msgKey, msg.canonicalKey) + if handler.checkAlreadyCommitted(wi) { + transactionMessagesAlreadyCommitted.Inc(nil) + return network.OutgoingMessage{ + Action: network.Ignore, + } + } + + err := handler.batchVerifier.Verify(wi.unverifiedTxGroup) + if err != nil { + handler.postProcessReportErrors(err) + logging.Base().Warnf("Received a malformed tx group %v: %v", wi.unverifiedTxGroup, err) + return network.OutgoingMessage{ + Action: network.Disconnect, + } + } + verifiedTxGroup := wi.unverifiedTxGroup + + // save the transaction, if it has high enough fee and not already in the cache + err = handler.txPool.Remember(verifiedTxGroup) + if err != nil { + handler.rememberReportErrors(err) + logging.Base().Debugf("could not remember tx: %v", err) + return network.OutgoingMessage{ + Action: network.Ignore, + } + } + + transactionMessagesRemember.Inc(nil) + + // if we remembered without any error ( i.e. txpool wasn't full ), then we should pin these transactions. + err = handler.ledger.VerifiedTransactionCache().Pin(verifiedTxGroup) + if err != nil { + logging.Base().Infof("unable to pin transaction: %v", err) + } + return network.OutgoingMessage{ + Action: network.Accept, } - return network.OutgoingMessage{Action: network.Ignore} } var errBackLogFullLocal = errors.New("backlog full") diff --git a/data/txHandler_test.go b/data/txHandler_test.go index 23235f15fd..24e09b2963 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -646,42 +646,42 @@ func TestTxHandlerProcessIncomingGroup(t *testing.T) { } } +func craftNonCanonical(t *testing.T, stxn *transactions.SignedTxn, blobStxn []byte) []byte { + // make non-canonical encoding and ensure it is not accepted + stxnNonCanTxn := transactions.SignedTxn{Txn: stxn.Txn} + blobTxn := protocol.Encode(&stxnNonCanTxn) + stxnNonCanAuthAddr := transactions.SignedTxn{AuthAddr: stxn.AuthAddr} + blobAuthAddr := protocol.Encode(&stxnNonCanAuthAddr) + stxnNonCanAuthSig := transactions.SignedTxn{Sig: stxn.Sig} + blobSig := protocol.Encode(&stxnNonCanAuthSig) + + if blobStxn == nil { + blobStxn = protocol.Encode(stxn) + } + + // double check our skills for transactions.SignedTxn creation by creating a new canonical encoding and comparing to the original + blobValidation := make([]byte, 0, len(blobTxn)+len(blobAuthAddr)+len(blobSig)) + blobValidation = append(blobValidation[:], blobAuthAddr...) 
+ blobValidation = append(blobValidation[:], blobSig[1:]...) // cut transactions.SignedTxn's field count + blobValidation = append(blobValidation[:], blobTxn[1:]...) // cut transactions.SignedTxn's field count + blobValidation[0] += 2 // increase field count + require.Equal(t, blobStxn, blobValidation) + + // craft non-canonical + blobNonCan := make([]byte, 0, len(blobTxn)+len(blobAuthAddr)+len(blobSig)) + blobNonCan = append(blobNonCan[:], blobTxn...) + blobNonCan = append(blobNonCan[:], blobAuthAddr[1:]...) // cut transactions.SignedTxn's field count + blobNonCan = append(blobNonCan[:], blobSig[1:]...) // cut transactions.SignedTxn's field count + blobNonCan[0] += 2 // increase field count + require.Len(t, blobNonCan, len(blobStxn)) + require.NotEqual(t, blobStxn, blobNonCan) + return blobNonCan +} + func TestTxHandlerProcessIncomingCensoring(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() - craftNonCanonical := func(t *testing.T, stxn *transactions.SignedTxn, blobStxn []byte) []byte { - // make non-canonical encoding and ensure it is not accepted - stxnNonCanTxn := transactions.SignedTxn{Txn: stxn.Txn} - blobTxn := protocol.Encode(&stxnNonCanTxn) - stxnNonCanAuthAddr := transactions.SignedTxn{AuthAddr: stxn.AuthAddr} - blobAuthAddr := protocol.Encode(&stxnNonCanAuthAddr) - stxnNonCanAuthSig := transactions.SignedTxn{Sig: stxn.Sig} - blobSig := protocol.Encode(&stxnNonCanAuthSig) - - if blobStxn == nil { - blobStxn = protocol.Encode(stxn) - } - - // double check our skills for transactions.SignedTxn creation by creating a new canonical encoding and comparing to the original - blobValidation := make([]byte, 0, len(blobTxn)+len(blobAuthAddr)+len(blobSig)) - blobValidation = append(blobValidation[:], blobAuthAddr...) - blobValidation = append(blobValidation[:], blobSig[1:]...) // cut transactions.SignedTxn's field count - blobValidation = append(blobValidation[:], blobTxn[1:]...) // cut transactions.SignedTxn's field count - blobValidation[0] += 2 // increase field count - require.Equal(t, blobStxn, blobValidation) - - // craft non-canonical - blobNonCan := make([]byte, 0, len(blobTxn)+len(blobAuthAddr)+len(blobSig)) - blobNonCan = append(blobNonCan[:], blobTxn...) - blobNonCan = append(blobNonCan[:], blobAuthAddr[1:]...) // cut transactions.SignedTxn's field count - blobNonCan = append(blobNonCan[:], blobSig[1:]...) 
// cut transactions.SignedTxn's field count - blobNonCan[0] += 2 // increase field count - require.Len(t, blobNonCan, len(blobStxn)) - require.NotEqual(t, blobStxn, blobNonCan) - return blobNonCan - } - forgeSig := func(t *testing.T, stxn *transactions.SignedTxn, blobStxn []byte) (transactions.SignedTxn, []byte) { stxnForged := *stxn crypto.RandBytes(stxnForged.Sig[:]) @@ -1012,6 +1012,29 @@ func TestTxHandlerProcessIncomingCacheBacklogDrop(t *testing.T) { require.Equal(t, initialValue+1, currentValue) } +func makeTxns(addresses []basics.Address, secrets []*crypto.SignatureSecrets, sendIdx, recvIdx int, gh crypto.Digest) ([]transactions.SignedTxn, []byte) { + note := make([]byte, 2) + crypto.RandBytes(note) + tx := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: transactions.Header{ + Sender: addresses[sendIdx], + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: 0, + LastValid: basics.Round(proto.MaxTxnLife), + Note: note, + GenesisHash: gh, + }, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: addresses[recvIdx], + Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)}, + }, + } + signedTx := tx.Sign(secrets[sendIdx]) + blob := protocol.Encode(&signedTx) + return []transactions.SignedTxn{signedTx}, blob +} + func TestTxHandlerProcessIncomingCacheTxPoolDrop(t *testing.T) { partitiontest.PartitionTest(t) @@ -1048,27 +1071,7 @@ loop: } } - makeTxns := func(sendIdx, recvIdx int) ([]transactions.SignedTxn, []byte) { - tx := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: addresses[sendIdx], - Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, - FirstValid: 0, - LastValid: basics.Round(proto.MaxTxnLife), - Note: make([]byte, 2), - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: addresses[recvIdx], - Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)}, - }, - } - signedTx := tx.Sign(secrets[sendIdx]) - blob := protocol.Encode(&signedTx) - return []transactions.SignedTxn{signedTx}, blob - } - - stxns, blob := makeTxns(1, 2) + stxns, blob := makeTxns(addresses, secrets, 1, 2, genesisHash) action := handler.processIncomingTxn(network.IncomingMessage{Data: blob}) require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action) @@ -2750,3 +2753,72 @@ func TestTxHandlerCapGuard(t *testing.T) { require.Eventually(t, func() bool { return completed.Load() }, 1*time.Second, 10*time.Millisecond) } + +func TestTxHandlerValidateIncomingTxMessage(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + const numUsers = 10 + addresses, secrets, genesis := makeTestGenesisAccounts(t, numUsers) + genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr) + + ledgerName := fmt.Sprintf("%s-mem", t.Name()) + const inMem = true + log := logging.TestingLog(t) + log.SetLevel(logging.Panic) + + cfg := config.GetDefaultLocal() + ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg) + require.NoError(t, err) + defer ledger.Close() + + handler, err := makeTestTxHandler(ledger, cfg) + require.NoError(t, err) + + // valid message + _, blob := makeTxns(addresses, secrets, 1, 2, genesisHash) + outmsg := handler.validateIncomingTxMessage(network.IncomingMessage{Data: blob}) + require.Equal(t, outmsg.Action, network.Accept) + + // non-canonical message + // for some reason craftNonCanonical cannot handle makeTxns output so make a simpler random txn + stxns, blob := 
makeRandomTransactions(1) + stxn := stxns[0] + blobNonCan := craftNonCanonical(t, &stxn, blob) + outmsg = handler.validateIncomingTxMessage(network.IncomingMessage{Data: blobNonCan}) + require.Equal(t, outmsg.Action, network.Disconnect) + + // invalid signature + stxns, _ = makeTxns(addresses, secrets, 1, 2, genesisHash) + stxns[0].Sig[0] = stxns[0].Sig[0] + 1 + blob2 := protocol.Encode(&stxns[0]) + outmsg = handler.validateIncomingTxMessage(network.IncomingMessage{Data: blob2}) + require.Equal(t, outmsg.Action, network.Disconnect) + + // invalid message + _, blob = makeTxns(addresses, secrets, 1, 2, genesisHash) + blob[0] = blob[0] + 1 + outmsg = handler.validateIncomingTxMessage(network.IncomingMessage{Data: blob}) + require.Equal(t, outmsg.Action, network.Disconnect) + + t.Run("with-canonical", func(t *testing.T) { + // make sure the reencoding from the canonical dedup checker's reencoding buf is correctly reused + cfg.TxIncomingFilteringFlags = 2 + require.True(t, cfg.TxFilterCanonicalEnabled()) + handler, err := makeTestTxHandler(ledger, cfg) + require.NoError(t, err) + + // valid message + _, blob := makeTxns(addresses, secrets, 1, 2, genesisHash) + outmsg := handler.validateIncomingTxMessage(network.IncomingMessage{Data: blob}) + require.Equal(t, outmsg.Action, network.Accept) + + // non-canonical message + // for some reason craftNonCanonical cannot handle makeTxns output so make a simpler random txn + stxns, blob := makeRandomTransactions(1) + stxn := stxns[0] + blobNonCan := craftNonCanonical(t, &stxn, blob) + outmsg = handler.validateIncomingTxMessage(network.IncomingMessage{Data: blobNonCan}) + require.Equal(t, outmsg.Action, network.Disconnect) + }) +} diff --git a/network/gossipNode.go b/network/gossipNode.go index 8b108b5fde..86a7b42c55 100644 --- a/network/gossipNode.go +++ b/network/gossipNode.go @@ -81,8 +81,10 @@ type GossipNode interface { // ClearHandlers deregisters all the existing message handlers. ClearHandlers() - // RegisterProcessors adds to the set of given message processors. - RegisterProcessors(dispatch []TaggedMessageProcessor) + // RegisterValidatorHandlers adds to the set of given message validation handlers. + // A difference with regular handlers is validation ones perform synchronous validation. + // Currently used as p2p pubsub topic validators. + RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) // ClearProcessors deregisters all the existing message processors. ClearProcessors() @@ -157,14 +159,6 @@ type OutgoingMessage struct { OnRelease func() } -// ValidatedMessage is a message that has been validated and is ready to be processed. -// Think as an intermediate one between IncomingMessage and OutgoingMessage -type ValidatedMessage struct { - Action ForwardingPolicy - Tag Tag - ValidatedMessage interface{} -} - // ForwardingPolicy is an enum indicating to whom we should send a message // //msgp:ignore ForwardingPolicy @@ -203,28 +197,19 @@ func (f HandlerFunc) Handle(message IncomingMessage) OutgoingMessage { return f(message) } -// MessageProcessor takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything) +// MessageValidatorHandler takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything) // to send to the network in response. -// This is an extension of the MessageHandler that works in two stages: validate ->[result]-> handle. 
-type MessageProcessor interface {
-	Validate(message IncomingMessage) ValidatedMessage
-	Handle(message ValidatedMessage) OutgoingMessage
+// It is supposed to perform synchronous validation and return the result of the validation
+// so that the network knows immediately whether the message should be broadcast or not.
+type MessageValidatorHandler interface {
+	ValidateHandle(message IncomingMessage) OutgoingMessage
 }
 
-// ProcessorValidateFunc represents an implementation of the MessageProcessor interface
-type ProcessorValidateFunc func(message IncomingMessage) ValidatedMessage
-
-// ProcessorHandleFunc represents an implementation of the MessageProcessor interface
-type ProcessorHandleFunc func(message ValidatedMessage) OutgoingMessage
-
-// Validate implements MessageProcessor.Validate, calling the validator with the IncomingMessage and returning the action
-// and validation extra data that can be use as the handler input.
-func (f ProcessorValidateFunc) Validate(message IncomingMessage) ValidatedMessage {
-	return f(message)
-}
+// ValidateHandleFunc represents an implementation of the MessageValidatorHandler interface
+type ValidateHandleFunc func(message IncomingMessage) OutgoingMessage
 
-// Handle implements MessageProcessor.Handle calling the handler with the ValidatedMessage and returning the OutgoingMessage
-func (f ProcessorHandleFunc) Handle(message ValidatedMessage) OutgoingMessage {
+// ValidateHandle implements MessageValidatorHandler.ValidateHandle, calling the validator with the IncomingMessage and returning the action.
+func (f ValidateHandleFunc) ValidateHandle(message IncomingMessage) OutgoingMessage {
 	return f(message)
 }
 
@@ -236,9 +221,9 @@ type taggedMessageDispatcher[T any] struct {
 // TaggedMessageHandler receives one type of broadcast messages
 type TaggedMessageHandler = taggedMessageDispatcher[MessageHandler]
 
-// TaggedMessageProcessor receives one type of broadcast messages
+// TaggedMessageValidatorHandler receives one type of broadcast messages
 // and performs two stage processing: validating and handling
-type TaggedMessageProcessor = taggedMessageDispatcher[MessageProcessor]
+type TaggedMessageValidatorHandler = taggedMessageDispatcher[MessageValidatorHandler]
 
 // Propagate is a convenience function to save typing in the common case of a message handler telling us to propagate an incoming message
 // "return network.Propagate(msg)" instead of "return network.OutgoingMsg{network.Broadcast, msg.Tag, msg.Data}"
diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go
index c955dfff1a..7d5814ff39 100644
--- a/network/hybridNetwork.go
+++ b/network/hybridNetwork.go
@@ -192,10 +192,10 @@ func (n *HybridP2PNetwork) ClearHandlers() {
 	n.wsNetwork.ClearHandlers()
 }
 
-// RegisterProcessors adds to the set of given message processors.
-func (n *HybridP2PNetwork) RegisterProcessors(dispatch []TaggedMessageProcessor) {
-	n.p2pNetwork.RegisterProcessors(dispatch)
-	n.wsNetwork.RegisterProcessors(dispatch)
+// RegisterValidatorHandlers adds to the set of given message validator handlers.
+func (n *HybridP2PNetwork) RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) {
+	n.p2pNetwork.RegisterValidatorHandlers(dispatch)
+	n.wsNetwork.RegisterValidatorHandlers(dispatch)
 }
 
 // ClearProcessors deregisters all the existing message processors.
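As an aside, a minimal sketch of how a caller registers a synchronous validator against the reworked MessageValidatorHandler / ValidateHandleFunc API above, modeled on the pass-through handlers used in the p2p tests later in this series; the package name, helper name, and accept-all body are illustrative assumptions, not part of the patch.

package example

import (
	"github.com/algorand/go-algorand/network"
	"github.com/algorand/go-algorand/protocol"
)

// registerPassThroughValidator wires an accept-all synchronous validator for the
// transaction tag into a GossipNode implementation.
func registerPassThroughValidator(node network.GossipNode) {
	node.RegisterValidatorHandlers([]network.TaggedMessageValidatorHandler{
		{
			Tag: protocol.TxnTag,
			MessageHandler: struct {
				network.ValidateHandleFunc
			}{
				network.ValidateHandleFunc(func(msg network.IncomingMessage) network.OutgoingMessage {
					// Returning Accept lets the pubsub layer relay the message;
					// Ignore or Disconnect would stop propagation, mirroring how
					// validateIncomingTxMessage reports its validation result.
					return network.OutgoingMessage{Action: network.Accept, Tag: msg.Tag}
				}),
			},
		},
	})
}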
diff --git a/network/multiplexer.go b/network/multiplexer.go index dc38fba277..ca40f9b0e4 100644 --- a/network/multiplexer.go +++ b/network/multiplexer.go @@ -24,15 +24,15 @@ import ( // Multiplexer is a message handler that sorts incoming messages by Tag and passes // them along to the relevant message handler for that type of message. type Multiplexer struct { - msgHandlers atomic.Value // stores map[Tag]MessageHandler, an immutable map. - msgProcessors atomic.Value // stores map[Tag]MessageProcessor, an immutable map. + msgHandlers atomic.Value // stores map[Tag]MessageHandler, an immutable map. + msgValidatorHandlers atomic.Value // stores map[Tag]MessageValidatorHandler, an immutable map. } // MakeMultiplexer creates an empty Multiplexer func MakeMultiplexer() *Multiplexer { m := &Multiplexer{} - m.ClearHandlers(nil) // allocate the map - m.ClearProcessors(nil) // allocate the map + m.ClearHandlers(nil) // allocate the map + m.ClearValidatorHandlers(nil) // allocate the map return m } @@ -60,9 +60,9 @@ func (m *Multiplexer) getHandler(tag Tag) (MessageHandler, bool) { return getHandler[MessageHandler](&m.msgHandlers, tag) } -// Retrieves the processor for the given message Tag from the processors array. -func (m *Multiplexer) getProcessor(tag Tag) (MessageProcessor, bool) { - return getHandler[MessageProcessor](&m.msgProcessors, tag) +// Retrieves the validating handler for the given message Tag from the validating handlers array. +func (m *Multiplexer) getValidatorHandler(tag Tag) (MessageValidatorHandler, bool) { + return getHandler[MessageValidatorHandler](&m.msgValidatorHandlers, tag) } // Handle is the "input" side of the multiplexer. It dispatches the message to the previously defined handler. @@ -73,18 +73,10 @@ func (m *Multiplexer) Handle(msg IncomingMessage) OutgoingMessage { return OutgoingMessage{} } -// Validate is an alternative "input" side of the multiplexer. It dispatches the message to the previously defined validator. -func (m *Multiplexer) Validate(msg IncomingMessage) ValidatedMessage { - if handler, ok := m.getProcessor(msg.Tag); ok { - return handler.Validate(msg) - } - return ValidatedMessage{} -} - -// Process is the second step of message handling after validation. It dispatches the message to the previously defined processor. -func (m *Multiplexer) Process(msg ValidatedMessage) OutgoingMessage { - if handler, ok := m.getProcessor(msg.Tag); ok { - return handler.Handle(msg) +// ValidateHandle is an alternative "input" side of the multiplexer. It dispatches the message to the previously defined validator. +func (m *Multiplexer) ValidateHandle(msg IncomingMessage) OutgoingMessage { + if handler, ok := m.getValidatorHandler(msg.Tag); ok { + return handler.ValidateHandle(msg) } return OutgoingMessage{} } @@ -110,9 +102,9 @@ func (m *Multiplexer) RegisterHandlers(dispatch []TaggedMessageHandler) { registerMultiplexer(&m.msgHandlers, dispatch) } -// RegisterProcessors registers the set of given message handlers. -func (m *Multiplexer) RegisterProcessors(dispatch []TaggedMessageProcessor) { - registerMultiplexer(&m.msgProcessors, dispatch) +// RegisterValidatorHandlers registers the set of given message handlers. 
+func (m *Multiplexer) RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) { + registerMultiplexer(&m.msgValidatorHandlers, dispatch) } func clearMultiplexer[T any](target *atomic.Value, excludeTags []Tag) { @@ -143,7 +135,7 @@ func (m *Multiplexer) ClearHandlers(excludeTags []Tag) { clearMultiplexer[MessageHandler](&m.msgHandlers, excludeTags) } -// ClearProcessors deregisters all the existing message handlers other than the one provided in the excludeTags list -func (m *Multiplexer) ClearProcessors(excludeTags []Tag) { - clearMultiplexer[MessageProcessor](&m.msgProcessors, excludeTags) +// ClearValidatorHandlers deregisters all the existing message handlers other than the one provided in the excludeTags list +func (m *Multiplexer) ClearValidatorHandlers(excludeTags []Tag) { + clearMultiplexer[MessageValidatorHandler](&m.msgValidatorHandlers, excludeTags) } diff --git a/network/p2p/pubsub.go b/network/p2p/pubsub.go index a968bcb6a9..657baecdde 100644 --- a/network/p2p/pubsub.go +++ b/network/p2p/pubsub.go @@ -98,6 +98,7 @@ func makePubSub(ctx context.Context, cfg config.Local, host host.Host) (*pubsub. pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign), // pubsub.WithValidateThrottle(cfg.TxBacklogSize), pubsub.WithValidateWorkers(incomingThreads), + pubsub.WithRawTracer(pubsubTracer{}), } return pubsub.NewGossipSub(ctx, host, options...) diff --git a/network/p2p/pubsubTracer.go b/network/p2p/pubsubTracer.go new file mode 100644 index 0000000000..ca57bc69ce --- /dev/null +++ b/network/p2p/pubsubTracer.go @@ -0,0 +1,98 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package p2p + +import ( + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/algorand/go-algorand/util/metrics" +) + +var _ = pubsub.RawTracer(pubsubTracer{}) + +var transactionMessagesP2PRejectMessage = metrics.NewTagCounter(metrics.TransactionMessagesP2PRejectMessage.Name, metrics.TransactionMessagesP2PRejectMessage.Description) +var transactionMessagesP2PDuplicateMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PDuplicateMessage) +var transactionMessagesP2PDeliverMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PDeliverMessage) +var transactionMessagesP2PUnderdeliverableMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PUndeliverableMessage) +var transactionMessagesP2PValidateMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PValidateMessage) + +// pubsubTracer is a tracer for pubsub events used to track metrics. +type pubsubTracer struct{} + +// AddPeer is invoked when a new peer is added. +func (t pubsubTracer) AddPeer(p peer.ID, proto protocol.ID) {} + +// RemovePeer is invoked when a peer is removed. 
+func (t pubsubTracer) RemovePeer(p peer.ID) {} + +// Join is invoked when a new topic is joined +func (t pubsubTracer) Join(topic string) {} + +// Leave is invoked when a topic is abandoned +func (t pubsubTracer) Leave(topic string) {} + +// Graft is invoked when a new peer is grafted on the mesh (gossipsub) +func (t pubsubTracer) Graft(p peer.ID, topic string) {} + +// Prune is invoked when a peer is pruned from the message (gossipsub) +func (t pubsubTracer) Prune(p peer.ID, topic string) {} + +// ValidateMessage is invoked when a message first enters the validation pipeline. +func (t pubsubTracer) ValidateMessage(msg *pubsub.Message) { + transactionMessagesP2PValidateMessage.Inc(nil) +} + +// DeliverMessage is invoked when a message is delivered +func (t pubsubTracer) DeliverMessage(msg *pubsub.Message) { + transactionMessagesP2PDeliverMessage.Inc(nil) +} + +// RejectMessage is invoked when a message is Rejected or Ignored. +// The reason argument can be one of the named strings Reject*. +func (t pubsubTracer) RejectMessage(msg *pubsub.Message, reason string) { + switch reason { + case pubsub.RejectValidationThrottled, pubsub.RejectValidationQueueFull, pubsub.RejectValidationFailed, pubsub.RejectValidationIgnored: + transactionMessagesP2PRejectMessage.Add(reason, 1) + default: + transactionMessagesP2PRejectMessage.Add("other", 1) + } +} + +// DuplicateMessage is invoked when a duplicate message is dropped. +func (t pubsubTracer) DuplicateMessage(msg *pubsub.Message) { + transactionMessagesP2PDuplicateMessage.Inc(nil) +} + +// ThrottlePeer is invoked when a peer is throttled by the peer gater. +func (t pubsubTracer) ThrottlePeer(p peer.ID) {} + +// RecvRPC is invoked when an incoming RPC is received. +func (t pubsubTracer) RecvRPC(rpc *pubsub.RPC) {} + +// SendRPC is invoked when a RPC is sent. +func (t pubsubTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {} + +// DropRPC is invoked when an outbound RPC is dropped, typically because of a queue full. +func (t pubsubTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {} + +// UndeliverableMessage is invoked when the consumer of Subscribe is not reading messages fast enough and +// the pressure release mechanism trigger, dropping messages. +func (t pubsubTracer) UndeliverableMessage(msg *pubsub.Message) { + transactionMessagesP2PUnderdeliverableMessage.Inc(nil) +} diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index a186426b15..37c6cfcd52 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -693,14 +693,14 @@ func (n *P2PNetwork) ClearHandlers() { n.handler.ClearHandlers([]Tag{}) } -// RegisterProcessors adds to the set of given message handlers. -func (n *P2PNetwork) RegisterProcessors(dispatch []TaggedMessageProcessor) { - n.handler.RegisterProcessors(dispatch) +// RegisterValidatorHandlers adds to the set of given message handlers. +func (n *P2PNetwork) RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) { + n.handler.RegisterValidatorHandlers(dispatch) } // ClearProcessors deregisters all the existing message handlers. 
func (n *P2PNetwork) ClearProcessors() { - n.handler.ClearProcessors([]Tag{}) + n.handler.ClearValidatorHandlers([]Tag{}) } // GetHTTPClient returns a http.Client with a suitable for the network Transport @@ -916,7 +916,8 @@ func (n *P2PNetwork) txTopicHandleLoop() { n.log.Debugf("Subscribed to topic %s", p2p.TXTopicName) for { - msg, err := sub.Next(n.ctx) + // msg from sub.Next not used since all work done by txTopicValidator + _, err := sub.Next(n.ctx) if err != nil { if err != pubsub.ErrSubscriptionCancelled && err != context.Canceled { n.log.Errorf("Error reading from subscription %v, peerId %s", err, n.service.ID()) @@ -925,13 +926,6 @@ func (n *P2PNetwork) txTopicHandleLoop() { sub.Cancel() return } - // if there is a self-sent the message no need to process it. - if msg.ReceivedFrom == n.service.ID() { - continue - } - - _ = n.handler.Process(msg.ValidatorData.(ValidatedMessage)) - // participation or configuration change, cancel subscription and quit if !n.wantTXGossip.Load() { n.log.Debugf("Cancelling subscription to topic %s due participation change", p2p.TXTopicName) @@ -978,7 +972,7 @@ func (n *P2PNetwork) txTopicValidator(ctx context.Context, peerID peer.ID, msg * peerStats.txReceived.Add(1) n.peerStatsMu.Unlock() - outmsg := n.handler.Validate(inmsg) + outmsg := n.handler.ValidateHandle(inmsg) // there was a decision made in the handler about this message switch outmsg.Action { case Ignore: diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index f6eea2ab69..7cb35a0e82 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -111,26 +111,22 @@ func TestP2PSubmitTX(t *testing.T) { // now we should be connected in a line: B <-> A <-> C where both B and C are connected to A but not each other // Since we aren't using the transaction handler in this test, we need to register a pass-through handler - passThroughHandler := []TaggedMessageProcessor{ + passThroughHandler := []TaggedMessageValidatorHandler{ { Tag: protocol.TxnTag, MessageHandler: struct { - ProcessorValidateFunc - ProcessorHandleFunc + ValidateHandleFunc }{ - ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { - return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: nil} - }), - ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { - return OutgoingMessage{Action: Ignore} + ValidateHandleFunc(func(msg IncomingMessage) OutgoingMessage { + return OutgoingMessage{Action: Accept, Tag: msg.Tag} }), }, }, } - netA.RegisterProcessors(passThroughHandler) - netB.RegisterProcessors(passThroughHandler) - netC.RegisterProcessors(passThroughHandler) + netA.RegisterValidatorHandlers(passThroughHandler) + netB.RegisterValidatorHandlers(passThroughHandler) + netC.RegisterValidatorHandlers(passThroughHandler) // send messages from B and confirm that they get received by C (via A) for i := 0; i < 10; i++ { @@ -206,25 +202,21 @@ func TestP2PSubmitTXNoGossip(t *testing.T) { // ensure netC cannot receive messages - passThroughHandler := []TaggedMessageProcessor{ + passThroughHandler := []TaggedMessageValidatorHandler{ { Tag: protocol.TxnTag, MessageHandler: struct { - ProcessorValidateFunc - ProcessorHandleFunc + ValidateHandleFunc }{ - ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { - return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: nil} - }), - ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { - return OutgoingMessage{Action: Ignore} + ValidateHandleFunc(func(msg IncomingMessage) OutgoingMessage { + 
return OutgoingMessage{Action: Accept, Tag: msg.Tag} }), }, }, } - netB.RegisterProcessors(passThroughHandler) - netC.RegisterProcessors(passThroughHandler) + netB.RegisterValidatorHandlers(passThroughHandler) + netC.RegisterValidatorHandlers(passThroughHandler) for i := 0; i < 10; i++ { err = netA.Broadcast(context.Background(), protocol.TxnTag, []byte(fmt.Sprintf("test %d", i)), false, nil) require.NoError(t, err) @@ -860,26 +852,22 @@ func TestP2PRelay(t *testing.T) { return netA.hasPeers() && netB.hasPeers() }, 2*time.Second, 50*time.Millisecond) - makeCounterHandler := func(numExpected int, counter *atomic.Uint32, msgs *[][]byte) ([]TaggedMessageProcessor, chan struct{}) { + makeCounterHandler := func(numExpected int, counter *atomic.Uint32, msgs *[][]byte) ([]TaggedMessageValidatorHandler, chan struct{}) { counterDone := make(chan struct{}) - counterHandler := []TaggedMessageProcessor{ + counterHandler := []TaggedMessageValidatorHandler{ { Tag: protocol.TxnTag, MessageHandler: struct { - ProcessorValidateFunc - ProcessorHandleFunc + ValidateHandleFunc }{ - ProcessorValidateFunc(func(msg IncomingMessage) ValidatedMessage { - return ValidatedMessage{Action: Accept, Tag: msg.Tag, ValidatedMessage: msg.Data} - }), - ProcessorHandleFunc(func(msg ValidatedMessage) OutgoingMessage { + ValidateHandleFunc(func(msg IncomingMessage) OutgoingMessage { if msgs != nil { - *msgs = append(*msgs, msg.ValidatedMessage.([]byte)) + *msgs = append(*msgs, msg.Data) } if count := counter.Add(1); int(count) >= numExpected { close(counterDone) } - return OutgoingMessage{Action: Ignore} + return OutgoingMessage{Action: Accept, Tag: msg.Tag} }), }, }, @@ -888,7 +876,7 @@ func TestP2PRelay(t *testing.T) { } var counter atomic.Uint32 counterHandler, counterDone := makeCounterHandler(1, &counter, nil) - netA.RegisterProcessors(counterHandler) + netA.RegisterValidatorHandlers(counterHandler) // send 5 messages from netB to netA // since relaying is disabled on net B => no messages should be received by net A @@ -943,7 +931,7 @@ func TestP2PRelay(t *testing.T) { var loggedMsgs [][]byte counterHandler, counterDone = makeCounterHandler(expectedMsgs, &counter, &loggedMsgs) netA.ClearProcessors() - netA.RegisterProcessors(counterHandler) + netA.RegisterValidatorHandlers(counterHandler) for i := 0; i < expectedMsgs/2; i++ { err := netB.Relay(context.Background(), protocol.TxnTag, []byte{5, 6, 7, byte(i)}, true, nil) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index fc42296652..917d1b6e64 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -124,6 +124,7 @@ var networkIncomingBufferMicros = metrics.MakeCounter(metrics.MetricName{Name: " var networkHandleMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_handle_micros_total", Description: "microseconds spent by protocol handlers in the receive thread"}) var networkBroadcasts = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcasts_total", Description: "number of broadcast operations"}) +var networkBroadcastQueueFull = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_full_total", Description: "number of messages that were drops due to full broadcast queue"}) var networkBroadcastQueueMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_micros_total", Description: "microseconds broadcast requests sit on queue"}) var networkBroadcastSendMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_send_micros_total", Description: 
"microseconds spent broadcasting"}) var networkBroadcastsDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_broadcasts_dropped_total", Description: "number of broadcast messages not sent to any peer"}) @@ -135,7 +136,6 @@ var networkPeerAlreadyClosed = metrics.MakeCounter(metrics.MetricName{Name: "alg var networkSlowPeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_slow_drops_total", Description: "number of peers dropped for being slow to send to"}) var networkIdlePeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_idle_drops_total", Description: "number of peers dropped due to idle connection"}) -var networkBroadcastQueueFull = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_full_total", Description: "number of messages that were drops due to full broadcast queue"}) var minPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_min_ping_seconds", Description: "Network round trip time to fastest peer in seconds."}) var meanPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_mean_ping_seconds", Description: "Network round trip time to average peer in seconds."}) @@ -859,8 +859,8 @@ func (wn *WebsocketNetwork) ClearHandlers() { wn.handler.ClearHandlers([]Tag{protocol.PingTag, protocol.PingReplyTag, protocol.NetPrioResponseTag}) } -// RegisterProcessors registers the set of given message handlers. -func (wn *WebsocketNetwork) RegisterProcessors(dispatch []TaggedMessageProcessor) { +// RegisterValidatorHandlers registers the set of given message handlers. +func (wn *WebsocketNetwork) RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) { } // ClearProcessors deregisters all the existing message handlers. diff --git a/test/e2e-go/features/p2p/p2p_basic_test.go b/test/e2e-go/features/p2p/p2p_basic_test.go index 6f3e8aae47..5ee2f034ae 100644 --- a/test/e2e-go/features/p2p/p2p_basic_test.go +++ b/test/e2e-go/features/p2p/p2p_basic_test.go @@ -17,6 +17,7 @@ package p2p import ( + "crypto/rand" "path/filepath" "testing" "time" @@ -28,7 +29,7 @@ import ( "github.com/stretchr/testify/require" ) -func testP2PWithConfig(t *testing.T, cfgname string) { +func testP2PWithConfig(t *testing.T, templateName string) *fixtures.RestClientFixture { r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture @@ -42,22 +43,88 @@ func testP2PWithConfig(t *testing.T, cfgname string) { consensus[protocol.ConsensusCurrentVersion] = fastProtocol fixture.SetConsensus(consensus) - fixture.Setup(t, filepath.Join("nettemplates", cfgname)) - defer fixture.ShutdownImpl(true) // preserve logs in testdir - + fixture.Setup(t, filepath.Join("nettemplates", templateName)) _, err := fixture.NC.AlgodClient() r.NoError(err) err = fixture.WaitForRound(10, 30*time.Second) r.NoError(err) + + return &fixture } func TestP2PTwoNodes(t *testing.T) { partitiontest.PartitionTest(t) - testP2PWithConfig(t, "TwoNodes50EachP2P.json") + fixture := testP2PWithConfig(t, "TwoNodes50EachP2P.json") + defer fixture.Shutdown() + + // ensure transaction propagation on both directions + pingClient := fixture.LibGoalClient + pingAccountList, err := fixture.GetWalletsSortedByBalance() + require.NoError(t, err) + pingAccount := pingAccountList[0].Address + + pongClient := fixture.GetLibGoalClientForNamedNode("Node") + pongAccounts, err := fixture.GetNodeWalletsSortedByBalance(pongClient) + require.NoError(t, err) + pongAccount := pongAccounts[0].Address + + pingBalance, err := 
pingClient.GetBalance(pingAccount) + require.NoError(t, err) + pongBalance, err := pingClient.GetBalance(pongAccount) + require.NoError(t, err) + + require.Equal(t, pingBalance, pongBalance) + + expectedPingBalance := pingBalance + expectedPongBalance := pongBalance + + minTxnFee, minAcctBalance, err := fixture.CurrentMinFeeAndBalance() + require.NoError(t, err) + + transactionFee := minTxnFee + 5 + amountPongSendsPing := minAcctBalance + amountPingSendsPong := minAcctBalance * 3 / 2 + + pongTxidsToAddresses := make(map[string]string) + pingTxidsToAddresses := make(map[string]string) + + randNote := func(tb testing.TB) []byte { + b := make([]byte, 8) + _, err := rand.Read(b) + require.NoError(tb, err) + return b + } + + for i := 0; i < 5; i++ { + pongTx, err := pongClient.SendPaymentFromUnencryptedWallet(pongAccount, pingAccount, transactionFee, amountPongSendsPing, randNote(t)) + pongTxidsToAddresses[pongTx.ID().String()] = pongAccount + require.NoError(t, err) + pingTx, err := pingClient.SendPaymentFromUnencryptedWallet(pingAccount, pongAccount, transactionFee, amountPingSendsPong, randNote(t)) + pingTxidsToAddresses[pingTx.ID().String()] = pingAccount + require.NoError(t, err) + expectedPingBalance = expectedPingBalance - transactionFee - amountPingSendsPong + amountPongSendsPing + expectedPongBalance = expectedPongBalance - transactionFee - amountPongSendsPing + amountPingSendsPong + } + curStatus, _ := pongClient.Status() + curRound := curStatus.LastRound + + fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.GetNodeControllerForDataDir(pongClient.DataDir())) + confirmed := fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pingTxidsToAddresses) + require.True(t, confirmed, "failed to see confirmed ping transaction by round %v", curRound+uint64(5)) + confirmed = fixture.WaitForAllTxnsToConfirm(curRound+uint64(5), pongTxidsToAddresses) + require.True(t, confirmed, "failed to see confirmed pong transaction by round %v", curRound+uint64(5)) + + pingBalance, err = pongClient.GetBalance(pingAccount) + require.NoError(t, err) + pongBalance, err = pongClient.GetBalance(pongAccount) + require.NoError(t, err) + require.True(t, expectedPingBalance <= pingBalance, "ping balance is different than expected.") + require.True(t, expectedPongBalance <= pongBalance, "pong balance is different than expected.") } func TestP2PFiveNodes(t *testing.T) { partitiontest.PartitionTest(t) - testP2PWithConfig(t, "FiveNodesP2P.json") + fixture := testP2PWithConfig(t, "FiveNodesP2P.json") + defer fixture.Shutdown() } diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go index eb867729cf..fcc566312f 100644 --- a/util/metrics/metrics.go +++ b/util/metrics/metrics.go @@ -128,6 +128,17 @@ var ( // TransactionMessagesBacklogSize "Number of transaction messages in the TX handler backlog queue" TransactionMessagesBacklogSize = MetricName{Name: "algod_transaction_messages_backlog_size", Description: "Number of transaction messages in the TX handler backlog queue"} + // TransactionMessagesP2PRejectMessage "Number of rejected p2p pubsub transaction messages" + TransactionMessagesP2PRejectMessage = MetricName{Name: "algod_transaction_messages_p2p_reject", Description: "Number of rejected p2p pubsub transaction messages"} + // TransactionMessagesP2PDuplicateMessage "Number of duplicate p2p pubsub transaction messages"} + TransactionMessagesP2PDuplicateMessage = MetricName{Name: "algod_transaction_messages_p2p_duplicate", Description: "Number of duplicate p2p pubsub transaction messages"} + // 
TransactionMessagesP2PDeliverMessage "Number of delivered p2p pubsub transaction messages" + TransactionMessagesP2PDeliverMessage = MetricName{Name: "algod_transaction_messages_p2p_delivered", Description: "Number of delivered p2p pubsub transaction messages"} + // TransactionMessagesP2PUndeliverableMessage "Number of undeliverable p2p pubsub transaction messages" + TransactionMessagesP2PUndeliverableMessage = MetricName{Name: "algod_transaction_messages_p2p_undeliverable", Description: "Number of undeliverable p2p pubsub transaction messages"} + // TransactionMessagesP2PValidateMessage "Number of p2p pubsub transaction messages received for validation" + TransactionMessagesP2PValidateMessage = MetricName{Name: "algod_transaction_messages_p2p_validate", Description: "Number of p2p pubsub transaction messages received for validation"} + // TransactionGroupTxSyncHandled "Number of transaction groups handled via txsync" TransactionGroupTxSyncHandled = MetricName{Name: "algod_transaction_group_txsync_handled", Description: "Number of transaction groups handled via txsync"} // TransactionGroupTxSyncRemember "Number of transaction groups remembered via txsync" From 595ec23e5a49413d1744b7f3620a45cbd849ff42 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:10:30 -0400 Subject: [PATCH 56/82] network: remove ws net proto 2.1 (#6081) --- network/msgCompressor.go | 55 +++++----------------------- network/msgCompressor_test.go | 39 +------------------- network/wsNetwork.go | 69 ++++++----------------------------- network/wsNetwork_test.go | 46 ++++++----------------- network/wsPeer.go | 16 ++++---- 5 files changed, 43 insertions(+), 182 deletions(-) diff --git a/network/msgCompressor.go b/network/msgCompressor.go index 831b509aef..61108cad99 100644 --- a/network/msgCompressor.go +++ b/network/msgCompressor.go @@ -31,28 +31,6 @@ var zstdCompressionMagic = [4]byte{0x28, 0xb5, 0x2f, 0xfd} const zstdCompressionLevel = zstd.BestSpeed -// checkCanCompress checks if there is an proposal payload message and peers supporting compression -func checkCanCompress(request broadcastRequest, peers []*wsPeer) bool { - canCompress := false - hasPP := false - for _, tag := range request.tags { - if tag == protocol.ProposalPayloadTag { - hasPP = true - break - } - } - // if have proposal payload check if there are any peers supporting compression - if hasPP { - for _, peer := range peers { - if peer.pfProposalCompressionSupported() { - canCompress = true - break - } - } - } - return canCompress -} - // zstdCompressMsg returns a concatenation of a tag and compressed data func zstdCompressMsg(tbytes []byte, d []byte) ([]byte, string) { bound := zstd.CompressBound(len(d)) @@ -89,13 +67,7 @@ type wsPeerMsgDataConverter struct { ppdec zstdProposalDecompressor } -type zstdProposalDecompressor struct { - active bool -} - -func (dec zstdProposalDecompressor) enabled() bool { - return dec.active -} +type zstdProposalDecompressor struct{} func (dec zstdProposalDecompressor) accept(data []byte) bool { return len(data) > 4 && bytes.Equal(data[:4], zstdCompressionMagic[:]) @@ -126,18 +98,16 @@ func (dec zstdProposalDecompressor) convert(data []byte) ([]byte, error) { func (c *wsPeerMsgDataConverter) convert(tag protocol.Tag, data []byte) ([]byte, error) { if tag == protocol.ProposalPayloadTag { - if c.ppdec.enabled() { - // sender might support compressed payload but fail to compress for whatever reason, - // in this case it sends non-compressed payload - the 
receiver decompress only if it is compressed. - if c.ppdec.accept(data) { - res, err := c.ppdec.convert(data) - if err != nil { - return nil, fmt.Errorf("peer %s: %w", c.origin, err) - } - return res, nil + // sender might support compressed payload but fail to compress for whatever reason, + // in this case it sends non-compressed payload - the receiver decompress only if it is compressed. + if c.ppdec.accept(data) { + res, err := c.ppdec.convert(data) + if err != nil { + return nil, fmt.Errorf("peer %s: %w", c.origin, err) } - c.log.Warnf("peer %s supported zstd but sent non-compressed data", c.origin) + return res, nil } + c.log.Warnf("peer %s supported zstd but sent non-compressed data", c.origin) } return data, nil } @@ -148,11 +118,6 @@ func makeWsPeerMsgDataConverter(wp *wsPeer) *wsPeerMsgDataConverter { origin: wp.originAddress, } - if wp.pfProposalCompressionSupported() { - c.ppdec = zstdProposalDecompressor{ - active: true, - } - } - + c.ppdec = zstdProposalDecompressor{} return &c } diff --git a/network/msgCompressor_test.go b/network/msgCompressor_test.go index 3b08b5fc0e..172cf05a98 100644 --- a/network/msgCompressor_test.go +++ b/network/msgCompressor_test.go @@ -48,37 +48,6 @@ func TestZstdDecompress(t *testing.T) { require.Nil(t, decompressed) } -func TestCheckCanCompress(t *testing.T) { - partitiontest.PartitionTest(t) - - req := broadcastRequest{} - peers := []*wsPeer{} - r := checkCanCompress(req, peers) - require.False(t, r) - - req.tags = []protocol.Tag{protocol.AgreementVoteTag} - r = checkCanCompress(req, peers) - require.False(t, r) - - req.tags = []protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag} - r = checkCanCompress(req, peers) - require.False(t, r) - - peer1 := wsPeer{ - features: 0, - } - peers = []*wsPeer{&peer1} - r = checkCanCompress(req, peers) - require.False(t, r) - - peer2 := wsPeer{ - features: pfCompressedProposal, - } - peers = []*wsPeer{&peer1, &peer2} - r = checkCanCompress(req, peers) - require.True(t, r) -} - func TestZstdCompressMsg(t *testing.T) { partitiontest.PartitionTest(t) @@ -108,7 +77,7 @@ func TestWsPeerMsgDataConverterConvert(t *testing.T) { partitiontest.PartitionTest(t) c := wsPeerMsgDataConverter{} - c.ppdec = zstdProposalDecompressor{active: false} + c.ppdec = zstdProposalDecompressor{} tag := protocol.AgreementVoteTag data := []byte("data") @@ -117,13 +86,9 @@ func TestWsPeerMsgDataConverterConvert(t *testing.T) { require.Equal(t, data, r) tag = protocol.ProposalPayloadTag - r, err = c.convert(tag, data) - require.NoError(t, err) - require.Equal(t, data, r) - l := converterTestLogger{} c.log = &l - c.ppdec = zstdProposalDecompressor{active: true} + c.ppdec = zstdProposalDecompressor{} r, err = c.convert(tag, data) require.NoError(t, err) require.Equal(t, data, r) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 917d1b6e64..2af3a9b6bf 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -146,11 +146,6 @@ var peers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peers", De var incomingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_incoming_peers", Description: "Number of active incoming peers."}) var outgoingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_outgoing_peers", Description: "Number of active outgoing peers."}) -var networkPrioBatchesPPWithCompression = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_prio_batches_wpp_comp_sent_total", Description: "number of prio compressed batches with PP"}) -var 
networkPrioBatchesPPWithoutCompression = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_pp_prio_batches_wpp_non_comp_sent_total", Description: "number of prio non-compressed batches with PP"}) -var networkPrioPPCompressedSize = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_prio_pp_compressed_size_total", Description: "cumulative size of all compressed PP"}) -var networkPrioPPNonCompressedSize = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_prio_pp_non_compressed_size_total", Description: "cumulative size of all non-compressed PP"}) - // peerDisconnectionAckDuration defines the time we would wait for the peer disconnection to complete. const peerDisconnectionAckDuration = 5 * time.Second @@ -1062,6 +1057,7 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt wn.setHeaders(responseHeader) responseHeader.Set(ProtocolVersionHeader, matchingVersion) responseHeader.Set(GenesisHeader, wn.GenesisID) + // set the features we support responseHeader.Set(PeerFeaturesHeader, PeerFeatureProposalCompression) var challenge string if wn.prioScheme != nil { @@ -1391,21 +1387,10 @@ func (wn *WebsocketNetwork) getPeersChangeCounter() int32 { } // preparePeerData prepares batches of data for sending. -// It performs optional zstd compression for proposal massages -func (wn *msgBroadcaster) preparePeerData(request broadcastRequest, prio bool, peers []*wsPeer) ([][]byte, [][]byte, []crypto.Digest, bool) { - // determine if there is a payload proposal and peers supporting compressed payloads - wantCompression := false - containsPrioPPTag := false - if prio { - wantCompression = checkCanCompress(request, peers) - } - +// It performs zstd compression for proposal massages if they this is a prio request and has proposal. +func (wn *msgBroadcaster) preparePeerData(request broadcastRequest, prio bool) ([][]byte, []crypto.Digest) { digests := make([]crypto.Digest, len(request.data)) data := make([][]byte, len(request.data)) - var dataCompressed [][]byte - if wantCompression { - dataCompressed = make([][]byte, len(request.data)) - } for i, d := range request.data { tbytes := []byte(request.tags[i]) mbytes := make([]byte, len(tbytes)+len(d)) @@ -1416,29 +1401,15 @@ func (wn *msgBroadcaster) preparePeerData(request broadcastRequest, prio bool, p digests[i] = crypto.Hash(mbytes) } - if prio { - if request.tags[i] == protocol.ProposalPayloadTag { - networkPrioPPNonCompressedSize.AddUint64(uint64(len(d)), nil) - containsPrioPPTag = true - } - } - - if wantCompression { - if request.tags[i] == protocol.ProposalPayloadTag { - compressed, logMsg := zstdCompressMsg(tbytes, d) - if len(logMsg) > 0 { - wn.log.Warn(logMsg) - } else { - networkPrioPPCompressedSize.AddUint64(uint64(len(compressed)), nil) - } - dataCompressed[i] = compressed - } else { - // otherwise reuse non-compressed from above - dataCompressed[i] = mbytes + if prio && request.tags[i] == protocol.ProposalPayloadTag { + compressed, logMsg := zstdCompressMsg(tbytes, d) + if len(logMsg) > 0 { + wn.log.Warn(logMsg) } + data[i] = compressed } } - return data, dataCompressed, digests, containsPrioPPTag + return data, digests } // prio is set if the broadcast is a high-priority broadcast. 
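For illustration, a minimal self-contained sketch of the simplified broadcast preparation above — every message is tag||payload, and only proposal payloads in priority broadcasts are zstd-compressed in place. The `klauspost/compress` zstd package and the stand-in tag constant are assumptions made for this sketch, not the project's actual imports or identifiers:

```go
// Sketch only: mirrors the shape of the new preparePeerData — a single output slice,
// with compression applied in place instead of a parallel "compressed" slice.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

const proposalPayloadTag = "PP" // stand-in for protocol.ProposalPayloadTag

func prepare(tags []string, payloads [][]byte, prio bool) ([][]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedFastest))
	if err != nil {
		return nil, err
	}
	defer enc.Close()

	out := make([][]byte, len(payloads))
	for i, d := range payloads {
		if prio && tags[i] == proposalPayloadTag {
			// compressed frames start with the zstd magic bytes, which is what the
			// receiver's accept() check keys on before decompressing
			out[i] = append([]byte(tags[i]), enc.EncodeAll(d, nil)...)
		} else {
			out[i] = append([]byte(tags[i]), d...)
		}
	}
	return out, nil
}

func main() {
	msgs, err := prepare([]string{"AV", proposalPayloadTag},
		[][]byte{[]byte("vote"), []byte("proposal bytes")}, true)
	if err != nil {
		panic(err)
	}
	for _, m := range msgs {
		fmt.Printf("tag=%s len=%d\n", m[:2], len(m))
	}
}
```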
@@ -1455,7 +1426,7 @@ func (wn *msgBroadcaster) innerBroadcast(request broadcastRequest, prio bool, pe } start := time.Now() - data, dataWithCompression, digests, containsPrioPPTag := wn.preparePeerData(request, prio, peers) + data, digests := wn.preparePeerData(request, prio) // first send to all the easy outbound peers who don't block, get them started. sentMessageCount := 0 @@ -1466,23 +1437,7 @@ func (wn *msgBroadcaster) innerBroadcast(request broadcastRequest, prio bool, pe if peer == request.except { continue } - var ok bool - if peer.pfProposalCompressionSupported() && len(dataWithCompression) > 0 { - // if this peer supports compressed proposals and compressed data batch is filled out, use it - ok = peer.writeNonBlockMsgs(request.ctx, dataWithCompression, prio, digests, request.enqueueTime) - if prio { - if containsPrioPPTag { - networkPrioBatchesPPWithCompression.Inc(nil) - } - } - } else { - ok = peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime) - if prio { - if containsPrioPPTag { - networkPrioBatchesPPWithoutCompression.Inc(nil) - } - } - } + ok := peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime) if ok { sentMessageCount++ continue @@ -1951,7 +1906,7 @@ const ProtocolVersionHeader = "X-Algorand-Version" const ProtocolAcceptVersionHeader = "X-Algorand-Accept-Version" // SupportedProtocolVersions contains the list of supported protocol versions by this node ( in order of preference ). -var SupportedProtocolVersions = []string{"2.2", "2.1"} +var SupportedProtocolVersions = []string{"2.2"} // ProtocolVersion is the current version attached to the ProtocolVersionHeader header /* Version history: diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 6af3a697fc..0128c28fc2 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -429,15 +429,12 @@ func TestWebsocketProposalPayloadCompression(t *testing.T) { } var tests []testDef = []testDef{ - // two old nodes - {[]string{"2.1"}, "2.1", []string{"2.1"}, "2.1"}, - // two new nodes with overwritten config {[]string{"2.2"}, "2.2", []string{"2.2"}, "2.2"}, // old node + new node {[]string{"2.1"}, "2.1", []string{"2.2", "2.1"}, "2.2"}, - {[]string{"2.2", "2.1"}, "2.2", []string{"2.1"}, "2.1"}, + {[]string{"2.2", "2.1"}, "2.1", []string{"2.2"}, "2.2"}, // combinations {[]string{"2.2", "2.1"}, "2.1", []string{"2.2", "2.1"}, "2.1"}, @@ -1101,7 +1098,7 @@ func TestDupFilter(t *testing.T) { defer netC.Stop() makeMsg := func(n int) []byte { - // We cannot harcode the msgSize to messageFilterSize + 1 because max allowed AV message is smaller than that. + // We cannot hardcode the msgSize to messageFilterSize + 1 because max allowed AV message is smaller than that. // We also cannot use maxSize for PP since it's a compressible tag but trying to compress random data will expand it. 
if messageFilterSize+1 < n { n = messageFilterSize + 1 @@ -1387,7 +1384,7 @@ func TestPeeringWithIdentityChallenge(t *testing.T) { assert.Equal(t, 0, len(netB.GetPeers(PeersConnectedOut))) // netA never attempts to set identity as it never sees a verified identity assert.Equal(t, 1, netA.identityTracker.(*mockIdentityTracker).getSetCount()) - // no connecton => netB does attepmt to add the identity to the tracker + // no connection => netB does attempt to add the identity to the tracker // and it would not end up being added assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getSetCount()) assert.Equal(t, 1, netB.identityTracker.(*mockIdentityTracker).getInsertCount()) @@ -1608,7 +1605,7 @@ func TestPeeringReceiverIdentityChallengeOnly(t *testing.T) { assert.Equal(t, 0, netB.identityTracker.(*mockIdentityTracker).getSetCount()) } -// TestPeeringIncorrectDeduplicationName confirm that if the reciever can't match +// TestPeeringIncorrectDeduplicationName confirm that if the receiver can't match // the Address in the challenge to its PublicAddress, identities aren't exchanged, but peering continues func TestPeeringIncorrectDeduplicationName(t *testing.T) { partitiontest.PartitionTest(t) @@ -1665,7 +1662,7 @@ func TestPeeringIncorrectDeduplicationName(t *testing.T) { // bi-directional connection would now work since netB detects to be connected to netA in tryConnectReserveAddr, // so force it. - // this second connection should set identities, because the reciever address matches now + // this second connection should set identities, because the receiver address matches now _, ok = netB.tryConnectReserveAddr(addrA) assert.False(t, ok) netB.wg.Add(1) @@ -2504,9 +2501,9 @@ func TestWebsocketNetwork_checkServerResponseVariables(t *testing.T) { } func (wn *WebsocketNetwork) broadcastWithTimestamp(tag protocol.Tag, data []byte, when time.Time) error { - msgArr := make([][]byte, 1, 1) + msgArr := make([][]byte, 1) msgArr[0] = data - tagArr := make([]protocol.Tag, 1, 1) + tagArr := make([]protocol.Tag, 1) tagArr[0] = tag request := broadcastRequest{tags: tagArr, data: msgArr, enqueueTime: when, ctx: context.Background()} @@ -3711,48 +3708,29 @@ func TestPreparePeerData(t *testing.T) { data: [][]byte{[]byte("test"), []byte("data")}, } - peers := []*wsPeer{} wn := WebsocketNetwork{} - data, comp, digests, seenPrioPPTag := wn.broadcaster.preparePeerData(req, false, peers) + data, digests := wn.broadcaster.preparePeerData(req, false) require.NotEmpty(t, data) - require.Empty(t, comp) require.NotEmpty(t, digests) require.Equal(t, len(req.data), len(digests)) require.Equal(t, len(data), len(digests)) - require.False(t, seenPrioPPTag) for i := range data { require.Equal(t, append([]byte(req.tags[i]), req.data[i]...), data[i]) } - // compression - peer1 := wsPeer{ - features: 0, - } - peer2 := wsPeer{ - features: pfCompressedProposal, - } - peers = []*wsPeer{&peer1, &peer2} - data, comp, digests, seenPrioPPTag = wn.broadcaster.preparePeerData(req, true, peers) + data, digests = wn.broadcaster.preparePeerData(req, true) require.NotEmpty(t, data) - require.NotEmpty(t, comp) require.NotEmpty(t, digests) require.Equal(t, len(req.data), len(digests)) require.Equal(t, len(data), len(digests)) - require.Equal(t, len(comp), len(digests)) - require.True(t, seenPrioPPTag) for i := range data { - require.Equal(t, append([]byte(req.tags[i]), req.data[i]...), data[i]) - } - - for i := range comp { if req.tags[i] != protocol.ProposalPayloadTag { - require.Equal(t, append([]byte(req.tags[i]), req.data[i]...), 
comp[i]) - require.Equal(t, data[i], comp[i]) + require.Equal(t, append([]byte(req.tags[i]), req.data[i]...), data[i]) + require.Equal(t, data[i], data[i]) } else { - require.NotEqual(t, data[i], comp[i]) - require.Equal(t, append([]byte(req.tags[i]), zstdCompressionMagic[:]...), comp[i][:len(req.tags[i])+len(zstdCompressionMagic)]) + require.Equal(t, append([]byte(req.tags[i]), zstdCompressionMagic[:]...), data[i][:len(req.tags[i])+len(zstdCompressionMagic)]) } } } diff --git a/network/wsPeer.go b/network/wsPeer.go index 2b302f071f..88a0c615f9 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -746,7 +746,7 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (close bool, reas wp.log.Warnf("wsPeer handleMessageOfInterest: could not unmarshall message from: %s %v", wp.conn.RemoteAddrString(), err) return true, disconnectBadData } - msgs := make([]sendMessage, 1, 1) + msgs := make([]sendMessage, 1) msgs[0] = sendMessage{ data: nil, enqueued: time.Now(), @@ -911,8 +911,8 @@ func (wp *wsPeer) writeLoopCleanup(reason disconnectReason) { } func (wp *wsPeer) writeNonBlock(ctx context.Context, data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time) bool { - msgs := make([][]byte, 1, 1) - digests := make([]crypto.Digest, 1, 1) + msgs := make([][]byte, 1) + digests := make([]crypto.Digest, 1) msgs[0] = data digests[0] = digest return wp.writeNonBlockMsgs(ctx, msgs, highPrio, digests, msgEnqueueTime) @@ -1090,7 +1090,7 @@ func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Re defer wp.getAndRemoveResponseChannel(hash) // Send serializedMsg - msg := make([]sendMessage, 1, 1) + msg := make([]sendMessage, 1) msg[0] = sendMessage{ data: append([]byte(tag), serializedMsg...), enqueued: time.Now(), @@ -1166,10 +1166,6 @@ func (wp *wsPeer) sendMessagesOfInterest(messagesOfInterestGeneration uint32, me } } -func (wp *wsPeer) pfProposalCompressionSupported() bool { - return wp.features&pfCompressedProposal != 0 -} - func (wp *wsPeer) OnClose(f func()) { if wp.closers == nil { wp.closers = []func(){} @@ -1180,7 +1176,9 @@ func (wp *wsPeer) OnClose(f func()) { //msgp:ignore peerFeatureFlag type peerFeatureFlag int -const pfCompressedProposal peerFeatureFlag = 1 +const ( + pfCompressedProposal peerFeatureFlag = 1 << iota +) // versionPeerFeatures defines protocol version when peer features were introduced const versionPeerFeatures = "2.2" From 23a04c280274df7e723a3c2306b68e02586708e6 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:49:42 -0400 Subject: [PATCH 57/82] tests: flaky tests fixes (#6098) --- ledger/ledger_test.go | 46 ++++++++++--------- network/p2p/p2p_test.go | 8 +++- .../scripts/e2e_subs/goal-partkey-commands.sh | 15 ++++-- 3 files changed, 41 insertions(+), 28 deletions(-) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index c4bb74fcb5..e73d648b4d 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -2915,6 +2915,12 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi require.NoError(t, err) defer l.Close() + // quit the commitSyncer goroutine: this test flushes manually with triggerTrackerFlush + l.trackers.ctxCancel() + l.trackers.ctxCancel = nil + <-l.trackers.commitSyncerClosed + l.trackers.commitSyncerClosed = nil + blk := genesisInitState.Block sp := bookkeeping.StateProofTrackingData{ @@ -2929,6 +2935,9 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi 
blk.BlockHeader.Round++ err = l.AddBlock(blk, agreement.Certificate{}) require.NoError(t, err) + if i > 0 && i%100 == 0 { + triggerTrackerFlush(t, l) + } } // we simulate that the stateproof for round 512 is confirmed on chain, and we can move to the next one. @@ -2941,31 +2950,12 @@ func testVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T, cfg confi blk.BlockHeader.Round++ err = l.AddBlock(blk, agreement.Certificate{}) require.NoError(t, err) - } - - // wait all pending commits to finish - l.trackers.accountsWriting.Wait() - - // quit the commitSyncer goroutine: this test flushes manually with triggerTrackerFlush - l.trackers.ctxCancel() - l.trackers.ctxCancel = nil - <-l.trackers.commitSyncerClosed - l.trackers.commitSyncerClosed = nil - - // it is possible a commmit was scheduled while commitSyncer was closing so that there is one pending task - // that required to be done before before the ledger can be closed, so drain the queue -outer: - for { - select { - case <-l.trackers.deferredCommits: - log.Info("drained deferred commit") - l.trackers.accountsWriting.Done() - default: - break outer + if i%100 == 0 { + triggerTrackerFlush(t, l) } } - // flush one final time + // flush remaining blocks triggerTrackerFlush(t, l) var vtSnapshot map[basics.Round]*ledgercore.VotersForRound @@ -2982,6 +2972,18 @@ outer: require.NotContains(t, vtSnapshot, basics.Round(240)) }() + t.Log("reloading ledger") + // drain any deferred commits since AddBlock above triggered scheduleCommit +outer: + for { + select { + case <-l.trackers.deferredCommits: + l.trackers.accountsWriting.Done() + default: + break outer + } + } + err = l.reloadLedger() require.NoError(t, err) diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 2da5782afc..021a5fbd8b 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -290,7 +290,13 @@ func TestP2PMakeHostAddressFilter(t *testing.T) { mala, err := multiaddr.NewMultiaddr(la) require.NoError(t, err) host.Network().Listen(mala) - require.Empty(t, host.Addrs()) + addrs := host.Addrs() + if len(addrs) > 0 { + // CI servers might have a single public IP interface, validate if this is a case + for _, a := range addrs { + require.True(t, manet.IsPublicAddr(a)) + } + } host.Close() } diff --git a/test/scripts/e2e_subs/goal-partkey-commands.sh b/test/scripts/e2e_subs/goal-partkey-commands.sh index dd60d44016..b333a0e8aa 100755 --- a/test/scripts/e2e_subs/goal-partkey-commands.sh +++ b/test/scripts/e2e_subs/goal-partkey-commands.sh @@ -63,7 +63,8 @@ fail_test () { create_and_fund_account () { set +x # disable command echoing to hide the account funding output - local TEMP_ACCT=$(${gcmd} account new|awk '{ print $6 }') + local TEMP_ACCT + TEMP_ACCT=$(${gcmd} account new|awk '{ print $6 }') SEND_OUTOUT=$(${gcmd} clerk send -f "$INITIAL_ACCOUNT" -t "$TEMP_ACCT" -a 1000000 2>&1) if [[ $SEND_OUTOUT == *"Couldn't broadcast tx"* ]]; then fail_test "Failed to fund account: $SEND_OUTOUT" @@ -77,17 +78,21 @@ create_and_fund_account () { # $2 - a participation id # $3 - error message verify_registered_state () { + SEARCH_STATE=$(echo "$1" | xargs) + SEARCH_KEY=$(echo "$2" | xargs) + SEARCH_INVOKE_CONTEXT=$(echo "$3" | xargs) + # look for participation ID anywhere in the partkeyinfo output PARTKEY_OUTPUT=$(${gcmd} account partkeyinfo) - if ! echo "$PARTKEY_OUTPUT" | grep -q "$2"; then - fail_test "Key $2 was not installed properly for cmd '$3':\n$PARTKEY_OUTPUT" + if ! 
echo "$PARTKEY_OUTPUT" | grep -q -F "$SEARCH_KEY"; then + fail_test "Key $SEARCH_KEY was not installed properly for cmd '$SEARCH_INVOKE_CONTEXT':\n$PARTKEY_OUTPUT" fi # looking for yes/no, and the 8 character head of participation id in this line: # yes LFMT...RHJQ 4UPT6AQC... 4 0 3000 LISTKEY_OUTPUT=$(${gcmd} account listpartkeys) - if ! echo "$LISTKEY_OUTPUT" | grep -q "$1.*$(echo "$2" | cut -c1-8)"; then - fail_test "Unexpected key $2 state ($1) for cmd '$3':\n$LISTKEY_OUTPUT" + if ! echo "$LISTKEY_OUTPUT" | grep -q "$SEARCH_STATE.*$(echo "$SEARCH_KEY" | cut -c1-8)"; then + fail_test "Unexpected key $SEARCH_KEY state (looked for $SEARCH_STATE ) for cmd '$SEARCH_INVOKE_CONTEXT':\n$LISTKEY_OUTPUT" fi } From 7fc243c2080486b8a3afb355d8d5bae8128985a3 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:04:07 -0400 Subject: [PATCH 58/82] docs: p2p package overview (#6096) Co-authored-by: Jason Paulos Co-authored-by: Gary Malouf <982483+gmalouf@users.noreply.github.com> --- catchup/ledgerFetcher.go | 6 +- catchup/universalFetcher.go | 6 +- network/README-P2P.md | 149 +++++++++++++++++++ network/limitcaller/rateLimitingTransport.go | 21 ++- network/p2p/README.md | 88 ++++++++++- network/p2p/capabilities.go | 16 +- network/p2p/capabilities_test.go | 2 +- network/p2p/p2p.go | 7 +- network/p2pNetwork.go | 5 +- network/p2pNetwork_test.go | 5 - network/p2pPeer.go | 18 +-- node/node.go | 4 +- 12 files changed, 279 insertions(+), 48 deletions(-) create mode 100644 network/README-P2P.md diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go index 916627db8f..ae4c720108 100644 --- a/catchup/ledgerFetcher.go +++ b/catchup/ledgerFetcher.go @@ -81,7 +81,11 @@ func (lf *ledgerFetcher) requestLedger(ctx context.Context, peer network.HTTPPee } network.SetUserAgentHeader(request.Header) - return peer.GetHTTPClient().Do(request) + httpClient := peer.GetHTTPClient() + if httpClient == nil { + return nil, fmt.Errorf("requestLedger: HTTPPeer %s has no http client", peer.GetAddress()) + } + return httpClient.Do(request) } func (lf *ledgerFetcher) headLedger(ctx context.Context, peer network.Peer, round basics.Round) error { diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 27b970fc26..c7a8a9a4cf 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -69,11 +69,15 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro } address = fetcherClient.address() } else if httpPeer, validHTTPPeer := peer.(network.HTTPPeer); validHTTPPeer { + httpClient := httpPeer.GetHTTPClient() + if httpClient == nil { + return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: HTTPPeer %s has no http client", httpPeer.GetAddress()) + } fetcherClient := &HTTPFetcher{ peer: httpPeer, rootURL: httpPeer.GetAddress(), net: uf.net, - client: httpPeer.GetHTTPClient(), + client: httpClient, log: uf.log, config: &uf.config} fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round) diff --git a/network/README-P2P.md b/network/README-P2P.md new file mode 100644 index 0000000000..c67bd53273 --- /dev/null +++ b/network/README-P2P.md @@ -0,0 +1,149 @@ +# P2P Network implementation overview + +Refer to [p2p sub-package overview](./p2p/README.md) for details about p2p sub-components. + +`P2PNetwork` implements the `GossipNode` interface similarly to `WsNetwork`. 
Both use +the same peer connection management and message broadcast functions but different +transport: lip2p-managed connections and HTTP + WebSocket, respectively. +`P2PNetwork` and `WsNetwork` require `config.NetAddress` to be set in order to start a server. + +In addition, `HybridNetwork` is an aggregate of `P2PNetwork` and `WsNetwork` allowing a node +to interact over both networks. In the case of hybrid operation, both `config.P2PNetAddress` and +`config.NetAddress` are used. + +## General design + +`P2PNetwork` follows the `WsNetwork` approach for peers management and message handling: + - `msgHandler` used process or route the network protocol messages to external handlers + (for example, transaction handler or agreement service) + - `broadcaster` implementing the broadcast functionality (see below) + - mesh thread to maintain `GossipFanout` number of outgoing peers + - HTTP Server for external HTTP services (block, catchpoints) + - `OnNetworkAdvance` listener to react on round advancing + +A key difference is that `P2PNetwork` uses `go-libp2p-pubsub` for TX message handling. +Upon start it subscribes to `/algo/tx/0.1.0` topic and publishes TX messages as needed. +The `pubsub` library divides message handling into two stages: validation and processing. Based on +the validation result, a message is either discarded or accepted for further +broadcasting to other peers. This necessitates having separate handlers for TX messages +in `TxHandler`, as we must synchronously determine whether a transaction group is valid: + - can't ignore fast and broadcast later - will be rejected as a seen message + - can't accept fast to prevent invalid/expired transactions broadcasting + +## Major Components + +### HTTP Services + +`P2PNetwork` uses libp2p's `http` submodule to handle HTTP traffic over libp2p-managed connection. +It is `http.Handler`-compatible so that service handlers are registered the same way as for `WsNetwork`. + +### Phonebook and Peerstore and peer classes + +Originally phonebook was designed as an address registry holding permanent (`-p` cli option +or `phonebook.json` extra configuration file) and dynamic (SRV DNS records) entries. +These entries later can be later retrieved by a peer role +(`PhoneBookEntryRelayRole` or `PhoneBookEntryArchivalRole`). +A new `PeerStore` (built on top of `libp2p.Peerstore`) resembles the original `Phonebook` +by strictly implementing some of its methods and has the remaining `Phonebook`'s methods +with a slightly different signature - `string` vs `peer.AddrInfo` for address representation. +The main issue is that entries in `PeerStore` are identified by `PeerID` +and each peer might have multiple addresses (versus the original WS peers with the only one +`host:port` connectivity option.) + +Both P2PNetwork and WsNetwork have an extra level of peer classification on top of two phonebook's +classes: `PeersConnectedOut`, `PeersConnectedIn`, `PeersPhonebookRelays`, `PeersPhonebookArchivalNodes`. +This allows network clients to be more precise on peers set they want to work with. For example, +ledger service wants `PeersPhonebookArchivalNodes`, and transaction syncer - `PeersConnectedOut`. + + +### wsPeer + +Peers are created in `wsStreamHandler` that is called for both incoming and outgoing connections +(and streams). `incoming` flag is set to true for incoming connection. 
+At the very beginning of the `wsStreamHandler` one byte read/write happens in order to make sure: + - Stream is operable + - A placeholder for a handshake where some meta-data can be exchanged + +Each peer gets a read channel `handler.readBuffer` where it enqueues incoming messages for routing +to appropriate handler. + +Connected peers are maintained as a `wsPeers` map similarly to the `WsNetwork`. +The main difference between `P2PNetwork` and `WsNetwork` is `http.Client`. Because wsPeers operate +over the multiplexed streams in libp2p-managed connection, a plain `http.Client` would not be able +to connect to a p2p HTTP server. This requires the `wsPeer` constructed in `P2PNetwork` to have a special +libp2p-streams compatible `http.Client` produced by `MakeHTTPClientWithRateLimit` helper method. +It implements a rate-limiting approach similar to the regular http clients from `WsNetwork`. + +### Broadcaster + +`msgBroadcaster` encapsulates a shared broadcasting logic: priority vs bulk messages (and queues), +data preparation, peers retrieving. Broadcast requests eventually hits +`peer.writeNonBlockMsgs` -> `peer.writeLoopSendMsg` -> `conn.WriteMessage`. +See the diagram denoting the broadcast data flow. + +```mermaid +graph LR + + p2pnet[P2PNetwork] + wsnet[WsNetwork] + B[broadcaster] + + p2pnet & wsnet --> B + + subgraph "wsPeer" + direction LR + writeNonBlockMsgs + Conn[conn.WriteMessage] + + subgraph "writeLoop" + writeLoopSendMsg + end + + writeNonBlockMsgs --> writeLoop + writeLoopSendMsg --> Conn + end + + B --> writeNonBlockMsgs + + Conn --> WMP2P & WMWS + + subgraph "wsPeerConnP2P" + WMP2P[WriteMessage] + end + + subgraph "websocket" + WMWS[WriteMessage] + end + + subgraph "libp2p" + stream.Write + end + + WMP2P --> libp2p +``` + +### DHT and Capabilities discovery + +DHT is controlled by the `EnableDHTProviders` configuration option and the capabilities +exposed by a node. These capabilities include: + - `archival`: a listening node with `Archival` config flag set + - `catchpointStoring`: a listening node configured to store catchpoints + - `gossip`: a listening node with `EnableGossipService` config flag set + +When the `P2PNetwork` starts, the node begins advertising its capabilities by running +a background goroutine. By default, the underlying DHT implementation pulls bootstrap nodes from +a peer store and attempts to connect immediately, which is not how go-algorand services operate. +To address this, a new `bootstrapper` abstraction has been added to control bootstrap peer +access using the DHT's `BootstrapFunc` mechanism. The callback function returns empty bootstrap +peers until the `P2PNetwork` starts. + +### Net identity based peers deduplication + +`WsNetwork` net identity was slightly extended to allow ws and p2p nodes cross-check +when running in a hybrid mode: + - `identityTracker` instance is shared between `WsNetwork` and `P2PNetwork` + - identity schema supplied to the `WsNetwork` uses a p2p-node private key based message signer + - `PublicAddress` must be set for hybrid nodes in order to operate properly + +Using the changes above `identityTracker` is able to deduplicate `WsNetwork` peer if it ends up +to be hybrid node already connected to via `P2PNetwork` and other way around. 
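The deduplication described above reduces to a mutex-protected map from identity key to peer. The sketch below is a self-contained approximation with simplified stand-in types (string key, opaque peer) rather than the package's `crypto.PublicKey` and `*wsPeer`; the mutex mirrors the data-race fix a later patch in this series applies for hybrid mode:

```go
// Sketch only: a toy identity tracker shared by two transports.
package main

import (
	"fmt"
	"sync"
)

type peerConn struct{ addr string }

type identTracker struct {
	mu        sync.Mutex
	peersByID map[string]*peerConn
}

func newIdentTracker() *identTracker {
	return &identTracker{peersByID: make(map[string]*peerConn)}
}

// setIdentity claims an identity for p; it reports false when a different peer already
// holds the identity, which is the signal to drop the duplicate connection.
func (t *identTracker) setIdentity(id string, p *peerConn) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if existing, ok := t.peersByID[id]; ok {
		return existing == p
	}
	t.peersByID[id] = p
	return true
}

// removeIdentity releases the identity only if it is still held by the same peer.
func (t *identTracker) removeIdentity(id string, p *peerConn) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.peersByID[id] == p {
		delete(t.peersByID, id)
	}
}

func main() {
	tr := newIdentTracker()
	ws := &peerConn{addr: "ws:1.2.3.4"}
	p2p := &peerConn{addr: "p2p:QmPeer"}
	fmt.Println(tr.setIdentity("node-key", ws))  // true: first connection wins
	fmt.Println(tr.setIdentity("node-key", p2p)) // false: same node over the other transport
	tr.removeIdentity("node-key", ws)
}
```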
diff --git a/network/limitcaller/rateLimitingTransport.go b/network/limitcaller/rateLimitingTransport.go index 45bc0725ed..de68c9b371 100644 --- a/network/limitcaller/rateLimitingTransport.go +++ b/network/limitcaller/rateLimitingTransport.go @@ -50,19 +50,16 @@ var ErrConnectionQueueingTimeout = errors.New("rateLimitingTransport: queueing t // according to the entries in the phonebook. func MakeRateLimitingTransport(phonebook ConnectionTimeStore, queueingTimeout time.Duration, dialer *Dialer, maxIdleConnsPerHost int) RateLimitingTransport { defaultTransport := http.DefaultTransport.(*http.Transport) - return RateLimitingTransport{ - phonebook: phonebook, - innerTransport: &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: dialer.innerDialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - MaxIdleConnsPerHost: maxIdleConnsPerHost, - }, - queueingTimeout: queueingTimeout, + innerTransport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: dialer.innerDialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + MaxIdleConnsPerHost: maxIdleConnsPerHost, } + return MakeRateLimitingTransportWithRoundTripper(phonebook, queueingTimeout, innerTransport, nil, maxIdleConnsPerHost) } // MakeRateLimitingTransportWithRoundTripper creates a rate limiting http transport that would limit the requests rate diff --git a/network/p2p/README.md b/network/p2p/README.md index 8490e391b6..b95e5be32f 100644 --- a/network/p2p/README.md +++ b/network/p2p/README.md @@ -23,7 +23,7 @@ Libp2p also provides an implementation of a message-based gossip protocol, Gossi Algorand's current network protocol sends messages between peers over bidirectional WebSocket connections. Nodes that are configured to enable message-forwarding (including -nodes currently called "relays") validate incoming messages, then selectively forward +nodes currently called "relays") validate incoming messages, then selectively forward messages to other connected peers. This network implementation (`WebsocketNetwork`) sits behind the `GossipNode` interface in the network package. @@ -36,8 +36,8 @@ via peer connections managed by libp2p. The `P2PNetwork` implementation uses and [peer IDs](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-ids-in-multiaddrs) to establish connections and identify peers. -Currently transactions (protocol tag `TX`) are distributed using the GossipSub protocol, -while all other messages are forwarded over a custom message protocol `/algorand-ws/1.0.0` +Currently transactions (protocol tag `TX`) are distributed using the GossipSub protocol (see [pubsub.go](./pubsub.go)), +while all other messages are forwarded over the pre-existing custom message protocol `/algorand-ws/1.0.0` (see [streams.go](./streams.go)) that uses the same message serialization as the existing `WebsocketNetwork` implementation. These two protocols are multiplexed over a single connection using libp2p streams. @@ -63,3 +63,85 @@ graph LR AW --> WS S --> T ``` + +The underlying libp2p implementation is abstracted as `p2p.Service` and is initialized in two steps: +1. Creating a p2p `Host` +2. 
Creating a service `serviceImpl` object + +`Host` is also used for p2p HTTP server and DHT Discovery service creation. It is also useful for unit testing. Note, `Host` is created with `NoListenAddrs` options that prevents automatic listening and networking until the `Service.Start()` is called. This follows the designs of Algod services (including the WsNetwork service). + +### Connection limiting + +libp2p's `ResourceManager` is used to limit the number of connections up to `cfg.P2PIncomingConnectionsLimit`. + +### DHT and capabilities + +Provides helper methods to construct DHT discovery service using `go-libp2p-kad-dht` library. +High level [CapabilitiesDiscovery](./capabilities.go) class supports retrieving (`PeersForCapability`) +peers by a given capability(-ies) or advertising own capabilities (`AdvertiseCapabilities`). + +Note, by default private and non-routable addresses are filtered (see `AddrsFactory`), +libp2p's `ObservedAddrManager` can track its own public address and makes it available +(and so that discoverable with DHT) if it was observed at least 4 times in 30 minutes (as of libp2p@v0.33.2). + +```mermaid +graph LR + + subgraph "node" + Cap[Capabilities] + end + + subgraph "P2P Implementation" + P2P[P2PNetwork] + AdvCap[AdvertiseCapabilities] + end + + P2P --> AdvCap + Cap -.-> P2P + + subgraph "libp2p" + Adv[Advertise] + Addr[Addrs] + OAM[ObservedAddrManager] + AF[AddrFactory] + KAD["/kad/1.0.0"] + end + + OAM -.-> Addr + AF -.-> Addr + AdvCap --> Adv + + subgraph "libp2p-kad-dht" + Pro[Provide] + end + + Addr -.-> Pro + Adv --> Pro + Pro --> KAD +``` + +### HTTP over libp2p connection + +libp2p@0.33 added ability to multiplex HTTP traffic in p2p connection. +A custom `/algorand-http/1.0.0` stream is utilized to expose HTTP server and allow +network service clients (catchup, catchpoint, txsync) to register its own handlers +similarly to the legacy ws-net implementation. + +### Peerstore + +In-memory peerstore implements `libp2p.Peerstore` and go-algorand `Phonebook` interfaces. +Peer classes (relays, archival, etc) and persistent peers (i.e. peers from command line or phonebook.json) +are supported. Possible enhancement is to save/load peerstore to/from disk to tolerate bootstrap nodes failures. + +### Logging + +lip2p uses zap logger as a separate `ipfs/go-log/v2` module. `EnableP2PLogging` helper adds +go-algorand's `logrus` as a custom zap core so that all libp2p logs go through go-algorand logging facility. +Unfortunately `ipfs/go-log/v2` has a primary logging core as module variable that makes impossible +to have custom `logrus` sub-loggers in unit tests. + +### Metrics + +`go-libp2p` uses Prometheus as a metrics library, `go-libp2p-kad-dht` relies on OpenCensus library. +go-algorand has two collectors (see `util/metrics`) for both Prometheus and OpenCensus for +counters and gauges with labels. Other types (summary, histogram, distribution) are not supported at the moment. 
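As a rough illustration of the "counters and gauges with labels" that the collectors cover, the sketch below registers a labeled counter and gauge with the Prometheus client library directly; the metric and label names are invented for the example and are not go-algorand's:

```go
// Sketch only: labeled counter/gauge registration and scraping endpoint.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	msgs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_p2p_messages_total",
		Help: "Messages handled, labeled by protocol tag.",
	}, []string{"tag"})

	peers := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "example_p2p_peers",
		Help: "Connected peers, labeled by direction.",
	}, []string{"direction"})

	reg.MustRegister(msgs, peers)

	// record some sample observations
	msgs.WithLabelValues("TX").Inc()
	peers.WithLabelValues("outgoing").Set(4)

	// expose the registry; summaries, histograms and distributions would need extra
	// collector support, which is the limitation noted above
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe("127.0.0.1:9101", nil)
}
```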
\ No newline at end of file diff --git a/network/p2p/capabilities.go b/network/p2p/capabilities.go index e5781aa389..7a418767d1 100644 --- a/network/p2p/capabilities.go +++ b/network/p2p/capabilities.go @@ -56,13 +56,13 @@ type CapabilitiesDiscovery struct { wg sync.WaitGroup } -// Advertise implements the discovery.Discovery/discovery.Advertiser interface -func (c *CapabilitiesDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) { +// advertise implements the discovery.Discovery/discovery.Advertiser interface +func (c *CapabilitiesDiscovery) advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) { return c.disc.Advertise(ctx, ns, opts...) } -// FindPeers implements the discovery.Discovery/discovery.Discoverer interface -func (c *CapabilitiesDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) { +// findPeers implements the discovery.Discovery/discovery.Discoverer interface +func (c *CapabilitiesDiscovery) findPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) { return c.disc.FindPeers(ctx, ns, opts...) } @@ -78,8 +78,8 @@ func (c *CapabilitiesDiscovery) Host() host.Host { return c.dht.Host() } -// AddPeer adds a given peer.AddrInfo to the Host's Peerstore, and the DHT's routing table -func (c *CapabilitiesDiscovery) AddPeer(p peer.AddrInfo) (bool, error) { +// addPeer adds a given peer.AddrInfo to the Host's Peerstore, and the DHT's routing table +func (c *CapabilitiesDiscovery) addPeer(p peer.AddrInfo) (bool, error) { c.Host().Peerstore().AddAddrs(p.ID, p.Addrs, libpeerstore.AddressTTL) return c.dht.RoutingTable().TryAddPeer(p.ID, true, true) } @@ -93,7 +93,7 @@ func (c *CapabilitiesDiscovery) PeersForCapability(capability Capability, n int) var peers []peer.AddrInfo // +1 because it can include self but we exclude self from the returned list // that might confuse the caller (and tests assertions) - peersChan, err := c.FindPeers(ctx, string(capability), discovery.Limit(n+1)) + peersChan, err := c.findPeers(ctx, string(capability), discovery.Limit(n+1)) if err != nil { return nil, err } @@ -128,7 +128,7 @@ func (c *CapabilitiesDiscovery) AdvertiseCapabilities(capabilities ...Capability var err error advertisementInterval := maxAdvertisementInterval for _, capa := range capabilities { - ttl, err0 := c.Advertise(c.dht.Context(), string(capa)) + ttl, err0 := c.advertise(c.dht.Context(), string(capa)) if err0 != nil { err = err0 c.log.Errorf("failed to advertise for capability %s: %v", capa, err0) diff --git a/network/p2p/capabilities_test.go b/network/p2p/capabilities_test.go index 881860f647..5b41ed70d8 100644 --- a/network/p2p/capabilities_test.go +++ b/network/p2p/capabilities_test.go @@ -62,7 +62,7 @@ func TestCapabilities_Discovery(t *testing.T) { for _, capD := range caps { peersAdded := 0 for _, addr := range addrs { - added, err := capD.AddPeer(addr) + added, err := capD.addPeer(addr) require.NoError(t, err) require.True(t, added) peersAdded++ diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index e908f148d8..2f8e00b911 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -61,7 +61,6 @@ type Service interface { IDSigner() *PeerIDChallengeSigner AddrInfo() peer.AddrInfo // return addrInfo for self - DialNode(context.Context, *peer.AddrInfo) error DialPeersUntilTargetCount(targetConnCount int) ClosePeer(peer.ID) error @@ -257,15 +256,15 @@ func (s *serviceImpl) 
DialPeersUntilTargetCount(targetConnCount int) { if len(s.host.Network().ConnsToPeer(peerInfo.ID)) > 0 { continue } - err := s.DialNode(context.Background(), peerInfo) // leaving the calls as blocking for now, to not over-connect beyond fanout + err := s.dialNode(context.Background(), peerInfo) // leaving the calls as blocking for now, to not over-connect beyond fanout if err != nil { s.log.Warnf("failed to connect to peer %s: %v", peerInfo.ID, err) } } } -// DialNode attempts to establish a connection to the provided peer -func (s *serviceImpl) DialNode(ctx context.Context, peer *peer.AddrInfo) error { +// dialNode attempts to establish a connection to the provided peer +func (s *serviceImpl) dialNode(ctx context.Context, peer *peer.AddrInfo) error { // don't try connecting to ourselves if peer.ID == s.host.ID() { return nil diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 37c6cfcd52..ab53f72e01 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -767,6 +767,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) if err != nil { + n.log.Warnf("Cannot construct HTTP Client for %s: %v", p2pPeer, err) client = nil } var netIdentPeerID algocrypto.PublicKey @@ -782,7 +783,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea peerCore := makePeerCore(ctx, n, n.log, n.handler.readBuffer, addr, client, addr) wsp := &wsPeer{ wsPeerCore: peerCore, - conn: &wsPeerConnP2PImpl{stream: stream}, + conn: &wsPeerConnP2P{stream: stream}, outgoing: !incoming, identity: netIdentPeerID, } @@ -844,7 +845,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea // peerRemoteClose called from wsPeer to report that it has closed func (n *P2PNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) { - remotePeerID := peer.conn.(*wsPeerConnP2PImpl).stream.Conn().RemotePeer() + remotePeerID := peer.conn.(*wsPeerConnP2P).stream.Conn().RemotePeer() n.wsPeersLock.Lock() n.identityTracker.removeIdentity(peer) delete(n.wsPeers, remotePeerID) diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 7cb35a0e82..8e30986cf1 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -345,11 +345,6 @@ func (s *mockService) AddrInfo() peer.AddrInfo { } } -func (s *mockService) DialNode(ctx context.Context, peer *peer.AddrInfo) error { - s.peers[peer.ID] = *peer - return nil -} - func (s *mockService) DialPeersUntilTargetCount(targetConnCount int) { } diff --git a/network/p2pPeer.go b/network/p2pPeer.go index a5065f01bb..9a0ce2699d 100644 --- a/network/p2pPeer.go +++ b/network/p2pPeer.go @@ -31,15 +31,15 @@ import ( mnet "github.com/multiformats/go-multiaddr/net" ) -type wsPeerConnP2PImpl struct { +type wsPeerConnP2P struct { stream network.Stream } -func (c *wsPeerConnP2PImpl) RemoteAddrString() string { +func (c *wsPeerConnP2P) RemoteAddrString() string { return c.stream.Conn().RemoteMultiaddr().String() } -func (c *wsPeerConnP2PImpl) NextReader() (int, io.Reader, error) { +func (c *wsPeerConnP2P) NextReader() (int, io.Reader, error) { // read length var lenbuf [4]byte _, err := io.ReadFull(c.stream, lenbuf[:]) @@ -54,7 +54,7 @@ func (c *wsPeerConnP2PImpl) NextReader() (int, io.Reader, error) { return websocket.BinaryMessage, io.LimitReader(c.stream, int64(msglen)), nil } 
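A self-contained sketch of the 4-byte big-endian length framing that `NextReader` above and `WriteMessage` below implement, reduced to plain `io` interfaces; the function names are illustrative only:

```go
// Sketch only: length-prefixed framing over an in-memory "stream".
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame writes the message length as a 4-byte big-endian prefix, then the body.
func writeFrame(w io.Writer, msg []byte) error {
	var lenbuf [4]byte
	binary.BigEndian.PutUint32(lenbuf[:], uint32(len(msg)))
	if _, err := w.Write(lenbuf[:]); err != nil {
		return err
	}
	_, err := w.Write(msg)
	return err
}

// readFrame reads one framed message back.
func readFrame(r io.Reader) ([]byte, error) {
	var lenbuf [4]byte
	if _, err := io.ReadFull(r, lenbuf[:]); err != nil {
		return nil, err
	}
	msg := make([]byte, binary.BigEndian.Uint32(lenbuf[:]))
	if _, err := io.ReadFull(r, msg); err != nil {
		return nil, err
	}
	return msg, nil
}

func main() {
	var stream bytes.Buffer // stand-in for the libp2p stream
	if err := writeFrame(&stream, []byte("TX payload")); err != nil {
		panic(err)
	}
	msg, err := readFrame(&stream)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", msg)
}
```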
-func (c *wsPeerConnP2PImpl) WriteMessage(_ int, buf []byte) error { +func (c *wsPeerConnP2P) WriteMessage(_ int, buf []byte) error { // simple message framing: // 1. write encoding of the length var lenbuf [4]byte @@ -69,13 +69,13 @@ func (c *wsPeerConnP2PImpl) WriteMessage(_ int, buf []byte) error { } // Do nothing for now since this doesn't actually close the connection just sends the close message -func (c *wsPeerConnP2PImpl) CloseWithMessage([]byte, time.Time) error { +func (c *wsPeerConnP2P) CloseWithMessage([]byte, time.Time) error { return nil } -func (c *wsPeerConnP2PImpl) SetReadLimit(int64) {} +func (c *wsPeerConnP2P) SetReadLimit(int64) {} -func (c *wsPeerConnP2PImpl) CloseWithoutFlush() error { +func (c *wsPeerConnP2P) CloseWithoutFlush() error { err := c.stream.Close() if err != nil && err != yamux.ErrStreamClosed && err != yamux.ErrSessionShutdown && err != yamux.ErrStreamReset { return err @@ -83,9 +83,9 @@ func (c *wsPeerConnP2PImpl) CloseWithoutFlush() error { return nil } -func (c *wsPeerConnP2PImpl) UnderlyingConn() net.Conn { return nil } +func (c *wsPeerConnP2P) UnderlyingConn() net.Conn { return nil } -func (c *wsPeerConnP2PImpl) RemoteAddr() net.Addr { +func (c *wsPeerConnP2P) RemoteAddr() net.Addr { netaddr, err := mnet.ToNetAddr(c.stream.Conn().RemoteMultiaddr()) if err != nil { logging.Base().Errorf("Error converting multiaddr to netaddr: %v", err) diff --git a/node/node.go b/node/node.go index b6118aadc0..04d2ced84c 100644 --- a/node/node.go +++ b/node/node.go @@ -393,10 +393,10 @@ func (node *AlgorandFullNode) Start() error { // Capabilities returns the node's capabilities for advertising to other nodes. func (node *AlgorandFullNode) Capabilities() []p2p.Capability { var caps []p2p.Capability - if node.config.Archival { + if node.config.Archival && node.config.IsGossipServer() { caps = append(caps, p2p.Archival) } - if node.config.StoresCatchpoints() { + if node.config.StoresCatchpoints() && node.config.IsGossipServer() { caps = append(caps, p2p.Catchpoints) } if node.config.EnableGossipService && node.config.IsGossipServer() { From 3b9f3e3de2bd9a5ad87b2b51624dce9f68b4f2cd Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 15 Aug 2024 13:32:43 -0400 Subject: [PATCH 59/82] p2p: get rid of interface{} from PeerStore methods (#6101) --- network/p2p/capabilities_test.go | 4 +-- network/p2p/p2p.go | 7 ++--- network/p2p/peerstore/peerstore.go | 40 ++++++++++--------------- network/p2p/peerstore/peerstore_test.go | 35 +++++++++------------- network/p2pNetwork.go | 17 +++++++---- network/p2pNetwork_test.go | 2 +- network/phonebook/phonebook.go | 2 +- 7 files changed, 48 insertions(+), 59 deletions(-) diff --git a/network/p2p/capabilities_test.go b/network/p2p/capabilities_test.go index 5b41ed70d8..7057eca017 100644 --- a/network/p2p/capabilities_test.go +++ b/network/p2p/capabilities_test.go @@ -83,7 +83,7 @@ func setupDHTHosts(t *testing.T, numHosts int) []*dht.IpfsDHT { tmpdir := t.TempDir() pk, err := GetPrivKey(cfg, tmpdir) require.NoError(t, err) - ps, err := peerstore.NewPeerStore([]*peer.AddrInfo{}, "") + ps, err := peerstore.NewPeerStore(nil, "") require.NoError(t, err) h, err := libp2p.New( libp2p.ListenAddrStrings("/dns4/localhost/tcp/0"), @@ -134,7 +134,7 @@ func setupCapDiscovery(t *testing.T, numHosts int, numBootstrapPeers int) []*Cap tmpdir := t.TempDir() pk, err := GetPrivKey(cfg, tmpdir) require.NoError(t, err) - ps, err := peerstore.NewPeerStore([]*peer.AddrInfo{}, "") + ps, err := 
peerstore.NewPeerStore(nil, "") require.NoError(t, err) h, err := libp2p.New( libp2p.ListenAddrStrings("/dns4/localhost/tcp/0"), diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 2f8e00b911..3b467b0b27 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -176,7 +176,7 @@ func configureResourceManager(cfg config.Local) (network.ResourceManager, error) } // MakeService creates a P2P service instance -func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler, bootstrapPeers []*peer.AddrInfo) (*serviceImpl, error) { +func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler) (*serviceImpl, error) { sm := makeStreamManager(ctx, log, h, wsStreamHandler, cfg.EnableGossipService) h.Network().Notify(sm) @@ -238,7 +238,7 @@ func (s *serviceImpl) IDSigner() *PeerIDChallengeSigner { // DialPeersUntilTargetCount attempts to establish connections to the provided phonebook addresses func (s *serviceImpl) DialPeersUntilTargetCount(targetConnCount int) { ps := s.host.Peerstore().(*pstore.PeerStore) - peerIDs := ps.GetAddresses(targetConnCount, phonebook.PhoneBookEntryRelayRole) + addrInfos := ps.GetAddresses(targetConnCount, phonebook.PhoneBookEntryRelayRole) conns := s.host.Network().Conns() var numOutgoingConns int for _, conn := range conns { @@ -246,8 +246,7 @@ func (s *serviceImpl) DialPeersUntilTargetCount(targetConnCount int) { numOutgoingConns++ } } - for _, peerInfo := range peerIDs { - peerInfo := peerInfo.(*peer.AddrInfo) + for _, peerInfo := range addrInfos { // if we are at our target count stop trying to connect if numOutgoingConns >= targetConnCount { return diff --git a/network/p2p/peerstore/peerstore.go b/network/p2p/peerstore/peerstore.go index 3eda0d3686..5ae9c6aa04 100644 --- a/network/p2p/peerstore/peerstore.go +++ b/network/p2p/peerstore/peerstore.go @@ -22,12 +22,13 @@ import ( "math/rand" "time" - "github.com/algorand/go-algorand/network/phonebook" - "github.com/algorand/go-deadlock" "github.com/libp2p/go-libp2p/core/peer" libp2p "github.com/libp2p/go-libp2p/core/peerstore" mempstore "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" "golang.org/x/exp/slices" + + "github.com/algorand/go-algorand/network/phonebook" + "github.com/algorand/go-deadlock" ) // when using GetAddresses with getAllAddresses, all the addresses will be retrieved, regardless @@ -76,14 +77,8 @@ func NewPeerStore(addrInfo []*peer.AddrInfo, network string) (*PeerStore, error) return nil, fmt.Errorf("cannot initialize a peerstore: %w", err) } - // initialize peerstore with addresses - peers := make([]interface{}, len(addrInfo)) - for i := 0; i < len(addrInfo); i++ { - peers[i] = addrInfo[i] - } - pstore := &PeerStore{peerStoreCAB: ps} - pstore.AddPersistentPeers(peers, network, phonebook.PhoneBookEntryRelayRole) + pstore.AddPersistentPeers(addrInfo, network, phonebook.PhoneBookEntryRelayRole) return pstore, nil } @@ -102,7 +97,7 @@ func MakePhonebook(connectionsRateLimitingCount uint, } // GetAddresses returns up to N addresses, but may return fewer -func (ps *PeerStore) GetAddresses(n int, role phonebook.PhoneBookEntryRoles) []interface{} { +func (ps *PeerStore) GetAddresses(n int, role phonebook.PhoneBookEntryRoles) []*peer.AddrInfo { return shuffleSelect(ps.filterRetryTime(time.Now(), role), n) } @@ -210,7 +205,7 @@ func (ps *PeerStore) UpdateConnectionTime(addrOrPeerID string, provisionalTime t } // 
ReplacePeerList replaces the peer list for the given networkName and role. -func (ps *PeerStore) ReplacePeerList(addressesThey []interface{}, networkName string, role phonebook.PhoneBookEntryRoles) { +func (ps *PeerStore) ReplacePeerList(addressesThey []*peer.AddrInfo, networkName string, role phonebook.PhoneBookEntryRoles) { // prepare a map of items we'd like to remove. removeItems := make(map[peer.ID]bool, 0) peerIDs := ps.Peers() @@ -226,8 +221,7 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []interface{}, networkName st } } - for _, addr := range addressesThey { - info := addr.(*peer.AddrInfo) + for _, info := range addressesThey { data, _ := ps.Get(info.ID, addressDataKey) if data != nil { // we already have this. @@ -255,17 +249,15 @@ func (ps *PeerStore) ReplacePeerList(addressesThey []interface{}, networkName st // AddPersistentPeers stores addresses of peers which are persistent. // i.e. they won't be replaced by ReplacePeerList calls -func (ps *PeerStore) AddPersistentPeers(dnsAddresses []interface{}, networkName string, role phonebook.PhoneBookEntryRoles) { - for _, addr := range dnsAddresses { - info := addr.(*peer.AddrInfo) +func (ps *PeerStore) AddPersistentPeers(addrInfo []*peer.AddrInfo, networkName string, role phonebook.PhoneBookEntryRoles) { + for _, info := range addrInfo { data, _ := ps.Get(info.ID, addressDataKey) if data != nil { // we already have this. // Make sure the persistence field is set to true ad := data.(addressData) ad.persistent = true - _ = ps.Put(info.ID, addressDataKey, data) - + _ = ps.Put(info.ID, addressDataKey, ad) } else { // we don't have this item. add it. ps.AddAddrs(info.ID, info.Addrs, libp2p.PermanentAddrTTL) @@ -328,8 +320,8 @@ func (ps *PeerStore) popNElements(n int, peerID peer.ID) { _ = ps.Put(peerID, addressDataKey, ad) } -func (ps *PeerStore) filterRetryTime(t time.Time, role phonebook.PhoneBookEntryRoles) []interface{} { - o := make([]interface{}, 0, len(ps.Peers())) +func (ps *PeerStore) filterRetryTime(t time.Time, role phonebook.PhoneBookEntryRoles) []*peer.AddrInfo { + o := make([]*peer.AddrInfo, 0, len(ps.Peers())) for _, peerID := range ps.Peers() { data, _ := ps.Get(peerID, addressDataKey) if data != nil { @@ -344,11 +336,11 @@ func (ps *PeerStore) filterRetryTime(t time.Time, role phonebook.PhoneBookEntryR return o } -func shuffleSelect(set []interface{}, n int) []interface{} { +func shuffleSelect(set []*peer.AddrInfo, n int) []*peer.AddrInfo { if n >= len(set) || n == getAllAddresses { // return shuffled copy of everything out := slices.Clone(set) - shuffleStrings(out) + shuffleAddrInfos(out) return out } // Pick random indexes from the set @@ -361,13 +353,13 @@ func shuffleSelect(set []interface{}, n int) []interface{} { } } } - out := make([]interface{}, n) + out := make([]*peer.AddrInfo, n) for i, index := range indexSample { out[i] = set[index] } return out } -func shuffleStrings(set []interface{}) { +func shuffleAddrInfos(set []*peer.AddrInfo) { rand.Shuffle(len(set), func(i, j int) { set[i], set[j] = set[j], set[i] }) } diff --git a/network/p2p/peerstore/peerstore_test.go b/network/p2p/peerstore/peerstore_test.go index e855013d76..d82b34595d 100644 --- a/network/p2p/peerstore/peerstore_test.go +++ b/network/p2p/peerstore/peerstore_test.go @@ -91,8 +91,7 @@ func TestPeerstore(t *testing.T) { func testPhonebookAll(t *testing.T, set []*peer.AddrInfo, ph *PeerStore) { actual := ph.GetAddresses(len(set), PhoneBookEntryRelayRole) - for _, got := range actual { - info := got.(*peer.AddrInfo) + for _, info := range 
actual { ok := false for _, known := range set { if info.ID == known.ID { @@ -101,13 +100,12 @@ func testPhonebookAll(t *testing.T, set []*peer.AddrInfo, ph *PeerStore) { } } if !ok { - t.Errorf("get returned junk %#v", got) + t.Errorf("get returned junk %#v", info) } } for _, known := range set { ok := false - for _, got := range actual { - info := got.(*peer.AddrInfo) + for _, info := range actual { if info.ID == known.ID { ok = true break @@ -128,8 +126,7 @@ func testPhonebookUniform(t *testing.T, set []*peer.AddrInfo, ph *PeerStore, get } for i := 0; i < uniformityTestLength; i++ { actual := ph.GetAddresses(getsize, PhoneBookEntryRelayRole) - for _, xa := range actual { - info := xa.(*peer.AddrInfo) + for _, info := range actual { if _, ok := counts[info.ID.String()]; ok { counts[info.ID.String()]++ } @@ -226,11 +223,11 @@ func TestMultiPhonebook(t *testing.T) { require.NoError(t, err) infoSet = append(infoSet, info) } - pha := make([]interface{}, 0) + pha := make([]*peer.AddrInfo, 0) for _, e := range infoSet[:5] { pha = append(pha, e) } - phb := make([]interface{}, 0) + phb := make([]*peer.AddrInfo, 0) for _, e := range infoSet[5:] { phb = append(phb, e) } @@ -252,7 +249,7 @@ func TestMultiPhonebookPersistentPeers(t *testing.T) { info, err := peerInfoFromDomainPort("a:4041") require.NoError(t, err) - persistentPeers := []interface{}{info} + persistentPeers := []*peer.AddrInfo{info} set := []string{"b:4042", "c:4043", "d:4044", "e:4045", "f:4046", "g:4047", "h:4048", "i:4049", "j:4010"} infoSet := make([]*peer.AddrInfo, 0) for _, addr := range set { @@ -261,11 +258,11 @@ func TestMultiPhonebookPersistentPeers(t *testing.T) { infoSet = append(infoSet, info) } - pha := make([]interface{}, 0) + pha := make([]*peer.AddrInfo, 0) for _, e := range infoSet[:5] { pha = append(pha, e) } - phb := make([]interface{}, 0) + phb := make([]*peer.AddrInfo, 0) for _, e := range infoSet[5:] { phb = append(phb, e) } @@ -279,10 +276,8 @@ func TestMultiPhonebookPersistentPeers(t *testing.T) { testPhonebookAll(t, append(infoSet, info), ph) allAddresses := ph.GetAddresses(len(set)+len(persistentPeers), PhoneBookEntryRelayRole) for _, pp := range persistentPeers { - pp := pp.(*peer.AddrInfo) found := false for _, addr := range allAddresses { - addr := addr.(*peer.AddrInfo) if addr.ID == pp.ID { found = true break @@ -303,11 +298,11 @@ func TestMultiPhonebookDuplicateFiltering(t *testing.T) { infoSet = append(infoSet, info) } - pha := make([]interface{}, 0) + pha := make([]*peer.AddrInfo, 0) for _, e := range infoSet[:7] { pha = append(pha, e) } - phb := make([]interface{}, 0) + phb := make([]*peer.AddrInfo, 0) for _, e := range infoSet[3:] { phb = append(phb, e) } @@ -343,7 +338,7 @@ func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) { // Test the addresses are populated in the phonebook and a // time can be added to one of them - entries.ReplacePeerList([]interface{}{info1, info2}, "default", PhoneBookEntryRelayRole) + entries.ReplacePeerList([]*peer.AddrInfo{info1, info2}, "default", PhoneBookEntryRelayRole) addrInPhonebook, waitTime, provisionalTime := entries.GetConnectionWaitTime(string(info1.ID)) require.Equal(t, true, addrInPhonebook) require.Equal(t, time.Duration(0), waitTime) @@ -458,14 +453,14 @@ func TestPhonebookRoles(t *testing.T) { relaysSet := []string{"relay1:4040", "relay2:4041", "relay3:4042"} archiverSet := []string{"archiver1:1111", "archiver2:1112", "archiver3:1113"} - infoRelaySet := make([]interface{}, 0) + infoRelaySet := make([]*peer.AddrInfo, 0) for _, addr := range 
relaysSet { info, err := peerInfoFromDomainPort(addr) require.NoError(t, err) infoRelaySet = append(infoRelaySet, info) } - infoArchiverSet := make([]interface{}, 0) + infoArchiverSet := make([]*peer.AddrInfo, 0) for _, addr := range archiverSet { info, err := peerInfoFromDomainPort(addr) require.NoError(t, err) @@ -485,12 +480,10 @@ func TestPhonebookRoles(t *testing.T) { entries := ph.GetAddresses(l, role) if role == PhoneBookEntryRelayRole { for _, entry := range entries { - entry := entry.(*peer.AddrInfo) require.Contains(t, string(entry.ID), "relay") } } else if role == PhoneBookEntryArchiverRole { for _, entry := range entries { - entry := entry.(*peer.AddrInfo) require.Contains(t, string(entry.ID), "archiver") } } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index ab53f72e01..f9dc04b785 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -261,15 +261,21 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo } log.Infof("P2P host created: peer ID %s addrs %s", h.ID(), h.Addrs()) - net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, net.wsStreamHandler, addrInfo) + net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, net.wsStreamHandler) if err != nil { return nil, err } + peerIDs := pstore.Peers() + addrInfos := make([]*peer.AddrInfo, 0, len(peerIDs)) + for _, peerID := range peerIDs { + addrInfo := pstore.PeerInfo(peerID) + addrInfos = append(addrInfos, &addrInfo) + } bootstrapper := &bootstrapper{ cfg: cfg, networkID: networkID, - phonebookPeers: addrInfo, + phonebookPeers: addrInfos, resolveController: dnsaddr.NewMultiaddrDNSResolveController(cfg.DNSSecurityTXTEnforced(), ""), log: net.log, } @@ -426,7 +432,7 @@ func (n *P2PNetwork) meshThreadInner() int { } peers := mergeP2PAddrInfoResolvedAddresses(dnsPeers, dhtPeers) - replace := make([]interface{}, 0, len(peers)) + replace := make([]*peer.AddrInfo, 0, len(peers)) for i := range peers { replace = append(replace, &peers[i]) } @@ -631,9 +637,8 @@ func (n *P2PNetwork) GetPeers(options ...PeerOption) []Peer { n.wsPeersLock.RUnlock() case PeersPhonebookRelays: const maxNodes = 100 - peerIDs := n.pstore.GetAddresses(maxNodes, phonebook.PhoneBookEntryRelayRole) - for _, peerInfo := range peerIDs { - peerInfo := peerInfo.(*peer.AddrInfo) + addrInfos := n.pstore.GetAddresses(maxNodes, phonebook.PhoneBookEntryRelayRole) + for _, peerInfo := range addrInfos { if peerCore, ok := addrInfoToWsPeerCore(n, peerInfo); ok { peers = append(peers, &peerCore) } diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 8e30986cf1..302aa76147 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -782,7 +782,7 @@ func TestP2PHTTPHandler(t *testing.T) { // zero clients allowed, rate limiting window (10s) is greater than queue deadline (1s) pstore, err := peerstore.MakePhonebook(0, 10*time.Second) require.NoError(t, err) - pstore.AddPersistentPeers([]interface{}{&peerInfoA}, "net", phonebook.PhoneBookEntryRelayRole) + pstore.AddPersistentPeers([]*peer.AddrInfo{&peerInfoA}, "net", phonebook.PhoneBookEntryRelayRole) httpClient, err = p2p.MakeHTTPClientWithRateLimit(&peerInfoA, pstore, 1*time.Second, 1) require.NoError(t, err) _, err = httpClient.Get("/test") diff --git a/network/phonebook/phonebook.go b/network/phonebook/phonebook.go index 634ca9c16c..b3aeafb0fa 100644 --- a/network/phonebook/phonebook.go +++ b/network/phonebook/phonebook.go @@ -204,7 +204,7 @@ func (e *phonebookImpl) AddPersistentPeers(dnsAddresses []string, networkName st // 
we already have this. // Make sure the persistence field is set to true pbData.persistent = true - + e.data[addr] = pbData } else { // we don't have this item. add it. e.data[addr] = makePhonebookEntryData(networkName, role, true) From 8e2e2e4f4f714e40d6156ed77cbd1b4b4763d11a Mon Sep 17 00:00:00 2001 From: John Lee Date: Thu, 15 Aug 2024 13:33:04 -0400 Subject: [PATCH 60/82] Installer: add support for algoh to systemd installs (#6102) --- cmd/updater/systemd-setup-user.sh | 18 ++++++++++++++++++ cmd/updater/systemd-setup.sh | 19 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/cmd/updater/systemd-setup-user.sh b/cmd/updater/systemd-setup-user.sh index fa17a1db2b..c5b145eff7 100755 --- a/cmd/updater/systemd-setup-user.sh +++ b/cmd/updater/systemd-setup-user.sh @@ -21,9 +21,27 @@ setup_user() { sed -e s,@@BINDIR@@,"$bindir", "${SCRIPTPATH}/algorand@.service.template-user" \ > "$homedir/.config/systemd/user/algorand@.service" + if [[ ${HOSTMODE} == true ]]; then + echo "[INFO] Hosted mode - replacing algod with algoh" + sed -i 's/algod/algoh/g' "$homedir/.config/systemd/user/algorand@.service" + fi + systemctl --user daemon-reload } +HOSTMODE=false +while getopts H opt; do + case $opt in + H) + HOSTMODE=true + ;; + ?) + echo "Invalid option: -${OPTARG}" + exit 1 + ;; + esac +done +shift $((OPTIND-1)) if [ "$#" != 1 ]; then echo "Usage: $0 username" diff --git a/cmd/updater/systemd-setup.sh b/cmd/updater/systemd-setup.sh index bad8745137..fc27fd209c 100755 --- a/cmd/updater/systemd-setup.sh +++ b/cmd/updater/systemd-setup.sh @@ -14,9 +14,28 @@ setup_root() { sed ${sedargs} "${SCRIPTPATH}/algorand@.service.template" \ > /lib/systemd/system/algorand@.service + if [[ ${HOSTMODE} == true ]]; then + echo "[INFO] Hosted mode - replacing algod with algoh" + sed -i 's/algod/algoh/g' /lib/systemd/system/algorand@.service + fi + systemctl daemon-reload } +HOSTMODE=false +while getopts H opt; do + case $opt in + H) + HOSTMODE=true + ;; + ?) 
+ echo "Invalid option: -${OPTARG}" + exit 1 + ;; + esac +done +shift $((OPTIND-1)) + if [ "$#" != 2 ] && [ "$#" != 3 ]; then echo "Usage: $0 username group [bindir]" exit 1 From 499007787f522af16781b3f3e43a70b3516ea7e3 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 26 Aug 2024 16:06:35 -0400 Subject: [PATCH 61/82] network: fix publicKeyIdentTracker data race in hybrid mode (#6110) --- network/netidentity.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/network/netidentity.go b/network/netidentity.go index 30755f0648..74f9b09e62 100644 --- a/network/netidentity.go +++ b/network/netidentity.go @@ -23,6 +23,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-deadlock" ) // netidentity.go implements functionality to participate in an "Identity Challenge Exchange" @@ -461,12 +462,14 @@ func (noopIdentityTracker) removeIdentity(p *wsPeer) {} // mapping from PublicKeys exchanged in identity challenges to a peer // this structure is not thread-safe; it is protected by wn.peersLock or p2p.wsPeersLock type publicKeyIdentTracker struct { + mu deadlock.Mutex peersByID map[crypto.PublicKey]*wsPeer } // NewIdentityTracker returns a new publicKeyIdentTracker func NewIdentityTracker() *publicKeyIdentTracker { return &publicKeyIdentTracker{ + mu: deadlock.Mutex{}, peersByID: make(map[crypto.PublicKey]*wsPeer), } } @@ -475,6 +478,8 @@ func NewIdentityTracker() *publicKeyIdentTracker { // returns false if it was unable to load the peer into the given identity // or true otherwise (if the peer was already there, or if it was added) func (t *publicKeyIdentTracker) setIdentity(p *wsPeer) bool { + t.mu.Lock() + defer t.mu.Unlock() existingPeer, exists := t.peersByID[p.identity] if !exists { // the identity is not occupied, so set it and return true @@ -489,6 +494,8 @@ func (t *publicKeyIdentTracker) setIdentity(p *wsPeer) bool { // removeIdentity removes the entry in the peersByID map if it exists // and is occupied by the given peer func (t *publicKeyIdentTracker) removeIdentity(p *wsPeer) { + t.mu.Lock() + defer t.mu.Unlock() if t.peersByID[p.identity] == p { delete(t.peersByID, p.identity) } From 8d678c311969979cb0777c3d2befff1fa69621df Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:36:50 -0400 Subject: [PATCH 62/82] config: use P2PHybridIncomingConnectionsLimit only for hybrid mode (#6103) --- cmd/algocfg/profileCommand.go | 4 +- cmd/algocfg/profileCommand_test.go | 9 +++-- config/config.go | 2 +- config/config_test.go | 34 +++++++++++------ config/localTemplate.go | 31 +++++++++------ config/local_defaults.go | 4 +- daemon/algod/server.go | 20 +++++----- installer/config.json.example | 4 +- netdeploy/remote/deployedNetwork.go | 4 +- netdeploy/remote/nodeConfig.go | 38 +++++++++---------- netdeploy/remote/nodecfg/nodeDir.go | 16 ++++---- network/README-P2P.md | 2 +- network/hybridNetwork.go | 3 +- network/hybridNetwork_test.go | 2 +- network/p2p/README.md | 2 +- network/p2p/p2p.go | 2 +- node/node_test.go | 2 +- test/testdata/configs/config-v34.json | 4 +- .../scenario1s-p2p/copy-node-configs.py | 4 +- 19 files changed, 103 insertions(+), 84 deletions(-) diff --git a/cmd/algocfg/profileCommand.go b/cmd/algocfg/profileCommand.go index 076cb65a64..c96a95d2b9 100644 --- a/cmd/algocfg/profileCommand.go +++ b/cmd/algocfg/profileCommand.go @@ -106,7 +106,7 @@ var ( // P2P config defaults 
cfg.EnableP2PHybridMode = true - cfg.P2PNetAddress = ":4190" + cfg.P2PHybridNetAddress = ":4190" cfg.EnableDHTProviders = true return cfg }, @@ -125,7 +125,7 @@ var ( // P2P config defaults cfg.EnableP2PHybridMode = true - cfg.P2PNetAddress = ":4190" + cfg.P2PHybridNetAddress = ":4190" cfg.EnableDHTProviders = true return cfg }, diff --git a/cmd/algocfg/profileCommand_test.go b/cmd/algocfg/profileCommand_test.go index 8d7d95a26e..bdec2d6577 100644 --- a/cmd/algocfg/profileCommand_test.go +++ b/cmd/algocfg/profileCommand_test.go @@ -17,9 +17,10 @@ package main import ( - "github.com/algorand/go-algorand/config" "testing" + "github.com/algorand/go-algorand/config" + "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/test/partitiontest" @@ -80,7 +81,7 @@ func Test_getConfigForArg(t *testing.T) { require.Equal(t, config.PlaceholderPublicAddress, cfg.PublicAddress) require.True(t, cfg.EnableP2PHybridMode) - require.Equal(t, ":4190", cfg.P2PNetAddress) + require.Equal(t, ":4190", cfg.P2PHybridNetAddress) require.True(t, cfg.EnableDHTProviders) }) @@ -100,7 +101,7 @@ func Test_getConfigForArg(t *testing.T) { require.Equal(t, config.PlaceholderPublicAddress, cfg.PublicAddress) require.True(t, cfg.EnableP2PHybridMode) - require.Equal(t, ":4190", cfg.P2PNetAddress) + require.Equal(t, ":4190", cfg.P2PHybridNetAddress) require.True(t, cfg.EnableDHTProviders) }) @@ -121,7 +122,7 @@ func Test_getConfigForArg(t *testing.T) { require.Equal(t, "", cfg.PublicAddress) require.True(t, cfg.EnableP2PHybridMode) - require.Equal(t, "", cfg.P2PNetAddress) + require.Equal(t, "", cfg.P2PHybridNetAddress) require.True(t, cfg.EnableDHTProviders) }) } diff --git a/config/config.go b/config/config.go index 65d711cacc..07cd69cbda 100644 --- a/config/config.go +++ b/config/config.go @@ -176,7 +176,7 @@ func enrichNetworkingConfig(source Local) (Local, error) { } // In hybrid mode we want to prevent connections from the same node over both P2P and WS. // The only way it is supported at the moment is to use net identity challenge that is based on PublicAddress. 
- if (source.NetAddress != "" || source.P2PNetAddress != "") && source.EnableP2PHybridMode && source.PublicAddress == "" { + if (source.NetAddress != "" || source.P2PHybridNetAddress != "") && source.EnableP2PHybridMode && source.PublicAddress == "" { return source, errors.New("PublicAddress must be specified when EnableP2PHybridMode is set") } source.PublicAddress = strings.ToLower(source.PublicAddress) diff --git a/config/config_test.go b/config/config_test.go index 20338766c3..1b1c4c2753 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -153,7 +153,7 @@ func TestLocal_EnrichNetworkingConfig(t *testing.T) { require.ErrorContains(t, err, "PublicAddress must be specified when EnableP2PHybridMode is set") c1 = Local{ - P2PNetAddress: "test1", + P2PHybridNetAddress: "test1", EnableP2PHybridMode: true, } c2, err = enrichNetworkingConfig(c1) @@ -617,23 +617,27 @@ func TestLocal_IsGossipServer(t *testing.T) { require.False(t, cfg.IsGossipServer()) require.False(t, cfg.IsWsGossipServer()) require.False(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.NetAddress = ":4160" require.True(t, cfg.IsGossipServer()) require.True(t, cfg.IsWsGossipServer()) require.False(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.EnableGossipService = false // EnableGossipService does not matter require.True(t, cfg.IsGossipServer()) require.True(t, cfg.IsWsGossipServer()) require.False(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.EnableP2P = true cfg.NetAddress = ":4160" require.True(t, cfg.IsGossipServer()) require.False(t, cfg.IsWsGossipServer()) require.True(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.EnableP2P = false @@ -642,41 +646,47 @@ func TestLocal_IsGossipServer(t *testing.T) { require.True(t, cfg.IsGossipServer()) require.True(t, cfg.IsWsGossipServer()) require.False(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.EnableP2PHybridMode = true cfg.NetAddress = "" require.False(t, cfg.IsGossipServer()) require.False(t, cfg.IsWsGossipServer()) require.False(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.EnableP2PHybridMode = true - cfg.P2PNetAddress = ":4190" + cfg.P2PHybridNetAddress = ":4190" require.True(t, cfg.IsGossipServer()) require.False(t, cfg.IsWsGossipServer()) require.True(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) cfg.EnableP2PHybridMode = true cfg.NetAddress = ":4160" - cfg.P2PNetAddress = ":4190" + cfg.P2PHybridNetAddress = ":4190" require.True(t, cfg.IsGossipServer()) require.True(t, cfg.IsWsGossipServer()) require.True(t, cfg.IsP2PGossipServer()) + require.True(t, cfg.IsHybridServer()) cfg.EnableP2PHybridMode = true cfg.EnableP2P = true cfg.NetAddress = ":4160" - cfg.P2PNetAddress = ":4190" + cfg.P2PHybridNetAddress = ":4190" require.True(t, cfg.IsGossipServer()) require.True(t, cfg.IsWsGossipServer()) require.True(t, cfg.IsP2PGossipServer()) + require.True(t, cfg.IsHybridServer()) cfg.EnableP2PHybridMode = true cfg.EnableP2P = true cfg.NetAddress = ":4160" - cfg.P2PNetAddress = "" + cfg.P2PHybridNetAddress = "" require.True(t, cfg.IsGossipServer()) require.True(t, cfg.IsWsGossipServer()) require.False(t, cfg.IsP2PGossipServer()) + require.False(t, cfg.IsHybridServer()) } func TestLocal_RecalculateConnectionLimits(t *testing.T) { @@ -720,15 +730,15 @@ func TestLocal_RecalculateConnectionLimits(t *testing.T) { t.Parallel() c := Local{ - NetAddress: ":4160", - RestConnectionsSoftLimit: 
test.restSoftIn, - RestConnectionsHardLimit: test.restHardIn, - IncomingConnectionsLimit: test.incomingIn, - P2PIncomingConnectionsLimit: test.p2pIncomingIn, + NetAddress: ":4160", + RestConnectionsSoftLimit: test.restSoftIn, + RestConnectionsHardLimit: test.restHardIn, + IncomingConnectionsLimit: test.incomingIn, + P2PHybridIncomingConnectionsLimit: test.p2pIncomingIn, } if test.p2pIncomingIn > 0 { c.EnableP2PHybridMode = true - c.P2PNetAddress = ":4190" + c.P2PHybridNetAddress = ":4190" } requireFDs := test.reservedIn + test.restHardIn + uint64(test.incomingIn) + uint64(test.p2pIncomingIn) res := c.AdjustConnectionLimits(requireFDs, test.maxFDs) @@ -736,7 +746,7 @@ func TestLocal_RecalculateConnectionLimits(t *testing.T) { require.Equal(t, int(test.restSoftExp), int(c.RestConnectionsSoftLimit)) require.Equal(t, int(test.restHardExp), int(c.RestConnectionsHardLimit)) require.Equal(t, int(test.incomingExp), int(c.IncomingConnectionsLimit)) - require.Equal(t, int(test.p2pIncomingExp), int(c.P2PIncomingConnectionsLimit)) + require.Equal(t, int(test.p2pIncomingExp), int(c.P2PHybridIncomingConnectionsLimit)) }) } } diff --git a/config/localTemplate.go b/config/localTemplate.go index 9583a194cd..96a150a88b 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -134,7 +134,9 @@ type Local struct { // Estimating 1.5MB per incoming connection, 1.5MB*2400 = 3.6GB IncomingConnectionsLimit int `version[0]:"-1" version[1]:"10000" version[17]:"800" version[27]:"2400"` - P2PIncomingConnectionsLimit int `version[34]:"1200"` + // P2PHybridIncomingConnectionsLimit is used as IncomingConnectionsLimit for P2P connections in hybrid mode. + // For pure P2P nodes IncomingConnectionsLimit is used. + P2PHybridIncomingConnectionsLimit int `version[34]:"1200"` // BroadcastConnectionsLimit specifies the number of connections that // will receive broadcast (gossip) messages from this node. If the @@ -607,8 +609,8 @@ type Local struct { // Enabling this setting also requires PublicAddress to be set. EnableP2PHybridMode bool `version[34]:"false"` - // P2PNetAddress sets the listen address used for P2P networking, if hybrid mode is set. - P2PNetAddress string `version[34]:""` + // P2PHybridNetAddress sets the listen address used for P2P networking, if hybrid mode is set. + P2PHybridNetAddress string `version[34]:""` // EnableDHT will turn on the hash table for use with capabilities advertisement EnableDHTProviders bool `version[34]:"false"` @@ -742,16 +744,21 @@ func (cfg Local) IsGossipServer() bool { return cfg.IsWsGossipServer() || cfg.IsP2PGossipServer() } -// IsWsGossipServer returns true if a node configured to run a listening ws net +// IsWsGossipServer returns true if a node is configured to run a listening ws net func (cfg Local) IsWsGossipServer() bool { // 1. NetAddress is set and EnableP2P is not set // 2. 
NetAddress is set and EnableP2PHybridMode is set then EnableP2P is overridden by EnableP2PHybridMode return cfg.NetAddress != "" && (!cfg.EnableP2P || cfg.EnableP2PHybridMode) } -// IsP2PGossipServer returns true if a node configured to run a listening p2p net +// IsP2PGossipServer returns true if a node is configured to run a listening p2p net func (cfg Local) IsP2PGossipServer() bool { - return (cfg.EnableP2P && !cfg.EnableP2PHybridMode && cfg.NetAddress != "") || (cfg.EnableP2PHybridMode && cfg.P2PNetAddress != "") + return (cfg.EnableP2P && !cfg.EnableP2PHybridMode && cfg.NetAddress != "") || (cfg.EnableP2PHybridMode && cfg.P2PHybridNetAddress != "") +} + +// IsHybridServer returns true if a node configured to run a listening both ws and p2p networks +func (cfg Local) IsHybridServer() bool { + return cfg.NetAddress != "" && cfg.P2PHybridNetAddress != "" && cfg.EnableP2PHybridMode } // ensureAbsGenesisDir will convert a path to absolute, and will attempt to make a genesis directory there @@ -950,22 +957,22 @@ func (cfg *Local) AdjustConnectionLimits(requiredFDs, maxFDs uint64) bool { restDelta := diff + reservedRESTConns - cfg.RestConnectionsHardLimit cfg.RestConnectionsHardLimit = reservedRESTConns splitRatio := 1 - if cfg.IsWsGossipServer() && cfg.IsP2PGossipServer() { + if cfg.IsHybridServer() { // split the rest of the delta between ws and p2p evenly splitRatio = 2 } - if cfg.IsWsGossipServer() { + if cfg.IsWsGossipServer() || cfg.IsP2PGossipServer() { if cfg.IncomingConnectionsLimit > int(restDelta) { cfg.IncomingConnectionsLimit -= int(restDelta) / splitRatio } else { cfg.IncomingConnectionsLimit = 0 } } - if cfg.IsP2PGossipServer() { - if cfg.P2PIncomingConnectionsLimit > int(restDelta) { - cfg.P2PIncomingConnectionsLimit -= int(restDelta) / splitRatio + if cfg.IsHybridServer() { + if cfg.P2PHybridIncomingConnectionsLimit > int(restDelta) { + cfg.P2PHybridIncomingConnectionsLimit -= int(restDelta) / splitRatio } else { - cfg.P2PIncomingConnectionsLimit = 0 + cfg.P2PHybridIncomingConnectionsLimit = 0 } } } else { diff --git a/config/local_defaults.go b/config/local_defaults.go index 57457531be..b2007621d0 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -119,8 +119,8 @@ var defaultLocal = Local{ OptimizeAccountsDatabaseOnStartup: false, OutgoingMessageFilterBucketCount: 3, OutgoingMessageFilterBucketSize: 128, - P2PIncomingConnectionsLimit: 1200, - P2PNetAddress: "", + P2PHybridIncomingConnectionsLimit: 1200, + P2PHybridNetAddress: "", P2PPersistPeerID: false, P2PPrivateKeyLocation: "", ParticipationKeysRefreshInterval: 60000000000, diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 309fdc5799..6023bfab2e 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -153,16 +153,16 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes if ot.Overflowed { return errors.New("Initialize() overflowed when adding up ReservedHealthServiceConnections to the existing RLIMIT_NOFILE value; decrease RestConnectionsHardLimit") } - if cfg.IsWsGossipServer() { + if cfg.IsGossipServer() { fdRequired = ot.Add(fdRequired, uint64(cfg.IncomingConnectionsLimit)) if ot.Overflowed { return errors.New("Initialize() overflowed when adding up IncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease IncomingConnectionsLimit") } } - if cfg.IsP2PGossipServer() { - fdRequired = ot.Add(fdRequired, uint64(cfg.P2PIncomingConnectionsLimit)) + if cfg.IsHybridServer() { + fdRequired = ot.Add(fdRequired, 
uint64(cfg.P2PHybridIncomingConnectionsLimit)) if ot.Overflowed { - return errors.New("Initialize() overflowed when adding up P2PIncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease P2PIncomingConnectionsLimit") + return errors.New("Initialize() overflowed when adding up P2PHybridIncomingConnectionsLimit to the existing RLIMIT_NOFILE value; decrease P2PHybridIncomingConnectionsLimit") } } _, hard, fdErr := util.GetFdLimits() @@ -176,17 +176,17 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genes // but try to keep cfg.ReservedFDs untouched by decreasing other limits if cfg.AdjustConnectionLimits(fdRequired, hard) { s.log.Warnf( - "Updated connection limits: RestConnectionsSoftLimit=%d, RestConnectionsHardLimit=%d, IncomingConnectionsLimit=%d, P2PIncomingConnectionsLimit=%d", + "Updated connection limits: RestConnectionsSoftLimit=%d, RestConnectionsHardLimit=%d, IncomingConnectionsLimit=%d, P2PHybridIncomingConnectionsLimit=%d", cfg.RestConnectionsSoftLimit, cfg.RestConnectionsHardLimit, cfg.IncomingConnectionsLimit, - cfg.P2PIncomingConnectionsLimit, + cfg.P2PHybridIncomingConnectionsLimit, ) - if cfg.IsWsGossipServer() && cfg.IncomingConnectionsLimit == 0 { - return errors.New("Initialize() failed to adjust connection limits") + if cfg.IsHybridServer() && cfg.P2PHybridIncomingConnectionsLimit == 0 { + return errors.New("Initialize() failed to adjust p2p hybrid connection limits") } - if cfg.IsP2PGossipServer() && cfg.P2PIncomingConnectionsLimit == 0 { - return errors.New("Initialize() failed to adjust p2p connection limits") + if cfg.IsGossipServer() && cfg.IncomingConnectionsLimit == 0 { + return errors.New("Initialize() failed to adjust connection limits") } } } diff --git a/installer/config.json.example b/installer/config.json.example index 3a9714bbfb..59ca9e3a29 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -98,8 +98,8 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, - "P2PIncomingConnectionsLimit": 1200, - "P2PNetAddress": "", + "P2PHybridIncomingConnectionsLimit": 1200, + "P2PHybridNetAddress": "", "P2PPersistPeerID": false, "P2PPrivateKeyLocation": "", "ParticipationKeysRefreshInterval": 60000000000, diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index 26f25a0da6..2a02e77899 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -1009,8 +1009,8 @@ func createHostSpec(host HostConfig, template cloudHost) (hostSpec cloudHostSpec portList = append(portList, strconv.Itoa(port)) } } - if node.P2PNetAddress != "" { - port, err = extractPublicPort(node.P2PNetAddress) + if node.P2PHybridNetAddress != "" { + port, err = extractPublicPort(node.P2PHybridNetAddress) if err != nil { return } diff --git a/netdeploy/remote/nodeConfig.go b/netdeploy/remote/nodeConfig.go index bd4b63dac8..4025c47c1c 100644 --- a/netdeploy/remote/nodeConfig.go +++ b/netdeploy/remote/nodeConfig.go @@ -18,25 +18,25 @@ package remote // NodeConfig represents the configuration settings to apply to a single node running on a host type NodeConfig struct { - Name string `json:",omitempty"` - Wallets []NodeWalletData - NetAddress string `json:",omitempty"` - APIEndpoint string `json:",omitempty"` - APIToken string `json:",omitempty"` - AdminAPIToken string `json:",omitempty"` - EnableTelemetry bool // Needs to also be configured host-wide (assign logging host name) - TelemetryURI string 
`json:",omitempty"` // Needs to be HostConfig - EnableMetrics bool // Needs to also be configured host-wide (register DNS entry) - MetricsURI string `json:",omitempty"` - EnableService bool - CronTabSchedule string `json:",omitempty"` - EnableBlockStats bool - DashboardEndpoint string `json:",omitempty"` - DeadlockOverride int `json:",omitempty"` // -1 = Disable deadlock detection, 0 = Use Default for build, 1 = Enable - ConfigJSONOverride string `json:",omitempty"` // Raw json to merge into config.json after other modifications are complete - P2PBootstrap bool // True if this node should be a p2p bootstrap node and registered in DNS - P2PNetAddress string `json:",omitempty"` - PublicAddress bool + Name string `json:",omitempty"` + Wallets []NodeWalletData + NetAddress string `json:",omitempty"` + APIEndpoint string `json:",omitempty"` + APIToken string `json:",omitempty"` + AdminAPIToken string `json:",omitempty"` + EnableTelemetry bool // Needs to also be configured host-wide (assign logging host name) + TelemetryURI string `json:",omitempty"` // Needs to be HostConfig + EnableMetrics bool // Needs to also be configured host-wide (register DNS entry) + MetricsURI string `json:",omitempty"` + EnableService bool + CronTabSchedule string `json:",omitempty"` + EnableBlockStats bool + DashboardEndpoint string `json:",omitempty"` + DeadlockOverride int `json:",omitempty"` // -1 = Disable deadlock detection, 0 = Use Default for build, 1 = Enable + ConfigJSONOverride string `json:",omitempty"` // Raw json to merge into config.json after other modifications are complete + P2PBootstrap bool // True if this node should be a p2p bootstrap node and registered in DNS + P2PHybridNetAddress string `json:",omitempty"` + PublicAddress bool // NodeNameMatchRegex is tested against Name in generated configs and if matched the rest of the configs in this record are applied as a template NodeNameMatchRegex string `json:",omitempty"` diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go index 304fa4c636..43417dca27 100644 --- a/netdeploy/remote/nodecfg/nodeDir.go +++ b/netdeploy/remote/nodecfg/nodeDir.go @@ -169,9 +169,9 @@ func (nd *nodeDir) configureNetAddress() (err error) { } } } - if nd.P2PNetAddress != "" { - fmt.Fprintf(os.Stdout, " - Assigning P2PNetAddress: %s\n", nd.P2PNetAddress) - nd.config.P2PNetAddress = nd.P2PNetAddress + if nd.P2PHybridNetAddress != "" { + fmt.Fprintf(os.Stdout, " - Assigning P2PHybridNetAddress: %s\n", nd.P2PHybridNetAddress) + nd.config.P2PHybridNetAddress = nd.P2PHybridNetAddress } err = nd.saveConfig() return @@ -210,21 +210,21 @@ func (nd *nodeDir) configureP2PDNSBootstrap(p2pBootstrap bool) error { } // ensure p2p config params set are what is expected: // - EnableP2P or EnableP2PHybridMode - // - NetAddress or P2PNetAddress is set + // - NetAddress or P2PHybridNetAddress is set // - EnableGossipService if !nd.config.EnableP2P && !nd.config.EnableP2PHybridMode { return errors.New("p2p bootstrap requires EnableP2P or EnableP2PHybridMode to be set") } - if nd.NetAddress == "" && nd.P2PNetAddress == "" { - return errors.New("p2p bootstrap requires NetAddress or P2PNetAddress to be set") + if nd.NetAddress == "" && nd.P2PHybridNetAddress == "" { + return errors.New("p2p bootstrap requires NetAddress or P2PHybridNetAddress to be set") } if !nd.config.EnableGossipService { return errors.New("p2p bootstrap requires EnableGossipService to be set") } netAddress := nd.NetAddress - if nd.P2PNetAddress != "" { - netAddress = nd.P2PNetAddress + if 
nd.P2PHybridNetAddress != "" { + netAddress = nd.P2PHybridNetAddress } key, err := p2p.GetPrivKey(config.Local{P2PPersistPeerID: true}, nd.dataDir) diff --git a/network/README-P2P.md b/network/README-P2P.md index c67bd53273..853b54ed2d 100644 --- a/network/README-P2P.md +++ b/network/README-P2P.md @@ -8,7 +8,7 @@ transport: lip2p-managed connections and HTTP + WebSocket, respectively. `P2PNetwork` and `WsNetwork` require `config.NetAddress` to be set in order to start a server. In addition, `HybridNetwork` is an aggregate of `P2PNetwork` and `WsNetwork` allowing a node -to interact over both networks. In the case of hybrid operation, both `config.P2PNetAddress` and +to interact over both networks. In the case of hybrid operation, both `config.P2PHybridNetAddress` and `config.NetAddress` are used. ## General design diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index 7d5814ff39..85621260a9 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -41,7 +41,8 @@ type HybridP2PNetwork struct { func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (*HybridP2PNetwork, error) { // supply alternate NetAddress for P2P network p2pcfg := cfg - p2pcfg.NetAddress = cfg.P2PNetAddress + p2pcfg.NetAddress = cfg.P2PHybridNetAddress + p2pcfg.IncomingConnectionsLimit = cfg.P2PHybridIncomingConnectionsLimit identityTracker := NewIdentityTracker() p2pnet, err := NewP2PNetwork(log, p2pcfg, datadir, phonebookAddresses, genesisID, networkID, nodeInfo, &identityOpts{tracker: identityTracker}) if err != nil { diff --git a/network/hybridNetwork_test.go b/network/hybridNetwork_test.go index 842bb10b15..4e1392a2d0 100644 --- a/network/hybridNetwork_test.go +++ b/network/hybridNetwork_test.go @@ -64,7 +64,7 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { // make it net address and restart the node relayCfg.NetAddress = addr relayCfg.PublicAddress = addr - relayCfg.P2PNetAddress = "127.0.0.1:0" + relayCfg.P2PHybridNetAddress = "127.0.0.1:0" netA, err = NewHybridP2PNetwork(log.With("node", "netA"), relayCfg, p2pKeyDir, nil, genesisID, "net", &nopeNodeInfo{}) require.NoError(t, err) diff --git a/network/p2p/README.md b/network/p2p/README.md index b95e5be32f..e2d3333caf 100644 --- a/network/p2p/README.md +++ b/network/p2p/README.md @@ -72,7 +72,7 @@ The underlying libp2p implementation is abstracted as `p2p.Service` and is initi ### Connection limiting -libp2p's `ResourceManager` is used to limit the number of connections up to `cfg.P2PIncomingConnectionsLimit`. +libp2p's `ResourceManager` is used to limit the number of connections up to `cfg.IncomingConnectionsLimit`. ### DHT and capabilities diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 3b467b0b27..270dfb7476 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -166,7 +166,7 @@ func configureResourceManager(cfg config.Local) (network.ResourceManager, error) limitConfig := rcmgr.PartialLimitConfig{ System: rcmgr.ResourceLimits{ - Conns: rcmgr.LimitVal(cfg.P2PIncomingConnectionsLimit), + Conns: rcmgr.LimitVal(cfg.IncomingConnectionsLimit), }, // Everything else is default. The exact values will come from `scaledDefaultLimits` above. 
} diff --git a/node/node_test.go b/node/node_test.go index df72a699bb..6b991751cb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -880,7 +880,7 @@ func TestNodeHybridTopology(t *testing.T) { ni.p2pID, err = p2p.PeerIDFromPublicKey(privKey.GetPublic()) require.NoError(t, err) - cfg.P2PNetAddress = ni.p2pNetAddr() + cfg.P2PHybridNetAddress = ni.p2pNetAddr() return ni, cfg } diff --git a/test/testdata/configs/config-v34.json b/test/testdata/configs/config-v34.json index 3a9714bbfb..59ca9e3a29 100644 --- a/test/testdata/configs/config-v34.json +++ b/test/testdata/configs/config-v34.json @@ -98,8 +98,8 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, - "P2PIncomingConnectionsLimit": 1200, - "P2PNetAddress": "", + "P2PHybridIncomingConnectionsLimit": 1200, + "P2PHybridNetAddress": "", "P2PPersistPeerID": false, "P2PPrivateKeyLocation": "", "ParticipationKeysRefreshInterval": 60000000000, diff --git a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py index 12da86f348..93595698f2 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py +++ b/test/testdata/deployednettemplates/recipes/scenario1s-p2p/copy-node-configs.py @@ -65,7 +65,7 @@ def make_hybrid_p2p_net(*args): altconfig["ConfigJSONOverride"] = json.dumps(override_json) if net_address: # relay, set public address altconfig["P2PBootstrap"] = True - altconfig["P2PNetAddress"] = "{{NetworkPort2}}" + altconfig["P2PHybridNetAddress"] = "{{NetworkPort2}}" altconfig["PublicAddress"] = True altconfig['FractionApply'] = 0.5 @@ -99,7 +99,7 @@ def make_hybrid_ws_net(*args): altconfig["ConfigJSONOverride"] = json.dumps(override_json) if net_address: # relay, set public address altconfig["P2PBootstrap"] = True - altconfig["P2PNetAddress"] = "{{NetworkPort2}}" + altconfig["P2PHybridNetAddress"] = "{{NetworkPort2}}" altconfig["PublicAddress"] = True altconfig['FractionApply'] = 0.5 From e14fea6b350b4141eb674a23fe87259b74df8d2b Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 28 Aug 2024 17:05:33 -0400 Subject: [PATCH 63/82] metrics: collect total netdev sent/received bytes (#6108) --- config/localTemplate.go | 3 + config/local_defaults.go | 1 + daemon/algod/server.go | 4 + go.mod | 6 +- go.sum | 14 +- installer/config.json.example | 1 + test/testdata/configs/config-v34.json | 1 + .../recipes/scenario1s/genesis.json | 2 +- .../recipes/scenario1s/net.json | 188 ++++++++++++------ .../recipes/scenario1s/node.json | 4 +- .../recipes/scenario1s/nonPartNode.json | 2 +- .../recipes/scenario1s/relay.json | 2 +- tools/block-generator/go.mod | 7 +- tools/block-generator/go.sum | 14 +- util/metrics/netdev_common.go | 88 ++++++++ util/metrics/netdev_darwin.go | 112 +++++++++++ util/metrics/netdev_linux.go | 66 ++++++ util/metrics/netdev_noop.go | 24 +++ util/metrics/registry.go | 3 + 19 files changed, 475 insertions(+), 67 deletions(-) create mode 100644 util/metrics/netdev_common.go create mode 100644 util/metrics/netdev_darwin.go create mode 100644 util/metrics/netdev_linux.go create mode 100644 util/metrics/netdev_noop.go diff --git a/config/localTemplate.go b/config/localTemplate.go index 96a150a88b..c03461c371 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -357,6 +357,9 @@ type Local struct { // EnableRuntimeMetrics exposes Go runtime metrics in /metrics 
and via node_exporter. EnableRuntimeMetrics bool `version[22]:"false"` + // EnableNetDevMetrics exposes network interface total bytes sent/received metrics in /metrics + EnableNetDevMetrics bool `version[34]:"false"` + // TelemetryToLog configures whether to record messages to node.log that are normally only sent to remote event monitoring. TelemetryToLog bool `version[5]:"true"` diff --git a/config/local_defaults.go b/config/local_defaults.go index b2007621d0..3315dadf2b 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -73,6 +73,7 @@ var defaultLocal = Local{ EnableIncomingMessageFilter: false, EnableLedgerService: false, EnableMetricReporting: false, + EnableNetDevMetrics: false, EnableOutgoingNetworkMessageFiltering: true, EnableP2P: false, EnableP2PHybridMode: false, diff --git a/daemon/algod/server.go b/daemon/algod/server.go index 6023bfab2e..a823ee0001 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -330,6 +330,10 @@ func (s *Server) Start() { metrics.DefaultRegistry().Register(metrics.NewRuntimeMetrics()) } + if cfg.EnableNetDevMetrics { + metrics.DefaultRegistry().Register(metrics.NetDevMetrics) + } + if cfg.EnableMetricReporting { if err := s.metricCollector.Start(context.Background()); err != nil { // log this error diff --git a/go.mod b/go.mod index e078ec73fb..a9056a976f 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,7 @@ require ( github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/jmoiron/sqlx v1.2.0 + github.com/jsimonetti/rtnetlink v1.4.2 github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c github.com/labstack/echo/v4 v4.9.1 github.com/libp2p/go-libp2p v0.33.2 @@ -52,7 +53,7 @@ require ( golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a golang.org/x/sync v0.6.0 - golang.org/x/sys v0.18.0 + golang.org/x/sys v0.20.0 golang.org/x/text v0.14.0 gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 pgregory.net/rapid v0.6.2 @@ -109,6 +110,7 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/koron/go-ssdp v0.0.4 // indirect @@ -129,6 +131,8 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect diff --git a/go.sum b/go.sum index 66924d7486..cec02bb2af 100644 --- a/go.sum +++ b/go.sum @@ -63,6 +63,8 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas= github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= 
+github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= @@ -304,6 +306,10 @@ github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90= +github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -406,6 +412,10 @@ github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwp github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= @@ -763,8 +773,8 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= diff --git 
a/installer/config.json.example b/installer/config.json.example index 59ca9e3a29..db4420ed9d 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -52,6 +52,7 @@ "EnableIncomingMessageFilter": false, "EnableLedgerService": false, "EnableMetricReporting": false, + "EnableNetDevMetrics": false, "EnableOutgoingNetworkMessageFiltering": true, "EnableP2P": false, "EnableP2PHybridMode": false, diff --git a/test/testdata/configs/config-v34.json b/test/testdata/configs/config-v34.json index 59ca9e3a29..db4420ed9d 100644 --- a/test/testdata/configs/config-v34.json +++ b/test/testdata/configs/config-v34.json @@ -52,6 +52,7 @@ "EnableIncomingMessageFilter": false, "EnableLedgerService": false, "EnableMetricReporting": false, + "EnableNetDevMetrics": false, "EnableOutgoingNetworkMessageFiltering": true, "EnableP2P": false, "EnableP2PHybridMode": false, diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json b/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json index 31da8b8c28..29fad78137 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json +++ b/test/testdata/deployednettemplates/recipes/scenario1s/genesis.json @@ -3,7 +3,7 @@ "VersionModifier": "", "ConsensusProtocol": "future", "FirstPartKeyRound": 0, - "LastPartKeyRound": 22000, + "LastPartKeyRound": 5000, "PartKeyDilution": 0, "Wallets": [ { diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/net.json b/test/testdata/deployednettemplates/recipes/scenario1s/net.json index a93573426d..abd624d2d5 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s/net.json +++ b/test/testdata/deployednettemplates/recipes/scenario1s/net.json @@ -17,7 +17,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -38,7 +40,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -59,7 +63,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -80,7 +86,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -101,7 +109,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": 
\".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -122,7 +132,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -143,7 +155,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -164,7 +178,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", 
\"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -188,7 +204,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -206,13 +224,15 @@ ], "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "EnableTelemetry": false, + "EnableTelemetry": true, "TelemetryURI": "{{TelemetryURI}}", - "EnableMetrics": false, + "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", "EnableService": false, - "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -236,7 +256,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -260,7 +282,9 @@ 
"MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -284,7 +308,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -302,13 +328,15 @@ ], "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "EnableTelemetry": false, + "EnableTelemetry": true, "TelemetryURI": "{{TelemetryURI}}", - "EnableMetrics": false, + "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", "EnableService": false, - "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -332,7 +360,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -356,7 +386,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -380,7 +412,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -404,7 +438,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -428,7 +464,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + 
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -452,7 +490,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -476,7 +516,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -500,7 +542,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -524,7 +568,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -542,13 +588,15 @@ ], "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "EnableTelemetry": true, + "EnableTelemetry": false, "TelemetryURI": "{{TelemetryURI}}", - "EnableMetrics": true, + "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", "EnableService": false, - "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -566,13 +614,15 @@ ], "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "EnableTelemetry": true, + "EnableTelemetry": false, "TelemetryURI": "{{TelemetryURI}}", - "EnableMetrics": true, + "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", "EnableService": false, - "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -590,13 +640,15 @@ ], "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "EnableTelemetry": false, + "EnableTelemetry": true, "TelemetryURI": "{{TelemetryURI}}", - "EnableMetrics": false, + "EnableMetrics": true, "MetricsURI": 
"{{MetricsURI}}", "EnableService": false, - "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -620,7 +672,9 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -638,13 +692,15 @@ ], "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "EnableTelemetry": true, + "EnableTelemetry": false, "TelemetryURI": "{{TelemetryURI}}", - "EnableMetrics": true, + "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", "EnableService": false, - "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -666,7 +722,9 @@ "EnableMetrics": false, "EnableService": false, 
"EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -688,7 +746,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -710,7 +770,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -732,7 +794,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -754,7 +818,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -776,7 +842,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": 
\".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -798,7 +866,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -820,7 +890,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -842,7 +914,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] }, @@ -864,7 +938,9 @@ "EnableMetrics": false, "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }", + "P2PBootstrap": false, + "PublicAddress": false } ] } diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/node.json b/test/testdata/deployednettemplates/recipes/scenario1s/node.json index 3b4cb78771..7a0266ef0a 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s/node.json +++ 
b/test/testdata/deployednettemplates/recipes/scenario1s/node.json @@ -6,7 +6,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}", "AltConfigs": [ { "APIEndpoint": "{{APIEndpoint}}", @@ -16,7 +16,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true }", "FractionApply": 0.2 } ] diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json b/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json index 3f1245f7dd..fd5214a582 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json +++ b/test/testdata/deployednettemplates/recipes/scenario1s/nonPartNode.json @@ -1,5 +1,5 @@ { "APIEndpoint": "{{APIEndpoint}}", "APIToken": "{{APIToken}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0, \"EnableProfiler\": true, \"EnableExperimentalAPI\": true, \"EnableNetDevMetrics\": true }" } diff --git a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json index 327cf0b188..41559d3d55 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1s/relay.json +++ b/test/testdata/deployednettemplates/recipes/scenario1s/relay.json @@ -8,5 +8,5 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true}" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableRuntimeMetrics\": true, \"EnableExperimentalAPI\": true, \"EnableAccountUpdatesStats\": true, \"EnableNetDevMetrics\": true}" } diff --git a/tools/block-generator/go.mod b/tools/block-generator/go.mod index 9baf9fafef..4f8b21613a 100644 --- a/tools/block-generator/go.mod +++ b/tools/block-generator/go.mod @@ -55,6 +55,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect @@ -77,6 +78,8 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect + github.com/jsimonetti/rtnetlink v1.4.2 // indirect github.com/klauspost/compress v1.17.6 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/koron/go-ssdp v0.0.4 // indirect @@ -100,6 +103,8 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-sqlite3 v1.14.16 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect github.com/miekg/dns v1.1.58 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect @@ -152,7 +157,7 @@ require ( golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.18.0 // indirect gonum.org/v1/gonum v0.13.0 // indirect diff --git a/tools/block-generator/go.sum b/tools/block-generator/go.sum index 5ed96a265b..b627db1073 100644 --- a/tools/block-generator/go.sum +++ b/tools/block-generator/go.sum @@ -57,6 +57,8 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e h1:CHPYEbz71w8DqJ7DRIq+MXyCQsdibK08vdcQTY4ufas= github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e/go.mod h1:6Xhs0ZlsRjXLIiSMLKafbZxML/j30pg9Z1priLuha5s= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.12.3 
h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= @@ -279,6 +281,10 @@ github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink v1.4.2 h1:Df9w9TZ3npHTyDn0Ev9e1uzmN2odmXd0QX+J5GTEn90= +github.com/jsimonetti/rtnetlink v1.4.2/go.mod h1:92s6LJdE+1iOrw+F2/RO7LYI2Qd8pPpFNNUYW06gcoM= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -366,6 +372,10 @@ github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwp github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= @@ -712,8 +722,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/util/metrics/netdev_common.go b/util/metrics/netdev_common.go new file mode 100644 index 0000000000..3da28aa998 --- /dev/null +++ b/util/metrics/netdev_common.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package metrics + +import ( + "fmt" + "strconv" + "strings" +) + +type netDevStats struct { + bytesReceived uint64 + bytesSent uint64 + iface string +} + +type netDevGatherer struct { +} + +func writeUint64MetricCounterHeader(buf *strings.Builder, name string, desc string) { + buf.WriteString("# HELP ") + buf.WriteString(name) + buf.WriteString(" ") + buf.WriteString(desc) + buf.WriteString("\n# TYPE ") + buf.WriteString(name) + buf.WriteString(" counter\n") +} + +func writeUint64MetricValue(buf *strings.Builder, name string, labels string, value uint64) { + buf.WriteString(name) + if len(labels) > 0 { + buf.WriteString("{" + labels + "}") + } + buf.WriteString(" ") + buf.WriteString(strconv.FormatUint(value, 10)) + buf.WriteString("\n") +} + +// WriteMetric writes the netdev metrics to the provided buffer. +func (pg netDevGatherer) WriteMetric(buf *strings.Builder, parentLabels string) { + nds, err := getNetDevStats() + if err != nil { + return + } + var sep string + if len(parentLabels) > 0 { + sep = "," + } + + writeUint64MetricCounterHeader(buf, "algod_netdev_received_bytes", "Bytes received") + for _, nd := range nds { + labels := fmt.Sprintf("iface=\"%s\"%s%s", nd.iface, sep, parentLabels) + writeUint64MetricValue(buf, "algod_netdev_received_bytes", labels, nd.bytesReceived) + } + + writeUint64MetricCounterHeader(buf, "algod_netdev_sent_bytes", "Bytes sent") + for _, nd := range nds { + labels := fmt.Sprintf("iface=\"%s\"%s%s", nd.iface, sep, parentLabels) + writeUint64MetricValue(buf, "algod_netdev_sent_bytes", labels, nd.bytesSent) + } +} + +// AddMetric writes the netdev metrics to the provided map. +func (pg netDevGatherer) AddMetric(values map[string]float64) { + nds, err := getNetDevStats() + if err != nil { + return + } + for _, nd := range nds { + values[sanitizeTelemetryName("algod_netdev_received_bytes_"+nd.iface)] = float64(nd.bytesReceived) + values[sanitizeTelemetryName("algod_netdev_sent_bytes_"+nd.iface)] = float64(nd.bytesSent) + } +} diff --git a/util/metrics/netdev_darwin.go b/util/metrics/netdev_darwin.go new file mode 100644 index 0000000000..fd7b3cda9f --- /dev/null +++ b/util/metrics/netdev_darwin.go @@ -0,0 +1,112 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +//go:build darwin + +package metrics + +import ( + "bytes" + "encoding/binary" + "net" + + "golang.org/x/sys/unix" +) + +// getNetDevStats returns network device statistics. +func getNetDevStats() ([]netDevStats, error) { + nds := []netDevStats{} + + ifs, err := net.Interfaces() + if err != nil { + return nil, err + } + + for _, iface := range ifs { + ifaceData, err := getIfaceData(iface.Index) + if err != nil { + continue + } + if ifaceData.Data.Ibytes == 0 && ifaceData.Data.Obytes == 0 { + // skip interfaces with no traffic + continue + } + + nds = append(nds, netDevStats{ + bytesReceived: ifaceData.Data.Ibytes, + bytesSent: ifaceData.Data.Obytes, + iface: iface.Name, + }) + } + + return nds, nil +} + +// getIfaceData and ifMsghdr2 are copied node_exporter's collector/netdev_darwin.go +// Not sure what is the origin of this code (it also appears in few other golang projects), +// but it is licensed under Apache 2.0. +func getIfaceData(index int) (*ifMsghdr2, error) { + var data ifMsghdr2 + rawData, err := unix.SysctlRaw("net", unix.AF_ROUTE, 0, 0, unix.NET_RT_IFLIST2, index) + if err != nil { + return nil, err + } + err = binary.Read(bytes.NewReader(rawData), binary.LittleEndian, &data) + return &data, err +} + +type ifMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + SndLen int32 + SndMaxlen int32 + SndDrops int32 + Timer int32 + Data ifData64 +} + +type ifData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange unix.Timeval32 +} diff --git a/util/metrics/netdev_linux.go b/util/metrics/netdev_linux.go new file mode 100644 index 0000000000..7c6db07ae4 --- /dev/null +++ b/util/metrics/netdev_linux.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
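For reference, the Prometheus exposition text produced by the WriteMetric method in netdev_common.go above looks roughly like the following; the interface names, byte counts, and the host parent label are illustrative only:

# HELP algod_netdev_received_bytes Bytes received
# TYPE algod_netdev_received_bytes counter
algod_netdev_received_bytes{iface="eth0",host="relay-1"} 123456789
algod_netdev_received_bytes{iface="lo",host="relay-1"} 4096
# HELP algod_netdev_sent_bytes Bytes sent
# TYPE algod_netdev_sent_bytes counter
algod_netdev_sent_bytes{iface="eth0",host="relay-1"} 987654321
algod_netdev_sent_bytes{iface="lo",host="relay-1"} 2048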
+ +//go:build linux + +package metrics + +import "github.com/jsimonetti/rtnetlink" + +func getNetDevStats() ([]netDevStats, error) { + nds := []netDevStats{} + + conn, err := rtnetlink.Dial(nil) + if err != nil { + return nil, err + } + + defer conn.Close() + links, err := conn.Link.List() + if err != nil { + return nil, err + } + + for _, msg := range links { + if msg.Attributes == nil { + continue + } + name := msg.Attributes.Name + stats := msg.Attributes.Stats64 + if stats != nil { + if stats.RXBytes == 0 && stats.TXBytes == 0 { + // skip interfaces with no traffic + continue + } + nds = append(nds, netDevStats{ + bytesReceived: stats.RXBytes, + bytesSent: stats.TXBytes, + iface: name, + }) + } else if stats32 := msg.Attributes.Stats; stats32 != nil { + if stats32.RXBytes == 0 && stats32.TXBytes == 0 { + // skip interfaces with no traffic + continue + } + nds = append(nds, netDevStats{ + bytesReceived: uint64(stats32.RXBytes), + bytesSent: uint64(stats32.TXBytes), + iface: name, + }) + } + } + return nds, nil +} diff --git a/util/metrics/netdev_noop.go b/util/metrics/netdev_noop.go new file mode 100644 index 0000000000..3d2a1a1851 --- /dev/null +++ b/util/metrics/netdev_noop.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +//go:build !linux && !darwin + +package metrics + +// getNetDevStats returns network device statistics. 
+func getNetDevStats() ([]netDevStats, error) { + return nil, nil +} diff --git a/util/metrics/registry.go b/util/metrics/registry.go index d525bc1833..f6d7849401 100644 --- a/util/metrics/registry.go +++ b/util/metrics/registry.go @@ -43,6 +43,9 @@ var PrometheusDefaultMetrics = defaultPrometheusGatherer{} // OpencensusDefaultMetrics is the default prometheus gatherer implementing the Metric interface var OpencensusDefaultMetrics = defaultOpencensusGatherer{} +// NetDevMetrics is a netdev gatherer implementing the Metric interface +var NetDevMetrics = netDevGatherer{} + func init() { defaultRegistry = MakeRegistry() } From 9d5c4cd77849a5df5f633bee30a4b68247912a71 Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Thu, 29 Aug 2024 14:47:05 -0400 Subject: [PATCH 64/82] REST API: Fix `LedgerStateDelta` JSON encoding (#6106) Co-authored-by: Ashy5000 --- daemon/algod/api/client/restClient.go | 23 +++++++++--- daemon/algod/api/server/v2/handlers.go | 32 +++++++++++++--- data/transactions/transaction.go | 6 +-- libgoal/libgoal.go | 5 ++- test/e2e-go/features/devmode/devmode_test.go | 27 +++++++++++--- .../features/followernode/syncDeltas_test.go | 37 ++++++++++++++++++- 6 files changed, 107 insertions(+), 23 deletions(-) diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go index 33b8da8ee1..a3dc4b769e 100644 --- a/daemon/algod/api/client/restClient.go +++ b/daemon/algod/api/client/restClient.go @@ -30,11 +30,14 @@ import ( "github.com/google/go-querystring/query" "github.com/algorand/go-algorand/crypto" + v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model" "github.com/algorand/go-algorand/daemon/algod/api/spec/common" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/eval" + "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" ) @@ -229,7 +232,7 @@ func (client RestClient) submitForm( } if decodeJSON { - dec := json.NewDecoder(resp.Body) + dec := protocol.NewJSONDecoder(resp.Body) return dec.Decode(&response) } @@ -559,7 +562,9 @@ func (client RestClient) SendRawTransactionGroup(txgroup []transactions.SignedTx } // Block gets the block info for the given round -func (client RestClient) Block(round uint64) (response model.BlockResponse, err error) { +func (client RestClient) Block(round uint64) (response v2.BlockResponseJSON, err error) { + // Note: this endpoint gets the Block as JSON, meaning some string fields with non-UTF-8 data will lose + // information. Msgpack should be used instead if this becomes a problem. err = client.get(&response, fmt.Sprintf("/v2/blocks/%d", round), nil) return } @@ -767,19 +772,27 @@ func (client RestClient) GetSyncRound() (response model.GetSyncRoundResponse, er } // GetLedgerStateDelta retrieves the ledger state delta for the round -func (client RestClient) GetLedgerStateDelta(round uint64) (response model.LedgerStateDeltaResponse, err error) { +func (client RestClient) GetLedgerStateDelta(round uint64) (response ledgercore.StateDelta, err error) { + // Note: this endpoint gets the StateDelta as JSON, meaning some string fields with non-UTF-8 data will lose + // information. Msgpack should be used instead if this becomes a problem. 
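A minimal, standalone sketch (standard library only, not part of this patch) of the lossiness the note above refers to: encoding/json coerces strings to valid UTF-8 on marshal, so non-UTF-8 bytes in string fields do not survive a JSON round trip, while a msgpack round trip preserves them.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type wrapper struct{ Note string }
	in := wrapper{Note: string([]byte{0xff, 0xfe})} // not valid UTF-8
	raw, _ := json.Marshal(in)                      // invalid bytes become U+FFFD
	var out wrapper
	_ = json.Unmarshal(raw, &out)
	fmt.Println(in.Note == out.Note) // false: the original bytes are gone
}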
err = client.get(&response, fmt.Sprintf("/v2/deltas/%d", round), nil) return } // GetLedgerStateDeltaForTransactionGroup retrieves the ledger state delta for the txn group specified by the id -func (client RestClient) GetLedgerStateDeltaForTransactionGroup(id string) (response model.LedgerStateDeltaForTransactionGroupResponse, err error) { +func (client RestClient) GetLedgerStateDeltaForTransactionGroup(id string) (response eval.StateDeltaSubset, err error) { + // Note: this endpoint gets the StateDelta as JSON, meaning some string fields with non-UTF-8 data will lose + // information. Msgpack should be used instead if this becomes a problem. err = client.get(&response, fmt.Sprintf("/v2/deltas/txn/group/%s", id), nil) return } // GetTransactionGroupLedgerStateDeltasForRound retrieves the ledger state deltas for the txn groups in the specified round -func (client RestClient) GetTransactionGroupLedgerStateDeltasForRound(round uint64) (response model.TransactionGroupLedgerStateDeltasForRoundResponse, err error) { +func (client RestClient) GetTransactionGroupLedgerStateDeltasForRound(round uint64) (response struct { + Deltas []eval.TxnGroupDeltaWithIds +}, err error) { + // Note: this endpoint gets the StateDelta as JSON, meaning some string fields with non-UTF-8 data will lose + // information. Msgpack should be used instead if this becomes a problem. err = client.get(&response, fmt.Sprintf("/v2/deltas/%d/txn/group", round), nil) return } diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index b638825ec4..9c5da32b3d 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -673,6 +673,11 @@ func (v2 *Handlers) AccountApplicationInformation(ctx echo.Context, address stri return ctx.JSON(http.StatusOK, response) } +// BlockResponseJSON is used to embed the block in JSON responses. +type BlockResponseJSON struct { + Block bookkeeping.Block `codec:"block"` +} + // GetBlock gets the block for the given round. // (GET /v2/blocks/{round}) func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlockParams) error { @@ -709,9 +714,7 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params model.GetBlo } // Encoding wasn't working well without embedding "real" objects. - response := struct { - Block bookkeeping.Block `codec:"block"` - }{ + response := BlockResponseJSON{ Block: block, } @@ -839,7 +842,7 @@ func (v2 *Handlers) GetBlockHash(ctx echo.Context, round uint64) error { // (GET /v2/blocks/{round}/transactions/{txid}/proof) func (v2 *Handlers) GetTransactionProof(ctx echo.Context, round uint64, txid string, params model.GetTransactionProofParams) error { var txID transactions.Txid - err := txID.UnmarshalText([]byte(txid)) + err := txID.FromString(txid) if err != nil { return badRequest(ctx, err, errNoValidTxnSpecified, v2.Log) } @@ -1432,6 +1435,11 @@ func (v2 *Handlers) GetLedgerStateDelta(ctx echo.Context, round uint64, params m if err != nil { return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log) } + if handle == protocol.JSONStrictHandle { + // Zero out the Txleases map since it cannot be represented in JSON, as it is a map with an + // object key. 
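To make the comment above concrete, here is a self-contained sketch in which lease is a simplified stand-in for the real struct key type: encoding/json only accepts map keys that are strings, integers, or types implementing encoding.TextMarshaler, so a map keyed by a plain struct cannot be marshaled and the field has to be dropped from JSON responses.

package main

import (
	"encoding/json"
	"fmt"
)

// lease is a simplified stand-in for the actual map key type.
type lease struct {
	Sender string
	Lease  [32]byte
}

func main() {
	m := map[lease]uint64{{Sender: "SENDER"}: 1000}
	_, err := json.Marshal(m)
	fmt.Println(err) // json: unsupported type: map[main.lease]uint64
}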
+ sDelta.Txleases = nil + } data, err := encode(handle, sDelta) if err != nil { return internalError(ctx, err, errFailedToEncodeResponse, v2.Log) @@ -1501,8 +1509,8 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, } txID := transactions.Txid{} - if err := txID.UnmarshalText([]byte(txid)); err != nil { - return badRequest(ctx, err, errNoValidTxnSpecified, v2.Log) + if err0 := txID.FromString(txid); err0 != nil { + return badRequest(ctx, err0, errNoValidTxnSpecified, v2.Log) } txn, ok := v2.Node.GetPendingTransaction(txID) @@ -2022,6 +2030,11 @@ func (v2 *Handlers) GetLedgerStateDeltaForTransactionGroup(ctx echo.Context, id if err != nil { return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log) } + if handle == protocol.JSONStrictHandle { + // Zero out the Txleases map since it cannot be represented in JSON, as it is a map with an + // object key. + delta.Txleases = nil + } data, err := encode(handle, delta) if err != nil { return internalError(ctx, err, errFailedToEncodeResponse, v2.Log) @@ -2044,6 +2057,13 @@ func (v2 *Handlers) GetTransactionGroupLedgerStateDeltasForRound(ctx echo.Contex if err != nil { return notFound(ctx, err, fmt.Sprintf(errFailedRetrievingStateDelta, err), v2.Log) } + if handle == protocol.JSONStrictHandle { + // Zero out the Txleases map since it cannot be represented in JSON, as it is a map with an + // object key. + for i := range deltas { + deltas[i].Delta.Txleases = nil + } + } response := struct { Deltas []eval.TxnGroupDeltaWithIds }{ diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go index 06ae38c0d6..4a6d5b6603 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -38,9 +38,9 @@ func (txid Txid) String() string { return fmt.Sprintf("%v", crypto.Digest(txid)) } -// UnmarshalText initializes the Address from an array of bytes. 
-func (txid *Txid) UnmarshalText(text []byte) error { - d, err := crypto.DigestFromString(string(text)) +// FromString initializes the Txid from a string +func (txid *Txid) FromString(text string) error { + d, err := crypto.DigestFromString(text) *txid = Txid(d) return err } diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index e72b0588d6..f3f1c67192 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -27,6 +27,7 @@ import ( algodclient "github.com/algorand/go-algorand/daemon/algod/api/client" v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" kmdclient "github.com/algorand/go-algorand/daemon/kmd/client" + "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/rpcs" "github.com/algorand/go-algorand/config" @@ -819,7 +820,7 @@ func (c *Client) ParsedPendingTransaction(txid string) (txn v2.PreEncodedTxInfo, } // Block takes a round and returns its block -func (c *Client) Block(round uint64) (resp model.BlockResponse, err error) { +func (c *Client) Block(round uint64) (resp v2.BlockResponseJSON, err error) { algod, err := c.ensureAlgodClient() if err == nil { resp, err = algod.Block(round) @@ -1341,7 +1342,7 @@ func (c *Client) GetSyncRound() (rep model.GetSyncRoundResponse, err error) { } // GetLedgerStateDelta gets the LedgerStateDelta on a node w/ EnableFollowMode -func (c *Client) GetLedgerStateDelta(round uint64) (rep model.LedgerStateDeltaResponse, err error) { +func (c *Client) GetLedgerStateDelta(round uint64) (rep ledgercore.StateDelta, err error) { algod, err := c.ensureAlgodClient() if err == nil { return algod.GetLedgerStateDelta(round) diff --git a/test/e2e-go/features/devmode/devmode_test.go b/test/e2e-go/features/devmode/devmode_test.go index 525a6cacbd..bfb6a889ab 100644 --- a/test/e2e-go/features/devmode/devmode_test.go +++ b/test/e2e-go/features/devmode/devmode_test.go @@ -52,7 +52,7 @@ func testDevMode(t *testing.T, version protocol.ConsensusVersion) { firstRound := *txn.ConfirmedRound + 1 blk, err := fixture.AlgodClient.Block(*txn.ConfirmedRound) require.NoError(t, err) - seconds := int64(blk.Block["ts"].(float64)) + seconds := blk.Block.TimeStamp prevTime := time.Unix(seconds, 0) // Set Block timestamp offset to test that consecutive txns properly get their block time set const blkOffset = uint64(1_000_000) @@ -70,7 +70,7 @@ func testDevMode(t *testing.T, version protocol.ConsensusVersion) { require.Equal(t, round-1, uint64(txn.Txn.Txn.FirstValid)) newBlk, err := fixture.AlgodClient.Block(round) require.NoError(t, err) - newBlkSeconds := int64(newBlk.Block["ts"].(float64)) + newBlkSeconds := newBlk.Block.TimeStamp currTime := time.Unix(newBlkSeconds, 0) require.Equal(t, currTime, prevTime.Add(1_000_000*time.Second)) prevTime = currTime @@ -93,7 +93,18 @@ func testTxnGroupDeltasDevMode(t *testing.T, version protocol.ConsensusVersion) require.NoError(t, err) key := crypto.GenerateSignatureSecrets(crypto.Seed{}) receiver := basics.Address(key.SignatureVerifier) - txn := fixture.SendMoneyAndWait(0, 100000, 1000, sender.Address, receiver.String(), "") + + status, err := fixture.AlgodClient.Status() + require.NoError(t, err) + curRound := status.LastRound + + wh, err := fixture.LibGoalClient.GetUnencryptedWalletHandle() + require.NoError(t, err) + + fundingTx, err := fixture.LibGoalClient.SendPaymentFromWalletWithLease(wh, nil, sender.Address, receiver.String(), 1000, 100000, nil, "", [32]byte{1, 2, 3}, basics.Round(curRound).SubSaturate(1), 0) + require.NoError(t, err) + txn, err := 
fixture.WaitForConfirmedTxn(curRound+uint64(5), fundingTx.ID().String()) + require.NoError(t, err) require.NotNil(t, txn.ConfirmedRound) _, err = fixture.AlgodClient.Block(*txn.ConfirmedRound) require.NoError(t, err) @@ -101,16 +112,20 @@ func testTxnGroupDeltasDevMode(t *testing.T, version protocol.ConsensusVersion) // Test GetLedgerStateDeltaForTransactionGroup and verify the response contains a delta txngroupResponse, err := fixture.AlgodClient.GetLedgerStateDeltaForTransactionGroup(txn.Txn.ID().String()) require.NoError(t, err) - require.True(t, len(txngroupResponse) > 0) + require.NotZero(t, txngroupResponse) // Test GetTransactionGroupLedgerStateDeltasForRound and verify the response contains the delta for our txn roundResponse, err := fixture.AlgodClient.GetTransactionGroupLedgerStateDeltasForRound(1) require.NoError(t, err) require.Equal(t, len(roundResponse.Deltas), 1) groupDelta := roundResponse.Deltas[0] - require.Equal(t, 1, len(groupDelta.Ids)) + require.Len(t, groupDelta.Ids, 1) require.Equal(t, groupDelta.Ids[0], txn.Txn.ID().String()) // Assert that the TxIDs field across both endpoint responses is the same - require.Equal(t, txngroupResponse["Txids"], groupDelta.Delta["Txids"]) + require.Equal(t, txngroupResponse.Txids, groupDelta.Delta.Txids) + + // Txleases should always be nil for JSON responses + require.Nil(t, txngroupResponse.Txleases) + require.Nil(t, groupDelta.Delta.Txleases) } diff --git a/test/e2e-go/features/followernode/syncDeltas_test.go b/test/e2e-go/features/followernode/syncDeltas_test.go index b404a2a5ef..af27c7dda7 100644 --- a/test/e2e-go/features/followernode/syncDeltas_test.go +++ b/test/e2e-go/features/followernode/syncDeltas_test.go @@ -22,6 +22,9 @@ import ( "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/test/framework/fixtures" "github.com/algorand/go-algorand/test/partitiontest" ) @@ -52,6 +55,23 @@ func TestBasicSyncMode(t *testing.T) { nc, err := fixture.GetNodeController("Primary") a.NoError(err) + sender, err := fixture.GetRichestAccount() + require.NoError(t, err) + + status, err := fixture.AlgodClient.Status() + require.NoError(t, err) + curRound := status.LastRound + + wh, err := fixture.LibGoalClient.GetUnencryptedWalletHandle() + require.NoError(t, err) + + fundingTx, err := fixture.LibGoalClient.SendPaymentFromWalletWithLease(wh, nil, sender.Address, sender.Address, 0, 0, nil, "", [32]byte{1, 2, 3}, basics.Round(curRound).SubSaturate(1), 0) + require.NoError(t, err) + txn, err := fixture.WaitForConfirmedTxn(5, fundingTx.ID().String()) + require.NoError(t, err) + + require.LessOrEqual(t, *txn.ConfirmedRound, uint64(5), "Transaction should be confirmed in the first 5 rounds") + // Let the network make some progress waitForRound := uint64(5) err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound) @@ -73,7 +93,22 @@ func TestBasicSyncMode(t *testing.T) { // retrieve state delta gResp, err := followClient.GetLedgerStateDelta(round) a.NoError(err) - a.NotNil(gResp) + a.NotZero(gResp) + + if round == *txn.ConfirmedRound { + // Txleases should always be nil for JSON responses + require.Nil(t, gResp.Txleases) + + // Verify that the transaction is in the state delta + expectedTxids := map[transactions.Txid]ledgercore.IncludedTransactions{ + txn.Txn.ID(): { + LastValid: txn.Txn.Txn.LastValid, + Intra: 0, 
+ }, + } + require.Equal(t, expectedTxids, gResp.Txids) + } + // set sync round next err = followClient.SetSyncRound(round + 1) a.NoError(err) From f6c59a4c8166e69993953624a95b6d94a87bb003 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 29 Aug 2024 15:20:08 -0400 Subject: [PATCH 65/82] p2p: Add algod_network_p2p_* traffic metrics (#6105) --- cmd/algod/main.go | 2 + network/metrics.go | 206 +++++++++++++++++++++++++++ network/metrics_test.go | 76 ++++++++++ network/p2p/p2p.go | 4 +- network/p2p/pubsub.go | 12 +- network/p2p/pubsubTracer.go | 98 ------------- network/p2pNetwork.go | 10 +- network/wsNetwork.go | 30 ---- network/wsPeer.go | 84 +++++------ test/heapwatch/block_history_plot.py | 16 ++- test/heapwatch/metrics_aggs.py | 27 +++- test/heapwatch/metrics_delta.py | 101 +++++++++---- test/heapwatch/metrics_lib.py | 2 +- test/heapwatch/requirements.txt | 4 +- util/metrics/metrics.go | 8 ++ 15 files changed, 466 insertions(+), 214 deletions(-) create mode 100644 network/metrics.go create mode 100644 network/metrics_test.go delete mode 100644 network/p2p/pubsubTracer.go diff --git a/cmd/algod/main.go b/cmd/algod/main.go index 311b1507e6..47008256ac 100644 --- a/cmd/algod/main.go +++ b/cmd/algod/main.go @@ -445,6 +445,8 @@ var startupConfigCheckFields = []string{ "TxPoolExponentialIncreaseFactor", "TxPoolSize", "VerifiedTranscationsCacheSize", + "EnableP2P", + "EnableP2PHybridMode", } func resolveDataDir() string { diff --git a/network/metrics.go b/network/metrics.go new file mode 100644 index 0000000000..a1e92b2424 --- /dev/null +++ b/network/metrics.go @@ -0,0 +1,206 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package network + +import ( + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + p2proto "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/algorand/go-algorand/network/p2p" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/metrics" +) + +func init() { + // all tags are tracked by ws net + tagStringList := make([]string, 0, len(protocol.TagList)) + for _, t := range protocol.TagList { + tagStringList = append(tagStringList, string(t)) + } + networkSentBytesByTag = metrics.NewTagCounterFiltered("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network for {TAG} messages", tagStringList, "UNK") + networkReceivedBytesByTag = metrics.NewTagCounterFiltered("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network for {TAG} messages", tagStringList, "UNK") + networkMessageReceivedByTag = metrics.NewTagCounterFiltered("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network for {TAG} messages", tagStringList, "UNK") + networkMessageSentByTag = metrics.NewTagCounterFiltered("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network for {TAG} messages", tagStringList, "UNK") + networkHandleCountByTag = metrics.NewTagCounterFiltered("algod_network_rx_handle_countbytag_{TAG}", "count of handler calls in the receive thread for {TAG} messages", tagStringList, "UNK") + networkHandleMicrosByTag = metrics.NewTagCounterFiltered("algod_network_rx_handle_microsbytag_{TAG}", "microseconds spent by protocol handlers in the receive thread for {TAG} messages", tagStringList, "UNK") + + networkP2PSentBytesByTag = metrics.NewTagCounterFiltered("algod_network_p2p_sent_bytes_{TAG}", "Number of bytes that were sent over the network for {TAG} messages", tagStringList, "UNK") + networkP2PReceivedBytesByTag = metrics.NewTagCounterFiltered("algod_network_p2p_received_bytes_{TAG}", "Number of bytes that were received from the network for {TAG} messages", tagStringList, "UNK") + networkP2PMessageReceivedByTag = metrics.NewTagCounterFiltered("algod_network_p2p_message_received_{TAG}", "Number of complete messages that were received from the network for {TAG} messages", tagStringList, "UNK") + networkP2PMessageSentByTag = metrics.NewTagCounterFiltered("algod_network_p2p_message_sent_{TAG}", "Number of complete messages that were sent to the network for {TAG} messages", tagStringList, "UNK") +} + +var networkSentBytesTotal = metrics.MakeCounter(metrics.NetworkSentBytesTotal) +var networkP2PSentBytesTotal = metrics.MakeCounter(metrics.NetworkP2PSentBytesTotal) +var networkSentBytesByTag *metrics.TagCounter +var networkP2PSentBytesByTag *metrics.TagCounter +var networkReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkReceivedBytesTotal) +var networkP2PReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkP2PReceivedBytesTotal) +var networkReceivedBytesByTag *metrics.TagCounter +var networkP2PReceivedBytesByTag *metrics.TagCounter + +var networkMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkMessageReceivedTotal) +var networkP2PMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkP2PMessageReceivedTotal) +var networkMessageReceivedByTag *metrics.TagCounter +var networkP2PMessageReceivedByTag *metrics.TagCounter +var networkMessageSentTotal = metrics.MakeCounter(metrics.NetworkMessageSentTotal) +var networkP2PMessageSentTotal = 
metrics.MakeCounter(metrics.NetworkP2PMessageSentTotal) +var networkMessageSentByTag *metrics.TagCounter +var networkP2PMessageSentByTag *metrics.TagCounter + +var networkHandleMicrosByTag *metrics.TagCounter +var networkHandleCountByTag *metrics.TagCounter + +var networkConnectionsDroppedTotal = metrics.MakeCounter(metrics.NetworkConnectionsDroppedTotal) +var networkMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_message_sent_queue_micros_total", Description: "Total microseconds message spent waiting in queue to be sent"}) +var networkP2PMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_p2p_message_sent_queue_micros_total", Description: "Total microseconds p2p message spent waiting in queue to be sent"}) + +var duplicateNetworkMessageReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkMessageReceivedTotal) +var duplicateNetworkMessageReceivedBytesTotal = metrics.MakeCounter(metrics.DuplicateNetworkMessageReceivedBytesTotal) +var duplicateNetworkFilterReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkFilterReceivedTotal) +var outgoingNetworkMessageFilteredOutTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutTotal) +var outgoingNetworkMessageFilteredOutBytesTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutBytesTotal) +var unknownProtocolTagMessagesTotal = metrics.MakeCounter(metrics.UnknownProtocolTagMessagesTotal) + +var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections) +var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections) + +var networkIncomingBufferMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_buffer_micros_total", Description: "microseconds spent by incoming messages on the receive buffer"}) +var networkHandleMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_handle_micros_total", Description: "microseconds spent by protocol handlers in the receive thread"}) + +var networkBroadcasts = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcasts_total", Description: "number of broadcast operations"}) +var networkBroadcastQueueFull = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_full_total", Description: "number of messages that were drops due to full broadcast queue"}) +var networkBroadcastQueueMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_micros_total", Description: "microseconds broadcast requests sit on queue"}) +var networkBroadcastSendMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_send_micros_total", Description: "microseconds spent broadcasting"}) +var networkBroadcastsDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_broadcasts_dropped_total", Description: "number of broadcast messages not sent to any peer"}) +var networkPeerBroadcastDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_peer_broadcast_dropped_total", Description: "number of broadcast messages not sent to some peer"}) + +var networkPeerIdentityDisconnect = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_duplicate", Description: "number of times identity challenge cause us to disconnect a peer"}) +var networkPeerIdentityError = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_error", Description: "number of times an error occurs (besides expected) when processing identity challenges"}) +var 
networkPeerAlreadyClosed = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_peer_already_closed", Description: "number of times a peer would be added but the peer connection is already closed"}) + +var networkSlowPeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_slow_drops_total", Description: "number of peers dropped for being slow to send to"}) +var networkIdlePeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_idle_drops_total", Description: "number of peers dropped due to idle connection"}) + +var peers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peers", Description: "Number of active peers."}) +var incomingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_incoming_peers", Description: "Number of active incoming peers."}) +var outgoingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_outgoing_peers", Description: "Number of active outgoing peers."}) + +var transactionMessagesP2PRejectMessage = metrics.NewTagCounter(metrics.TransactionMessagesP2PRejectMessage.Name, metrics.TransactionMessagesP2PRejectMessage.Description) +var transactionMessagesP2PDuplicateMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PDuplicateMessage) +var transactionMessagesP2PDeliverMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PDeliverMessage) +var transactionMessagesP2PUnderdeliverableMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PUndeliverableMessage) + +var networkP2PGossipSubSentBytesTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_p2p_gs_sent_bytes_total", Description: "Total number of bytes sent through gossipsub"}) +var networkP2PGossipSubReceivedBytesTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_p2p_gs_received_bytes_total", Description: "Total number of bytes received through gossipsub"}) + +var _ = pubsub.RawTracer(pubsubMetricsTracer{}) + +// pubsubMetricsTracer is a tracer for pubsub events used to track metrics. +type pubsubMetricsTracer struct{} + +// AddPeer is invoked when a new peer is added. +func (t pubsubMetricsTracer) AddPeer(p peer.ID, proto p2proto.ID) {} + +// RemovePeer is invoked when a peer is removed. +func (t pubsubMetricsTracer) RemovePeer(p peer.ID) {} + +// Join is invoked when a new topic is joined +func (t pubsubMetricsTracer) Join(topic string) {} + +// Leave is invoked when a topic is abandoned +func (t pubsubMetricsTracer) Leave(topic string) {} + +// Graft is invoked when a new peer is grafted on the mesh (gossipsub) +func (t pubsubMetricsTracer) Graft(p peer.ID, topic string) {} + +// Prune is invoked when a peer is pruned from the message (gossipsub) +func (t pubsubMetricsTracer) Prune(p peer.ID, topic string) {} + +// ValidateMessage is invoked when a message first enters the validation pipeline. +func (t pubsubMetricsTracer) ValidateMessage(msg *pubsub.Message) { + if msg != nil && msg.Topic != nil { + switch *msg.Topic { + case p2p.TXTopicName: + networkP2PReceivedBytesTotal.AddUint64(uint64(len(msg.Data)), nil) + networkP2PReceivedBytesByTag.Add(string(protocol.TxnTag), uint64(len(msg.Data))) + networkP2PMessageReceivedByTag.Add(string(protocol.TxnTag), 1) + } + } +} + +// DeliverMessage is invoked when a message is delivered +func (t pubsubMetricsTracer) DeliverMessage(msg *pubsub.Message) { + transactionMessagesP2PDeliverMessage.Inc(nil) +} + +// RejectMessage is invoked when a message is Rejected or Ignored. +// The reason argument can be one of the named strings Reject*. 
+func (t pubsubMetricsTracer) RejectMessage(msg *pubsub.Message, reason string) { + // TagCounter cannot handle tags with spaces so pubsub.Reject* cannot be used directly. + // Since Go's strings are immutable, char replacement is a new allocation so that stick to string literals. + switch reason { + case pubsub.RejectValidationThrottled: + transactionMessagesP2PRejectMessage.Add("throttled", 1) + case pubsub.RejectValidationQueueFull: + transactionMessagesP2PRejectMessage.Add("full", 1) + case pubsub.RejectValidationFailed: + transactionMessagesP2PRejectMessage.Add("failed", 1) + case pubsub.RejectValidationIgnored: + transactionMessagesP2PRejectMessage.Add("ignored", 1) + default: + transactionMessagesP2PRejectMessage.Add("other", 1) + } +} + +// DuplicateMessage is invoked when a duplicate message is dropped. +func (t pubsubMetricsTracer) DuplicateMessage(msg *pubsub.Message) { + transactionMessagesP2PDuplicateMessage.Inc(nil) +} + +// ThrottlePeer is invoked when a peer is throttled by the peer gater. +func (t pubsubMetricsTracer) ThrottlePeer(p peer.ID) {} + +// RecvRPC is invoked when an incoming RPC is received. +func (t pubsubMetricsTracer) RecvRPC(rpc *pubsub.RPC) { + networkP2PGossipSubReceivedBytesTotal.AddUint64(uint64(rpc.Size()), nil) +} + +// SendRPC is invoked when a RPC is sent. +func (t pubsubMetricsTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) { + networkP2PGossipSubSentBytesTotal.AddUint64(uint64(rpc.Size()), nil) + for i := range rpc.GetPublish() { + if rpc.Publish[i] != nil && rpc.Publish[i].Topic != nil { + switch *rpc.Publish[i].Topic { + case p2p.TXTopicName: + networkP2PSentBytesByTag.Add(string(protocol.TxnTag), uint64(len(rpc.Publish[i].Data))) + networkP2PSentBytesTotal.AddUint64(uint64(len(rpc.Publish[i].Data)), nil) + networkP2PMessageSentByTag.Add(string(protocol.TxnTag), 1) + } + } + } +} + +// DropRPC is invoked when an outbound RPC is dropped, typically because of a queue full. +func (t pubsubMetricsTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {} + +// UndeliverableMessage is invoked when the consumer of Subscribe is not reading messages fast enough and +// the pressure release mechanism trigger, dropping messages. +func (t pubsubMetricsTracer) UndeliverableMessage(msg *pubsub.Message) { + transactionMessagesP2PUnderdeliverableMessage.Inc(nil) +} diff --git a/network/metrics_test.go b/network/metrics_test.go new file mode 100644 index 0000000000..857ab57051 --- /dev/null +++ b/network/metrics_test.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
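[Aside] The RejectMessage comment above explains why the tracer switches on string literals instead of normalizing the libp2p reason strings at runtime. The following standalone Go sketch is not part of the patch; the literal reason value is an assumption about go-libp2p-pubsub's exported Reject* constants. It only illustrates the trade-off: rewriting an immutable string allocates on every call, while a switch over known literals does not.

package main

import (
	"fmt"
	"strings"
)

// tagByReplace normalizes a reject reason at runtime; each call allocates a new string.
func tagByReplace(reason string) string {
	return strings.ReplaceAll(reason, " ", "_")
}

// tagBySwitch maps known reasons to fixed, space-free literals with no allocation,
// mirroring the approach taken by pubsubMetricsTracer.RejectMessage above.
func tagBySwitch(reason string) string {
	switch reason {
	case "validation throttled": // assumed value of pubsub.RejectValidationThrottled
		return "throttled"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(tagByReplace("validation throttled")) // validation_throttled
	fmt.Println(tagBySwitch("validation throttled"))  // throttled
}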
+ +package network + +import ( + "go/ast" + "go/parser" + "go/token" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/test/partitiontest" +) + +// TestPubsubTracer_TagList makes sure pubsubMetricsTracer traces pubsub messages +// by counting switch cases in SendRPC and ValidateMessage +func TestMetrics_PubsubTracer_TagList(t *testing.T) { + t.Parallel() + partitiontest.PartitionTest(t) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "metrics.go", nil, 0) + require.NoError(t, err) + + // Find the SendRPC/ValidateMessage functions and count the switch cases + var sendCaseCount int + var recvCaseCount int + ast.Inspect(f, func(n ast.Node) bool { + switch stmt := n.(type) { + case *ast.FuncDecl: + if stmt.Name.Name == "SendRPC" { + ast.Inspect(stmt.Body, func(n ast.Node) bool { + if switchStmt, ok := n.(*ast.SwitchStmt); ok { + for _, stmt := range switchStmt.Body.List { + if _, ok := stmt.(*ast.CaseClause); ok { + sendCaseCount++ + } + } + } + return true + }) + } + if stmt.Name.Name == "ValidateMessage" { + ast.Inspect(stmt.Body, func(n ast.Node) bool { + if switchStmt, ok := n.(*ast.SwitchStmt); ok { + for _, stmt := range switchStmt.Body.List { + if _, ok := stmt.(*ast.CaseClause); ok { + recvCaseCount++ + } + } + } + return true + }) + } + } + return true + }) + + require.Equal(t, len(gossipSubTags), sendCaseCount) + require.Equal(t, len(gossipSubTags), recvCaseCount) +} diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 270dfb7476..4ddda54157 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -176,7 +176,7 @@ func configureResourceManager(cfg config.Local) (network.ResourceManager, error) } // MakeService creates a P2P service instance -func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler) (*serviceImpl, error) { +func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h host.Host, listenAddr string, wsStreamHandler StreamHandler, metricsTracer pubsub.RawTracer) (*serviceImpl, error) { sm := makeStreamManager(ctx, log, h, wsStreamHandler, cfg.EnableGossipService) h.Network().Notify(sm) @@ -188,7 +188,7 @@ func MakeService(ctx context.Context, log logging.Logger, cfg config.Local, h ho telemetryProtoInfo := formatPeerTelemetryInfoProtocolName(telemetryID, telemetryInstance) h.SetStreamHandler(protocol.ID(telemetryProtoInfo), func(s network.Stream) { s.Close() }) - ps, err := makePubSub(ctx, cfg, h) + ps, err := makePubSub(ctx, cfg, h, metricsTracer) if err != nil { return nil, err } diff --git a/network/p2p/pubsub.go b/network/p2p/pubsub.go index 657baecdde..a592657010 100644 --- a/network/p2p/pubsub.go +++ b/network/p2p/pubsub.go @@ -51,11 +51,14 @@ const ( ) // TXTopicName defines a pubsub topic for TX messages -const TXTopicName = "/algo/tx/0.1.0" +// There is a micro optimization for const string comparison: +// 8 bytes const string require a single x86-64 CMPQ instruction. 
+// Naming convention: "algo" + 2 bytes protocol tag + 2 bytes version +const TXTopicName = "algotx01" const incomingThreads = 20 // matches to number wsNetwork workers -func makePubSub(ctx context.Context, cfg config.Local, host host.Host) (*pubsub.PubSub, error) { +func makePubSub(ctx context.Context, cfg config.Local, host host.Host, metricsTracer pubsub.RawTracer) (*pubsub.PubSub, error) { //defaultParams := pubsub.DefaultGossipSubParams() options := []pubsub.Option{ @@ -98,7 +101,10 @@ func makePubSub(ctx context.Context, cfg config.Local, host host.Host) (*pubsub. pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign), // pubsub.WithValidateThrottle(cfg.TxBacklogSize), pubsub.WithValidateWorkers(incomingThreads), - pubsub.WithRawTracer(pubsubTracer{}), + } + + if metricsTracer != nil { + options = append(options, pubsub.WithRawTracer(metricsTracer)) } return pubsub.NewGossipSub(ctx, host, options...) diff --git a/network/p2p/pubsubTracer.go b/network/p2p/pubsubTracer.go deleted file mode 100644 index ca57bc69ce..0000000000 --- a/network/p2p/pubsubTracer.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) 2019-2024 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . - -package p2p - -import ( - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - - "github.com/algorand/go-algorand/util/metrics" -) - -var _ = pubsub.RawTracer(pubsubTracer{}) - -var transactionMessagesP2PRejectMessage = metrics.NewTagCounter(metrics.TransactionMessagesP2PRejectMessage.Name, metrics.TransactionMessagesP2PRejectMessage.Description) -var transactionMessagesP2PDuplicateMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PDuplicateMessage) -var transactionMessagesP2PDeliverMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PDeliverMessage) -var transactionMessagesP2PUnderdeliverableMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PUndeliverableMessage) -var transactionMessagesP2PValidateMessage = metrics.MakeCounter(metrics.TransactionMessagesP2PValidateMessage) - -// pubsubTracer is a tracer for pubsub events used to track metrics. -type pubsubTracer struct{} - -// AddPeer is invoked when a new peer is added. -func (t pubsubTracer) AddPeer(p peer.ID, proto protocol.ID) {} - -// RemovePeer is invoked when a peer is removed. 
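[Aside] A minimal sketch of the topic naming convention described above for TXTopicName ("algo" + two-byte protocol tag + two-byte version, giving an 8-byte name that can be compared in a single instruction). The buildTopicName helper is hypothetical and not part of the patch; only the TX topic constant ("algotx01") is defined by this change.

package main

import "fmt"

// buildTopicName composes a gossipsub topic name following the convention above.
// Illustrative only; the patch itself defines just the TX topic constant.
func buildTopicName(tag, version string) string {
	return "algo" + tag + version
}

func main() {
	topic := buildTopicName("tx", "01")
	fmt.Println(topic, len(topic)) // algotx01 8
}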
-func (t pubsubTracer) RemovePeer(p peer.ID) {} - -// Join is invoked when a new topic is joined -func (t pubsubTracer) Join(topic string) {} - -// Leave is invoked when a topic is abandoned -func (t pubsubTracer) Leave(topic string) {} - -// Graft is invoked when a new peer is grafted on the mesh (gossipsub) -func (t pubsubTracer) Graft(p peer.ID, topic string) {} - -// Prune is invoked when a peer is pruned from the message (gossipsub) -func (t pubsubTracer) Prune(p peer.ID, topic string) {} - -// ValidateMessage is invoked when a message first enters the validation pipeline. -func (t pubsubTracer) ValidateMessage(msg *pubsub.Message) { - transactionMessagesP2PValidateMessage.Inc(nil) -} - -// DeliverMessage is invoked when a message is delivered -func (t pubsubTracer) DeliverMessage(msg *pubsub.Message) { - transactionMessagesP2PDeliverMessage.Inc(nil) -} - -// RejectMessage is invoked when a message is Rejected or Ignored. -// The reason argument can be one of the named strings Reject*. -func (t pubsubTracer) RejectMessage(msg *pubsub.Message, reason string) { - switch reason { - case pubsub.RejectValidationThrottled, pubsub.RejectValidationQueueFull, pubsub.RejectValidationFailed, pubsub.RejectValidationIgnored: - transactionMessagesP2PRejectMessage.Add(reason, 1) - default: - transactionMessagesP2PRejectMessage.Add("other", 1) - } -} - -// DuplicateMessage is invoked when a duplicate message is dropped. -func (t pubsubTracer) DuplicateMessage(msg *pubsub.Message) { - transactionMessagesP2PDuplicateMessage.Inc(nil) -} - -// ThrottlePeer is invoked when a peer is throttled by the peer gater. -func (t pubsubTracer) ThrottlePeer(p peer.ID) {} - -// RecvRPC is invoked when an incoming RPC is received. -func (t pubsubTracer) RecvRPC(rpc *pubsub.RPC) {} - -// SendRPC is invoked when a RPC is sent. -func (t pubsubTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {} - -// DropRPC is invoked when an outbound RPC is dropped, typically because of a queue full. -func (t pubsubTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {} - -// UndeliverableMessage is invoked when the consumer of Subscribe is not reading messages fast enough and -// the pressure release mechanism trigger, dropping messages. 
-func (t pubsubTracer) UndeliverableMessage(msg *pubsub.Message) { - transactionMessagesP2PUnderdeliverableMessage.Inc(nil) -} diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index f9dc04b785..d3af60a223 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -194,6 +194,11 @@ type p2pPeerStats struct { txReceived atomic.Uint64 } +// gossipSubTags defines protocol messages that are relayed using GossipSub +var gossipSubTags = map[protocol.Tag]string{ + protocol.TxnTag: p2p.TXTopicName, +} + // NewP2PNetwork returns an instance of GossipNode that uses the p2p.Service func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, node NodeInfo, identityOpts *identityOpts) (*P2PNetwork, error) { const readBufferLen = 2048 @@ -214,7 +219,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo config: cfg, genesisID: genesisID, networkID: networkID, - topicTags: map[protocol.Tag]string{protocol.TxnTag: p2p.TXTopicName}, + topicTags: gossipSubTags, wsPeers: make(map[peer.ID]*wsPeer), wsPeersToIDs: make(map[*wsPeer]peer.ID), peerStats: make(map[peer.ID]*p2pPeerStats), @@ -261,7 +266,7 @@ func NewP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebo } log.Infof("P2P host created: peer ID %s addrs %s", h.ID(), h.Addrs()) - net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, net.wsStreamHandler) + net.service, err = p2p.MakeService(net.ctx, log, cfg, h, la, net.wsStreamHandler, pubsubMetricsTracer{}) if err != nil { return nil, err } @@ -791,6 +796,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea conn: &wsPeerConnP2P{stream: stream}, outgoing: !incoming, identity: netIdentPeerID, + peerType: peerTypeP2P, } protos, err := n.pstore.GetProtocols(p2pPeer) if err != nil { diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 2af3a9b6bf..ecb636c8e2 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -53,7 +53,6 @@ import ( tools_network "github.com/algorand/go-algorand/tools/network" "github.com/algorand/go-algorand/tools/network/dnssec" "github.com/algorand/go-algorand/util" - "github.com/algorand/go-algorand/util/metrics" ) const incomingThreads = 20 @@ -117,35 +116,6 @@ const wsMaxHeaderBytes = 4096 // used from the ReservedFDs pool, as this pool is meant for short-lived usage (dns queries, disk i/o, etc.) 
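[Aside] The gossipSubTags map introduced above declares which protocol tags are relayed over gossipsub rather than over the per-peer ws stream. A hedged standalone sketch of that routing decision follows; the names and error text are illustrative, not the package's actual API.

package main

import (
	"errors"
	"fmt"
)

// topicByTag mirrors the idea of gossipSubTags: only tags present in the map
// are published to a gossipsub topic; everything else stays on the peer stream.
var topicByTag = map[string]string{
	"TX": "algotx01", // assumption: TxnTag -> TXTopicName
}

func routeMessage(tag string, data []byte) (string, error) {
	topic, ok := topicByTag[tag]
	if !ok {
		return "", errors.New("tag is not relayed over gossipsub; send over the peer stream instead")
	}
	return topic, nil
}

func main() {
	if topic, err := routeMessage("TX", []byte("payload")); err == nil {
		fmt.Println("publish to", topic)
	}
	if _, err := routeMessage("AV", nil); err != nil {
		fmt.Println("AV:", err)
	}
}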
const ReservedHealthServiceConnections = 10 -var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections) -var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections) - -var networkIncomingBufferMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_buffer_micros_total", Description: "microseconds spent by incoming messages on the receive buffer"}) -var networkHandleMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_handle_micros_total", Description: "microseconds spent by protocol handlers in the receive thread"}) - -var networkBroadcasts = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcasts_total", Description: "number of broadcast operations"}) -var networkBroadcastQueueFull = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_full_total", Description: "number of messages that were drops due to full broadcast queue"}) -var networkBroadcastQueueMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_micros_total", Description: "microseconds broadcast requests sit on queue"}) -var networkBroadcastSendMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_send_micros_total", Description: "microseconds spent broadcasting"}) -var networkBroadcastsDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_broadcasts_dropped_total", Description: "number of broadcast messages not sent to any peer"}) -var networkPeerBroadcastDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_peer_broadcast_dropped_total", Description: "number of broadcast messages not sent to some peer"}) - -var networkPeerIdentityDisconnect = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_duplicate", Description: "number of times identity challenge cause us to disconnect a peer"}) -var networkPeerIdentityError = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_identity_error", Description: "number of times an error occurs (besides expected) when processing identity challenges"}) -var networkPeerAlreadyClosed = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_peer_already_closed", Description: "number of times a peer would be added but the peer connection is already closed"}) - -var networkSlowPeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_slow_drops_total", Description: "number of peers dropped for being slow to send to"}) -var networkIdlePeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_idle_drops_total", Description: "number of peers dropped due to idle connection"}) - -var minPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_min_ping_seconds", Description: "Network round trip time to fastest peer in seconds."}) -var meanPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_mean_ping_seconds", Description: "Network round trip time to average peer in seconds."}) -var medianPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_median_ping_seconds", Description: "Network round trip time to median peer in seconds."}) -var maxPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_max_ping_seconds", Description: "Network round trip time to slowest peer in seconds."}) - -var peers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peers", Description: "Number of active peers."}) -var incomingPeers = metrics.MakeGauge(metrics.MetricName{Name: 
"algod_network_incoming_peers", Description: "Number of active incoming peers."}) -var outgoingPeers = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_outgoing_peers", Description: "Number of active outgoing peers."}) - // peerDisconnectionAckDuration defines the time we would wait for the peer disconnection to complete. const peerDisconnectionAckDuration = 5 * time.Second diff --git a/network/wsPeer.go b/network/wsPeer.go index 88a0c615f9..a6a982af59 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -39,7 +39,6 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util" - "github.com/algorand/go-algorand/util/metrics" ) // MaxMessageLength is the maximum length of a message that can be sent or received, exported to be used in the node.TestMaxSizesCorrect test @@ -52,20 +51,7 @@ const averageMessageLength = 2 * 1024 // Most of the messages are smaller tha // buffer and starve messages from other peers. const msgsInReadBufferPerPeer = 10 -var tagStringList []string - func init() { - tagStringList = make([]string, len(protocol.TagList)) - for i, t := range protocol.TagList { - tagStringList[i] = string(t) - } - networkSentBytesByTag = metrics.NewTagCounterFiltered("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network for {TAG} messages", tagStringList, "UNK") - networkReceivedBytesByTag = metrics.NewTagCounterFiltered("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network for {TAG} messages", tagStringList, "UNK") - networkMessageReceivedByTag = metrics.NewTagCounterFiltered("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network for {TAG} messages", tagStringList, "UNK") - networkMessageSentByTag = metrics.NewTagCounterFiltered("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network for {TAG} messages", tagStringList, "UNK") - networkHandleCountByTag = metrics.NewTagCounterFiltered("algod_network_rx_handle_countbytag_{TAG}", "count of handler calls in the receive thread for {TAG} messages", tagStringList, "UNK") - networkHandleMicrosByTag = metrics.NewTagCounterFiltered("algod_network_rx_handle_microsbytag_{TAG}", "microseconds spent by protocol handlers in the receive thread for {TAG} messages", tagStringList, "UNK") - matched := false for _, version := range SupportedProtocolVersions { if version == versionPeerFeatures { @@ -83,29 +69,6 @@ func init() { } } -var networkSentBytesTotal = metrics.MakeCounter(metrics.NetworkSentBytesTotal) -var networkSentBytesByTag *metrics.TagCounter -var networkReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkReceivedBytesTotal) -var networkReceivedBytesByTag *metrics.TagCounter - -var networkMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkMessageReceivedTotal) -var networkMessageReceivedByTag *metrics.TagCounter -var networkMessageSentTotal = metrics.MakeCounter(metrics.NetworkMessageSentTotal) -var networkMessageSentByTag *metrics.TagCounter - -var networkHandleMicrosByTag *metrics.TagCounter -var networkHandleCountByTag *metrics.TagCounter - -var networkConnectionsDroppedTotal = metrics.MakeCounter(metrics.NetworkConnectionsDroppedTotal) -var networkMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_message_sent_queue_micros_total", Description: "Total microseconds message spent waiting in queue to be sent"}) - -var 
duplicateNetworkMessageReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkMessageReceivedTotal) -var duplicateNetworkMessageReceivedBytesTotal = metrics.MakeCounter(metrics.DuplicateNetworkMessageReceivedBytesTotal) -var duplicateNetworkFilterReceivedTotal = metrics.MakeCounter(metrics.DuplicateNetworkFilterReceivedTotal) -var outgoingNetworkMessageFilteredOutTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutTotal) -var outgoingNetworkMessageFilteredOutBytesTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutBytesTotal) -var unknownProtocolTagMessagesTotal = metrics.MakeCounter(metrics.UnknownProtocolTagMessagesTotal) - // defaultSendMessageTags is the default list of messages which a peer would // allow to be sent without receiving any explicit request. var defaultSendMessageTags = map[protocol.Tag]bool{ @@ -204,6 +167,16 @@ type sendMessages struct { onRelease func() } +//msgp:ignore peerType +type peerType int + +const ( + // peerTypeWs is a peer that is connected over a websocket connection + peerTypeWs peerType = iota + // peerTypeP2P is a peer that is connected over an P2P connection + peerTypeP2P +) + type wsPeer struct { // lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer. // "successful communication" above refers to either reading from or writing to a connection without receiving any @@ -318,6 +291,10 @@ type wsPeer struct { // closers is a slice of functions to run when the peer is closed closers []func() + + // peerType defines the peer's underlying connection type + // used for separate p2p vs ws metrics + peerType peerType } // HTTPPeer is what the opaque Peer might be. @@ -639,10 +616,17 @@ func (wp *wsPeer) readLoop() { } msg.Net = wp.net wp.lastPacketTime.Store(msg.Received) - networkReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil) - networkMessageReceivedTotal.AddUint64(1, nil) - networkReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) - networkMessageReceivedByTag.Add(string(tag[:]), 1) + if wp.peerType == peerTypeWs { + networkReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil) + networkMessageReceivedTotal.AddUint64(1, nil) + networkReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + networkMessageReceivedByTag.Add(string(tag[:]), 1) + } else { + networkP2PReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil) + networkP2PMessageReceivedTotal.AddUint64(1, nil) + networkP2PReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + networkP2PMessageReceivedByTag.Add(string(tag[:]), 1) + } msg.Sender = wp // for outgoing connections, we want to notify the connection monitor that we've received @@ -863,11 +847,19 @@ func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason { return disconnectWriteError } wp.lastPacketTime.Store(time.Now().UnixNano()) - networkSentBytesTotal.AddUint64(uint64(len(msg.data)), nil) - networkSentBytesByTag.Add(string(tag), uint64(len(msg.data))) - networkMessageSentTotal.AddUint64(1, nil) - networkMessageSentByTag.Add(string(tag), 1) - networkMessageQueueMicrosTotal.AddUint64(uint64(time.Now().Sub(msg.peerEnqueued).Nanoseconds()/1000), nil) + if wp.peerType == peerTypeWs { + networkSentBytesTotal.AddUint64(uint64(len(msg.data)), nil) + networkSentBytesByTag.Add(string(tag), uint64(len(msg.data))) + networkMessageSentTotal.AddUint64(1, nil) + networkMessageSentByTag.Add(string(tag), 1) + 
networkMessageQueueMicrosTotal.AddUint64(uint64(time.Since(msg.peerEnqueued).Nanoseconds()/1000), nil) + } else { + networkP2PSentBytesTotal.AddUint64(uint64(len(msg.data)), nil) + networkP2PSentBytesByTag.Add(string(tag), uint64(len(msg.data))) + networkP2PMessageSentTotal.AddUint64(1, nil) + networkP2PMessageSentByTag.Add(string(tag), 1) + networkP2PMessageQueueMicrosTotal.AddUint64(uint64(time.Since(msg.peerEnqueued).Nanoseconds()/1000), nil) + } return disconnectReasonNone } diff --git a/test/heapwatch/block_history_plot.py b/test/heapwatch/block_history_plot.py index d8c86b454f..0bf8a2c8a2 100644 --- a/test/heapwatch/block_history_plot.py +++ b/test/heapwatch/block_history_plot.py @@ -23,6 +23,7 @@ # Graph over time of TPS or 10-round-moving-average-TPS import base64 +import json import os import statistics import sys @@ -106,12 +107,25 @@ def process(path, args): prevtc = tc prevts = ts prevtime = _time - print('{} blocks, block txns [{}-{}], block seconds [{}-{}], tps [{}-{}]'.format( + print('{} blocks, block txns [{}-{}], block seconds [{}-{}], tps [{}-{}], total txns {}'.format( count, mintxn,maxtxn, mindt,maxdt, mintps,maxtps, + tc, )) + if tc > 0: + with open(path + '.stats', 'w') as fout: + fout.write(json.dumps({ + 'blocks': count, + 'tc': tc, + 'mintxn': mintxn, + 'maxtxn': maxtxn, + 'mindt': mindt, + 'maxdt': maxdt, + 'mintps': mintps, + 'maxtps': maxtps, + })) start = 0 end = len(txnv)-1 diff --git a/test/heapwatch/metrics_aggs.py b/test/heapwatch/metrics_aggs.py index 0189634be5..d20593c097 100644 --- a/test/heapwatch/metrics_aggs.py +++ b/test/heapwatch/metrics_aggs.py @@ -33,7 +33,7 @@ from plotly.subplots import make_subplots -from metrics_lib import MetricType, parse_metrics, gather_metrics_files_by_nick +from metrics_lib import Metric, MetricType, parse_metrics, gather_metrics_files_by_nick logger = logging.getLogger(__name__) @@ -53,6 +53,8 @@ def main(): ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated') ap.add_argument('-s', '--save', type=str, choices=['png', 'html'], help=f'save plot to \'{default_img_filename}\' or \'{default_html_filename}\' file instead of showing it') ap.add_argument('--verbose', default=False, action='store_true') + ap.add_argument('--avg-max', default=False, action='store_true', help='print avg of max values across nodes for each metric') + ap.add_argument('--avg-max-min', default=False, action='store_true', help='print avg of max-min values across nodes for each metric') args = ap.parse_args() if args.verbose: @@ -99,6 +101,7 @@ def main(): } fig['layout']['height'] = 500 * nrows + nick_series = {} for nick, files_by_date in filesByNick.items(): active_metrics = {} @@ -146,7 +149,10 @@ def main(): active_metric_names.sort() active_metrics[full_name] = active_metric_names idx += 1 - + + if args.avg_max or args.avg_max_min: + nick_series[nick] = raw_series + for i, metric_pair in enumerate(sorted(active_metrics.items())): metric_name, metric_fullnames = metric_pair for metric_fullname in metric_fullnames: @@ -158,6 +164,23 @@ def main(): line=dict(width=1), ), i+1, 1) + if args.avg_max or args.avg_max_min: + metric_names_nick_max_avg = {} + for nick, raw_series in nick_series.items(): + for metric_name, rw in raw_series.items(): + mmax = max(rw) + mmin = min(rw) + print(f'{nick}: {metric_name}: count {len(rw)}, max {mmax}, min {mmin}, min-max {mmax - mmin}') + metric = Metric(metric_name, 0, MetricType.COUNTER) + if metric.short_name() not in metric_names_nick_max_avg: + 
metric_names_nick_max_avg[metric.short_name()] = [] + if args.avg_max_min: + metric_names_nick_max_avg[metric.short_name()].append(mmax - mmin) + if args.avg_max: + metric_names_nick_max_avg[metric.short_name()].append(mmax) + for metric_name, val in metric_names_nick_max_avg.items(): + print(f'{metric_name}: avg {sum(val)/len(val)}') + if args.save: if args.save == 'html': target_path = os.path.join(args.dir, default_html_filename) diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py index 2d64ee097a..3ff1afca65 100644 --- a/test/heapwatch/metrics_delta.py +++ b/test/heapwatch/metrics_delta.py @@ -136,6 +136,8 @@ def __init__(self, label=None): self.tpsMeanSum = 0 self.txBpsMeanSum = 0 self.rxBpsMeanSum = 0 + self.txP2PBpsMeanSum = 0 + self.rxP2PBpsMeanSum = 0 self.tpsSum = 0 self.blockTimeSum = 0 self.sumsCount = 0 @@ -152,6 +154,8 @@ def __call__(self, ttr, nick): self.tpsMeanSum += meanOrZero(ttr.tpsList) self.txBpsMeanSum += meanOrZero(ttr.txBpsList) self.rxBpsMeanSum += meanOrZero(ttr.rxBpsList) + self.txP2PBpsMeanSum += meanOrZero(ttr.txP2PBpsList) + self.rxP2PBpsMeanSum += meanOrZero(ttr.rxP2PBpsList) self.tpsSum += ttr.tps self.blockTimeSum += ttr.blockTime self.sumsCount += 1 @@ -164,8 +168,10 @@ def blockinfo(self, curtime): return self.biByTime.get(curtime) def byMsg(self, html=False): - txPSums = {} - rxPSums = {} + txWsPSums = {} + rxWsPSums = {} + txP2PPSums = {} + rxP2PPSums = {} secondsSum = 0 txMax = {} txMin = {} @@ -175,8 +181,10 @@ def byMsg(self, html=False): for nick, ns in self.nodes.items(): nicks.append(nick) secondsSum += ns.secondsSum - dictSum(txPSums, ns.txPSums) - dictSum(rxPSums, ns.rxPSums) + dictSum(txWsPSums, ns.txPSums) + dictSum(rxWsPSums, ns.rxPSums) + dictSum(txP2PPSums, ns.txP2PPSums) + dictSum(rxP2PPSums, ns.rxP2PPSums) dictMax(txMax, ns.txPLists) dictMax(rxMax, ns.rxPLists) dictMin(txMin, ns.txPLists) @@ -185,23 +193,36 @@ def byMsg(self, html=False): lines = [] if html: lines.append('
{}
'.format(nodesummary)) - lines.append('') + lines.append('
tx B/srx B/s
') # traffic per tag two columns: ws and p2p else: lines.append(nodesummary) - lines.append('\ttx B/s\trx B/s') - for msg, txB in txPSums.items(): - if msg not in rxPSums: - rxPSums[msg] = 0 - for rxBps, msg in sorted([(rxB/secondsSum, msg) for msg, rxB in rxPSums.items()], reverse=True): - txBps = txPSums.get(msg,0)/secondsSum - if (txBps < 0.5) and (rxBps < 0.5): - continue + + for title, txPSums, rxPSums in [ + ('ws', txWsPSums, rxWsPSums), + ('p2p', txP2PPSums, rxP2PPSums), + ]: if html: - lines.append(''.format(msg, txBps, rxBps)) + lines.append('') if html: - lines.append('
{}{:.0f}{:.0f}
') + lines.append(f'') else: - lines.append('{}\t{:.0f}\t{:.0f}'.format(msg, txBps, rxBps)) + lines.append(f'{title} traffic per tag') + lines.append('\ttx B/s\trx B/s') + for msg, txB in txPSums.items(): + if msg not in rxPSums: + rxPSums[msg] = 0 + for rxBps, msg in sorted([(rxB/secondsSum, msg) for msg, rxB in rxPSums.items()], reverse=True): + txBps = txPSums.get(msg,0)/secondsSum + if (txBps < 0.5) and (rxBps < 0.5): + continue + if html: + lines.append(''.format(msg, txBps, rxBps)) + else: + lines.append('{}\t{:.0f}\t{:.0f}'.format(msg, txBps, rxBps)) + if html: + lines.append('
{title} traffic per tag
tx B/srx B/s
{}{:.0f}{:.0f}
') + lines.append('
') + lines.append('') # traffic per tag two columns: ws and p2p return '\n'.join(lines) def txPool(self): @@ -230,7 +251,7 @@ def html(self): def str(self, html=False): if not self.sumsCount: - tps, txbps, rxbps = math.nan, math.nan, math.nan + tps, txbps, rxbps, txP2Pbps, rxP2Pbps = math.nan, math.nan, math.nan, math.nan, math.nan blockTimes = math.nan else: #tps = self.tpsMeanSum/self.sumsCount @@ -238,6 +259,8 @@ def str(self, html=False): blockTimes = self.blockTimeSum/self.sumsCount txbps = self.txBpsMeanSum/self.sumsCount rxbps = self.rxBpsMeanSum/self.sumsCount + txP2Pbps = self.txP2PBpsMeanSum/self.sumsCount + rxP2Pbps = self.rxP2PBpsMeanSum/self.sumsCount labelspace = "" if self.label: labelspace = self.label + " " @@ -248,12 +271,12 @@ def str(self, html=False): else: verifyMillis = '' if html: - fmt = '{byMsg}\n{verifyMillis}
<div>{labelspace}{txPool}</div>\n<div>{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s</div>'
+            fmt = '{byMsg}\n{verifyMillis}<div>{labelspace}{txPool}</div>\n<div>{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s, p2p tx {txP2PBps}B/s, p2p rx {rxP2PBps}B/s</div>
' if self.label: fmt = '
' + self.label + '
' + fmt else: - fmt = '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s' - return fmt.format(labelspace=labelspace, byMsg=self.byMsg(html), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis) + fmt = '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s, p2p tx {txP2PBps}B/s, p2p rx {rxP2PBps}B/s' + return fmt.format(labelspace=labelspace, byMsg=self.byMsg(html), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), txP2PBps=hunum(txP2Pbps), rxP2PBps=hunum(rxP2Pbps), bt=blockTimes, verifyMillis=verifyMillis) def plot_pool(self, outpath): from matplotlib import pyplot as plt @@ -486,17 +509,27 @@ def __init__(self): self.deltas = [] self.txBpsList = [] self.rxBpsList = [] + self.txP2PBpsList = [] + self.rxP2PBpsList = [] self.tpsList = [] self.txBSum = 0 self.rxBSum = 0 + self.txP2PBSum = 0 + self.rxP2PBSum = 0 self.txnSum = 0 self.secondsSum = 0 # algod_network_received_bytes_* self.rxPLists = {} self.rxPSums = {} + # algod_network_p2p_received_bytes_* + self.rxP2PPLists = {} + self.rxP2PPSums = {} # algod_network_sent_bytes_* self.txPLists = {} self.txPSums = {} + # algod_network_p2p_sent_bytes_* + self.txP2PPLists = {} + self.txP2PPSums = {} self.times = [] # algod_tx_pool_count self.txPool = [] @@ -533,7 +566,7 @@ def process_files(self, args, nick=None, metrics_files=None, bisource=None): reportpath = args.report[:-4] + nick + '.csv' reportf = open(reportpath, 'wt') writer = csv.writer(reportf) - writer.writerow(('when', 'tx bytes/s', 'rx bytes/s','TPS', 's/block')) + writer.writerow(('when', 'tx bytes/s', 'rx bytes/s', 'tx p2p bytes/s', 'rx p2p bytes/s', 'TPS', 's/block')) prev = None prevtime = None prevPath = None @@ -587,6 +620,11 @@ def process_files(self, args, nick=None, metrics_files=None, bisource=None): rxBytes = d.get('algod_network_received_bytes_total',0) txBytesPerSec = txBytes / dt rxBytesPerSec = rxBytes / dt + txP2PBytes = d.get('algod_network_p2p_sent_bytes_total',0) + rxP2PBytes = d.get('algod_network_p2p_received_bytes_total',0) + txP2PBytesPerSec = txP2PBytes / dt + rxP2PBytesPerSec = rxP2PBytes / dt + # TODO: gather algod_network_sent_bytes_* and algod_network_received_bytes_* if (tps is None) or ((args.mintps is not None) and (tps < args.mintps)): # do not sum up this row @@ -594,18 +632,26 @@ def process_files(self, args, nick=None, metrics_files=None, bisource=None): else: self.txBpsList.append(txBytesPerSec) self.rxBpsList.append(rxBytesPerSec) + self.txP2PBpsList.append(txP2PBytesPerSec) + self.rxP2PBpsList.append(rxP2PBytesPerSec) self.tpsList.append(tps) self.txBSum += txBytes self.rxBSum += rxBytes + self.txP2PBSum += txP2PBytes + self.rxP2PBSum += rxP2PBytes self.txnSum += txnCount self.secondsSum += dt perProtocol('algod_network_sent_bytes_', self.txPLists, self.txPSums, d, dt) perProtocol('algod_network_received_bytes_', self.rxPLists, self.rxPSums, d, dt) + perProtocol('algod_network_p2p_sent_bytes_', self.txP2PPLists, self.txP2PPSums, d, dt) + perProtocol('algod_network_p2p_received_bytes_', self.rxP2PPLists, self.rxP2PPSums, d, dt) if writer: writer.writerow(( time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(curtime)), txBytesPerSec, rxBytesPerSec, + txP2PBytesPerSec, + rxP2PBytesPerSec, tps, blocktime, )) @@ -631,13 +677,14 @@ def process_files(self, args, nick=None, metrics_files=None, bisource=None): self.blockTime = 
totalDt / rounds if writer and self.txBpsList: writer.writerow([]) - for bsum, msg in sorted([(bsum,msg) for msg,bsum in self.txPSums.items()]): - pass + # TODO: summarize + # for bsum, msg in sorted([(bsum,msg) for msg,bsum in self.txPSums.items()]): + # pass writer.writerow([]) - writer.writerow(['min', min(self.txBpsList), min(self.rxBpsList), min(self.tpsList)]) - writer.writerow(['avg', self.txBSum/self.secondsSum, self.rxBSum/self.secondsSum, self.txnSum/self.secondsSum]) - writer.writerow(['max', max(self.txBpsList), max(self.rxBpsList), max(self.tpsList)]) - writer.writerow(['std', statistics.pstdev(self.txBpsList), statistics.pstdev(self.rxBpsList), statistics.pstdev(self.tpsList)]) + writer.writerow(['min', min(self.txBpsList), min(self.rxBpsList), min(self.txP2PBpsList), min(self.rxP2PBpsList), min(self.tpsList)]) + writer.writerow(['avg', self.txBSum/self.secondsSum, self.rxBSum/self.secondsSum, self.txP2PBSum/self.secondsSum, self.rxP2PBSum/self.secondsSum, self.txnSum/self.secondsSum]) + writer.writerow(['max', max(self.txBpsList), max(self.rxBpsList), max(self.txP2PBpsList), max(self.rxP2PBpsList), max(self.tpsList)]) + writer.writerow(['std', statistics.pstdev(self.txBpsList), statistics.pstdev(self.rxBpsList), statistics.pstdev(self.txP2PBpsList), statistics.pstdev(self.rxP2PBpsList), statistics.pstdev(self.tpsList)]) if reportf: reportf.close() if self.deltas and args.deltas: diff --git a/test/heapwatch/metrics_lib.py b/test/heapwatch/metrics_lib.py index fbda555b90..5fc7b36075 100644 --- a/test/heapwatch/metrics_lib.py +++ b/test/heapwatch/metrics_lib.py @@ -54,7 +54,7 @@ def hunum(x): return '{:.1f}k'.format(x / 1000.0) if x >= 1000: return '{:.2f}k'.format(x / 1000.0) - return '{:.2f}x'.format(x) + return '{:.2f}'.format(x) def test_metric_line_re(): diff --git a/test/heapwatch/requirements.txt b/test/heapwatch/requirements.txt index db92372c6d..cf443a24e4 100644 --- a/test/heapwatch/requirements.txt +++ b/test/heapwatch/requirements.txt @@ -6,5 +6,5 @@ plotly==5.16.0 py-algorand-sdk==2.3.0 kaleido==0.2.1 networkx==3.3 -gravis=0.1.0 -termcolor=2.4.0 +gravis==0.1.0 +termcolor==2.4.0 diff --git a/util/metrics/metrics.go b/util/metrics/metrics.go index fcc566312f..d7afe6439c 100644 --- a/util/metrics/metrics.go +++ b/util/metrics/metrics.go @@ -39,6 +39,14 @@ var ( NetworkMessageReceivedTotal = MetricName{Name: "algod_network_message_received_total", Description: "Total number of complete messages that were received from the network"} // NetworkMessageSentTotal Total number of complete messages that were sent to the network NetworkMessageSentTotal = MetricName{Name: "algod_network_message_sent_total", Description: "Total number of complete messages that were sent to the network"} + // NetworkP2PSentBytesTotal Total number of bytes that were sent over the p2p network + NetworkP2PSentBytesTotal = MetricName{Name: "algod_network_p2p_sent_bytes_total", Description: "Total number of bytes that were sent over the p2p network"} + // NetworkP2PReceivedBytesTotal Total number of bytes that were received from the p2p network + NetworkP2PReceivedBytesTotal = MetricName{Name: "algod_network_p2p_received_bytes_total", Description: "Total number of bytes that were received from the p2p network"} + // NetworkP2PMessageReceivedTotal Total number of complete messages that were received from the p2p network + NetworkP2PMessageReceivedTotal = MetricName{Name: "algod_network_p2p_message_received_total", Description: "Total number of complete messages that were received from the p2p network"} 
+ // NetworkP2PMessageSentTotal Total number of complete messages that were sent to the p2p network + NetworkP2PMessageSentTotal = MetricName{Name: "algod_network_p2p_message_sent_total", Description: "Total number of complete messages that were sent to the p2p network"} // NetworkConnectionsDroppedTotal Total number of connections that were dropped before a message NetworkConnectionsDroppedTotal = MetricName{Name: "algod_network_connections_dropped_total", Description: "Total number of connections that were dropped before a message"} // NetworkSentDecompressedBytesTotal Total number of bytes that were sent over the network prior of being compressed From 10e8b3901d9b8573654c14d5a47c6cb235f04650 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:18:17 -0400 Subject: [PATCH 66/82] tests: fix a race in TestP2PwsStreamHandlerDedup (#6116) --- network/p2p/capabilities_test.go | 37 -------------------------------- network/p2pNetwork_test.go | 6 ++++-- 2 files changed, 4 insertions(+), 39 deletions(-) diff --git a/network/p2p/capabilities_test.go b/network/p2p/capabilities_test.go index 7057eca017..5e662c87fa 100644 --- a/network/p2p/capabilities_test.go +++ b/network/p2p/capabilities_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - golog "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p" dht "github.com/libp2p/go-libp2p-kad-dht" "github.com/libp2p/go-libp2p/core/discovery" @@ -38,42 +37,6 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -func TestCapabilities_Discovery(t *testing.T) { - partitiontest.PartitionTest(t) - - golog.SetDebugLogging() - var caps []*CapabilitiesDiscovery - var addrs []peer.AddrInfo - testSize := 3 - for i := 0; i < testSize; i++ { - tempdir := t.TempDir() - ps, err := peerstore.NewPeerStore(nil, "") - require.NoError(t, err) - h, _, err := MakeHost(config.GetDefaultLocal(), tempdir, ps) - require.NoError(t, err) - capD, err := MakeCapabilitiesDiscovery(context.Background(), config.GetDefaultLocal(), h, "devtestnet", logging.Base(), func() []peer.AddrInfo { return nil }) - require.NoError(t, err) - caps = append(caps, capD) - addrs = append(addrs, peer.AddrInfo{ - ID: capD.Host().ID(), - Addrs: capD.Host().Addrs(), - }) - } - for _, capD := range caps { - peersAdded := 0 - for _, addr := range addrs { - added, err := capD.addPeer(addr) - require.NoError(t, err) - require.True(t, added) - peersAdded++ - } - err := capD.dht.Bootstrap(context.Background()) - require.NoError(t, err) - capD.dht.ForceRefresh() - require.Equal(t, peersAdded, capD.dht.RoutingTable().Size()) - } -} - func setupDHTHosts(t *testing.T, numHosts int) []*dht.IpfsDHT { var hosts []host.Host var bootstrapPeers []peer.AddrInfo diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index 302aa76147..e2c231f843 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -1187,8 +1187,10 @@ func TestP2PwsStreamHandlerDedup(t *testing.T) { return networkPeerIdentityDisconnect.GetUint64Value() == networkPeerIdentityDisconnectInitial+1 }, 2*time.Second, 50*time.Millisecond) - require.False(t, netA.hasPeers()) - require.False(t, netB.hasPeers()) + // now allow the peer made outgoing connection to handle conn closing initiated by the other side + require.Eventually(t, func() bool { + return !netA.hasPeers() && !netB.hasPeers() + }, 2*time.Second, 50*time.Millisecond) } // TestP2PEnableGossipService_NodeDisable ensures that a node with EnableGossipService=false From 
81edd96c47ef9b004a3807fc9cc2816d52aa75d7 Mon Sep 17 00:00:00 2001 From: cce <51567+cce@users.noreply.github.com> Date: Fri, 30 Aug 2024 10:43:18 -0400 Subject: [PATCH 67/82] tests: use temp file for capturing e2e goal-partkey-commands output (#6115) --- .github/workflows/reviewdog.yml | 13 +++++++++++++ test/scripts/e2e_subs/goal-partkey-commands.sh | 18 ++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index b02d99e7cc..7c0f2d26ae 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -102,3 +102,16 @@ jobs: run: | curl -X POST --data-urlencode "payload={\"text\": \"Reviewdog failed. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} \"}" $SLACK_WEBHOOK if: ${{ failure() && (contains(github.ref_name, 'rel/nightly') || contains(github.ref_name, 'rel/beta') || contains(github.ref_name, 'rel/stable') || contains(github.ref_name, 'master')) }} + reviewdog-shellcheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: shellcheck + uses: reviewdog/action-shellcheck@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: "github-pr-check" + shellcheck_flags: "-e SC2034,SC2046,SC2053,SC2207,SC2145 -S warning" + fail_on_error: true + path: | + test/scripts/e2e_subs diff --git a/test/scripts/e2e_subs/goal-partkey-commands.sh b/test/scripts/e2e_subs/goal-partkey-commands.sh index b333a0e8aa..7d7154ec7b 100755 --- a/test/scripts/e2e_subs/goal-partkey-commands.sh +++ b/test/scripts/e2e_subs/goal-partkey-commands.sh @@ -83,16 +83,22 @@ verify_registered_state () { SEARCH_INVOKE_CONTEXT=$(echo "$3" | xargs) # look for participation ID anywhere in the partkeyinfo output - PARTKEY_OUTPUT=$(${gcmd} account partkeyinfo) - if ! echo "$PARTKEY_OUTPUT" | grep -q -F "$SEARCH_KEY"; then - fail_test "Key $SEARCH_KEY was not installed properly for cmd '$SEARCH_INVOKE_CONTEXT':\n$PARTKEY_OUTPUT" + info_temp_file=$(mktemp) + ${gcmd} account partkeyinfo > "${info_temp_file}" + if ! grep -q -F "$SEARCH_KEY" "${info_temp_file}"; then + echo "info_temp_file contents:" + cat "${info_temp_file}" + fail_test "Key $SEARCH_KEY was not installed properly for cmd '$SEARCH_INVOKE_CONTEXT'" fi # looking for yes/no, and the 8 character head of participation id in this line: # yes LFMT...RHJQ 4UPT6AQC... 4 0 3000 - LISTKEY_OUTPUT=$(${gcmd} account listpartkeys) - if ! echo "$LISTKEY_OUTPUT" | grep -q "$SEARCH_STATE.*$(echo "$SEARCH_KEY" | cut -c1-8)"; then - fail_test "Unexpected key $SEARCH_KEY state (looked for $SEARCH_STATE ) for cmd '$SEARCH_INVOKE_CONTEXT':\n$LISTKEY_OUTPUT" + list_temp_file=$(mktemp) + ${gcmd} account listpartkeys > "${list_temp_file}" + if ! 
grep -q "$SEARCH_STATE.*$(echo "$SEARCH_KEY" | cut -c1-8)" "${list_temp_file}"; then + echo "list_temp_file contents:" + cat "${list_temp_file}" + fail_test "Unexpected key $SEARCH_KEY state (looked for $SEARCH_STATE ) for cmd '$SEARCH_INVOKE_CONTEXT'" fi } From d105841e29f0b93aa74f3a757b60bc1c6297fd13 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:16:00 -0400 Subject: [PATCH 68/82] p2p: store private keys as PKCS#8 ASN.1 DER PEM (#6119) --- network/p2p/peerID.go | 45 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/network/p2p/peerID.go b/network/p2p/peerID.go index ca7526977b..c1b2536575 100644 --- a/network/p2p/peerID.go +++ b/network/p2p/peerID.go @@ -19,7 +19,10 @@ package p2p import ( + "crypto/ed25519" "crypto/rand" + "crypto/x509" + "encoding/pem" "fmt" "os" "path" @@ -34,7 +37,7 @@ import ( // DefaultPrivKeyPath is the default path inside the node's root directory at which the private key // for p2p identity is found and persisted to when a new one is generated. -const DefaultPrivKeyPath = "peerIDPrivKey.pem" +const DefaultPrivKeyPath = "peerIDPrivKey.key" // PeerID is a string representation of a peer's public key, primarily used to avoid importing libp2p into packages that shouldn't need it type PeerID string @@ -84,6 +87,9 @@ func PeerIDFromPublicKey(pubKey crypto.PubKey) (PeerID, error) { return PeerID(peerID), nil } +// pemBlockType is the type of PEM block used for private keys +const pemBlockType = "PRIVATE KEY" + // loadPrivateKeyFromFile attempts to read raw privKey bytes from path // It only supports Ed25519 keys. func loadPrivateKeyFromFile(path string) (crypto.PrivKey, error) { @@ -91,8 +97,21 @@ func loadPrivateKeyFromFile(path string) (crypto.PrivKey, error) { if err != nil { return nil, err } + p, _ := pem.Decode(bytes) + if p == nil || p.Type != pemBlockType { + return nil, fmt.Errorf("failed to PEM decode private key at %s", path) + } + + ak, err := x509.ParsePKCS8PrivateKey(p.Bytes) + if err != nil { + return nil, err + } + sk, ok := ak.(ed25519.PrivateKey) + if !ok { + return nil, fmt.Errorf("unsupported private key type: %T, expecting ed25519", ak) + } // We only support Ed25519 keys - return crypto.UnmarshalEd25519PrivateKey(bytes) + return crypto.UnmarshalEd25519PrivateKey(sk) } // writePrivateKeyToFile attempts to write raw privKey bytes to path @@ -101,7 +120,27 @@ func writePrivateKeyToFile(path string, privKey crypto.PrivKey) error { if err != nil { return err } - return os.WriteFile(path, bytes, 0600) + if len(bytes) != ed25519.PrivateKeySize { + return fmt.Errorf("incompatible ed25519 private key length: %d", len(bytes)) + } + key := ed25519.PrivateKey(bytes) + derBytes, err := x509.MarshalPKCS8PrivateKey(key) + if err != nil { + return err + } + + p := pem.Block{ + Type: pemBlockType, + Bytes: derBytes, + } + + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + + return pem.Encode(f, &p) } // generatePrivKey creates a new Ed25519 key From 0da0e99556ce4426902c6a31ba0ea30c00ed320f Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:16:15 -0400 Subject: [PATCH 69/82] network: fix outgoing HTTP rate limiting (#6118) --- network/limitcaller/rateLimitingTransport.go | 49 +++++++------ .../limitcaller/rateLimitingTransport_test.go | 72 +++++++++++++++++++ network/p2p/http.go | 6 +- 
network/p2pNetwork.go | 9 +-- network/p2pNetwork_test.go | 2 +- network/wsNetwork.go | 19 +++-- network/wsNetwork_test.go | 39 ++++++++++ 7 files changed, 155 insertions(+), 41 deletions(-) create mode 100644 network/limitcaller/rateLimitingTransport_test.go diff --git a/network/limitcaller/rateLimitingTransport.go b/network/limitcaller/rateLimitingTransport.go index de68c9b371..7877c879d1 100644 --- a/network/limitcaller/rateLimitingTransport.go +++ b/network/limitcaller/rateLimitingTransport.go @@ -22,7 +22,6 @@ import ( "time" "github.com/algorand/go-algorand/util" - "github.com/libp2p/go-libp2p/core/peer" ) // ConnectionTimeStore is a subset of the phonebook that is used to store the connection times. @@ -31,12 +30,12 @@ type ConnectionTimeStore interface { UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool } -// RateLimitingTransport is the transport for execute a single HTTP transaction, obtaining the Response for a given Request. -type RateLimitingTransport struct { +// RateLimitingBoundTransport is the transport for execute a single HTTP transaction, obtaining the Response for a given Request. +type RateLimitingBoundTransport struct { phonebook ConnectionTimeStore innerTransport http.RoundTripper queueingTimeout time.Duration - targetAddr interface{} // target address for the p2p http request + addrOrPeerID string } // DefaultQueueingTimeout is the default timeout for queueing the request. @@ -46,9 +45,10 @@ const DefaultQueueingTimeout = 10 * time.Second // queueing the current request before the request attempt could be made. var ErrConnectionQueueingTimeout = errors.New("rateLimitingTransport: queueing timeout") -// MakeRateLimitingTransport creates a rate limiting http transport that would limit the requests rate -// according to the entries in the phonebook. -func MakeRateLimitingTransport(phonebook ConnectionTimeStore, queueingTimeout time.Duration, dialer *Dialer, maxIdleConnsPerHost int) RateLimitingTransport { +// MakeRateLimitingBoundTransport creates a rate limiting http transport that that: +// 1. would limit the requests rate according to the entries in the phonebook. +// 2. is bound to a specific target. +func MakeRateLimitingBoundTransport(phonebook ConnectionTimeStore, queueingTimeout time.Duration, dialer *Dialer, maxIdleConnsPerHost int, target string) RateLimitingBoundTransport { defaultTransport := http.DefaultTransport.(*http.Transport) innerTransport := &http.Transport{ Proxy: defaultTransport.Proxy, @@ -59,37 +59,36 @@ func MakeRateLimitingTransport(phonebook ConnectionTimeStore, queueingTimeout ti ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, MaxIdleConnsPerHost: maxIdleConnsPerHost, } - return MakeRateLimitingTransportWithRoundTripper(phonebook, queueingTimeout, innerTransport, nil, maxIdleConnsPerHost) + return MakeRateLimitingBoundTransportWithRoundTripper(phonebook, queueingTimeout, innerTransport, target) } -// MakeRateLimitingTransportWithRoundTripper creates a rate limiting http transport that would limit the requests rate -// according to the entries in the phonebook. -func MakeRateLimitingTransportWithRoundTripper(phonebook ConnectionTimeStore, queueingTimeout time.Duration, rt http.RoundTripper, target interface{}, maxIdleConnsPerHost int) RateLimitingTransport { - return RateLimitingTransport{ +// MakeRateLimitingBoundTransportWithRoundTripper creates a rate limiting http transport that: +// 1. would limit the requests rate according to the entries in the phonebook. +// 2. 
is bound to a specific target. +func MakeRateLimitingBoundTransportWithRoundTripper(phonebook ConnectionTimeStore, queueingTimeout time.Duration, rt http.RoundTripper, target string) RateLimitingBoundTransport { + return RateLimitingBoundTransport{ phonebook: phonebook, innerTransport: rt, queueingTimeout: queueingTimeout, - targetAddr: target, + addrOrPeerID: target, } } // RoundTrip connects to the address on the named network using the provided context. // It waits if needed not to exceed connectionsRateLimitingCount. -func (r *RateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response, err error) { +func (r *RateLimitingBoundTransport) RoundTrip(req *http.Request) (res *http.Response, err error) { var waitTime time.Duration var provisionalTime time.Time - queueingDeadline := time.Now().Add(r.queueingTimeout) - addrOrPeerID := req.Host - // p2p/http clients have per-connection transport and address info so use that - if len(req.Host) == 0 && req.URL != nil && len(req.URL.Host) == 0 { - addrInfo, ok := r.targetAddr.(*peer.AddrInfo) - if !ok { - return nil, errors.New("rateLimitingTransport: request without Host/URL and targetAddr is not a peer.AddrInfo") - } - addrOrPeerID = string(addrInfo.ID) + if r.addrOrPeerID == "" { + return nil, errors.New("rateLimitingTransport: target not set") + } + if req.URL != nil && req.URL.Host != "" && req.URL.Host != r.addrOrPeerID { + return nil, errors.New("rateLimitingTransport: request URL host does not match the target") } + + queueingDeadline := time.Now().Add(r.queueingTimeout) for { - _, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(addrOrPeerID) + _, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(r.addrOrPeerID) if waitTime == 0 { break // break out of the loop and proceed to the connection } @@ -101,6 +100,6 @@ func (r *RateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response return nil, ErrConnectionQueueingTimeout } res, err = r.innerTransport.RoundTrip(req) - r.phonebook.UpdateConnectionTime(addrOrPeerID, provisionalTime) + r.phonebook.UpdateConnectionTime(r.addrOrPeerID, provisionalTime) return } diff --git a/network/limitcaller/rateLimitingTransport_test.go b/network/limitcaller/rateLimitingTransport_test.go new file mode 100644 index 0000000000..155ed8310f --- /dev/null +++ b/network/limitcaller/rateLimitingTransport_test.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019-2024 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+ +package limitcaller + +import ( + "net/http" + "testing" + "time" + + "github.com/algorand/go-algorand/test/partitiontest" + "github.com/stretchr/testify/require" +) + +type ctStore struct { + t *testing.T + getCnt uint64 +} + +func (c *ctStore) GetConnectionWaitTime(addrOrPeerID string) (bool, time.Duration, time.Time) { + require.NotEmpty(c.t, addrOrPeerID) + c.getCnt++ + return false, 0, time.Time{} +} + +func (c *ctStore) UpdateConnectionTime(addrOrPeerID string, provisionalTime time.Time) bool { + require.NotEmpty(c.t, addrOrPeerID) + return false +} + +type emptyRoundTripper struct{} + +func (e *emptyRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, nil } + +func TestRoundTrip(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + ctStore := ctStore{t: t} + rtt := MakeRateLimitingBoundTransportWithRoundTripper(&ctStore, 0, &emptyRoundTripper{}, "") + req := &http.Request{} + _, err := rtt.RoundTrip(req) + require.ErrorContains(t, err, "target not set") + require.Equal(t, uint64(0), ctStore.getCnt) + + rtt = MakeRateLimitingBoundTransportWithRoundTripper(&ctStore, 0, &emptyRoundTripper{}, "mytarget") + req, err = http.NewRequest("GET", "https://example.com/test", nil) + require.NoError(t, err) + _, err = rtt.RoundTrip(req) + require.ErrorContains(t, err, "URL host does not match the target") + require.Equal(t, uint64(0), ctStore.getCnt) + + rtt = MakeRateLimitingBoundTransportWithRoundTripper(&ctStore, 0, &emptyRoundTripper{}, "mytarget") + req, err = http.NewRequest("GET", "/test", nil) + require.NoError(t, err) + _, err = rtt.RoundTrip(req) + require.NoError(t, err) + require.Equal(t, uint64(1), ctStore.getCnt) +} diff --git a/network/p2p/http.go b/network/p2p/http.go index 07f27afff1..633a13713d 100644 --- a/network/p2p/http.go +++ b/network/p2p/http.go @@ -88,13 +88,13 @@ func MakeHTTPClient(addrInfo *peer.AddrInfo) (*http.Client, error) { } // MakeHTTPClientWithRateLimit creates a http.Client that uses libp2p transport for a given protocol and peer address. 
-func MakeHTTPClientWithRateLimit(addrInfo *peer.AddrInfo, pstore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration, maxIdleConnsPerHost int) (*http.Client, error) { +func MakeHTTPClientWithRateLimit(addrInfo *peer.AddrInfo, pstore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) { cl, err := MakeHTTPClient(addrInfo) if err != nil { return nil, err } - rlrt := limitcaller.MakeRateLimitingTransportWithRoundTripper(pstore, queueingTimeout, cl.Transport, addrInfo, maxIdleConnsPerHost) - cl.Transport = &rlrt + rltr := limitcaller.MakeRateLimitingBoundTransportWithRoundTripper(pstore, queueingTimeout, cl.Transport, string(addrInfo.ID)) + cl.Transport = &rltr return cl, nil } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index d3af60a223..f88660b653 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -613,8 +613,7 @@ func addrInfoToWsPeerCore(n *P2PNetwork, addrInfo *peer.AddrInfo) (wsPeerCore, b } addr := mas[0].String() - maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) - client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) + client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) if err != nil { n.log.Warnf("MakeHTTPClient failed: %v", err) return wsPeerCore{}, false @@ -720,8 +719,7 @@ func (n *P2PNetwork) GetHTTPClient(address string) (*http.Client, error) { if err != nil { return nil, err } - maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) - return p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) + return p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) } // OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress. @@ -774,8 +772,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea // create a wsPeer for this stream and added it to the peers map. addrInfo := &peer.AddrInfo{ID: p2pPeer, Addrs: []multiaddr.Multiaddr{ma}} - maxIdleConnsPerHost := int(n.config.ConnectionsRateLimitingCount) - client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout, maxIdleConnsPerHost) + client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) if err != nil { n.log.Warnf("Cannot construct HTTP Client for %s: %v", p2pPeer, err) client = nil diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index e2c231f843..dcc641c350 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -783,7 +783,7 @@ func TestP2PHTTPHandler(t *testing.T) { pstore, err := peerstore.MakePhonebook(0, 10*time.Second) require.NoError(t, err) pstore.AddPersistentPeers([]*peer.AddrInfo{&peerInfoA}, "net", phonebook.PhoneBookEntryRelayRole) - httpClient, err = p2p.MakeHTTPClientWithRateLimit(&peerInfoA, pstore, 1*time.Second, 1) + httpClient, err = p2p.MakeHTTPClientWithRateLimit(&peerInfoA, pstore, 1*time.Second) require.NoError(t, err) _, err = httpClient.Get("/test") require.ErrorIs(t, err, limitcaller.ErrConnectionQueueingTimeout) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index ecb636c8e2..c67200f01b 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -230,10 +230,9 @@ type WebsocketNetwork struct { // number of throttled outgoing connections "slots" needed to be populated. 
throttledOutgoingConnections atomic.Int32 - // transport and dialer are customized to limit the number of + // dialer is customized to limit the number of // connection in compliance with connectionsRateLimitingCount. - transport limitcaller.RateLimitingTransport - dialer limitcaller.Dialer + dialer limitcaller.Dialer // messagesOfInterest specifies the message types that this node // wants to receive. nil means default. non-nil causes this @@ -565,9 +564,7 @@ func (wn *WebsocketNetwork) setup() { if wn.nodeInfo == nil { wn.nodeInfo = &nopeNodeInfo{} } - maxIdleConnsPerHost := int(wn.config.ConnectionsRateLimitingCount) wn.dialer = limitcaller.MakeRateLimitingDialer(wn.phonebook, preferredResolver) - wn.transport = limitcaller.MakeRateLimitingTransport(wn.phonebook, limitcaller.DefaultQueueingTimeout, &wn.dialer, maxIdleConnsPerHost) wn.upgrader.ReadBufferSize = 4096 wn.upgrader.WriteBufferSize = 4096 @@ -1975,8 +1972,18 @@ func (wn *WebsocketNetwork) numOutgoingPending() int { // GetHTTPClient returns a http.Client with a suitable for the network Transport // that would also limit the number of outgoing connections. func (wn *WebsocketNetwork) GetHTTPClient(address string) (*http.Client, error) { + url, err := addr.ParseHostOrURL(address) + if err != nil { + return nil, err + } + + maxIdleConnsPerHost := int(wn.config.ConnectionsRateLimitingCount) + rltr := limitcaller.MakeRateLimitingBoundTransport(wn.phonebook, limitcaller.DefaultQueueingTimeout, &wn.dialer, maxIdleConnsPerHost, url.Host) return &http.Client{ - Transport: &HTTPPAddressBoundTransport{address, &wn.transport}, + Transport: &HTTPPAddressBoundTransport{ + address, + &rltr, + }, }, nil } diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 0128c28fc2..91983dfa20 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -4601,3 +4601,42 @@ func TestHTTPPAddressBoundTransport(t *testing.T) { } } } + +// TestWebsocketNetworkHTTPClient checks ws net HTTP client can connect to another node +// with out unexpected errors +func TestWebsocketNetworkHTTPClient(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + netA := makeTestWebsocketNode(t) + err := netA.Start() + require.NoError(t, err) + defer netStop(t, netA, "A") + + netB := makeTestWebsocketNodeWithConfig(t, defaultConfig) + + addr, ok := netA.Address() + require.True(t, ok) + + c, err := netB.GetHTTPClient(addr) + require.NoError(t, err) + + netA.RegisterHTTPHandlerFunc("/handled", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + resp, err := c.Do(&http.Request{URL: &url.URL{Path: "/handled"}}) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + + resp, err = c.Do(&http.Request{URL: &url.URL{Path: "/test"}}) + require.NoError(t, err) + require.Equal(t, http.StatusNotFound, resp.StatusCode) // no such handler + + resp, err = c.Do(&http.Request{URL: &url.URL{Path: "/v1/" + genesisID + "/gossip"}}) + require.NoError(t, err) + require.Equal(t, http.StatusPreconditionFailed, resp.StatusCode) // not enough ws peer headers + + _, err = netB.GetHTTPClient("invalid") + require.Error(t, err) +} From 43cb563248776b2db0743e9c0579e8c8e83cbf7c Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Tue, 3 Sep 2024 17:06:00 -0400 Subject: [PATCH 70/82] AVM: Provide access to some more block header values (#6107) --- data/transactions/logic/TEAL_opcodes_v10.md | 2 +- data/transactions/logic/TEAL_opcodes_v7.md | 2 +- data/transactions/logic/TEAL_opcodes_v8.md | 2 +- 
data/transactions/logic/TEAL_opcodes_v9.md | 2 +- data/transactions/logic/assembler_test.go | 10 +++++ data/transactions/logic/eval.go | 12 ++++++ data/transactions/logic/fields.go | 17 +++++++- data/transactions/logic/fields_string.go | 11 +++-- data/transactions/logic/langspec_v10.json | 2 +- data/transactions/logic/langspec_v7.json | 2 +- data/transactions/logic/langspec_v8.json | 2 +- data/transactions/logic/langspec_v9.json | 2 +- test/scripts/e2e_subs/hdr-access.py | 46 ++++++++++++++------- 13 files changed, 86 insertions(+), 26 deletions(-) diff --git a/data/transactions/logic/TEAL_opcodes_v10.md b/data/transactions/logic/TEAL_opcodes_v10.md index b90801521b..4e4491e095 100644 --- a/data/transactions/logic/TEAL_opcodes_v10.md +++ b/data/transactions/logic/TEAL_opcodes_v10.md @@ -1638,7 +1638,7 @@ Fields | Index | Name | Type | Notes | | - | ------ | -- | --------- | -| 0 | BlkSeed | []byte | | +| 0 | BlkSeed | [32]byte | | | 1 | BlkTimestamp | uint64 | | diff --git a/data/transactions/logic/TEAL_opcodes_v7.md b/data/transactions/logic/TEAL_opcodes_v7.md index 3a0c678b00..74314af059 100644 --- a/data/transactions/logic/TEAL_opcodes_v7.md +++ b/data/transactions/logic/TEAL_opcodes_v7.md @@ -1476,6 +1476,6 @@ Fields | Index | Name | Type | Notes | | - | ------ | -- | --------- | -| 0 | BlkSeed | []byte | | +| 0 | BlkSeed | [32]byte | | | 1 | BlkTimestamp | uint64 | | diff --git a/data/transactions/logic/TEAL_opcodes_v8.md b/data/transactions/logic/TEAL_opcodes_v8.md index f06e087e5e..a1059bc50e 100644 --- a/data/transactions/logic/TEAL_opcodes_v8.md +++ b/data/transactions/logic/TEAL_opcodes_v8.md @@ -1635,6 +1635,6 @@ Fields | Index | Name | Type | Notes | | - | ------ | -- | --------- | -| 0 | BlkSeed | []byte | | +| 0 | BlkSeed | [32]byte | | | 1 | BlkTimestamp | uint64 | | diff --git a/data/transactions/logic/TEAL_opcodes_v9.md b/data/transactions/logic/TEAL_opcodes_v9.md index e14f4d7d76..ac4482ce3e 100644 --- a/data/transactions/logic/TEAL_opcodes_v9.md +++ b/data/transactions/logic/TEAL_opcodes_v9.md @@ -1635,6 +1635,6 @@ Fields | Index | Name | Type | Notes | | - | ------ | -- | --------- | -| 0 | BlkSeed | []byte | | +| 0 | BlkSeed | [32]byte | | | 1 | BlkTimestamp | uint64 | | diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 32101dbcbc..10d86f476a 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -1700,11 +1700,21 @@ global AssetCreateMinBalance global AssetOptInMinBalance global GenesisHash pushint 1 +block BlkBranch +pushint 1 +block BlkFeeSink +pushint 1 +block BlkProtocol +pushint 1 +block BlkTxnCounter +pushint 1 block BlkProposer pushint 1 block BlkFeesCollected pushint 1 block BlkBonus +pushint 1 +block BlkProposerPayout global PayoutsEnabled global PayoutsGoOnlineFee global PayoutsPercent diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 3201877ed9..4da436a1b6 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -5771,12 +5771,24 @@ func opBlock(cx *EvalContext) error { return fmt.Errorf("block(%d) timestamp %d < 0", round, hdr.TimeStamp) } cx.Stack[last] = stackValue{Uint: uint64(hdr.TimeStamp)} + + case BlkBranch: + cx.Stack[last].Bytes = hdr.Branch[:] + case BlkFeeSink: + cx.Stack[last].Bytes = hdr.FeeSink[:] + case BlkProtocol: + cx.Stack[last].Bytes = []byte(hdr.CurrentProtocol) + case BlkTxnCounter: + cx.Stack[last] = stackValue{Uint: hdr.TxnCounter} + case BlkProposer: 
cx.Stack[last].Bytes = hdr.Proposer[:] case BlkFeesCollected: cx.Stack[last] = stackValue{Uint: hdr.FeesCollected.Raw} case BlkBonus: cx.Stack[last] = stackValue{Uint: hdr.Bonus.Raw} + case BlkProposerPayout: + cx.Stack[last] = stackValue{Uint: hdr.ProposerPayout.Raw} default: return fmt.Errorf("invalid block field %s", fs.field) } diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index 99cc08bad2..b2f384c259 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -997,6 +997,16 @@ const ( BlkFeesCollected // BlkBonus is the extra amount to be paid for the given block (from FeeSink) BlkBonus + // BlkBranch is the hash of the previous block + BlkBranch + // BlkFeeSink is the fee sink for the given round + BlkFeeSink + // BlkProtocol is the ConsensusVersion of the block. + BlkProtocol + // BlkTxnCounter is the number of the next transaction after the block + BlkTxnCounter + // BlkProposerPayout is the actual amount moved from feesink to proposer + BlkProposerPayout invalidBlockField // compile-time constant for number of fields ) @@ -1010,11 +1020,16 @@ type blockFieldSpec struct { } var blockFieldSpecs = [...]blockFieldSpec{ - {BlkSeed, StackBytes, randomnessVersion}, + {BlkSeed, StackBytes32, randomnessVersion}, {BlkTimestamp, StackUint64, randomnessVersion}, {BlkProposer, StackAddress, incentiveVersion}, {BlkFeesCollected, StackUint64, incentiveVersion}, {BlkBonus, StackUint64, incentiveVersion}, + {BlkBranch, StackBytes32, incentiveVersion}, + {BlkFeeSink, StackAddress, incentiveVersion}, + {BlkProtocol, StackBytes, incentiveVersion}, + {BlkTxnCounter, StackUint64, incentiveVersion}, + {BlkProposerPayout, StackUint64, incentiveVersion}, } func blockFieldSpecByField(r BlockField) (blockFieldSpec, bool) { diff --git a/data/transactions/logic/fields_string.go b/data/transactions/logic/fields_string.go index 3463da269c..df9922abf2 100644 --- a/data/transactions/logic/fields_string.go +++ b/data/transactions/logic/fields_string.go @@ -382,12 +382,17 @@ func _() { _ = x[BlkProposer-2] _ = x[BlkFeesCollected-3] _ = x[BlkBonus-4] - _ = x[invalidBlockField-5] + _ = x[BlkBranch-5] + _ = x[BlkFeeSink-6] + _ = x[BlkProtocol-7] + _ = x[BlkTxnCounter-8] + _ = x[BlkProposerPayout-9] + _ = x[invalidBlockField-10] } -const _BlockField_name = "BlkSeedBlkTimestampBlkProposerBlkFeesCollectedBlkBonusinvalidBlockField" +const _BlockField_name = "BlkSeedBlkTimestampBlkProposerBlkFeesCollectedBlkBonusBlkBranchBlkFeeSinkBlkProtocolBlkTxnCounterBlkProposerPayoutinvalidBlockField" -var _BlockField_index = [...]uint8{0, 7, 19, 30, 46, 54, 71} +var _BlockField_index = [...]uint8{0, 7, 19, 30, 46, 54, 63, 73, 84, 97, 114, 131} func (i BlockField) String() string { if i < 0 || i >= BlockField(len(_BlockField_index)-1) { diff --git a/data/transactions/logic/langspec_v10.json b/data/transactions/logic/langspec_v10.json index f59103f0f6..1a8986436a 100644 --- a/data/transactions/logic/langspec_v10.json +++ b/data/transactions/logic/langspec_v10.json @@ -4601,7 +4601,7 @@ "BlkTimestamp" ], "ArgEnumTypes": [ - "[]byte", + "[32]byte", "uint64" ], "DocCost": "1", diff --git a/data/transactions/logic/langspec_v7.json b/data/transactions/logic/langspec_v7.json index a7b4df95b3..12d2594194 100644 --- a/data/transactions/logic/langspec_v7.json +++ b/data/transactions/logic/langspec_v7.json @@ -4249,7 +4249,7 @@ "BlkTimestamp" ], "ArgEnumTypes": [ - "[]byte", + "[32]byte", "uint64" ], "DocCost": "1", diff --git a/data/transactions/logic/langspec_v8.json 
b/data/transactions/logic/langspec_v8.json index 186f9cdfe2..c5fcdbf58d 100644 --- a/data/transactions/logic/langspec_v8.json +++ b/data/transactions/logic/langspec_v8.json @@ -4595,7 +4595,7 @@ "BlkTimestamp" ], "ArgEnumTypes": [ - "[]byte", + "[32]byte", "uint64" ], "DocCost": "1", diff --git a/data/transactions/logic/langspec_v9.json b/data/transactions/logic/langspec_v9.json index cab0aa6d91..01e951cc3c 100644 --- a/data/transactions/logic/langspec_v9.json +++ b/data/transactions/logic/langspec_v9.json @@ -4595,7 +4595,7 @@ "BlkTimestamp" ], "ArgEnumTypes": [ - "[]byte", + "[32]byte", "uint64" ], "DocCost": "1", diff --git a/test/scripts/e2e_subs/hdr-access.py b/test/scripts/e2e_subs/hdr-access.py index bb6c0ad650..32739e91b2 100755 --- a/test/scripts/e2e_subs/hdr-access.py +++ b/test/scripts/e2e_subs/hdr-access.py @@ -1,8 +1,10 @@ #!/usr/bin/env python +import base64 import os import sys from goal import Goal +import algosdk.encoding as enc from datetime import datetime @@ -43,13 +45,10 @@ txinfo, err = goal.app_create(joe, goal.assemble(teal)) assert "not available" in str(err), err -# We want to manipulate lastvalid, so we need to turn off autosend -goal.autosend = False - -# We will be able to access two blocks, by setting lv explcitly. So we -# test that the block timestamp from two blocks ago is between 2 and 5 -# (inclusive) seconds before the previous block timestamp. devMode -# might mess this test up. +# We will be able to access more than one previous block by using a +# shorter tx liftetime. So we test that the block timestamp from two +# blocks ago is between 2 and 5 (inclusive) seconds before the +# previous block timestamp. devMode might mess this test up. teal = """ #pragma version 7 txn FirstValid @@ -73,10 +72,7 @@ int 6 < """ -checktimes = goal.assemble(teal) -tx = goal.app_create(joe, goal.assemble(teal)) -tx.last_valid_round = tx.last_valid_round - 800 -txinfo, err = goal.send(tx) +txinfo, err = goal.app_create(joe, goal.assemble(teal), lifetime=100) assert not err, err # block 0 is not accessible even with a low LastValid @@ -85,11 +81,33 @@ int 0 block BlkTimestamp """ -tx = goal.app_create(joe, goal.assemble(teal)) -tx.last_valid_round = tx.last_valid_round - 800 -txinfo, err = goal.send(tx) +txinfo, err = goal.app_create(joe, goal.assemble(teal), lifetime=100) assert "round 0 is not available" in str(err), err assert "outside [1-" in str(err), err # confirms that we can look back to 1 + +# Get FeeSink from `block` opcode, compare to REST API +teal = """ +#pragma version 11 + txn FirstValid + int 2 + - + block BlkFeeSink + log + int 1 + return +""" +txinfo, err = goal.app_create(joe, goal.assemble(teal), lifetime=100) +assert not err, err +assert len(txinfo["logs"]) == 1 +opcode = txinfo["logs"][0] + +block = goal.algod.block_info(txinfo['confirmed-round']-2)['block'] +api = base64.b64encode(enc.decode_address(block['fees'])).decode("utf-8") + +print(opcode, api) + +assert opcode == api + stamp = datetime.now().strftime("%Y%m%d_%H%M%S") print(f"{os.path.basename(sys.argv[0])} OK {stamp}") From b1d81bce7acc1db7767319f758d8b2c62acb23e6 Mon Sep 17 00:00:00 2001 From: John Lee Date: Tue, 3 Sep 2024 17:06:28 -0400 Subject: [PATCH 71/82] algons: expose error for dnsaddr command (#6121) --- cmd/algons/dnsaddrCmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/algons/dnsaddrCmd.go b/cmd/algons/dnsaddrCmd.go index c8fb1ac52b..b248c867f2 100644 --- a/cmd/algons/dnsaddrCmd.go +++ b/cmd/algons/dnsaddrCmd.go @@ -141,7 +141,7 @@ var 
dnsaddrTreeCreateCmd = &cobra.Command{ dnsaddrsFrom := []string{fmt.Sprintf("_dnsaddr.%s", dnsaddrDomain)} entries, err := getEntries(dnsaddrsFrom[0], "TXT") if err != nil { - fmt.Printf("failed fetching entries for %s\n", dnsaddrsFrom[0]) + fmt.Printf("failed fetching entries for %s: %v\n", dnsaddrsFrom[0], err) os.Exit(1) } if len(entries) > 0 { From 66b87dc781a265cf1287aed7c6c714e43fcd4280 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 5 Sep 2024 15:06:43 -0400 Subject: [PATCH 72/82] tests: fix expect tests and add EOF linter (#6122) Co-authored-by: cce <51567+cce@users.noreply.github.com> --- Makefile | 3 + scripts/travis/codegen_verification.sh | 3 + .../cli/goal/expect/basicExpectTest.exp | 4 +- test/e2e-go/cli/goal/expect/corsTest.exp | 1 + test/e2e-go/cli/goal/expect/expect_linter.py | 72 ++++++++++++++++ .../cli/goal/expect/goalCmdFlagsTest.exp | 1 + .../cli/goal/expect/goalDryrunRestTest.exp | 1 + .../cli/goal/expect/goalExpectCommon.exp | 85 ++++++++++++++----- .../cli/goal/expect/goalTxValidityTest.exp | 1 + .../e2e-go/cli/goal/expect/limitOrderTest.exp | 12 ++- .../goal/expect/tealAndStatefulTealTest.exp | 4 +- .../cli/goal/expect/tealConsensusTest.exp | 8 +- test/e2e-go/cli/goal/expect/testInfraTest.exp | 2 + 13 files changed, 166 insertions(+), 31 deletions(-) create mode 100644 test/e2e-go/cli/goal/expect/expect_linter.py diff --git a/Makefile b/Makefile index 52c505b301..b6823e665a 100644 --- a/Makefile +++ b/Makefile @@ -114,6 +114,9 @@ fix: build lint: deps $(GOPATH1)/bin/golangci-lint run -c .golangci.yml +expectlint: + cd test/e2e-go/cli/goal/expect && python3 expect_linter.py *.exp + check_go_version: @if [ $(CURRENT_GO_VERSION_MAJOR) != $(GOLANG_VERSION_BUILD_MAJOR) ]; then \ echo "Wrong major version of Go installed ($(CURRENT_GO_VERSION_MAJOR)). 
Please use $(GOLANG_VERSION_BUILD_MAJOR)"; \ diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh index 53ad607a49..5e3a53de3b 100755 --- a/scripts/travis/codegen_verification.sh +++ b/scripts/travis/codegen_verification.sh @@ -40,6 +40,9 @@ echo "Running fixcheck" GOPATH=$(go env GOPATH) "$GOPATH"/bin/algofix -error */ +echo "Running expect linter" +make expectlint + echo "Updating TEAL Specs" touch data/transactions/logic/fields_string.go # ensure rebuild make -C data/transactions/logic diff --git a/test/e2e-go/cli/goal/expect/basicExpectTest.exp b/test/e2e-go/cli/goal/expect/basicExpectTest.exp index d44f15ca65..ab572ba63e 100644 --- a/test/e2e-go/cli/goal/expect/basicExpectTest.exp +++ b/test/e2e-go/cli/goal/expect/basicExpectTest.exp @@ -16,12 +16,14 @@ if { [catch { spawn echo "hello" expect { timeout { abort "\n Failed to see expected input hello" } + eof { abort "Ended without hello" } "^hello*" {close} } spawn echo "goodbye" expect { timeout { abort "Failed to see expected input goodbye" } + eof { abort "Ended without goodbye" } "^goodbye*" {close} } @@ -29,4 +31,4 @@ if { [catch { } EXCEPTION ] } { abort "ERROR in basic expect test: $EXCEPTION" -} \ No newline at end of file +} diff --git a/test/e2e-go/cli/goal/expect/corsTest.exp b/test/e2e-go/cli/goal/expect/corsTest.exp index 7691b740fa..43fb0fa24d 100755 --- a/test/e2e-go/cli/goal/expect/corsTest.exp +++ b/test/e2e-go/cli/goal/expect/corsTest.exp @@ -32,6 +32,7 @@ if { [catch { ::AlgorandGoal::CheckNetworkAddressForCors $ALGOD_NET_ADDRESS # Start kmd, then do the same CORS check as algod + exec -- cat "$TEST_PRIMARY_NODE_DIR/kmd-v0.5/kmd_config.json.example" | jq {. |= . + {"allowed_origins": ["http://algorand.com"]}} > "$TEST_PRIMARY_NODE_DIR/kmd-v0.5/kmd_config.json" exec goal kmd start -t 180 -d $TEST_PRIMARY_NODE_DIR set KMD_NET_ADDRESS [::AlgorandGoal::GetKMDNetworkAddress $TEST_PRIMARY_NODE_DIR] ::AlgorandGoal::CheckNetworkAddressForCors $KMD_NET_ADDRESS diff --git a/test/e2e-go/cli/goal/expect/expect_linter.py b/test/e2e-go/cli/goal/expect/expect_linter.py new file mode 100644 index 0000000000..fdeb741148 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/expect_linter.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +import sys +import argparse + +found_issues = False + + +def check_expect_blocks(filename, verbose=False): + with open(filename, 'r') as f: + lines = f.readlines() + + in_expect_block = False + brace_count = 0 + block_start_line = None + current_block = [] + expect_blocks = [] + + # Process each line, considering possible strings or comments + for line_num, line in enumerate(lines, start=1): + stripped_line = line.strip() + + if not in_expect_block: + if "expect " in stripped_line and '{' in stripped_line: + in_expect_block = True + block_start_line = line_num + brace_count = stripped_line.count('{') - stripped_line.count('}') + current_block = [stripped_line] + elif stripped_line.startswith("#") or stripped_line.startswith("//"): + continue # Ignore comment lines outside of expect blocks + else: + current_block.append(stripped_line) + brace_count += stripped_line.count('{') - stripped_line.count('}') + + if brace_count == 0: + in_expect_block = False + expect_blocks.append((block_start_line, "\n".join(current_block))) + current_block = [] + + for block_start_line, block in expect_blocks: + if '#nolint:eof' in block: + if verbose: + print(f"{filename}:{block_start_line}: SKIP: 'nolint:eof' comment found, skipping") + continue + + if 'eof ' not in block: + # Check for only 
timeout condition + actions = block.count('}') + if block.count('timeout') == actions: + if verbose: + print(f"{filename}:{block_start_line}: OK: only timeout action present") + continue + + print(f"{filename}:{block_start_line}: Warning: missing 'eof' in expect block") + global found_issues + found_issues = True + elif verbose: + print(f"{filename}:{block_start_line}: OK: expect block contains 'eof'") + +def main(): + parser = argparse.ArgumentParser(description="Check for 'eof' in expect blocks of scripts.") + parser.add_argument('files', metavar='FILE', type=str, nargs='+', help='Files to check') + parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose output') + args = parser.parse_args() + + for fname in args.files: + check_expect_blocks(fname, args.verbose) + + if found_issues: + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/test/e2e-go/cli/goal/expect/goalCmdFlagsTest.exp b/test/e2e-go/cli/goal/expect/goalCmdFlagsTest.exp index d86cef47b4..2b833415b0 100644 --- a/test/e2e-go/cli/goal/expect/goalCmdFlagsTest.exp +++ b/test/e2e-go/cli/goal/expect/goalCmdFlagsTest.exp @@ -6,6 +6,7 @@ proc TestGoalCommandLineFlags { CMD EXPECTED_RE } { set PASSED 0 eval spawn $CMD expect { + #nolint:eof checking PASSED catches no match timeout { puts "goal asset create timed out"; exit 1 } -re $EXPECTED_RE {set PASSED 1; close } } diff --git a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp index 26a2b24e11..3227d7fdd9 100644 --- a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp +++ b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp @@ -8,6 +8,7 @@ proc TestGoalDryrun { DRREQ_FILE TEST_PRIMARY_NODE_DIR } { set PROGRAM_TYPE "" spawn goal clerk dryrun-remote -d $TEST_PRIMARY_NODE_DIR -D $DRREQ_FILE -v expect { + #nolint:eof checking PASSED catches no match timeout { ::AlgorandGoal::Abort "goal clerk dryrun-remote timeout" } "budget consumed:" {set COST 1; exp_continue} -re {(ApprovalProgram)} {set PROGRAM_TYPE $expect_out(1,string); exp_continue} diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index 4728f445df..1ee45ed033 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -234,12 +234,14 @@ proc ::AlgorandGoal::RestartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } { expect { timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node restart" } "^The node was successfully stopped.*Algorand node successfully started!*" {puts "Node restarted successfully"; close} + eof { close; ::AlgorandGoal::Abort "Did not receive appropriate message before node restart eof" } } } else { spawn goal node restart -d $TEST_ALGO_DIR expect { timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node restart" } "^This node is using systemd and should be managed with systemctl*" { puts "Goal showed correct error message for systemd" ; close} + eof { close; ::AlgorandGoal::Abort "Did not receive appropriate message before node restart eof" } } } } EXCEPTION] } { @@ -333,22 +335,27 @@ proc ::AlgorandGoal::CreateWallet { WALLET_NAME WALLET_PASSWORD TEST_PRIMARY_NOD expect { timeout {::AlgorandGoal::Abort "Timed out CreateWallet password" } + eof {::AlgorandGoal::Abort "EOF CreateWallet password" } "Please choose a password for wallet*" { send "$WALLET_PASSWORD\r" } } expect { timeout {::AlgorandGoal::Abort "Timed out CreateWallet confirmation" 
} + eof {::AlgorandGoal::Abort "EOF CreateWallet confirmation" } "Please confirm*" { send "$WALLET_PASSWORD\r"} } expect { timeout {::AlgorandGoal::Abort "Timed out CreateWallet see it now" } + eof {::AlgorandGoal::Abort "EOF CreateWallet see it now" } "Would you like to see it now? (Y/n):" { send "y\r" } } expect { timeout {::AlgorandGoal::Abort "Timed out CreateWallet keep info safe" } + eof {::AlgorandGoal::Abort "EOF CreateWallet keep info safe" } "Keep this information safe -- never share it with anyone!" {} } expect { timeout {::AlgorandGoal::Abort "Timed out CreateWallet pass phrase" } + eof {::AlgorandGoal::Abort "EOF CreateWallet pass phrase" } -re {([a-z ]+)} {set WALLET_PASS_PHRASE $expect_out(1,string); close;} } } EXCEPTION ] } { @@ -364,6 +371,7 @@ proc ::AlgorandGoal::VerifyWallet { WALLET_NAME TEST_PRIMARY_NODE_DIR } { spawn goal wallet list -d $TEST_PRIMARY_NODE_DIR expect { timeout { ::AlgorandGoal::Abort "Timed out seeing expected input for spawn goal wallet list" } + eof { ::AlgorandGoal::Abort "EOF seeing expected input for spawn goal wallet list" } "*$WALLET_NAME*" {close} } } EXCEPTION ] } { @@ -375,18 +383,18 @@ proc ::AlgorandGoal::RecoverWallet { NEW_WALLET_NAME WALLET_PASSPHRASE NEW_WALLE set timeout 60 if { [catch { spawn goal wallet new -r $NEW_WALLET_NAME -d $TEST_PRIMARY_NODE_DIR - expect { - timeout { puts "TIMEOUT" } - {Please type your recovery mnemonic below, and hit return when you are done:*} { send "$WALLET_PASSPHRASE\r" } - } - for { set index 1} {$index <= 5} {incr index} { - expect { - timeout { puts "TIMEOUT" } - {Please choose a password for wallet* } { send "$NEW_WALLET_PASSWORD\r"} - {Please confirm the password:*} { send "$NEW_WALLET_PASSWORD\r"} - {Creating wallet...*} {puts $expect_out(buffer) } - -re {Created wallet '([-a-zA-Z0-9_]+)'} {set RECOVERED_WALLET_NAME $expect_out(1,string) } - } + expect { + timeout { ::AlgorandGoal::Abort "TIMEOUT" } + eof { ::AlgorandGoal::Abort "EOF" } + {Please type your recovery mnemonic below, and hit return when you are done:*} { send "$WALLET_PASSPHRASE\r" } + } + expect { + timeout { ::AlgorandGoal::Abort "TIMEOUT" } + eof { ::AlgorandGoal::Abort "EOF" } + {Please choose a password for wallet* } { send "$NEW_WALLET_PASSWORD\r"; exp_continue;} + {Please confirm the password:*} { send "$NEW_WALLET_PASSWORD\r"; exp_continue;} + {Creating wallet...*} {puts $expect_out(buffer); exp_continue; } + -re {Created wallet '([-a-zA-Z0-9_]+)'} {set RECOVERED_WALLET_NAME $expect_out(1,string) } } puts "Recovered wallet: $RECOVERED_WALLET_NAME" } EXCEPTION ] } { @@ -403,6 +411,7 @@ proc ::AlgorandGoal::CreateAccountForWallet { WALLET_NAME WALLET_PASSWORD TEST_P while 1 { expect { timeout { break; ::AlgorandGoal::Abort "Timed out seeing new account created for wallet $WALLET_NAME" } + eof { break; ::AlgorandGoal::Abort "EOF seeing new account created for wallet $WALLET_NAME" } "Please enter the password for wallet*" { send "$WALLET_PASSWORD\r" } -re {Created new account with address ([a-zA-Z0-9]+)} {set ACCOUNT_ADDRESS $expect_out(1,string) ;close; break } } @@ -422,6 +431,7 @@ proc ::AlgorandGoal::VerifyAccount { WALLET_NAME WALLET_PASSWORD ACCOUNT_ADDRESS while 1 { expect { timeout {break; ::AlgorandGoal::Abort "Timed out seeing expected account: $ACCOUNT_ADDRESS"} + eof {break; ::AlgorandGoal::Abort "EOF seeing expected account: $ACCOUNT_ADDRESS"} "Please enter the password for wallet*" { send "$WALLET_PASSWORD\r" } -re {\t([A-Z0-9]+)\t([A-Z0-9]+)} {set RETURN_ACCOUNT_ADDRESS $expect_out(1,string); break } } @@ -487,6 
+497,7 @@ proc ::AlgorandGoal::GetAccountRewards { WALLET_NAME ACCOUNT_ADDRESS TEST_PRIMAR spawn goal account rewards -w $WALLET_NAME -a $ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR expect { timeout { ::AlgorandGoal::Abort "Timed out retrieving account rewards for wallet $WALLET_NAME and account $ACCOUNT_ADDRESS" } + eof { ::AlgorandGoal::Abort "EOF retrieving account rewards for wallet $WALLET_NAME and account $ACCOUNT_ADDRESS" } -re {\d+} {set ACCOUNT_EARNINGS $expect_out(0,string)} } puts "Wallet: $WALLET_NAME, Account: $ACCOUNT_ADDRESS, Rewards: $ACCOUNT_EARNINGS" @@ -565,7 +576,8 @@ proc ::AlgorandGoal::AssetCreate { CREATOR WALLET_NAME WALLET_PASSWORD TOTAL_SUP expect { timeout { ::AlgorandGoal::Abort "Timed out create asset" } "Please enter the password for wallet '$WALLET_NAME':" { send "$WALLET_PASSWORD\r"; exp_continue } - eof + -re {Created asset with asset index (\d+)} { puts "Asset created"; close } + eof { ::AlgorandGoal::Abort "EOF create asset" } } } EXCEPTION ] } { ::AlgorandGoal::Abort "ERROR in AssetCreate: $EXCEPTION" @@ -652,9 +664,17 @@ proc ::AlgorandGoal::SplitGroup { INPUT_GROUP OUTPUT_GROUP } { if { [ catch { spawn goal clerk split -i $INPUT_GROUP -o $OUTPUT_GROUP expect { + #nolint:eof just checking status timeout { ::AlgorandGoal::Abort "Timed out splitting group transaction" } eof } + lassign [wait] PID SPAWNID OS_CODE ERR_CODE + if {$OS_CODE == -1} { + ::AlgorandGoal::Abort "Split group failed: OS error code: $ERR_CODE" + } + if {$ERR_CODE != 0} { + ::AlgorandGoal::Abort "Split group failed with: exit code: $ERR_CODE" + } } EXCEPTION ] } { ::AlgorandGoal::Abort "ERROR in Split Group: $EXCEPTION" } @@ -667,6 +687,7 @@ proc ::AlgorandGoal::LimitOrder {TEAL_DRIVER SWAP_N SWAP_D MIN_TRD OWNER FEE TIM spawn python $TEAL_DRIVER "limit-order" --swapn $SWAP_N --swapd $SWAP_D --mintrd $MIN_TRD --own $OWNER --fee $FEE --timeout $TIME_OUT --asset $ASSET_ID expect { timeout { ::AlgorandGoal::Abort "Timed out limit order" } + eof { ::AlgorandGoal::Abort "EOF limit order" } -re {^.+$} { puts $limitf $expect_out(buffer); close $limitf; close } } } EXCEPTION ] } { @@ -724,9 +745,16 @@ proc ::AlgorandGoal::SignTransaction { WALLET_NAME WALLET_PASSWORD INPUT_TXN OUT if { [ catch { spawn goal clerk sign -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME -i $INPUT_TXN -o $OUTPUT_TXN expect { + #nolint:eof just signing with outputting to file and checking status timeout { ::AlgorandGoal::Abort "Timed out signing transaction" } - "Please enter the password for wallet '$WALLET_NAME':" { send "$WALLET_PASSWORD\r" ; exp_continue} - eof + "Please enter the password for wallet '$WALLET_NAME':" { send "$WALLET_PASSWORD\r" ; exp_continue;} + } + lassign [wait] PID SPAWNID OS_CODE ERR_CODE + if {$OS_CODE == -1} { + ::AlgorandGoal::Abort "SignTransaction failed: OS error code: $ERR_CODE" + } + if {$ERR_CODE != 0} { + ::AlgorandGoal::Abort "SignTransaction failed with: exit code: $ERR_CODE" } } EXCEPTION ] } { ::AlgorandGoal::Abort "ERROR in SignTransaction: $EXCEPTION" @@ -741,6 +769,7 @@ proc ::AlgorandGoal::RawSend { TXN_FILE TEST_PRIMARY_NODE_DIR } { spawn goal clerk rawsend -f $TXN_FILE -d $TEST_PRIMARY_NODE_DIR expect { timeout { close; ::AlgorandGoal::Abort "Timed out rawsend $TXN_FILE" } + eof { close; ::AlgorandGoal::Abort "EOF rawsend $TXN_FILE" } -re {Transaction ([A-Z0-9]{52}) committed} {set TRANSACTION_ID $expect_out(1,string); close } -re {Rejected transactions written to (.+rej)} {::AlgorandGoal::Abort "RawSend rejected."} } @@ -810,6 +839,9 @@ proc 
::AlgorandGoal::CheckNetworkAddressForCors { NET_ADDRESS } { expect { timeout { close; ::AlgorandGoal::Abort "Timeout failure in CheckNetworkAddressForCors" } "Access-Control-Allow-Origin" { puts "success" ; close } + eof { + return -code error "EOF without Access-Control-Allow-Origin in output" + } close } } EXCEPTION ] } { @@ -823,6 +855,7 @@ proc ::AlgorandGoal::GetLedgerSupply { TEST_PRIMARY_NODE_DIR } { spawn goal ledger supply -d $TEST_PRIMARY_NODE_DIR expect { timeout { ::AlgorandGoal::Abort "Get Ledger Supply timed out" } + eof { ::AlgorandGoal::Abort "Get Ledger Supply EOF" } -re {Round: (\d+)} {set ROUND $expect_out(1,string); exp_continue } -re {Total Money: (\d+)} {set TOTAL_MONEY $expect_out(1,string); exp_continue } -re {Online Money: (\d+)} {set ONLINE_MONEY $expect_out(1,string) } @@ -841,6 +874,7 @@ proc ::AlgorandGoal::CreateOneOfTwoMultisigForWallet { ADDRESS_1 ADDRESS_2 WALLE spawn goal account multisig new $ADDRESS_1 $ADDRESS_2 -T 1 -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME expect { timeout { ::AlgorandGoal::Abort "Timed out creating a multisig account from $ADDRESS_1 and $ADDRESS_2" } + eof { ::AlgorandGoal::Abort "EOF creating a multisig account from $ADDRESS_1 and $ADDRESS_2" } "Please enter the password for wallet*" { send "$WALLET_PASSWORD\r" } -re {Created new account with address ([a-zA-Z0-9]+)} { set MULTISIG_ADDRESS $expect_out(1,string); @@ -859,6 +893,7 @@ proc ::AlgorandGoal::VerifyMultisigInfoForOneOfTwoMultisig { MULTISIG_ADDRESS AD spawn goal account multisig info --address $MULTISIG_ADDRESS -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME expect { timeout { ::AlgorandGoal::Abort "Timed out querying info about multisig account $MULTISIG_ADDRESS" } + eof { ::AlgorandGoal::Abort "EOF querying info about multisig account $MULTISIG_ADDRESS" } -re {Version: (\d+)\s+Threshold: (\d+)\s+Public keys:\s+([a-zA-Z0-9]+)\s+([a-zA-Z0-9]+)\s+} { set VERSION $expect_out(1,string); set THRESHOLD $expect_out(2,string); @@ -879,7 +914,14 @@ proc ::AlgorandGoal::VerifyMultisigInfoForOneOfTwoMultisig { MULTISIG_ADDRESS AD proc ::AlgorandGoal::DeleteMultisigAccount { MULTISIG_ADDRESS TEST_PRIMARY_NODE_DIR } { if { [ catch { spawn goal account multisig delete --address $MULTISIG_ADDRESS -d $TEST_PRIMARY_NODE_DIR - expect {*} + expect eof + lassign [wait] PID SPAWNID OS_CODE ERR_CODE + if {$OS_CODE == -1} { + ::AlgorandGoal::Abort "DeleteMultisigAccount failed: OS error code: $ERR_CODE" + } + if {$ERR_CODE != 0} { + ::AlgorandGoal::Abort "DeleteMultisigAccount failed with: exit code: $ERR_CODE" + } } EXCEPTION ] } { ::AlgorandGoal::Abort "ERROR in DeleteMultisigAccount: $EXCEPTION" } @@ -1046,6 +1088,7 @@ proc ::AlgorandGoal::Report { TEST_PRIMARY_NODE_DIR } { spawn goal report -d $TEST_PRIMARY_NODE_DIR expect { timeout { ::AlgorandGoal::Abort "goal report timed out" } + eof { ::AlgorandGoal::Abort "goal report EOF" } "source code available at https://github.com/algorand/go-algorand" {puts "goal -v ok"} -re {Genesis ID from genesis.json: *} {puts "genesis ID from genesis.json ok"} -re {Last committed block: (\d+)} {puts "status check ok"} @@ -1318,8 +1361,13 @@ proc ::AlgorandGoal::CheckEOF { { ERROR_STRING "" } } { proc ::AlgorandGoal::InspectTransactionFile { TRX_FILE } { puts "\n Inspect $TRX_FILE" spawn goal clerk inspect $TRX_FILE - expect { - eof + expect eof + lassign [wait] PID SPAWNID OS_CODE ERR_CODE + if {$OS_CODE == -1} { + ::AlgorandGoal::Abort "InspectTransactionFile failed: OS error code: $ERR_CODE" + } + if {$ERR_CODE != 0} { + ::AlgorandGoal::Abort 
"InspectTransactionFile failed with: exit code: $ERR_CODE" } } @@ -1348,4 +1396,3 @@ proc ::AlgorandGoal::RunPingpong {DURATION PINGPONG_OPTIONS TEST_PRIMARY_NODE_DI ::AlgorandGoal::Abort "ERROR in RunPingpong: $EXCEPTION" } } - diff --git a/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp b/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp index 37acb80ce3..8909693fed 100644 --- a/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp +++ b/test/e2e-go/cli/goal/expect/goalTxValidityTest.exp @@ -32,6 +32,7 @@ proc TestLastValidInTx { CMD TX_FILE EXPECTED_LAST_VALID } { spawn goal clerk inspect $TX_FILE expect { timeout { ::AlgorandGoal::Abort "'goal clerk inspect' timed out" } + eof { ::AlgorandGoal::Abort "'goal clerk inspect' eof" } -re {"lv": (\d+)} {set PASSED 1; set LAST_VALID $expect_out(1,string); close } } diff --git a/test/e2e-go/cli/goal/expect/limitOrderTest.exp b/test/e2e-go/cli/goal/expect/limitOrderTest.exp index 26d899f69d..a69a4275d6 100644 --- a/test/e2e-go/cli/goal/expect/limitOrderTest.exp +++ b/test/e2e-go/cli/goal/expect/limitOrderTest.exp @@ -91,10 +91,7 @@ if { [catch { set UNIT_NAME "duckcoin" ::AlgorandGoal::AssetCreate $ACCOUNT_1_ADDRESS $WALLET_1_NAME $WALLET_1_PASSWORD $TOTAL_SUPPLY 0 "" $UNIT_NAME $TEST_PRIMARY_NODE_DIR - # wait about 4 rounds - set ASSET_CREATE_WAIT 20 - puts "Wait $ASSET_CREATE_WAIT for asset creation" - exec sleep $ASSET_CREATE_WAIT + # no extra waiting here since AssetCreate waits for confirmation # get asset id set ASSET_ID [::AlgorandGoal::AssetLookup $ACCOUNT_1_ADDRESS $UNIT_NAME $TEST_PRIMARY_NODE_DIR] @@ -124,7 +121,8 @@ if { [catch { puts "Generated Teal Source:" spawn cat $TEAL_SOURCE expect { - -re {^.+$} { close } + #nolint:eof not asserting expected output + -re {^.+$} { close } } # compile teal assembly to bytecode @@ -146,10 +144,10 @@ if { [catch { # the second payment sends money (the asset) from the Bob to the Alice set ZERO_ADDRESS "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ" set LIMIT_TXN_1 "$TEST_ROOT_DIR/limit1.tx" - ::AlgorandGoal::TealTxnCreate $TEAL_SOURCE $ACCOUNT_1_ADDRESS $ZERO_ADDRESS 20000 $TEST_PRIMARY_NODE_DIR $LIMIT_TXN_1 + ::AlgorandGoal::TealTxnCreate $TEAL_SOURCE $ACCOUNT_1_ADDRESS $ZERO_ADDRESS 20000 $TEST_PRIMARY_NODE_DIR $LIMIT_TXN_1 set LIMIT_TXN_2 "$TEST_ROOT_DIR/limit2.tx" - ::AlgorandGoal::CreateAssetTransfer $ACCOUNT_1_ADDRESS $ACCOUNT_2_ADDRESS $ASSET_ID 30000 $TEST_PRIMARY_NODE_DIR $LIMIT_TXN_2 + ::AlgorandGoal::CreateAssetTransfer $ACCOUNT_1_ADDRESS $ACCOUNT_2_ADDRESS $ASSET_ID 30000 $TEST_PRIMARY_NODE_DIR $LIMIT_TXN_2 set LIMIT_CMB "$TEST_ROOT_DIR/limitcmb.tx" exec cat $LIMIT_TXN_1 $LIMIT_TXN_2 > $LIMIT_CMB diff --git a/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp b/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp index a32e2b4011..b4c6e817e1 100644 --- a/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp +++ b/test/e2e-go/cli/goal/expect/tealAndStatefulTealTest.exp @@ -141,9 +141,7 @@ if { [catch { ::AlgorandGoal::SplitGroup groupedtransactions.tx split.tx puts "sign the split transaction" - set RAW_TX_1 split-0.tx - set RAW_STX_1 signout-0.tx - ::AlgorandGoal::SignTransaction $WALLET_1_NAME $WALLET_1_PASSWORD $RAW_TX_1 $RAW_STX_1 $TEST_PRIMARY_NODE_DIR + ::AlgorandGoal::SignTransaction $WALLET_1_NAME $WALLET_1_PASSWORD split-0.tx signout-0.tx $TEST_PRIMARY_NODE_DIR puts "\ncombine into the sign out transaction" exec cat signout-0.tx split-1.tx > signout.tx diff --git a/test/e2e-go/cli/goal/expect/tealConsensusTest.exp 
b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp index cd2b81cfd6..a4231acd96 100644 --- a/test/e2e-go/cli/goal/expect/tealConsensusTest.exp +++ b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp @@ -50,6 +50,7 @@ if { [catch { spawn goal clerk compile "$TEST_ROOT_DIR/small-sig.teal" expect { -re {[A-Z2-9]{58}} { set SMALL_SIG $expect_out(0,string) } + eof { ::AlgorandGoal::Abort $expect_out(buffer) } "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } } @@ -58,6 +59,7 @@ if { [catch { expect { -re {[A-Z2-9]{58}} { ::AlgorandGoal::Abort "hash" } -re {.*logicsig program size too large} { puts "bigsigcheck: pass" } + eof { ::AlgorandGoal::Abort $expect_out(buffer) } "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } } @@ -65,6 +67,7 @@ if { [catch { spawn goal clerk compile "$TEST_ROOT_DIR/barely-fits-app.teal" expect { -re {[A-Z2-9]{58}} { puts "hash $expect_out(0,string)" } + eof { ::AlgorandGoal::Abort $expect_out(buffer) } "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } } @@ -74,6 +77,7 @@ if { [catch { expect { -re {[A-Z2-9]{58}} { ::AlgorandGoal::Abort "hash" } -re {.*app program size too large} { puts "bigappcheck: pass" } + eof { ::AlgorandGoal::Abort $expect_out(buffer) } "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } } @@ -84,9 +88,10 @@ if { [catch { " - pass -" { puts "small-sig dryrun pass" } "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) } "static cost budget" { ::AlgorandGoal::Abort $expect_out(buffer) } + eof { ::AlgorandGoal::Abort $expect_out(buffer) } } - teal "$TEST_ROOT_DIR/slow-sig.teal" 2 1 20001 + teal "$TEST_ROOT_DIR/slow-sig.teal" 4 1 20001 exec goal clerk compile "$TEST_ROOT_DIR/slow-sig.teal" exec goal clerk send -F "$TEST_ROOT_DIR/slow-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/slow-sig.tx spawn goal clerk dryrun -P future -t $TEST_ROOT_DIR/slow-sig.tx # Should succeed Check, fail Eval @@ -94,6 +99,7 @@ if { [catch { "dynamic cost budget" { puts "slow-sig dryrun pass" } " - pass -" { ::AlgorandGoal::Abort $expect_out(buffer) } "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) } + eof { ::AlgorandGoal::Abort $expect_out(buffer) } } # Shutdown the network diff --git a/test/e2e-go/cli/goal/expect/testInfraTest.exp b/test/e2e-go/cli/goal/expect/testInfraTest.exp index b5785e2807..e4172a89f6 100644 --- a/test/e2e-go/cli/goal/expect/testInfraTest.exp +++ b/test/e2e-go/cli/goal/expect/testInfraTest.exp @@ -58,6 +58,7 @@ proc checkProcessReturnedCodeTest {} { 44 { close } + eof { puts "expected output not 44"; exit 1 } } lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP if {$response != 0} { @@ -72,6 +73,7 @@ proc checkProcessReturnedCodeTest {} { 44 { puts "not closing" } + eof { puts "expected output not 44"; exit 1 } } lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP if {$KILLED != "CHILDKILLED" || $KILL_SIGNAL != "SIGSEGV" || $EXP != "segmentation violation"} { From 37a9a18838a6521f86ec3578d52a1e694f846f3c Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Mon, 9 Sep 2024 12:05:30 -0400 Subject: [PATCH 73/82] AVM: Derive looser, but more principled, checks of txn max size (#6114) Co-authored-by: Jason Paulos --- node/node_test.go | 43 +++++++++++++++++++++++++++++++++++++++++-- protocol/tags.go | 21 +++++++++++++++++---- protocol/tags_test.go | 4 ++++ 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/node/node_test.go b/node/node_test.go index 
6b991751cb..19463177df 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -33,9 +33,11 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" + csp "github.com/algorand/go-algorand/crypto/stateproof" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/stateproofmsg" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" @@ -805,11 +807,48 @@ func TestMaxSizesCorrect(t *testing.T) { require.Equal(t, ppSize, protocol.ProposalPayloadTag.MaxMessageSize()) spSize := uint64(stateproof.SigFromAddrMaxSize()) require.Equal(t, spSize, protocol.StateProofSigTag.MaxMessageSize()) - txSize := uint64(transactions.SignedTxnMaxSize()) - require.Equal(t, txSize, protocol.TxnTag.MaxMessageSize()) msSize := uint64(crypto.DigestMaxSize()) require.Equal(t, msSize, protocol.MsgDigestSkipTag.MaxMessageSize()) + // We want to check that the TxnTag's max size is big enough, but it is + // foolish to try to be exact here. We will confirm that it is bigger that + // a stateproof txn (the biggest kind, which can only appear by itself), and + // that it is bigger than 16 times the largest transaction other than + // stateproof txn. + txTagMax := protocol.TxnTag.MaxMessageSize() + + // SignedTxnMaxSize() is an overestimate of a single transaction because it + // includes fields from all the different types of signatures, and types of + // transactions. First, we remove the aspects of the overestimate that come + // from the multiple signature types. + maxCombinedTxnSize := uint64(transactions.SignedTxnMaxSize()) + // subtract out the two smaller signature sizes (logicsig is biggest, it can *contain* the others) + maxCombinedTxnSize -= uint64(crypto.SignatureMaxSize() + crypto.MultisigSigMaxSize()) + // the logicsig size is *also* an overestimate, because it thinks each + // logicsig arg can be big, but really the sum of the args and the program + // has a max size. + maxCombinedTxnSize -= uint64(transactions.EvalMaxArgs * config.MaxLogicSigMaxSize) + + // maxCombinedTxnSize is still an overestimate because it assumes all txn + // type fields can be in the same txn. That's not true, but it provides an + // upper bound on the size of ONE transaction, even if the txn is a + // stateproof, which is big. Ensure our constant is big enough to hold one. + require.Greater(t, txTagMax, maxCombinedTxnSize) + + // we actually have to hold 16 txns, but in the case of multiple txns in a + // group, none can be stateproofs. So derive maxMinusSP, which is a per txn + // size estimate that excludes stateproof fields. + spTxnSize := uint64(csp.StateProofMaxSize() + stateproofmsg.MessageMaxSize()) + maxMinusSP := maxCombinedTxnSize - spTxnSize + require.Greater(t, txTagMax, 16*maxMinusSP) + // when we do logisig pooling, 16*maxMinusSP may be a large overshoot, since + // it will assume we can have a big logicsig in _each_ of the 16. It + // probably won't matter, since stateproof will still swamp it. But if so, + // remove 15 * MaxLogicSigMaxSize. + + // but we're not crazy. 
whichever of those is bigger - we don't need to be twice as big as that + require.Less(t, txTagMax, 2*max(maxCombinedTxnSize, 16*maxMinusSP)) + // UE is a handrolled message not using msgp // including here for completeness ensured by protocol.TestMaxSizesTested ueSize := uint64(67) diff --git a/protocol/tags.go b/protocol/tags.go index 6cfcacd714..cdae9c6cdc 100644 --- a/protocol/tags.go +++ b/protocol/tags.go @@ -84,10 +84,23 @@ const StateProofSigTagMaxSize = 6378 // Matches current network.MaxMessageLength const TopicMsgRespTagMaxSize = 6 * 1024 * 1024 -// TxnTagMaxSize is the maximum size of a TxnTag message. This is equal to SignedTxnMaxSize() -// which is size of just a single message containing maximum Stateproof. Since Stateproof -// transactions can't be batched we don't need to multiply by MaxTxnBatchSize. -const TxnTagMaxSize = 4620031 +// TxnTagMaxSize is the maximum size of a TxnTag message. The TxnTag is used to +// send entire transaction groups. So, naively, we might set it to the maximum +// group size times the maximum transaction size (plus a little bit for msgpack +// encoding). But there are several reasons not to do that. First, the +// function we have for estimating max transaction size +// (transactions.SignedTxnMaxSize())) wildly overestimates the maximum +// transaction size because it is generated code that assumes _every_ +// transaction field can be set, but each transaction type has mutually +// exclusive fields. Second, the stateproof transaction is the biggest +// transaction by far, but it can only appear as a singleton, so it would not +// make sense to multiply it by 16. Finally, we're going to pool logicsig code +// size, so while it's true that one transaction in a group could have a 16k +// logicsig, that would only be true if the other transactions had 0 bytes of +// logicsig. So we will use a bound that is a bit bigger that a txn group can +// be, but avoid trying to be precise. See TestMaxSizesCorrect for the detailed +// reasoning. 
+const TxnTagMaxSize = 5_000_000 // UniEnsBlockReqTagMaxSize is the maximum size of a UniEnsBlockReqTag message const UniEnsBlockReqTagMaxSize = 67 diff --git a/protocol/tags_test.go b/protocol/tags_test.go index 137bf4e3f7..69c3146cf9 100644 --- a/protocol/tags_test.go +++ b/protocol/tags_test.go @@ -169,6 +169,10 @@ func TestMaxSizesTested(t *testing.T) { } for _, tag := range constTags { + if tag == "TxnTag" { + // TxnTag is tested in a looser way in TestMaxSizesCorrect + continue + } require.Truef(t, tagsFound[tag], "Tag %s does not have a corresponding test in TestMaxSizesCorrect", tag) } } From 1d08955152091af7fe9e02250b0bc90b929dcda2 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Tue, 10 Sep 2024 12:07:12 -0400 Subject: [PATCH 74/82] p2p: make sure p2p http server runs on all interfaces (#6123) --- network/p2p/http.go | 10 ++++++++++ network/p2p/p2p.go | 3 ++- network/p2pNetwork.go | 1 - network/p2pNetwork_test.go | 38 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 50 insertions(+), 2 deletions(-) diff --git a/network/p2p/http.go b/network/p2p/http.go index 633a13713d..f11b5375ab 100644 --- a/network/p2p/http.go +++ b/network/p2p/http.go @@ -28,6 +28,7 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" libp2phttp "github.com/libp2p/go-libp2p/p2p/http" + "github.com/multiformats/go-multiaddr" ) // algorandP2pHTTPProtocol defines a libp2p protocol name for algorand's http over p2p messages @@ -46,6 +47,15 @@ func MakeHTTPServer(streamHost host.Host) *HTTPServer { Host: libp2phttp.Host{StreamHost: streamHost}, p2phttpMux: mux.NewRouter(), } + // libp2phttp server requires either explicit ListenAddrs or streamHost.Addrs() to be non-empty. 
+ // If streamHost.Addrs() is empty, we will listen on all interfaces + if len(streamHost.Addrs()) == 0 { + logging.Base().Debugf("MakeHTTPServer: no addresses for %s, asking to listen all interfaces", streamHost.ID()) + httpServer.ListenAddrs = []multiaddr.Multiaddr{ + multiaddr.StringCast("/ip4/0.0.0.0/tcp/0/http"), + } + httpServer.InsecureAllowHTTP = true + } return &httpServer } diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 4ddda54157..f281d1d13b 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -39,6 +39,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" "github.com/libp2p/go-libp2p/p2p/muxer/yamux" "github.com/libp2p/go-libp2p/p2p/security/noise" @@ -277,7 +278,7 @@ func (s *serviceImpl) dialNode(ctx context.Context, peer *peer.AddrInfo) error { func (s *serviceImpl) AddrInfo() peer.AddrInfo { return peer.AddrInfo{ ID: s.host.ID(), - Addrs: s.host.Addrs(), + Addrs: s.host.(*basichost.BasicHost).AllAddrs(), // fetch all addresses, including private ones } } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index f88660b653..aa991d0429 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -513,7 +513,6 @@ func (n *P2PNetwork) Address() (string, bool) { for _, addr := range addrs { if !manet.IsIPLoopback(addr) && !manet.IsIPUnspecified(addr) { return addr.String(), true - } } // We don't have a non loopback address, so just return the first one if it contains an ip4 address or port diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index dcc641c350..d18196c4c4 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -789,6 +789,44 @@ func TestP2PHTTPHandler(t *testing.T) { require.ErrorIs(t, err, limitcaller.ErrConnectionQueueingTimeout) } +// TestP2PHTTPHandlerAllInterfaces makes sure HTTP server runs even if NetAddress is set to a non-routable address +func TestP2PHTTPHandlerAllInterfaces(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + cfg.EnableDHTProviders = false + cfg.GossipFanout = 1 + cfg.NetAddress = ":0" + log := logging.TestingLog(t) + + netA, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + require.NoError(t, err) + + h := &p2phttpHandler{t, "hello", nil} + netA.RegisterHTTPHandler("/test", h) + + netA.Start() + defer netA.Stop() + + peerInfoA := netA.service.AddrInfo() + addrsB, err := peer.AddrInfoToP2pAddrs(&peerInfoA) + require.NoError(t, err) + require.NotZero(t, addrsB[0]) + + t.Logf("peerInfoB: %s", peerInfoA) + httpClient, err := p2p.MakeHTTPClient(&peerInfoA) + require.NoError(t, err) + resp, err := httpClient.Get("/test") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, "hello", string(body)) + +} + // TestP2PRelay checks p2p nodes can properly relay messages: // netA and netB are started with ForceFetchTransactions so it subscribes to the txn topic, // both of them are connected and do not relay messages. 
From 3b3b5ce00dd3434cb2c692d102ff43eb11be8155 Mon Sep 17 00:00:00 2001
From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>
Date: Wed, 11 Sep 2024 10:48:34 -0400
Subject: [PATCH 75/82] scripts: allow metrics filtering by labels in metrics_viz.py (#6125)

---
 test/heapwatch/heapWatch.py    | 56 ++++++++++++++++++++++++++++++----
 test/heapwatch/metrics_aggs.py | 13 ++------
 test/heapwatch/metrics_lib.py  | 30 +++++++++++++++---
 test/heapwatch/metrics_viz.py  | 13 ++++++--
 4 files changed, 88 insertions(+), 24 deletions(-)

diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index e62cc7ab80..f242ecd06b 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -122,6 +122,9 @@ def __init__(self, path, net=None, token=None, admin_token=None):
         self._algod = None
         self.timeout = 15
 
+    def __repr__(self):
+        return '<algodDir {}>'.format(self.path)
+
     def pid(self):
         if self._pid is None:
             if not self.isdir:
@@ -159,6 +162,32 @@ def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None, timeout=None
         logger.debug('%s -> %s', self.nick, outpath)
         return outpath
 
+    def get_debug_settings_pprof(self):
+        timeout = self.timeout
+        url = 'http://' + self.net + '/debug/settings/pprof'
+        headers = self.headers.copy()
+        headers['X-Algo-API-Token'] = self.admin_token
+        try:
+            response = urllib.request.urlopen(urllib.request.Request(url, headers=headers), timeout=timeout)
+        except Exception as e:
+            logger.error('could not fetch %s from %s via %r (%s)', '/debug/settings/pprof', self.path, url, e)
+            return
+        blob = response.read()
+        return json.loads(blob)
+
+    def set_debug_settings_pprof(self, settings):
+        timeout = self.timeout
+        url = 'http://' + self.net + '/debug/settings/pprof'
+        headers = self.headers.copy()
+        headers['X-Algo-API-Token'] = self.admin_token
+        data = json.dumps(settings).encode()
+        try:
+            response = urllib.request.urlopen(urllib.request.Request(url, data=data, headers=headers, method='PUT'), timeout=timeout)
+        except Exception as e:
+            logger.error('could not put %s to %s via %r (%s)', settings, self.path, url, e)
+            return
+        response.close()
+
     def get_heap_snapshot(self, snapshot_name=None, outdir=None):
         return self.get_pprof_snapshot('heap', snapshot_name, outdir)
 
@@ -355,6 +384,27 @@ def do_snap(self, now, get_cpu=False, fraction=False):
                 rss, vsz = rssvsz
                 with open(os.path.join(self.args.out, nick + '.heap.csv'), 'at') as fout:
                     fout.write('{},{},{},{}\n'.format(snapshot_name,snapshot_isotime,rss, vsz))
+        if self.args.mutex or self.args.block:
+            # get mutex/blocking profiles state and enable as needed
+            for ad in self.they:
+                settings = ad.get_debug_settings_pprof()
+                if not settings:
+                    # failed to get settings, probably disabled
+                    continue
+                updated = False
+                if self.args.mutex:
+                    mrate = settings.get('mutex-rate', 0)
+                    if mrate == 0:
+                        settings['mutex-rate'] = 5 # 1/5 of events recorded
+                        updated = True
+                if self.args.block:
+                    brate = settings.get('block-rate', 0)
+                    if brate == 0:
+                        settings['block-rate'] = 100 # one blocking event per 100 nanoseconds spent blocked.
+ updated = True + if updated: + logger.debug('enabling mutex/blocking profiles on %s', ad.path) + ad.set_debug_settings_pprof(settings) if self.args.goroutine: for ad in self.they: ad.get_goroutine_snapshot(snapshot_name, outdir=self.args.out) @@ -466,12 +516,6 @@ def main(): else: logging.basicConfig(level=logging.INFO) - if args.block: - print('Ensure algod is compiled with `runtime.SetBlockProfileRate()` set') - - if args.mutex: - print('Ensure algod is compiled with `runtime.SetMutexProfileFraction()` set') - for nre in args.tf_name_re: try: # do re.compile just to check diff --git a/test/heapwatch/metrics_aggs.py b/test/heapwatch/metrics_aggs.py index d20593c097..33379766a6 100644 --- a/test/heapwatch/metrics_aggs.py +++ b/test/heapwatch/metrics_aggs.py @@ -33,7 +33,7 @@ from plotly.subplots import make_subplots -from metrics_lib import Metric, MetricType, parse_metrics, gather_metrics_files_by_nick +from metrics_lib import Metric, MetricType, parse_metrics, gather_metrics_files_by_nick, parse_tags logger = logging.getLogger(__name__) @@ -62,14 +62,7 @@ def main(): else: logging.basicConfig(level=logging.INFO) - tags = {} - if args.tags: - for tag in args.tags: - if '=' not in tag: - raise (f'Invalid tag: {tag}') - k, v = tag.split('=', 1) - tags[k] = v - tag_keys = set(tags.keys()) + tags, tag_keys = parse_tags(args.tags) metrics_files = sorted(glob.glob(os.path.join(args.dir, '*.metrics'))) metrics_files.extend(glob.glob(os.path.join(args.dir, 'terraform-inventory.host'))) @@ -119,7 +112,7 @@ def main(): for metric in metrics_seq: if metric.type != MetricType.COUNTER: raise RuntimeError('Only COUNT metrics are supported') - if tags is None or tags is not None and metric.has_tags(tag_keys, tags): + if tags is None or tags is not None and metric.has_tags(tags, tag_keys): raw_value += metric.value full_name = metric.string(set(tag_keys).union({'n'})) diff --git a/test/heapwatch/metrics_lib.py b/test/heapwatch/metrics_lib.py index 5fc7b36075..1ac0d85bd7 100644 --- a/test/heapwatch/metrics_lib.py +++ b/test/heapwatch/metrics_lib.py @@ -26,7 +26,7 @@ import os import re import sys -from typing import Dict, Iterable, List, Optional, Tuple, Union +from typing import Dict, Iterable, List, Optional, Set, Tuple, Union from urllib.parse import urlparse @@ -210,14 +210,15 @@ def add_tag(self, key: str, value: str): self.tags[key] = value self.tag_keys.add(key) - def has_tags(self, tag_keys: set, tags: Dict[str, str]): + def has_tags(self, tags: Dict[str, Tuple[str, ...]], tag_keys: Set[str] | None) -> bool: """return True if all tags are present in the metric tags tag_keys are not strictly needed but used as an optimization """ - if self.tag_keys.intersection(tag_keys) != tag_keys: + if tag_keys is not None and self.tag_keys.intersection(tag_keys) != tag_keys: return False - for k, v in tags.items(): - if self.tags.get(k) != v: + for k, vals in tags.items(): + v = self.tags.get(k) + if v not in vals: return False return True @@ -270,3 +271,22 @@ def parse_metrics( out = [{name: metric}] return out + +def parse_tags(tag_pairs: List[str]) -> Tuple[Dict[str, Tuple[str, ...]], Set[str]]: + tags = {} + keys = set() + if not tag_pairs: + return tags, keys + + for tag in tag_pairs: + if '=' not in tag: + raise ValueError(f'Invalid tag: {tag}') + k, v = tag.split('=', 1) + val = tags.get(k) + if val is None: + tags[k] = (v,) + else: + tags[k] = val + (v,) + keys.add(k) + + return tags, keys \ No newline at end of file diff --git a/test/heapwatch/metrics_viz.py b/test/heapwatch/metrics_viz.py index 
741aa2dd73..840d109ce8 100644 --- a/test/heapwatch/metrics_viz.py +++ b/test/heapwatch/metrics_viz.py @@ -23,7 +23,7 @@ import plotly.graph_objs as go from plotly.subplots import make_subplots -from metrics_lib import MetricType, parse_metrics, gather_metrics_files_by_nick +from metrics_lib import MetricType, parse_metrics, gather_metrics_files_by_nick, parse_tags logger = logging.getLogger(__name__) @@ -42,6 +42,7 @@ def main(): ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated') ap.add_argument('-s', '--save', type=str, choices=['png', 'html'], help=f'save plot to \'{default_img_filename}\' or \'{default_html_filename}\' file instead of showing it') ap.add_argument('--diff', action='store_true', default=None, help='diff two gauge metrics instead of plotting their values. Requires two metrics names to be set') + ap.add_argument('-t', '--tags', action='append', default=[], help='tag/label pairs in a=b format to aggregate by, may be repeated. Empty means aggregation by metric name') ap.add_argument('--verbose', default=False, action='store_true') args = ap.parse_args() @@ -54,6 +55,8 @@ def main(): logging.error('need at least one dir set with -d/--dir') return 1 + tags, tag_keys = parse_tags(args.tags) + metrics_files = sorted(glob.glob(os.path.join(args.dir, '*.metrics'))) metrics_files.extend(glob.glob(os.path.join(args.dir, 'terraform-inventory.host'))) filesByNick = gather_metrics_files_by_nick(metrics_files, args.nick_re, args.nick_lre) @@ -100,6 +103,9 @@ def main(): for metric in metrics_seq: raw_value = metric.value + if tags and not metric.has_tags(tags, tag_keys): + continue + full_name = metric.string() if full_name not in data: # handle gaps in data, sometimes metric file might miss a value @@ -122,8 +128,9 @@ def main(): active_metric_names.append(full_name) - active_metric_names.sort() - active_metrics[metric_name] = active_metric_names + if active_metric_names: + active_metric_names.sort() + active_metrics[metric_name] = active_metric_names idx += 1 for i, metric_pair in enumerate(sorted(active_metrics.items())): From 90353e5aa35317da83d679c3576bb64545fe4c50 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 13 Sep 2024 13:27:22 -0400 Subject: [PATCH 76/82] config: ensure both ws and p2p net running the same mode in hybrid (#6130) --- config/config_test.go | 36 +++++++++++++++++++++++++++++++++++ config/localTemplate.go | 11 +++++++++++ network/hybridNetwork.go | 4 ++++ network/hybridNetwork_test.go | 18 ++++++++++++++++++ 4 files changed, 69 insertions(+) diff --git a/config/config_test.go b/config/config_test.go index 1b1c4c2753..936622b06c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -751,6 +751,42 @@ func TestLocal_RecalculateConnectionLimits(t *testing.T) { } } +func TestLocal_ValidateP2PHybridConfig(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + var tests = []struct { + enableP2PHybridMode bool + p2pHybridNetAddress string + netAddress string + err bool + }{ + {false, "", "", false}, + {false, ":0", "", false}, + {false, "", ":0", false}, + {false, ":0", ":0", false}, + {true, "", "", false}, + {true, ":0", "", true}, + {true, "", ":0", true}, + {true, ":0", ":0", false}, + } + + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("test=%d", i), func(t *testing.T) { + t.Parallel() + + c := Local{ + EnableP2PHybridMode: test.enableP2PHybridMode, + P2PHybridNetAddress: test.p2pHybridNetAddress, + 
NetAddress: test.netAddress, + } + err := c.ValidateP2PHybridConfig() + require.Equal(t, test.err, err != nil, "test=%d", i) + }) + } +} + // Tests that ensureAbsGenesisDir resolves a path to an absolute path, appends the genesis directory, and creates any needed directories func TestEnsureAbsDir(t *testing.T) { partitiontest.PartitionTest(t) diff --git a/config/localTemplate.go b/config/localTemplate.go index c03461c371..ca707be16f 100644 --- a/config/localTemplate.go +++ b/config/localTemplate.go @@ -17,6 +17,7 @@ package config import ( + "errors" "fmt" "os" "path/filepath" @@ -764,6 +765,16 @@ func (cfg Local) IsHybridServer() bool { return cfg.NetAddress != "" && cfg.P2PHybridNetAddress != "" && cfg.EnableP2PHybridMode } +// ValidateP2PHybridConfig checks if both NetAddress and P2PHybridNetAddress are set or unset in hybrid mode. +func (cfg Local) ValidateP2PHybridConfig() error { + if cfg.EnableP2PHybridMode { + if cfg.NetAddress == "" && cfg.P2PHybridNetAddress != "" || cfg.NetAddress != "" && cfg.P2PHybridNetAddress == "" { + return errors.New("both NetAddress and P2PHybridNetAddress must be set or unset") + } + } + return nil +} + // ensureAbsGenesisDir will convert a path to absolute, and will attempt to make a genesis directory there func ensureAbsGenesisDir(path string, genesisID string) (string, error) { pathAbs, err := filepath.Abs(path) diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index 85621260a9..e64b708a2f 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -38,7 +38,11 @@ type HybridP2PNetwork struct { } // NewHybridP2PNetwork constructs a GossipNode that combines P2PNetwork and WebsocketNetwork +// Hybrid mode requires both P2P and WS to be running in server (NetAddress set) or client (NetAddress empty) mode. 
func NewHybridP2PNetwork(log logging.Logger, cfg config.Local, datadir string, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID, nodeInfo NodeInfo) (*HybridP2PNetwork, error) { + if err := cfg.ValidateP2PHybridConfig(); err != nil { + return nil, err + } // supply alternate NetAddress for P2P network p2pcfg := cfg p2pcfg.NetAddress = cfg.P2PHybridNetAddress diff --git a/network/hybridNetwork_test.go b/network/hybridNetwork_test.go index 4e1392a2d0..3fac0cefd0 100644 --- a/network/hybridNetwork_test.go +++ b/network/hybridNetwork_test.go @@ -181,3 +181,21 @@ func TestHybridNetwork_DuplicateConn(t *testing.T) { return len(netA.GetPeers(PeersConnectedIn)) == 2 }, 3*time.Second, 50*time.Millisecond) } + +func TestHybridNetwork_ValidateConfig(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + cfg := config.GetDefaultLocal() + cfg.EnableP2PHybridMode = true + cfg.NetAddress = ":0" + cfg.P2PHybridNetAddress = "" + + _, err := NewHybridP2PNetwork(logging.TestingLog(t), cfg, "", nil, genesisID, "net", &nopeNodeInfo{}) + require.ErrorContains(t, err, "both NetAddress and P2PHybridNetAddress") + + cfg.NetAddress = "" + cfg.P2PHybridNetAddress = ":0" + _, err = NewHybridP2PNetwork(logging.TestingLog(t), cfg, "", nil, genesisID, "net", &nopeNodeInfo{}) + require.ErrorContains(t, err, "both NetAddress and P2PHybridNetAddress") +} From 2b6e018938d362609ab6d98390a50801f3c27fde Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:14:35 -0400 Subject: [PATCH 77/82] node: clear new p2p net handlers on fast catchup (#6127) --- data/txHandler.go | 10 +------- network/gossipNode.go | 4 ++-- network/hybridNetwork.go | 8 +++---- network/p2p/http.go | 9 +++++--- network/p2pNetwork.go | 4 ++-- network/p2pNetwork_test.go | 2 +- network/wsNetwork.go | 4 ++-- node/node.go | 2 ++ node/node_test.go | 47 ++++++++++++++++++++++++++++++++++++-- 9 files changed, 65 insertions(+), 25 deletions(-) diff --git a/data/txHandler.go b/data/txHandler.go index 7ee5764137..ec3a84cc1f 100644 --- a/data/txHandler.go +++ b/data/txHandler.go @@ -257,15 +257,7 @@ func (handler *TxHandler) Start() { // libp2p pubsub validator and handler abstracted as TaggedMessageProcessor handler.net.RegisterValidatorHandlers([]network.TaggedMessageValidatorHandler{ - { - Tag: protocol.TxnTag, - // create anonymous struct to hold the two functions and satisfy the network.MessageProcessor interface - MessageHandler: struct { - network.ValidateHandleFunc - }{ - network.ValidateHandleFunc(handler.validateIncomingTxMessage), - }, - }, + {Tag: protocol.TxnTag, MessageHandler: network.ValidateHandleFunc(handler.validateIncomingTxMessage)}, }) handler.backlogWg.Add(2) diff --git a/network/gossipNode.go b/network/gossipNode.go index 86a7b42c55..91fb3506bc 100644 --- a/network/gossipNode.go +++ b/network/gossipNode.go @@ -86,8 +86,8 @@ type GossipNode interface { // Currently used as p2p pubsub topic validators. RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) - // ClearProcessors deregisters all the existing message processors. - ClearProcessors() + // ClearValidatorHandlers deregisters all the existing message processors. + ClearValidatorHandlers() // GetHTTPClient returns a http.Client with a suitable for the network Transport // that would also limit the number of outgoing connections. 
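The txHandler change above can drop the anonymous wrapper struct because a named function type that carries the validation method satisfies the handler interface on its own. A small, self-contained sketch of that Go pattern; the names Validator and ValidateFunc are illustrative stand-ins, not identifiers from the codebase:

package main

import "fmt"

// Validator stands in for a message-validation handler interface.
type Validator interface {
	ValidateIncomingMessage(msg string) bool
}

// ValidateFunc adapts a plain function to the Validator interface, the same way
// network.ValidateHandleFunc can be assigned directly to MessageHandler.
type ValidateFunc func(msg string) bool

// ValidateIncomingMessage makes ValidateFunc satisfy Validator.
func (f ValidateFunc) ValidateIncomingMessage(msg string) bool { return f(msg) }

func main() {
	// No wrapper struct is needed: the function type itself implements the interface.
	var v Validator = ValidateFunc(func(msg string) bool { return len(msg) > 0 })
	fmt.Println(v.ValidateIncomingMessage("txn")) // true
}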
diff --git a/network/hybridNetwork.go b/network/hybridNetwork.go index e64b708a2f..5f31436fb8 100644 --- a/network/hybridNetwork.go +++ b/network/hybridNetwork.go @@ -203,10 +203,10 @@ func (n *HybridP2PNetwork) RegisterValidatorHandlers(dispatch []TaggedMessageVal n.wsNetwork.RegisterValidatorHandlers(dispatch) } -// ClearProcessors deregisters all the existing message processors. -func (n *HybridP2PNetwork) ClearProcessors() { - n.p2pNetwork.ClearProcessors() - n.wsNetwork.ClearProcessors() +// ClearValidatorHandlers deregisters all the existing message processors. +func (n *HybridP2PNetwork) ClearValidatorHandlers() { + n.p2pNetwork.ClearValidatorHandlers() + n.wsNetwork.ClearValidatorHandlers() } // GetHTTPClient returns a http.Client with a suitable for the network Transport diff --git a/network/p2p/http.go b/network/p2p/http.go index f11b5375ab..18497f403e 100644 --- a/network/p2p/http.go +++ b/network/p2p/http.go @@ -48,11 +48,14 @@ func MakeHTTPServer(streamHost host.Host) *HTTPServer { p2phttpMux: mux.NewRouter(), } // libp2phttp server requires either explicit ListenAddrs or streamHost.Addrs() to be non-empty. - // If streamHost.Addrs() is empty, we will listen on all interfaces + // If streamHost.Addrs() is empty (that happens when NetAddress is set to ":0" and private address filtering is automatically enabled), + // we will listen on localhost to satisfy libp2phttp.Host.Serve() requirements. + // A side effect is it actually starts listening on interfaces listed in ListenAddrs and as go-libp2p v0.33.2 + // there is no other way to have libp2phttp server running AND to have streamHost.Addrs() filtered. if len(streamHost.Addrs()) == 0 { - logging.Base().Debugf("MakeHTTPServer: no addresses for %s, asking to listen all interfaces", streamHost.ID()) + logging.Base().Debugf("MakeHTTPServer: no addresses for %s, asking to listen localhost interface to satisfy libp2phttp.Host.Serve ", streamHost.ID()) httpServer.ListenAddrs = []multiaddr.Multiaddr{ - multiaddr.StringCast("/ip4/0.0.0.0/tcp/0/http"), + multiaddr.StringCast("/ip4/127.0.0.1/tcp/0/http"), } httpServer.InsecureAllowHTTP = true } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index aa991d0429..0968db7b77 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -706,8 +706,8 @@ func (n *P2PNetwork) RegisterValidatorHandlers(dispatch []TaggedMessageValidator n.handler.RegisterValidatorHandlers(dispatch) } -// ClearProcessors deregisters all the existing message handlers. -func (n *P2PNetwork) ClearProcessors() { +// ClearValidatorHandlers deregisters all the existing message handlers. 
+func (n *P2PNetwork) ClearValidatorHandlers() { n.handler.ClearValidatorHandlers([]Tag{}) } diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index d18196c4c4..e5e4400d46 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -963,7 +963,7 @@ func TestP2PRelay(t *testing.T) { counter.Store(0) var loggedMsgs [][]byte counterHandler, counterDone = makeCounterHandler(expectedMsgs, &counter, &loggedMsgs) - netA.ClearProcessors() + netA.ClearValidatorHandlers() netA.RegisterValidatorHandlers(counterHandler) for i := 0; i < expectedMsgs/2; i++ { diff --git a/network/wsNetwork.go b/network/wsNetwork.go index c67200f01b..5ab45e0406 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -825,8 +825,8 @@ func (wn *WebsocketNetwork) ClearHandlers() { func (wn *WebsocketNetwork) RegisterValidatorHandlers(dispatch []TaggedMessageValidatorHandler) { } -// ClearProcessors deregisters all the existing message handlers. -func (wn *WebsocketNetwork) ClearProcessors() { +// ClearValidatorHandlers deregisters all the existing message handlers. +func (wn *WebsocketNetwork) ClearValidatorHandlers() { } func (wn *WebsocketNetwork) setHeaders(header http.Header) { diff --git a/node/node.go b/node/node.go index 04d2ced84c..dddb3203e3 100644 --- a/node/node.go +++ b/node/node.go @@ -452,6 +452,7 @@ func (node *AlgorandFullNode) Stop() { }() node.net.ClearHandlers() + node.net.ClearValidatorHandlers() if !node.config.DisableNetworking { node.net.Stop() } @@ -1218,6 +1219,7 @@ func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode boo node.waitMonitoringRoutines() }() node.net.ClearHandlers() + node.net.ClearValidatorHandlers() node.stateProofWorker.Stop() node.txHandler.Stop() node.agreementService.Shutdown() diff --git a/node/node_test.go b/node/node_test.go index 19463177df..d35ac43d98 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -606,12 +606,11 @@ func TestDefaultResourcePaths(t *testing.T) { log := logging.Base() n, err := MakeFull(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) n.Start() defer n.Stop() - require.NoError(t, err) - // confirm genesis dir exists in the data dir, and that resources exist in the expected locations require.DirExists(t, filepath.Join(testDirectory, genesis.ID())) @@ -1073,3 +1072,47 @@ func TestNodeP2PRelays(t *testing.T) { return len(nodes[2].net.GetPeers(network.PeersPhonebookRelays)) == 2 }, 80*time.Second, 1*time.Second) } + +// TestNodeSetCatchpointCatchupMode checks node can handle services restart for fast catchup correctly +func TestNodeSetCatchpointCatchupMode(t *testing.T) { + partitiontest.PartitionTest(t) + + testDirectory := t.TempDir() + + genesis := bookkeeping.Genesis{ + SchemaID: "gen", + Proto: protocol.ConsensusCurrentVersion, + Network: config.Devtestnet, + FeeSink: sinkAddr.String(), + RewardsPool: poolAddr.String(), + } + log := logging.TestingLog(t) + cfg := config.GetDefaultLocal() + + tests := []struct { + name string + enableP2P bool + }{ + {"WS node", false}, + {"P2P node", true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg.EnableP2P = test.enableP2P + + n, err := MakeFull(log, testDirectory, cfg, []string{}, genesis) + require.NoError(t, err) + err = n.Start() + require.NoError(t, err) + defer n.Stop() + + // "start" catchpoint catchup => close services + outCh := n.SetCatchpointCatchupMode(true) + <-outCh + // "stop" catchpoint catchup => resume services + outCh = n.SetCatchpointCatchupMode(false) + <-outCh + 
}) + } +} From 619d257deb46408e980ba82c36b1966301d15262 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 16 Sep 2024 09:26:51 -0400 Subject: [PATCH 78/82] p2p: reuse existing libp2p.Host for http clients (#6129) --- network/p2p/http.go | 51 ++++++++++++++++++++++++++------- network/p2p/p2p.go | 10 +++++++ network/p2p/testing/httpNode.go | 2 +- network/p2pNetwork.go | 6 ++-- network/p2pNetwork_test.go | 14 ++++++--- 5 files changed, 65 insertions(+), 18 deletions(-) diff --git a/network/p2p/http.go b/network/p2p/http.go index 18497f403e..cf6b80ffc3 100644 --- a/network/p2p/http.go +++ b/network/p2p/http.go @@ -78,13 +78,44 @@ func (s *HTTPServer) RegisterHTTPHandlerFunc(path string, handler func(http.Resp }) } -// MakeHTTPClient creates a http.Client that uses libp2p transport for a given protocol and peer address. -func MakeHTTPClient(addrInfo *peer.AddrInfo) (*http.Client, error) { - clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs) - if err != nil { - return nil, err +type httpClientConfig struct { + host host.Host +} + +type httpClientOption func(*httpClientConfig) + +// WithHost sets the libp2p host for the http client. +func WithHost(h host.Host) httpClientOption { + return func(o *httpClientConfig) { + o.host = h + } +} + +// MakeTestHTTPClient creates a http.Client that uses libp2p transport for a given protocol and peer address. +// This exported method is only used in tests. +func MakeTestHTTPClient(addrInfo *peer.AddrInfo, opts ...httpClientOption) (*http.Client, error) { + return makeHTTPClient(addrInfo, opts...) +} + +// makeHTTPClient creates a http.Client that uses libp2p transport for a given protocol and peer address. +// If service is nil, a new libp2p host is created. +func makeHTTPClient(addrInfo *peer.AddrInfo, opts ...httpClientOption) (*http.Client, error) { + var config httpClientConfig + for _, opt := range opts { + opt(&config) + } + + var clientStreamHost host.Host + if config.host != nil { + clientStreamHost = config.host + } else { + var err error + clientStreamHost, err = libp2p.New(libp2p.NoListenAddrs) + if err != nil { + return nil, err + } + logging.Base().Debugf("MakeHTTPClient made a new P2P host %s for %s", clientStreamHost.ID(), addrInfo.String()) } - logging.Base().Debugf("MakeHTTPClient made a new P2P host %s for %s", clientStreamHost.ID(), addrInfo.String()) client := libp2phttp.Host{StreamHost: clientStreamHost} @@ -100,13 +131,13 @@ func MakeHTTPClient(addrInfo *peer.AddrInfo) (*http.Client, error) { return &http.Client{Transport: rt}, nil } -// MakeHTTPClientWithRateLimit creates a http.Client that uses libp2p transport for a given protocol and peer address. -func MakeHTTPClientWithRateLimit(addrInfo *peer.AddrInfo, pstore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) { - cl, err := MakeHTTPClient(addrInfo) +// makeHTTPClientWithRateLimit creates a http.Client that uses libp2p transport for a given protocol and peer address. 
+func makeHTTPClientWithRateLimit(addrInfo *peer.AddrInfo, p2pService *serviceImpl, connTimeStore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) { + cl, err := makeHTTPClient(addrInfo, WithHost(p2pService.host)) if err != nil { return nil, err } - rltr := limitcaller.MakeRateLimitingBoundTransportWithRoundTripper(pstore, queueingTimeout, cl.Transport, string(addrInfo.ID)) + rltr := limitcaller.MakeRateLimitingBoundTransportWithRoundTripper(connTimeStore, queueingTimeout, cl.Transport, string(addrInfo.ID)) cl.Transport = &rltr return cl, nil diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index f281d1d13b..0e46b8cb0f 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -21,12 +21,14 @@ import ( "encoding/base32" "fmt" "net" + "net/http" "runtime" "strings" "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network/limitcaller" pstore "github.com/algorand/go-algorand/network/p2p/peerstore" "github.com/algorand/go-algorand/network/phonebook" "github.com/algorand/go-algorand/util/metrics" @@ -69,6 +71,9 @@ type Service interface { ListPeersForTopic(topic string) []peer.ID Subscribe(topic string, val pubsub.ValidatorEx) (SubNextCancellable, error) Publish(ctx context.Context, topic string, data []byte) error + + // GetHTTPClient returns a rate-limiting libp2p-streaming http client that can be used to make requests to the given peer + GetHTTPClient(addrInfo *peer.AddrInfo, connTimeStore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) } // serviceImpl manages integration with libp2p and implements the Service interface @@ -412,3 +417,8 @@ func addressFilter(addrs []multiaddr.Multiaddr) []multiaddr.Multiaddr { } return res } + +// GetHTTPClient returns a libp2p-streaming http client that can be used to make requests to the given peer +func (s *serviceImpl) GetHTTPClient(addrInfo *peer.AddrInfo, connTimeStore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) { + return makeHTTPClientWithRateLimit(addrInfo, s, connTimeStore, queueingTimeout) +} diff --git a/network/p2p/testing/httpNode.go b/network/p2p/testing/httpNode.go index 523cdc5d4c..f73b26999f 100644 --- a/network/p2p/testing/httpNode.go +++ b/network/p2p/testing/httpNode.go @@ -104,7 +104,7 @@ func (p httpPeer) GetAddress() string { // GetAddress implements HTTPPeer interface and returns the http client for a peer func (p httpPeer) GetHTTPClient() *http.Client { - c, err := p2p.MakeHTTPClient(&p.addrInfo) + c, err := p2p.MakeTestHTTPClient(&p.addrInfo) require.NoError(p.tb, err) return c } diff --git a/network/p2pNetwork.go b/network/p2pNetwork.go index 0968db7b77..32b9a49ef3 100644 --- a/network/p2pNetwork.go +++ b/network/p2pNetwork.go @@ -612,7 +612,7 @@ func addrInfoToWsPeerCore(n *P2PNetwork, addrInfo *peer.AddrInfo) (wsPeerCore, b } addr := mas[0].String() - client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) + client, err := n.service.GetHTTPClient(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) if err != nil { n.log.Warnf("MakeHTTPClient failed: %v", err) return wsPeerCore{}, false @@ -718,7 +718,7 @@ func (n *P2PNetwork) GetHTTPClient(address string) (*http.Client, error) { if err != nil { return nil, err } - return p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) + return n.service.GetHTTPClient(addrInfo, n.pstore, 
limitcaller.DefaultQueueingTimeout) } // OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress. @@ -771,7 +771,7 @@ func (n *P2PNetwork) wsStreamHandler(ctx context.Context, p2pPeer peer.ID, strea // create a wsPeer for this stream and added it to the peers map. addrInfo := &peer.AddrInfo{ID: p2pPeer, Addrs: []multiaddr.Multiaddr{ma}} - client, err := p2p.MakeHTTPClientWithRateLimit(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) + client, err := n.service.GetHTTPClient(addrInfo, n.pstore, limitcaller.DefaultQueueingTimeout) if err != nil { n.log.Warnf("Cannot construct HTTP Client for %s: %v", p2pPeer, err) client = nil diff --git a/network/p2pNetwork_test.go b/network/p2pNetwork_test.go index e5e4400d46..f2e3002e85 100644 --- a/network/p2pNetwork_test.go +++ b/network/p2pNetwork_test.go @@ -368,6 +368,10 @@ func (s *mockService) Publish(ctx context.Context, topic string, data []byte) er return nil } +func (s *mockService) GetHTTPClient(addrInfo *peer.AddrInfo, connTimeStore limitcaller.ConnectionTimeStore, queueingTimeout time.Duration) (*http.Client, error) { + return nil, nil +} + func makeMockService(id peer.ID, addrs []ma.Multiaddr) *mockService { return &mockService{ id: id, @@ -757,7 +761,7 @@ func TestP2PHTTPHandler(t *testing.T) { require.NoError(t, err) require.NotZero(t, addrsA[0]) - httpClient, err := p2p.MakeHTTPClient(&peerInfoA) + httpClient, err := p2p.MakeTestHTTPClient(&peerInfoA) require.NoError(t, err) resp, err := httpClient.Get("/test") require.NoError(t, err) @@ -768,7 +772,7 @@ func TestP2PHTTPHandler(t *testing.T) { require.Equal(t, "hello", string(body)) // check another endpoint that also access the underlying connection/stream - httpClient, err = p2p.MakeHTTPClient(&peerInfoA) + httpClient, err = p2p.MakeTestHTTPClient(&peerInfoA) require.NoError(t, err) resp, err = httpClient.Get("/check-conn") require.NoError(t, err) @@ -780,10 +784,12 @@ func TestP2PHTTPHandler(t *testing.T) { // check rate limiting client: // zero clients allowed, rate limiting window (10s) is greater than queue deadline (1s) + netB, err := NewP2PNetwork(log, cfg, "", nil, genesisID, config.Devtestnet, &nopeNodeInfo{}, nil) + require.NoError(t, err) pstore, err := peerstore.MakePhonebook(0, 10*time.Second) require.NoError(t, err) pstore.AddPersistentPeers([]*peer.AddrInfo{&peerInfoA}, "net", phonebook.PhoneBookEntryRelayRole) - httpClient, err = p2p.MakeHTTPClientWithRateLimit(&peerInfoA, pstore, 1*time.Second) + httpClient, err = netB.service.GetHTTPClient(&peerInfoA, pstore, 1*time.Second) require.NoError(t, err) _, err = httpClient.Get("/test") require.ErrorIs(t, err, limitcaller.ErrConnectionQueueingTimeout) @@ -815,7 +821,7 @@ func TestP2PHTTPHandlerAllInterfaces(t *testing.T) { require.NotZero(t, addrsB[0]) t.Logf("peerInfoB: %s", peerInfoA) - httpClient, err := p2p.MakeHTTPClient(&peerInfoA) + httpClient, err := p2p.MakeTestHTTPClient(&peerInfoA) require.NoError(t, err) resp, err := httpClient.Get("/test") require.NoError(t, err) From cac2c09b5d0f5c5a54b2efcf9d4db4cd6355ba33 Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Tue, 17 Sep 2024 15:00:12 +0000 Subject: [PATCH 79/82] Update the Version, BuildNumber, genesistimestamp.data --- buildnumber.dat | 1 + genesistimestamp.dat | 1 + 2 files changed, 2 insertions(+) create mode 100644 buildnumber.dat create mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ 
b/buildnumber.dat @@ -0,0 +1 @@ +0 diff --git a/genesistimestamp.dat b/genesistimestamp.dat new file mode 100644 index 0000000000..c72c6a7795 --- /dev/null +++ b/genesistimestamp.dat @@ -0,0 +1 @@ +1558657885 From 49d4765ac343089ccc77afe0d28d095f6d0e020d Mon Sep 17 00:00:00 2001 From: John Lee Date: Wed, 18 Sep 2024 13:44:31 -0400 Subject: [PATCH 80/82] CI: fix rpmbuild call for cross-platform execution (#6134) --- installer/rpm/algorand-devtools/algorand-devtools.spec | 1 + installer/rpm/algorand/algorand.spec | 1 + 2 files changed, 2 insertions(+) diff --git a/installer/rpm/algorand-devtools/algorand-devtools.spec b/installer/rpm/algorand-devtools/algorand-devtools.spec index ad5b5a1cd5..5c2490653c 100644 --- a/installer/rpm/algorand-devtools/algorand-devtools.spec +++ b/installer/rpm/algorand-devtools/algorand-devtools.spec @@ -8,6 +8,7 @@ Requires: @REQUIRED_ALGORAND_PKG@ >= @VER@ %define SRCDIR go-algorand-rpmbuild %define _buildshell /bin/bash +%define __os_install_post %{?__brp-compress} %description This package provides development tools for the Algorand blockchain. diff --git a/installer/rpm/algorand/algorand.spec b/installer/rpm/algorand/algorand.spec index ef58c0db1a..02e0909a78 100644 --- a/installer/rpm/algorand/algorand.spec +++ b/installer/rpm/algorand/algorand.spec @@ -11,6 +11,7 @@ Requires(pre): shadow-utils %define SRCDIR go-algorand-rpmbuild %define _buildshell /bin/bash +%define __os_install_post %{?__brp-compress} %description This package provides an implementation of the Algorand protocol. From c44aba5871d7deb39473673e1a02fc719fbcf474 Mon Sep 17 00:00:00 2001 From: John Lee Date: Wed, 18 Sep 2024 14:24:34 -0400 Subject: [PATCH 81/82] CI: remove homebrew autoupdate deactivation from CircleCI (#6135) --- .circleci/config.yml | 4 ---- scripts/configure_dev.sh | 16 +++++++--------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 13d4ea996f..f9cd0660dd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,14 +49,10 @@ executors: macos: xcode: 14.2.0 resource_class: macos.m1.medium.gen1 - environment: - HOMEBREW_NO_AUTO_UPDATE: "true" mac_arm64_large: macos: xcode: 14.2.0 resource_class: macos.m1.large.gen1 - environment: - HOMEBREW_NO_AUTO_UPDATE: "true" slack-fail-stop-step: &slack-fail-post-step post-steps: diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh index c7bd93a250..56edb84c32 100755 --- a/scripts/configure_dev.sh +++ b/scripts/configure_dev.sh @@ -78,15 +78,13 @@ if [ "${OS}" = "linux" ]; then sudo "$SCRIPTPATH/install_linux_deps.sh" fi elif [ "${OS}" = "darwin" ]; then - if [ "${CIRCLECI}" != "true" ]; then - brew update - brew_version=$(brew --version | head -1 | cut -d' ' -f2) - major_version=$(echo $brew_version | cut -d. -f1) - minor_version=$(echo $brew_version | cut -d. -f2) - version_decimal="$major_version.$minor_version" - if (($(echo "$version_decimal < 2.5" | bc -l))); then - brew tap homebrew/cask - fi + brew update + brew_version=$(brew --version | head -1 | cut -d' ' -f2) + major_version=$(echo $brew_version | cut -d. -f1) + minor_version=$(echo $brew_version | cut -d. 
-f2) + version_decimal="$major_version.$minor_version" + if (($(echo "$version_decimal < 2.5" | bc -l))); then + brew tap homebrew/cask fi install_or_upgrade pkg-config install_or_upgrade libtool From c0aea8ae1ab2aa752161ad6d68ccdb3cdf10bae7 Mon Sep 17 00:00:00 2001 From: John Lee Date: Wed, 25 Sep 2024 12:58:35 -0400 Subject: [PATCH 82/82] CI: updates to fix publishing universal darwin binaries (#6141) --- scripts/build_packages.sh | 6 +++++- scripts/release/mule/sign/sign.sh | 29 ++++++++++++++--------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/scripts/build_packages.sh b/scripts/build_packages.sh index b434c485dc..8a5e75a5b6 100755 --- a/scripts/build_packages.sh +++ b/scripts/build_packages.sh @@ -97,10 +97,14 @@ for var in "${VARIATION_ARRAY[@]}"; do pushd ${PLATFORM_ROOT} tar --exclude=tools -zcf ${PKG_ROOT}/node_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz * >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "Error creating node tar file for package ${PLATFORM}. Aborting..." + exit 1 + fi cd bin tar -zcf ${PKG_ROOT}/install_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz updater update.sh >/dev/null 2>&1 if [ $? -ne 0 ]; then - echo "Error creating tar file for package ${PLATFORM}. Aborting..." + echo "Error creating install tar file for package ${PLATFORM}. Aborting..." exit 1 fi diff --git a/scripts/release/mule/sign/sign.sh b/scripts/release/mule/sign/sign.sh index 89baedb9ce..ab7f3899e7 100755 --- a/scripts/release/mule/sign/sign.sh +++ b/scripts/release/mule/sign/sign.sh @@ -1,8 +1,6 @@ #!/usr/bin/env bash # shellcheck disable=2035,2129 -# TODO: This needs to be reworked a bit to support Darwin. - set -exo pipefail shopt -s nullglob @@ -14,8 +12,8 @@ CHANNEL=${CHANNEL:-$(./scripts/release/mule/common/get_channel.sh "$NETWORK")} VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} PKG_DIR="./tmp/node_pkgs" SIGNING_KEY_ADDR=dev@algorand.com -OS_TYPE=$(./scripts/release/mule/common/ostype.sh) -ARCHS=(amd64 arm64) +OS_TYPES=(linux darwin) +ARCHS=(amd64 arm64 universal) ARCH_BITS=(x86_64 aarch64) # Note that we don't want to use $GNUPGHOME here because that is a documented env var for the gnupg # project and if it's set in the environment mule will automatically pick it up, which could have @@ -47,17 +45,19 @@ popd if [ -n "$S3_SOURCE" ] then i=0 - for arch in "${ARCHS[@]}"; do - arch_bit="${ARCH_BITS[$i]}" - ( + for os in "${OS_TYPES[@]}"; do + for arch in "${ARCHS[@]}"; do mkdir -p "$PKG_DIR/$OS_TYPE/$arch" - cd "$PKG_DIR" - # Note the underscore after ${arch}! - # Recall that rpm packages have the arch bit in the filenames (i.e., "x86_64" rather than "amd64"). - # Also, the order of the includes/excludes is important! - aws s3 cp --recursive --exclude "*" --include "*${arch}_*" --include "*$arch_bit.rpm" --exclude "*.sig" --exclude "*.asc" --exclude "*.asc.gz" "s3://$S3_SOURCE/$CHANNEL/$VERSION" . - ) - i=$((i + 1)) + arch_bit="${ARCH_BITS[$i]}" + ( + cd "$PKG_DIR" + # Note the underscore after ${arch}! + # Recall that rpm packages have the arch bit in the filenames (i.e., "x86_64" rather than "amd64"). + # Also, the order of the includes/excludes is important! + aws s3 cp --recursive --exclude "*" --include "*${arch}_*" --include "*$arch_bit.rpm" --exclude "*.sig" --exclude "*.asc" --exclude "*.asc.gz" "s3://$S3_SOURCE/$CHANNEL/$VERSION" . + ) + i=$((i + 1)) + done done fi @@ -69,7 +69,6 @@ cd "$PKG_DIR" # Grab the directories directly underneath (max-depth 1) ./tmp/node_pkgs/ into a space-delimited string. 
# This will help us target `linux`, `darwin` and (possibly) `windows` build assets. # Note the surrounding parens turns the string created by `find` into an array. -OS_TYPES=($(find . -mindepth 1 -maxdepth 1 -type d -printf '%f\n')) for os in "${OS_TYPES[@]}"; do for arch in "${ARCHS[@]}"; do if [ -d "$os/$arch" ]