diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index f1b93709eae..1ea97e61318 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -144,7 +144,7 @@ func doTxpool(ctx context.Context, logger log.Logger) error { log.Info("TxPool started", "db", filepath.Join(datadirCli, "txpool")) - sentryClients := make([]direct.SentryClient, len(sentryAddr)) + sentryClients := make([]proto_sentry.SentryClient, len(sentryAddr)) for i := range sentryAddr { creds, err := grpcutil.TLS(TLSCACert, TLSCertfile, TLSKeyFile) if err != nil { diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index beb0d2d9847..6492029b4fc 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -43,7 +43,6 @@ import ( "github.com/erigontech/erigon-lib/kv/dbutils" "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/common/debug" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/core/state" @@ -453,7 +452,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, blockWithReceipts *type wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime delay += time.Duration(rand.Int63n(int64(wiggle))) // nolint: gosec - c.logger.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle)) + c.logger.Trace("Out-of-turn signing requested", "wiggle", libcommon.PrettyDuration(wiggle)) } // Sign all the things! sighash, err := signFn(signer, accounts.MimetypeClique, CliqueRLP(header)) @@ -462,7 +461,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, blockWithReceipts *type } copy(header.Extra[len(header.Extra)-ExtraSeal:], sighash) // Wait until sealing is terminated or delay timeout. 
- c.logger.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay)) + c.logger.Trace("Waiting for slot to sign and propagate", "delay", libcommon.PrettyDuration(delay)) go func() { defer debug.LogPanic() select { diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index 49573930c55..1770a4d5bab 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -40,7 +40,6 @@ import ( "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/core/types" ) @@ -314,12 +313,12 @@ func (s *Snapshot) apply(sigcache *lru.ARCCache[libcommon.Hash, libcommon.Addres } // If we're taking too much time (ecrecover), notify the user once a while if time.Since(logged) > 8*time.Second { - logger.Info("Reconstructing voting history", "processed", i, "total", len(headers), "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Reconstructing voting history", "processed", i, "total", len(headers), "elapsed", libcommon.PrettyDuration(time.Since(start))) logged = time.Now() } } if time.Since(start) > 8*time.Second { - logger.Info("Reconstructed voting history", "processed", len(headers), "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Reconstructed voting history", "processed", len(headers), "elapsed", libcommon.PrettyDuration(time.Since(start))) } snap.Number += uint64(len(headers)) snap.Hash = headers[len(headers)-1].Hash() diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 0cca67bb8de..e1627e95ec5 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -30,7 +30,7 @@ import ( "time" "unsafe" - common2 "github.com/erigontech/erigon-lib/common" + libcommon "github.com/erigontech/erigon-lib/common" "golang.org/x/crypto/sha3" @@ -38,7 +38,6 @@ import ( "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon/common" 
"github.com/erigontech/erigon/common/bitutil" "github.com/erigontech/erigon/common/debug" "github.com/erigontech/erigon/crypto" @@ -141,7 +140,7 @@ func seedHash(block uint64) []byte { return seed } - h := common2.NewHasher() + h := libcommon.NewHasher() for i := 0; i < int(block/epochLength); i++ { h.Sha.Reset() @@ -157,7 +156,7 @@ func seedHash(block uint64) []byte { } } - common2.ReturnHasherToPool(h) + libcommon.ReturnHasherToPool(h) return seed } @@ -192,7 +191,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { if elapsed > 5*time.Second { logFn = logger.Info } - logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + logFn("Generated ethash verification cache", "elapsed", libcommon.PrettyDuration(elapsed)) }() // Convert our destination slice to a byte buffer var cache []byte @@ -218,7 +217,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { case <-done: return case <-time.After(3 * time.Second): - logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", libcommon.PrettyDuration(time.Since(start))) } } }() @@ -348,7 +347,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { if elapsed > 3*time.Second { logFn = logger.Info } - logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + logFn("Generated ethash verification cache", "elapsed", libcommon.PrettyDuration(elapsed)) }() // Figure out whether the bytes need to be swapped for the machine @@ -396,7 +395,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { }) if status := atomic.AddUint64(&progress, 1); status%percent == 0 { - logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", 
common.PrettyDuration(time.Since(start))) + logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", libcommon.PrettyDuration(time.Since(start))) } } }(i) diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index a1083191b0e..7e26df39416 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -36,7 +36,6 @@ import ( libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutil" - "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/core/types" ) @@ -307,7 +306,7 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest libcommon.Ha start := time.Now() if !s.noverify { if err := s.ethash.verifySeal(header, true); err != nil { - s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err) + s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", libcommon.PrettyDuration(time.Since(start)), "err", err) return false } } @@ -316,7 +315,7 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest libcommon.Ha s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected") return false } - s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", libcommon.PrettyDuration(time.Since(start))) // Solutions seems to be valid, return to the miner and notify acceptance. 
solution := block.Block.WithSeal(header) diff --git a/common/format.go b/erigon-lib/common/format.go similarity index 100% rename from common/format.go rename to erigon-lib/common/format.go diff --git a/erigon-lib/direct/sentry_client.go b/erigon-lib/direct/sentry_client.go index 5b8a1cf9708..8833b66fc2c 100644 --- a/erigon-lib/direct/sentry_client.go +++ b/erigon-lib/direct/sentry_client.go @@ -26,8 +26,9 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/emptypb" - sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + sentryproto "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" types "github.com/erigontech/erigon-lib/gointerfaces/typesproto" + libsentry "github.com/erigontech/erigon-lib/p2p/sentry" ) const ( @@ -37,81 +38,18 @@ const ( ETH68 = 68 ) -var ProtoIds = map[uint]map[sentry.MessageId]struct{}{ - ETH65: { - sentry.MessageId_GET_BLOCK_HEADERS_65: struct{}{}, - sentry.MessageId_BLOCK_HEADERS_65: struct{}{}, - sentry.MessageId_GET_BLOCK_BODIES_65: struct{}{}, - sentry.MessageId_BLOCK_BODIES_65: struct{}{}, - sentry.MessageId_GET_NODE_DATA_65: struct{}{}, - sentry.MessageId_NODE_DATA_65: struct{}{}, - sentry.MessageId_GET_RECEIPTS_65: struct{}{}, - sentry.MessageId_RECEIPTS_65: struct{}{}, - sentry.MessageId_NEW_BLOCK_HASHES_65: struct{}{}, - sentry.MessageId_NEW_BLOCK_65: struct{}{}, - sentry.MessageId_TRANSACTIONS_65: struct{}{}, - sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_65: struct{}{}, - sentry.MessageId_GET_POOLED_TRANSACTIONS_65: struct{}{}, - sentry.MessageId_POOLED_TRANSACTIONS_65: struct{}{}, - }, - ETH66: { - sentry.MessageId_GET_BLOCK_HEADERS_66: struct{}{}, - sentry.MessageId_BLOCK_HEADERS_66: struct{}{}, - sentry.MessageId_GET_BLOCK_BODIES_66: struct{}{}, - sentry.MessageId_BLOCK_BODIES_66: struct{}{}, - sentry.MessageId_GET_NODE_DATA_66: struct{}{}, - sentry.MessageId_NODE_DATA_66: struct{}{}, - sentry.MessageId_GET_RECEIPTS_66: struct{}{}, - 
sentry.MessageId_RECEIPTS_66: struct{}{}, - sentry.MessageId_NEW_BLOCK_HASHES_66: struct{}{}, - sentry.MessageId_NEW_BLOCK_66: struct{}{}, - sentry.MessageId_TRANSACTIONS_66: struct{}{}, - sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: struct{}{}, - sentry.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{}, - sentry.MessageId_POOLED_TRANSACTIONS_66: struct{}{}, - }, - ETH67: { - sentry.MessageId_GET_BLOCK_HEADERS_66: struct{}{}, - sentry.MessageId_BLOCK_HEADERS_66: struct{}{}, - sentry.MessageId_GET_BLOCK_BODIES_66: struct{}{}, - sentry.MessageId_BLOCK_BODIES_66: struct{}{}, - sentry.MessageId_GET_RECEIPTS_66: struct{}{}, - sentry.MessageId_RECEIPTS_66: struct{}{}, - sentry.MessageId_NEW_BLOCK_HASHES_66: struct{}{}, - sentry.MessageId_NEW_BLOCK_66: struct{}{}, - sentry.MessageId_TRANSACTIONS_66: struct{}{}, - sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: struct{}{}, - sentry.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{}, - sentry.MessageId_POOLED_TRANSACTIONS_66: struct{}{}, - }, - ETH68: { - sentry.MessageId_GET_BLOCK_HEADERS_66: struct{}{}, - sentry.MessageId_BLOCK_HEADERS_66: struct{}{}, - sentry.MessageId_GET_BLOCK_BODIES_66: struct{}{}, - sentry.MessageId_BLOCK_BODIES_66: struct{}{}, - sentry.MessageId_GET_RECEIPTS_66: struct{}{}, - sentry.MessageId_RECEIPTS_66: struct{}{}, - sentry.MessageId_NEW_BLOCK_HASHES_66: struct{}{}, - sentry.MessageId_NEW_BLOCK_66: struct{}{}, - sentry.MessageId_TRANSACTIONS_66: struct{}{}, - sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68: struct{}{}, - sentry.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{}, - sentry.MessageId_POOLED_TRANSACTIONS_66: struct{}{}, - }, -} - //go:generate mockgen -typed=true -destination=./sentry_client_mock.go -package=direct . 
SentryClient type SentryClient interface { - sentry.SentryClient + sentryproto.SentryClient Protocol() uint Ready() bool MarkDisconnected() } type SentryClientRemote struct { - sentry.SentryClient + sentryproto.SentryClient sync.RWMutex - protocol uint + protocol sentryproto.Protocol ready bool } @@ -121,14 +59,14 @@ var _ SentryClient = (*SentryClientDirect)(nil) // compile-time interface check // NewSentryClientRemote - app code must use this class // to avoid concurrency - it accepts protocol (which received async by SetStatus) in constructor, // means app can't use client which protocol unknown yet -func NewSentryClientRemote(client sentry.SentryClient) *SentryClientRemote { +func NewSentryClientRemote(client sentryproto.SentryClient) *SentryClientRemote { return &SentryClientRemote{SentryClient: client} } func (c *SentryClientRemote) Protocol() uint { c.RLock() defer c.RUnlock() - return c.protocol + return ETH65 + uint(c.protocol) } func (c *SentryClientRemote) Ready() bool { @@ -143,7 +81,7 @@ func (c *SentryClientRemote) MarkDisconnected() { c.ready = false } -func (c *SentryClientRemote) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentry.HandShakeReply, error) { +func (c *SentryClientRemote) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.HandShakeReply, error) { reply, err := c.SentryClient.HandShake(ctx, in, opts...) 
if err != nil { return nil, err @@ -151,30 +89,25 @@ func (c *SentryClientRemote) HandShake(ctx context.Context, in *emptypb.Empty, o c.Lock() defer c.Unlock() switch reply.Protocol { - case sentry.Protocol_ETH65: - c.protocol = ETH65 - case sentry.Protocol_ETH66: - c.protocol = ETH66 - case sentry.Protocol_ETH67: - c.protocol = ETH67 - case sentry.Protocol_ETH68: - c.protocol = ETH68 + case sentryproto.Protocol_ETH65, sentryproto.Protocol_ETH66, + sentryproto.Protocol_ETH67, sentryproto.Protocol_ETH68: + c.protocol = reply.Protocol default: return nil, fmt.Errorf("unexpected protocol: %d", reply.Protocol) } c.ready = true return reply, nil } -func (c *SentryClientRemote) SetStatus(ctx context.Context, in *sentry.StatusData, opts ...grpc.CallOption) (*sentry.SetStatusReply, error) { +func (c *SentryClientRemote) SetStatus(ctx context.Context, in *sentryproto.StatusData, opts ...grpc.CallOption) (*sentryproto.SetStatusReply, error) { return c.SentryClient.SetStatus(ctx, in, opts...) } -func (c *SentryClientRemote) Messages(ctx context.Context, in *sentry.MessagesRequest, opts ...grpc.CallOption) (sentry.Sentry_MessagesClient, error) { - in.Ids = filterIds(in.Ids, c.Protocol()) +func (c *SentryClientRemote) Messages(ctx context.Context, in *sentryproto.MessagesRequest, opts ...grpc.CallOption) (sentryproto.Sentry_MessagesClient, error) { + in.Ids = filterIds(in.Ids, c.protocol) return c.SentryClient.Messages(ctx, in, opts...) 
} -func (c *SentryClientRemote) PeerCount(ctx context.Context, in *sentry.PeerCountRequest, opts ...grpc.CallOption) (*sentry.PeerCountReply, error) { +func (c *SentryClientRemote) PeerCount(ctx context.Context, in *sentryproto.PeerCountRequest, opts ...grpc.CallOption) (*sentryproto.PeerCountReply, error) { return c.SentryClient.PeerCount(ctx, in) } @@ -187,66 +120,66 @@ func (c *SentryClientRemote) PeerCount(ctx context.Context, in *sentry.PeerCount // SentryClientDirect implements SentryClient interface by connecting the instance of the client directly with the corresponding // instance of SentryServer type SentryClientDirect struct { - server sentry.SentryServer - protocol uint + server sentryproto.SentryServer + protocol sentryproto.Protocol } -func NewSentryClientDirect(protocol uint, sentryServer sentry.SentryServer) *SentryClientDirect { - return &SentryClientDirect{protocol: protocol, server: sentryServer} +func NewSentryClientDirect(protocol uint, sentryServer sentryproto.SentryServer) *SentryClientDirect { + return &SentryClientDirect{protocol: sentryproto.Protocol(protocol - ETH65), server: sentryServer} } -func (c *SentryClientDirect) Protocol() uint { return c.protocol } +func (c *SentryClientDirect) Protocol() uint { return uint(c.protocol) + ETH65 } func (c *SentryClientDirect) Ready() bool { return true } func (c *SentryClientDirect) MarkDisconnected() {} -func (c *SentryClientDirect) PenalizePeer(ctx context.Context, in *sentry.PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *SentryClientDirect) PenalizePeer(ctx context.Context, in *sentryproto.PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { return c.server.PenalizePeer(ctx, in) } -func (c *SentryClientDirect) PeerMinBlock(ctx context.Context, in *sentry.PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *SentryClientDirect) PeerMinBlock(ctx context.Context, in *sentryproto.PeerMinBlockRequest, opts 
...grpc.CallOption) (*emptypb.Empty, error) { return c.server.PeerMinBlock(ctx, in) } -func (c *SentryClientDirect) SendMessageByMinBlock(ctx context.Context, in *sentry.SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) { +func (c *SentryClientDirect) SendMessageByMinBlock(ctx context.Context, in *sentryproto.SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { return c.server.SendMessageByMinBlock(ctx, in) } -func (c *SentryClientDirect) SendMessageById(ctx context.Context, in *sentry.SendMessageByIdRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) { +func (c *SentryClientDirect) SendMessageById(ctx context.Context, in *sentryproto.SendMessageByIdRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { return c.server.SendMessageById(ctx, in) } -func (c *SentryClientDirect) SendMessageToRandomPeers(ctx context.Context, in *sentry.SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*sentry.SentPeers, error) { +func (c *SentryClientDirect) SendMessageToRandomPeers(ctx context.Context, in *sentryproto.SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { return c.server.SendMessageToRandomPeers(ctx, in) } -func (c *SentryClientDirect) SendMessageToAll(ctx context.Context, in *sentry.OutboundMessageData, opts ...grpc.CallOption) (*sentry.SentPeers, error) { +func (c *SentryClientDirect) SendMessageToAll(ctx context.Context, in *sentryproto.OutboundMessageData, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { return c.server.SendMessageToAll(ctx, in) } -func (c *SentryClientDirect) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentry.HandShakeReply, error) { +func (c *SentryClientDirect) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.HandShakeReply, error) { return c.server.HandShake(ctx, in) } -func (c *SentryClientDirect) SetStatus(ctx context.Context, 
in *sentry.StatusData, opts ...grpc.CallOption) (*sentry.SetStatusReply, error) { +func (c *SentryClientDirect) SetStatus(ctx context.Context, in *sentryproto.StatusData, opts ...grpc.CallOption) (*sentryproto.SetStatusReply, error) { return c.server.SetStatus(ctx, in) } -func (c *SentryClientDirect) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentry.PeersReply, error) { +func (c *SentryClientDirect) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.PeersReply, error) { return c.server.Peers(ctx, in) } -func (c *SentryClientDirect) PeerCount(ctx context.Context, in *sentry.PeerCountRequest, opts ...grpc.CallOption) (*sentry.PeerCountReply, error) { +func (c *SentryClientDirect) PeerCount(ctx context.Context, in *sentryproto.PeerCountRequest, opts ...grpc.CallOption) (*sentryproto.PeerCountReply, error) { return c.server.PeerCount(ctx, in) } -func (c *SentryClientDirect) PeerById(ctx context.Context, in *sentry.PeerByIdRequest, opts ...grpc.CallOption) (*sentry.PeerByIdReply, error) { +func (c *SentryClientDirect) PeerById(ctx context.Context, in *sentryproto.PeerByIdRequest, opts ...grpc.CallOption) (*sentryproto.PeerByIdReply, error) { return c.server.PeerById(ctx, in) } // -- start Messages -func (c *SentryClientDirect) Messages(ctx context.Context, in *sentry.MessagesRequest, opts ...grpc.CallOption) (sentry.Sentry_MessagesClient, error) { - in.Ids = filterIds(in.Ids, c.Protocol()) +func (c *SentryClientDirect) Messages(ctx context.Context, in *sentryproto.MessagesRequest, opts ...grpc.CallOption) (sentryproto.Sentry_MessagesClient, error) { + in.Ids = filterIds(in.Ids, c.protocol) ch := make(chan *inboundMessageReply, 16384) streamServer := &SentryMessagesStreamS{ch: ch, ctx: ctx} go func() { @@ -257,18 +190,18 @@ func (c *SentryClientDirect) Messages(ctx context.Context, in *sentry.MessagesRe } type inboundMessageReply struct { - r *sentry.InboundMessage + r *sentryproto.InboundMessage err error } 
-// SentryMessagesStreamS implements proto_sentry.Sentry_ReceiveMessagesServer +// SentryMessagesStreamS implements proto_sentryproto.Sentry_ReceiveMessagesServer type SentryMessagesStreamS struct { ch chan *inboundMessageReply ctx context.Context grpc.ServerStream } -func (s *SentryMessagesStreamS) Send(m *sentry.InboundMessage) error { +func (s *SentryMessagesStreamS) Send(m *sentryproto.InboundMessage) error { s.ch <- &inboundMessageReply{r: m} return nil } @@ -288,7 +221,7 @@ type SentryMessagesStreamC struct { grpc.ClientStream } -func (c *SentryMessagesStreamC) Recv() (*sentry.InboundMessage, error) { +func (c *SentryMessagesStreamC) Recv() (*sentryproto.InboundMessage, error) { m, ok := <-c.ch if !ok || m == nil { return nil, io.EOF @@ -303,7 +236,7 @@ func (c *SentryMessagesStreamC) RecvMsg(anyMessage interface{}) error { if err != nil { return err } - outMessage := anyMessage.(*sentry.InboundMessage) + outMessage := anyMessage.(*sentryproto.InboundMessage) proto.Merge(outMessage, m) return nil } @@ -311,7 +244,7 @@ func (c *SentryMessagesStreamC) RecvMsg(anyMessage interface{}) error { // -- end Messages // -- start Peers -func (c *SentryClientDirect) PeerEvents(ctx context.Context, in *sentry.PeerEventsRequest, opts ...grpc.CallOption) (sentry.Sentry_PeerEventsClient, error) { +func (c *SentryClientDirect) PeerEvents(ctx context.Context, in *sentryproto.PeerEventsRequest, opts ...grpc.CallOption) (sentryproto.Sentry_PeerEventsClient, error) { ch := make(chan *peersReply, 16384) streamServer := &SentryPeersStreamS{ch: ch, ctx: ctx} go func() { @@ -321,23 +254,23 @@ func (c *SentryClientDirect) PeerEvents(ctx context.Context, in *sentry.PeerEven return &SentryPeersStreamC{ch: ch, ctx: ctx}, nil } -func (c *SentryClientDirect) AddPeer(ctx context.Context, in *sentry.AddPeerRequest, opts ...grpc.CallOption) (*sentry.AddPeerReply, error) { +func (c *SentryClientDirect) AddPeer(ctx context.Context, in *sentryproto.AddPeerRequest, opts ...grpc.CallOption) 
(*sentryproto.AddPeerReply, error) { return c.server.AddPeer(ctx, in) } type peersReply struct { - r *sentry.PeerEvent + r *sentryproto.PeerEvent err error } -// SentryPeersStreamS - implements proto_sentry.Sentry_ReceivePeersServer +// SentryPeersStreamS - implements proto_sentryproto.Sentry_ReceivePeersServer type SentryPeersStreamS struct { ch chan *peersReply ctx context.Context grpc.ServerStream } -func (s *SentryPeersStreamS) Send(m *sentry.PeerEvent) error { +func (s *SentryPeersStreamS) Send(m *sentryproto.PeerEvent) error { s.ch <- &peersReply{r: m} return nil } @@ -357,7 +290,7 @@ type SentryPeersStreamC struct { grpc.ClientStream } -func (c *SentryPeersStreamC) Recv() (*sentry.PeerEvent, error) { +func (c *SentryPeersStreamC) Recv() (*sentryproto.PeerEvent, error) { m, ok := <-c.ch if !ok || m == nil { return nil, io.EOF @@ -372,7 +305,7 @@ func (c *SentryPeersStreamC) RecvMsg(anyMessage interface{}) error { if err != nil { return err } - outMessage := anyMessage.(*sentry.PeerEvent) + outMessage := anyMessage.(*sentryproto.PeerEvent) proto.Merge(outMessage, m) return nil } @@ -383,9 +316,9 @@ func (c *SentryClientDirect) NodeInfo(ctx context.Context, in *emptypb.Empty, op return c.server.NodeInfo(ctx, in) } -func filterIds(in []sentry.MessageId, protocol uint) (filtered []sentry.MessageId) { +func filterIds(in []sentryproto.MessageId, protocol sentryproto.Protocol) (filtered []sentryproto.MessageId) { for _, id := range in { - if _, ok := ProtoIds[protocol][id]; ok { + if _, ok := libsentry.ProtoIds[protocol][id]; ok { filtered = append(filtered, id) } } diff --git a/erigon-lib/p2p/sentry/loop.go b/erigon-lib/p2p/sentry/loop.go new file mode 100644 index 00000000000..10b6df82a24 --- /dev/null +++ b/erigon-lib/p2p/sentry/loop.go @@ -0,0 +1,159 @@ +package sentry + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/erigontech/erigon-lib/common/dbg" + "github.com/erigontech/erigon-lib/gointerfaces/grpcutil" + 
"github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/log/v3" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" +) + +type ( + MessageStreamFactory func(context.Context, sentryproto.SentryClient) (grpc.ClientStream, error) + StatusDataFactory func(context.Context) (*sentryproto.StatusData, error) + MessageFactory[T any] func() T + MessageHandler[T any] func(context.Context, T, sentryproto.SentryClient) error +) + +func ReconnectAndPumpStreamLoop[TMessage interface{}]( + ctx context.Context, + sentryClient sentryproto.SentryClient, + statusDataFactory StatusDataFactory, + streamName string, + streamFactory MessageStreamFactory, + messageFactory MessageFactory[TMessage], + handleInboundMessage MessageHandler[TMessage], + wg *sync.WaitGroup, + logger log.Logger, +) { + for ctx.Err() == nil { + if _, err := sentryClient.HandShake(ctx, &emptypb.Empty{}, grpc.WaitForReady(true)); err != nil { + if errors.Is(err, context.Canceled) { + continue + } + if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { + time.Sleep(3 * time.Second) + continue + } + logger.Warn("HandShake error, sentry not ready yet", "stream", streamName, "err", err) + time.Sleep(time.Second) + continue + } + + statusData, err := statusDataFactory(ctx) + + if err != nil { + logger.Error("SentryReconnectAndPumpStreamLoop: statusDataFactory error", "stream", streamName, "err", err) + time.Sleep(time.Second) + continue + } + + if _, err := sentryClient.SetStatus(ctx, statusData); err != nil { + if errors.Is(err, context.Canceled) { + continue + } + if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { + time.Sleep(3 * time.Second) + continue + } + logger.Warn("Status error, sentry not ready yet", "stream", streamName, "err", err) + time.Sleep(time.Second) + continue + } + + if err := pumpStreamLoop(ctx, sentryClient, streamName, streamFactory, messageFactory, handleInboundMessage, wg, logger); err != nil { + if 
errors.Is(err, context.Canceled) { + continue + } + if IsPeerNotFoundErr(err) { + continue + } + if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { + time.Sleep(3 * time.Second) + continue + } + logger.Warn("pumpStreamLoop failure", "stream", streamName, "err", err) + continue + } + } +} + +// pumpStreamLoop is normally run in a separate go-routine. +// It only exists until there are no more messages +// to be received (end of process, or interruption, or end of test). +// wg is used only in tests to avoid using waits, which is brittle. For non-test code wg == nil. +func pumpStreamLoop[TMessage interface{}]( + ctx context.Context, + sentry sentryproto.SentryClient, + streamName string, + streamFactory MessageStreamFactory, + messageFactory MessageFactory[TMessage], + handleInboundMessage MessageHandler[TMessage], + wg *sync.WaitGroup, + logger log.Logger, +) (err error) { + defer func() { + if rec := recover(); rec != nil { + err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) + } + }() // avoid crash because Erigon's core does many things + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if disconnectedMarker, ok := sentry.(interface{ MarkDisconnected() }); ok { + defer disconnectedMarker.MarkDisconnected() + } + + // need to read all messages from Sentry as fast as we can, then: + // - can group them or process in batch + // - can have slow processing + reqs := make(chan TMessage, 256) + go func() { + for { + select { + case <-ctx.Done(): + return + case req := <-reqs: + if err := handleInboundMessage(ctx, req, sentry); err != nil { + logger.Debug("Handling incoming message", "stream", streamName, "err", err) + } + if wg != nil { + wg.Done() + } + } + } + }() + + stream, err := streamFactory(ctx, sentry) + if err != nil { + return err + } + + for ctx.Err() == nil { + req := messageFactory() + err := stream.RecvMsg(req) + if err != nil { + return err + } + + select { + case reqs <- req: + case <-ctx.Done(): + } + } + + return ctx.Err() 
+} + +func IsPeerNotFoundErr(err error) bool { + return strings.Contains(err.Error(), "peer not found") +} diff --git a/erigon-lib/p2p/sentry/protocol.go b/erigon-lib/p2p/sentry/protocol.go new file mode 100644 index 00000000000..7e1e5047279 --- /dev/null +++ b/erigon-lib/p2p/sentry/protocol.go @@ -0,0 +1,84 @@ +package sentry + +import ( + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" +) + +func MinProtocol(m sentryproto.MessageId) sentryproto.Protocol { + for p := sentryproto.Protocol_ETH65; p <= sentryproto.Protocol_ETH68; p++ { + if ids, ok := ProtoIds[p]; ok { + if _, ok := ids[m]; ok { + return p + } + } + } + + return -1 +} + +func ProtocolVersion(p sentryproto.Protocol) uint { + return uint(p + 65) +} + +var ProtoIds = map[sentryproto.Protocol]map[sentryproto.MessageId]struct{}{ + sentryproto.Protocol_ETH65: { + sentryproto.MessageId_GET_BLOCK_HEADERS_65: struct{}{}, + sentryproto.MessageId_BLOCK_HEADERS_65: struct{}{}, + sentryproto.MessageId_GET_BLOCK_BODIES_65: struct{}{}, + sentryproto.MessageId_BLOCK_BODIES_65: struct{}{}, + sentryproto.MessageId_GET_NODE_DATA_65: struct{}{}, + sentryproto.MessageId_NODE_DATA_65: struct{}{}, + sentryproto.MessageId_GET_RECEIPTS_65: struct{}{}, + sentryproto.MessageId_RECEIPTS_65: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_HASHES_65: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_65: struct{}{}, + sentryproto.MessageId_TRANSACTIONS_65: struct{}{}, + sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_65: struct{}{}, + sentryproto.MessageId_GET_POOLED_TRANSACTIONS_65: struct{}{}, + sentryproto.MessageId_POOLED_TRANSACTIONS_65: struct{}{}, + }, + sentryproto.Protocol_ETH66: { + sentryproto.MessageId_GET_BLOCK_HEADERS_66: struct{}{}, + sentryproto.MessageId_BLOCK_HEADERS_66: struct{}{}, + sentryproto.MessageId_GET_BLOCK_BODIES_66: struct{}{}, + sentryproto.MessageId_BLOCK_BODIES_66: struct{}{}, + sentryproto.MessageId_GET_NODE_DATA_66: struct{}{}, + sentryproto.MessageId_NODE_DATA_66: struct{}{}, + 
sentryproto.MessageId_GET_RECEIPTS_66: struct{}{}, + sentryproto.MessageId_RECEIPTS_66: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_HASHES_66: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_66: struct{}{}, + sentryproto.MessageId_TRANSACTIONS_66: struct{}{}, + sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: struct{}{}, + sentryproto.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{}, + sentryproto.MessageId_POOLED_TRANSACTIONS_66: struct{}{}, + }, + sentryproto.Protocol_ETH67: { + sentryproto.MessageId_GET_BLOCK_HEADERS_66: struct{}{}, + sentryproto.MessageId_BLOCK_HEADERS_66: struct{}{}, + sentryproto.MessageId_GET_BLOCK_BODIES_66: struct{}{}, + sentryproto.MessageId_BLOCK_BODIES_66: struct{}{}, + sentryproto.MessageId_GET_RECEIPTS_66: struct{}{}, + sentryproto.MessageId_RECEIPTS_66: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_HASHES_66: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_66: struct{}{}, + sentryproto.MessageId_TRANSACTIONS_66: struct{}{}, + sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: struct{}{}, + sentryproto.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{}, + sentryproto.MessageId_POOLED_TRANSACTIONS_66: struct{}{}, + }, + sentryproto.Protocol_ETH68: { + sentryproto.MessageId_GET_BLOCK_HEADERS_66: struct{}{}, + sentryproto.MessageId_BLOCK_HEADERS_66: struct{}{}, + sentryproto.MessageId_GET_BLOCK_BODIES_66: struct{}{}, + sentryproto.MessageId_BLOCK_BODIES_66: struct{}{}, + sentryproto.MessageId_GET_RECEIPTS_66: struct{}{}, + sentryproto.MessageId_RECEIPTS_66: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_HASHES_66: struct{}{}, + sentryproto.MessageId_NEW_BLOCK_66: struct{}{}, + sentryproto.MessageId_TRANSACTIONS_66: struct{}{}, + sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68: struct{}{}, + sentryproto.MessageId_GET_POOLED_TRANSACTIONS_66: struct{}{}, + sentryproto.MessageId_POOLED_TRANSACTIONS_66: struct{}{}, + }, +} diff --git a/erigon-lib/p2p/sentry/sentrymultiplexer.go b/erigon-lib/p2p/sentry/sentrymultiplexer.go new 
file mode 100644 index 00000000000..13bf73c4b4b --- /dev/null +++ b/erigon-lib/p2p/sentry/sentrymultiplexer.go @@ -0,0 +1,751 @@ +package sentry + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "io" + "math/rand" + "sync" + + "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/gointerfaces" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/typesproto" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/emptypb" +) + +var _ sentryproto.SentryClient = (*sentryMultiplexer)(nil) + +type client struct { + sentryproto.SentryClient + protocol sentryproto.Protocol +} + +type sentryMultiplexer struct { + clients []*client +} + +func NewSentryMultiplexer(clients []sentryproto.SentryClient) *sentryMultiplexer { + mux := &sentryMultiplexer{} + mux.clients = make([]*client, len(clients)) + for i, c := range clients { + mux.clients[i] = &client{c, -1} + } + return mux +} + +func (m *sentryMultiplexer) SetStatus(ctx context.Context, in *sentryproto.StatusData, opts ...grpc.CallOption) (*sentryproto.SetStatusReply, error) { + g, gctx := errgroup.WithContext(ctx) + + for _, client := range m.clients { + client := client + + if client.protocol >= 0 { + g.Go(func() error { + _, err := client.SetStatus(gctx, in, opts...) 
+ return err + }) + } + } + + err := g.Wait() + + if err != nil { + return nil, err + } + + return &sentryproto.SetStatusReply{}, nil +} + +func (m *sentryMultiplexer) PenalizePeer(ctx context.Context, in *sentryproto.PenalizePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + g, gctx := errgroup.WithContext(ctx) + + for _, client := range m.clients { + client := client + + g.Go(func() error { + _, err := client.PenalizePeer(gctx, in, opts...) + return err + }) + } + + return &emptypb.Empty{}, g.Wait() +} + +func (m *sentryMultiplexer) PeerMinBlock(ctx context.Context, in *sentryproto.PeerMinBlockRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + g, gctx := errgroup.WithContext(ctx) + + for _, client := range m.clients { + client := client + + g.Go(func() error { + _, err := client.PeerMinBlock(gctx, in, opts...) + return err + }) + } + + return &emptypb.Empty{}, g.Wait() +} + +// Handshake is not performed on the multi-client level +func (m *sentryMultiplexer) HandShake(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.HandShakeReply, error) { + g, gctx := errgroup.WithContext(ctx) + + var protocol sentryproto.Protocol + var mu sync.Mutex + + for _, client := range m.clients { + client := client + + if client.protocol < 0 { + g.Go(func() error { + reply, err := client.HandShake(gctx, &emptypb.Empty{}, grpc.WaitForReady(true)) + + if err != nil { + return err + } + + mu.Lock() + defer mu.Unlock() + + if reply.Protocol > protocol { + protocol = reply.Protocol + } + + client.protocol = protocol + + return nil + }) + } + } + + err := g.Wait() + + if err != nil { + return nil, err + } + + return &sentryproto.HandShakeReply{Protocol: protocol}, nil +} + +func (m *sentryMultiplexer) SendMessageByMinBlock(ctx context.Context, in *sentryproto.SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { + var allSentPeers []*typesproto.H512 + + // run this in series as we need to keep track of 
peers - note + // that there is a possibility that we will generate duplicate + // sends via cliants with duplicate peers - that would require + // a refactor of the entry code + for _, client := range m.clients { + cin := &sentryproto.SendMessageByMinBlockRequest{ + Data: in.Data, + MinBlock: in.MinBlock, + MaxPeers: in.MaxPeers - uint64(len(allSentPeers)), + } + + sentPeers, err := client.SendMessageByMinBlock(ctx, cin, opts...) + + if err != nil { + return nil, err + } + + allSentPeers = append(allSentPeers, sentPeers.GetPeers()...) + + if len(allSentPeers) >= int(in.MaxPeers) { + break + } + } + + return &sentryproto.SentPeers{Peers: allSentPeers}, nil +} + +func AsPeerIdString(peerId *typesproto.H512) string { + peerHash := gointerfaces.ConvertH512ToHash(peerId) + return hex.EncodeToString(peerHash[:]) +} + +func (m *sentryMultiplexer) SendMessageById(ctx context.Context, in *sentryproto.SendMessageByIdRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { + if in.Data == nil { + return nil, fmt.Errorf("no data") + } + + if in.PeerId == nil { + return nil, fmt.Errorf("no peer") + } + + minProtocol := MinProtocol(in.Data.Id) + + if minProtocol < 0 { + return nil, fmt.Errorf("unknown protocol for: %s", in.Data.Id.String()) + } + + peerReplies, err := m.peersByClient(ctx, minProtocol, opts...) + + if err != nil { + return nil, err + } + + clientIndex := -1 + peerId := AsPeerIdString(in.PeerId) + +CLIENTS: + for i, reply := range peerReplies { + for _, peer := range reply.GetPeers() { + if peer.Id == peerId { + clientIndex = i + break CLIENTS + } + } + } + + if clientIndex < 0 { + return nil, fmt.Errorf("peer not found: %s", peerId) + } + + g, gctx := errgroup.WithContext(ctx) + + var allSentPeers []*typesproto.H512 + + g.Go(func() error { + sentPeers, err := m.clients[clientIndex].SendMessageById(gctx, in, opts...) 
+ + if err != nil { + return err + } + + allSentPeers = sentPeers.GetPeers() + + return nil + }) + + if err = g.Wait(); err != nil { + return nil, err + } + + return &sentryproto.SentPeers{Peers: allSentPeers}, nil +} + +func (m *sentryMultiplexer) SendMessageToRandomPeers(ctx context.Context, in *sentryproto.SendMessageToRandomPeersRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { + if in.Data == nil { + return nil, fmt.Errorf("no data") + } + + minProtocol := MinProtocol(in.Data.Id) + + if minProtocol < 0 { + return nil, fmt.Errorf("unknown protocol for: %s", in.Data.Id.String()) + } + + peerReplies, err := m.peersByClient(ctx, minProtocol, opts...) + + if err != nil { + return nil, err + } + + type peer struct { + clientIndex int + peerId *typesproto.H512 + } + + seen := map[string]struct{}{} + var peers []*peer + + for i, reply := range peerReplies { + for _, p := range reply.GetPeers() { + if _, ok := seen[p.Id]; !ok { + peers = append(peers, &peer{ + clientIndex: i, + peerId: gointerfaces.ConvertHashToH512([64]byte(common.Hex2Bytes(p.Id))), + }) + seen[p.Id] = struct{}{} + } + } + } + + g, gctx := errgroup.WithContext(ctx) + + var allSentPeers []*typesproto.H512 + var allSentMutex sync.RWMutex + + rand.Shuffle(len(peers), func(i int, j int) { + peers[i], peers[j] = peers[j], peers[i] + }) + + if in.MaxPeers > 0 { + if in.MaxPeers < uint64(len(peers)) { + peers = peers[0:in.MaxPeers] + } + } + + for _, peer := range peers { + peer := peer + + g.Go(func() error { + sentPeers, err := m.clients[peer.clientIndex].SendMessageById(gctx, &sentryproto.SendMessageByIdRequest{ + PeerId: peer.peerId, + Data: in.Data, + }, opts...) + + if err != nil { + return err + } + + allSentMutex.Lock() + defer allSentMutex.Unlock() + + allSentPeers = append(allSentPeers, sentPeers.GetPeers()...) 
+ + return nil + }) + } + + err = g.Wait() + + if err != nil { + return nil, err + } + + return &sentryproto.SentPeers{Peers: allSentPeers}, nil +} + +func (m *sentryMultiplexer) SendMessageToAll(ctx context.Context, in *sentryproto.OutboundMessageData, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { + minProtocol := MinProtocol(in.Id) + + if minProtocol < 0 { + return nil, fmt.Errorf("unknown protocol for: %s", in.Id.String()) + } + + peerReplies, err := m.peersByClient(ctx, minProtocol, opts...) + + if err != nil { + return nil, err + } + + type peer struct { + clientIndex int + peerId *typesproto.H512 + } + + peers := map[string]peer{} + + for i, reply := range peerReplies { + for _, p := range reply.GetPeers() { + if _, ok := peers[p.Id]; !ok { + peers[p.Id] = peer{ + clientIndex: i, + peerId: gointerfaces.ConvertHashToH512([64]byte(common.Hex2Bytes(p.Id))), + } + } + } + } + + g, gctx := errgroup.WithContext(ctx) + + var allSentPeers []*typesproto.H512 + var allSentMutex sync.RWMutex + + for _, peer := range peers { + peer := peer + + g.Go(func() error { + sentPeers, err := m.clients[peer.clientIndex].SendMessageById(gctx, &sentryproto.SendMessageByIdRequest{ + PeerId: peer.peerId, + Data: &sentryproto.OutboundMessageData{ + Id: in.Id, + Data: in.Data, + }}, opts...) + + if err != nil { + return err + } + + allSentMutex.Lock() + defer allSentMutex.Unlock() + + allSentPeers = append(allSentPeers, sentPeers.GetPeers()...) 
+ + return nil + }) + } + + err = g.Wait() + + if err != nil { + return nil, err + } + + return &sentryproto.SentPeers{Peers: allSentPeers}, nil +} + +type StreamReply[T protoreflect.ProtoMessage] struct { + R T + Err error +} + +// SentryMessagesStreamS implements proto_sentry.Sentry_ReceiveMessagesServer +type SentryStreamS[T protoreflect.ProtoMessage] struct { + Ch chan StreamReply[T] + Ctx context.Context + grpc.ServerStream +} + +func (s *SentryStreamS[T]) Send(m T) error { + s.Ch <- StreamReply[T]{R: m} + return nil +} + +func (s *SentryStreamS[T]) Context() context.Context { return s.Ctx } + +func (s *SentryStreamS[T]) Err(err error) { + if err == nil { + return + } + s.Ch <- StreamReply[T]{Err: err} +} + +func (s *SentryStreamS[T]) Close() { + if s.Ch != nil { + ch := s.Ch + s.Ch = nil + close(ch) + } +} + +type SentryStreamC[T protoreflect.ProtoMessage] struct { + Ch chan StreamReply[T] + Ctx context.Context + grpc.ClientStream +} + +func (c *SentryStreamC[T]) Recv() (T, error) { + m, ok := <-c.Ch + if !ok { + var t T + return t, io.EOF + } + return m.R, m.Err +} + +func (c *SentryStreamC[T]) Context() context.Context { return c.Ctx } + +func (c *SentryStreamC[T]) RecvMsg(anyMessage interface{}) error { + m, err := c.Recv() + if err != nil { + return err + } + outMessage := anyMessage.(T) + proto.Merge(outMessage, m) + return nil +} + +func (m *sentryMultiplexer) Messages(ctx context.Context, in *sentryproto.MessagesRequest, opts ...grpc.CallOption) (sentryproto.Sentry_MessagesClient, error) { + g, gctx := errgroup.WithContext(ctx) + + ch := make(chan StreamReply[*sentryproto.InboundMessage], 16384) + streamServer := &SentryStreamS[*sentryproto.InboundMessage]{Ch: ch, Ctx: ctx} + + go func() { + defer close(ch) + + for _, client := range m.clients { + client := client + + g.Go(func() error { + messages, err := client.Messages(gctx, in, opts...) 
+ + if err != nil { + streamServer.Err(err) + return err + } + + for { + inboundMessage, err := messages.Recv() + + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + streamServer.Err(err) + + select { + case <-gctx.Done(): + return gctx.Err() + default: + } + + return fmt.Errorf("recv: %w", err) + } + + streamServer.Send(inboundMessage) + } + }) + } + + g.Wait() + }() + + return &SentryStreamC[*sentryproto.InboundMessage]{Ch: ch, Ctx: ctx}, nil +} + +func (m *sentryMultiplexer) peersByClient(ctx context.Context, minProtocol sentryproto.Protocol, opts ...grpc.CallOption) ([]*sentryproto.PeersReply, error) { + g, gctx := errgroup.WithContext(ctx) + + var allReplies []*sentryproto.PeersReply = make([]*sentryproto.PeersReply, len(m.clients)) + var allMutex sync.RWMutex + + for i, client := range m.clients { + i := i + client := client + + if client.protocol < minProtocol { + continue + } + + g.Go(func() error { + sentPeers, err := client.Peers(gctx, &emptypb.Empty{}, opts...) + + if err != nil { + return err + } + + allMutex.Lock() + defer allMutex.Unlock() + + allReplies[i] = sentPeers + + return nil + }) + } + + err := g.Wait() + + if err != nil { + return nil, err + } + + return allReplies, nil +} + +func (m *sentryMultiplexer) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.PeersReply, error) { + var allPeers []*typesproto.PeerInfo + + allReplies, err := m.peersByClient(ctx, -1, opts...) + + if err != nil { + return nil, err + } + + for _, sentPeers := range allReplies { + allPeers = append(allPeers, sentPeers.GetPeers()...) 
+ } + + return &sentryproto.PeersReply{Peers: allPeers}, nil +} + +func (m *sentryMultiplexer) PeerCount(ctx context.Context, in *sentryproto.PeerCountRequest, opts ...grpc.CallOption) (*sentryproto.PeerCountReply, error) { + g, gctx := errgroup.WithContext(ctx) + + var allCount uint64 + var allMutex sync.RWMutex + + for _, client := range m.clients { + client := client + + g.Go(func() error { + peerCount, err := client.PeerCount(gctx, in, opts...) + + if err != nil { + return err + } + + allMutex.Lock() + defer allMutex.Unlock() + + allCount += peerCount.GetCount() + + return nil + }) + } + + err := g.Wait() + + if err != nil { + return nil, err + } + + return &sentryproto.PeerCountReply{Count: allCount}, nil +} + +var errFound = fmt.Errorf("found peer") + +func (m *sentryMultiplexer) PeerById(ctx context.Context, in *sentryproto.PeerByIdRequest, opts ...grpc.CallOption) (*sentryproto.PeerByIdReply, error) { + g, gctx := errgroup.WithContext(ctx) + + var peer *typesproto.PeerInfo + var peerMutex sync.RWMutex + + for _, client := range m.clients { + client := client + + g.Go(func() error { + reply, err := client.PeerById(gctx, in, opts...) 
+ + if err != nil { + return err + } + + peerMutex.Lock() + defer peerMutex.Unlock() + + if peer == nil && reply.GetPeer() != nil { + peer = reply.GetPeer() + // return a success error here to have the + // group stop other concurrent requests + return errFound + } + + return nil + }) + } + + err := g.Wait() + + if err != nil && !errors.Is(errFound, err) { + return nil, err + } + + return &sentryproto.PeerByIdReply{Peer: peer}, nil +} + +func (m *sentryMultiplexer) PeerEvents(ctx context.Context, in *sentryproto.PeerEventsRequest, opts ...grpc.CallOption) (sentryproto.Sentry_PeerEventsClient, error) { + g, gctx := errgroup.WithContext(ctx) + + ch := make(chan StreamReply[*sentryproto.PeerEvent], 16384) + streamServer := &SentryStreamS[*sentryproto.PeerEvent]{Ch: ch, Ctx: ctx} + + go func() { + defer close(ch) + + for _, client := range m.clients { + client := client + + g.Go(func() error { + messages, err := client.PeerEvents(gctx, in, opts...) + + if err != nil { + streamServer.Err(err) + return err + } + + for { + inboundMessage, err := messages.Recv() + + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + streamServer.Err(err) + + select { + case <-gctx.Done(): + return gctx.Err() + default: + } + + return fmt.Errorf("recv: %w", err) + } + + streamServer.Send(inboundMessage) + } + }) + } + + g.Wait() + }() + + return &SentryStreamC[*sentryproto.PeerEvent]{Ch: ch, Ctx: ctx}, nil +} + +func (m *sentryMultiplexer) AddPeer(ctx context.Context, in *sentryproto.AddPeerRequest, opts ...grpc.CallOption) (*sentryproto.AddPeerReply, error) { + g, gctx := errgroup.WithContext(ctx) + + var success bool + var successMutex sync.RWMutex + + for _, client := range m.clients { + client := client + + g.Go(func() error { + result, err := client.AddPeer(gctx, in, opts...) 
+ + if err != nil { + return err + } + + successMutex.Lock() + defer successMutex.Unlock() + + // if any client returns success return success + if !success && result.GetSuccess() { + success = true + } + + return nil + }) + } + + err := g.Wait() + + if err != nil { + return nil, err + } + + return &sentryproto.AddPeerReply{Success: success}, nil +} + +func (m *sentryMultiplexer) NodeInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*typesproto.NodeInfoReply, error) { + return nil, status.Errorf(codes.Unimplemented, `method "NodeInfo" not implemented: use "NodeInfos" instead`) +} + +func (m *sentryMultiplexer) NodeInfos(ctx context.Context, opts ...grpc.CallOption) ([]*typesproto.NodeInfoReply, error) { + g, gctx := errgroup.WithContext(ctx) + + var allInfos []*typesproto.NodeInfoReply + var allMutex sync.RWMutex + + for _, client := range m.clients { + client := client + + g.Go(func() error { + info, err := client.NodeInfo(gctx, &emptypb.Empty{}, opts...) + + if err != nil { + return err + } + + allMutex.Lock() + defer allMutex.Unlock() + + allInfos = append(allInfos, info) + + return nil + }) + } + + err := g.Wait() + + if err != nil { + return nil, err + } + + return allInfos, nil +} diff --git a/erigon-lib/p2p/sentry/util.go b/erigon-lib/p2p/sentry/util.go new file mode 100644 index 00000000000..1f3fa8f939b --- /dev/null +++ b/erigon-lib/p2p/sentry/util.go @@ -0,0 +1,62 @@ +package sentry + +import ( + "context" + "strconv" + "strings" + + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/typesproto" + "google.golang.org/protobuf/types/known/emptypb" +) + +func PeerProtocols(sentry sentryproto.SentryClient, peer *typesproto.H512) []byte { + if reply, err := sentry.PeerById(context.Background(), &sentryproto.PeerByIdRequest{PeerId: peer}); err == nil { + info := reply.GetPeer() + var protocols []byte + + if info != nil { + for _, cap := range info.Caps { + parts := 
strings.Split(cap, "/") + if len(parts) > 1 && strings.EqualFold(parts[0], "ETH") { + if version, err := strconv.Atoi(parts[1]); err == nil { + protocols = append(protocols, byte(version)) + } + } + } + + return protocols + } + } + + return nil +} + +func Protocols(sentry sentryproto.SentryClient) []byte { + switch sentry := sentry.(type) { + case interface{ Protocol() uint }: + return []byte{byte(sentry.Protocol())} + default: + if infos, err := sentry.Peers(context.Background(), &emptypb.Empty{}); err == nil { + var protocols []byte + var seen map[byte]struct{} = map[byte]struct{}{} + for _, info := range infos.GetPeers() { + for _, cap := range info.Caps { + parts := strings.Split(cap, "/") + if len(parts) > 1 && strings.EqualFold(parts[0], "ETH") { + if version, err := strconv.Atoi(parts[1]); err == nil { + p := byte(version) + if _, ok := seen[p]; !ok { + protocols = append(protocols, p) + seen[p] = struct{}{} + } + } + } + } + } + + return protocols + } + } + return nil +} diff --git a/erigon-lib/p2p/sentry/util_test.go b/erigon-lib/p2p/sentry/util_test.go new file mode 100644 index 00000000000..7a6062a59ee --- /dev/null +++ b/erigon-lib/p2p/sentry/util_test.go @@ -0,0 +1,99 @@ +package sentry_test + +import ( + "context" + "testing" + + "github.com/erigontech/erigon-lib/direct" + "github.com/erigontech/erigon-lib/gointerfaces" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/typesproto" + sentry "github.com/erigontech/erigon-lib/p2p/sentry" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" +) + +func newClient(ctrl *gomock.Controller, peerId *typesproto.H512, caps []string) *direct.MockSentryClient { + client := direct.NewMockSentryClient(ctrl) + client.EXPECT().PeerById(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(&sentryproto.PeerByIdReply{ + Peer: &typesproto.PeerInfo{ + Id: peerId.String(), + Caps: caps, + }, + }, nil).AnyTimes() + + client.EXPECT().Peers(gomock.Any(), gomock.Any(), gomock.Any()). + Return(&sentryproto.PeersReply{ + Peers: []*typesproto.PeerInfo{{ + Id: peerId.String(), + Caps: caps, + }}, + }, nil).AnyTimes() + + return client +} + +type sentryClient struct { + sentryproto.SentryClient + mock *direct.MockSentryClient +} + +func (c *sentryClient) Peers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.PeersReply, error) { + return c.mock.Peers(ctx, in, opts...) +} + +func TestProtocols(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + direct := newClient(ctrl, gointerfaces.ConvertHashToH512([64]byte{0}), []string{"eth/67"}) + direct.EXPECT().Protocol().Return(67) + + p := sentry.Protocols(direct) + + require.Len(t, p, 1) + require.Equal(t, byte(67), p[0]) + + base := &sentryClient{ + mock: newClient(ctrl, gointerfaces.ConvertHashToH512([64]byte{1}), []string{"eth/68"}), + } + + p = sentry.Protocols(base) + + require.Len(t, p, 1) + require.Equal(t, byte(68), p[0]) + + mux := sentry.NewSentryMultiplexer([]sentryproto.SentryClient{direct, base}) + require.NotNil(t, mux) + + p = sentry.Protocols(mux) + + require.Len(t, p, 2) + require.Contains(t, p, byte(67)) + require.Contains(t, p, byte(68)) +} + +func TestProtocolsByPeerId(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + peerId := gointerfaces.ConvertHashToH512([64]byte{}) + + direct := newClient(ctrl, peerId, []string{"eth/67"}) + + p := sentry.PeerProtocols(direct, peerId) + + require.Len(t, p, 1) + require.Equal(t, byte(67), p[0]) + + mux := sentry.NewSentryMultiplexer([]sentryproto.SentryClient{direct}) + require.NotNil(t, mux) + + p = sentry.PeerProtocols(mux, peerId) + + require.Len(t, p, 1) + require.Equal(t, byte(67), p[0]) +} diff --git a/erigon-lib/txpool/fetch.go b/erigon-lib/txpool/fetch.go index 
d272f0975ec..f7879e5d8a9 100644 --- a/erigon-lib/txpool/fetch.go +++ b/erigon-lib/txpool/fetch.go @@ -28,7 +28,6 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/erigontech/erigon-lib/common/dbg" - "github.com/erigontech/erigon-lib/direct" "github.com/erigontech/erigon-lib/gointerfaces/grpcutil" remote "github.com/erigontech/erigon-lib/gointerfaces/remoteproto" sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" @@ -51,7 +50,7 @@ type Fetch struct { wg *sync.WaitGroup // used for synchronisation in the tests (nil when not in tests) stateChangesParseCtx *types2.TxParseContext pooledTxsParseCtx *types2.TxParseContext - sentryClients []direct.SentryClient // sentry clients that will be used for accessing the network + sentryClients []sentry.SentryClient // sentry clients that will be used for accessing the network stateChangesParseCtxLock sync.Mutex pooledTxsParseCtxLock sync.Mutex logger log.Logger @@ -64,7 +63,7 @@ type StateChangesClient interface { // NewFetch creates a new fetch object that will work with given sentry clients. Since the // SentryClient here is an interface, it is suitable for mocking in tests (mock will need // to implement all the functions of the SentryClient interface). 
-func NewFetch(ctx context.Context, sentryClients []direct.SentryClient, pool Pool, stateChangesClient StateChangesClient, coreDB kv.RoDB, db kv.RwDB, +func NewFetch(ctx context.Context, sentryClients []sentry.SentryClient, pool Pool, stateChangesClient StateChangesClient, coreDB kv.RoDB, db kv.RwDB, chainID uint256.Int, logger log.Logger) *Fetch { f := &Fetch{ ctx: ctx, diff --git a/erigon-lib/txpool/fetch_test.go b/erigon-lib/txpool/fetch_test.go index b2c90b97627..a33308e56ef 100644 --- a/erigon-lib/txpool/fetch_test.go +++ b/erigon-lib/txpool/fetch_test.go @@ -33,7 +33,8 @@ import ( "github.com/erigontech/erigon-lib/direct" "github.com/erigontech/erigon-lib/gointerfaces" remote "github.com/erigontech/erigon-lib/gointerfaces/remoteproto" - sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/typesproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/memdb" "github.com/erigontech/erigon-lib/log/v3" @@ -46,13 +47,13 @@ func TestFetch(t *testing.T) { ctrl := gomock.NewController(t) remoteKvClient := remote.NewMockKVClient(ctrl) - sentryServer := sentry.NewMockSentryServer(ctrl) + sentryServer := sentryproto.NewMockSentryServer(ctrl) pool := NewMockPool(ctrl) pool.EXPECT().Started().Return(true) m := NewMockSentry(ctx, sentryServer) sentryClient := direct.NewSentryClientDirect(direct.ETH66, m) - fetch := NewFetch(ctx, []direct.SentryClient{sentryClient}, pool, remoteKvClient, nil, nil, *u256.N1, log.New()) + fetch := NewFetch(ctx, []sentryproto.SentryClient{sentryClient}, pool, remoteKvClient, nil, nil, *u256.N1, log.New()) var wg sync.WaitGroup fetch.SetWaitGroup(&wg) m.StreamWg.Add(2) @@ -60,8 +61,8 @@ func TestFetch(t *testing.T) { m.StreamWg.Wait() // Send one transaction id wg.Add(1) - errs := m.Send(&sentry.InboundMessage{ - Id: 
sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + errs := m.Send(&sentryproto.InboundMessage{ + Id: sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, Data: decodeHex("e1a0595e27a835cd79729ff1eeacec3120eeb6ed1464a04ec727aaca734ead961328"), PeerId: peerID, }) @@ -78,50 +79,60 @@ func TestSendTxPropagate(t *testing.T) { defer cancelFn() t.Run("few remote byHash", func(t *testing.T) { ctrl := gomock.NewController(t) - sentryServer := sentry.NewMockSentryServer(ctrl) + sentryServer := sentryproto.NewMockSentryServer(ctrl) times := 2 - requests := make([]*sentry.SendMessageToRandomPeersRequest, 0, times) + requests := make([]*sentryproto.SendMessageToRandomPeersRequest, 0, times) sentryServer.EXPECT(). SendMessageToRandomPeers(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + DoAndReturn(func(_ context.Context, r *sentryproto.SendMessageToRandomPeersRequest) (*sentryproto.SentPeers, error) { requests = append(requests, r) return nil, nil }). Times(times) + sentryServer.EXPECT().PeerById(gomock.Any(), gomock.Any()). 
+ DoAndReturn( + func(_ context.Context, r *sentryproto.PeerByIdRequest) (*sentryproto.PeerByIdReply, error) { + return &sentryproto.PeerByIdReply{ + Peer: &typesproto.PeerInfo{ + Id: r.PeerId.String(), + Caps: []string{"eth/68"}, + }}, nil + }).AnyTimes() + m := NewMockSentry(ctx, sentryServer) - send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) + send := NewSend(ctx, []sentryproto.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) send.BroadcastPooledTxs(testRlps(2), 100) send.AnnouncePooledTxs([]byte{0, 1}, []uint32{10, 15}, toHashes(1, 42), 100) require.Equal(t, 2, len(requests)) txsMessage := requests[0].Data - assert.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + assert.Equal(t, sentryproto.MessageId_TRANSACTIONS_66, txsMessage.Id) assert.Equal(t, 3, len(txsMessage.Data)) txnHashesMessage := requests[1].Data - assert.Equal(t, sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, txnHashesMessage.Id) + assert.Equal(t, sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, txnHashesMessage.Id) assert.Equal(t, 76, len(txnHashesMessage.Data)) }) t.Run("much remote byHash", func(t *testing.T) { ctrl := gomock.NewController(t) - sentryServer := sentry.NewMockSentryServer(ctrl) + sentryServer := sentryproto.NewMockSentryServer(ctrl) times := 2 - requests := make([]*sentry.SendMessageToRandomPeersRequest, 0, times) + requests := make([]*sentryproto.SendMessageToRandomPeersRequest, 0, times) sentryServer.EXPECT(). SendMessageToRandomPeers(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + DoAndReturn(func(_ context.Context, r *sentryproto.SendMessageToRandomPeersRequest) (*sentryproto.SentPeers, error) { requests = append(requests, r) return nil, nil }). 
Times(times) m := NewMockSentry(ctx, sentryServer) - send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) + send := NewSend(ctx, []sentryproto.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) list := make(erigonlibtypes.Hashes, p2pTxPacketLimit*3) for i := 0; i < len(list); i += 32 { b := []byte(fmt.Sprintf("%x", i)) @@ -133,67 +144,77 @@ func TestSendTxPropagate(t *testing.T) { require.Equal(t, 2, len(requests)) txsMessage := requests[0].Data - require.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + require.Equal(t, sentryproto.MessageId_TRANSACTIONS_66, txsMessage.Id) require.True(t, len(txsMessage.Data) > 0) txnHashesMessage := requests[1].Data - require.Equal(t, sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, txnHashesMessage.Id) + require.Equal(t, sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, txnHashesMessage.Id) require.True(t, len(txnHashesMessage.Data) > 0) }) t.Run("few local byHash", func(t *testing.T) { ctrl := gomock.NewController(t) - sentryServer := sentry.NewMockSentryServer(ctrl) + sentryServer := sentryproto.NewMockSentryServer(ctrl) times := 2 - requests := make([]*sentry.SendMessageToRandomPeersRequest, 0, times) + requests := make([]*sentryproto.SendMessageToRandomPeersRequest, 0, times) sentryServer.EXPECT(). SendMessageToRandomPeers(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + DoAndReturn(func(_ context.Context, r *sentryproto.SendMessageToRandomPeersRequest) (*sentryproto.SentPeers, error) { requests = append(requests, r) return nil, nil }). 
Times(times) m := NewMockSentry(ctx, sentryServer) - send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) + send := NewSend(ctx, []sentryproto.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) send.BroadcastPooledTxs(testRlps(2), 100) send.AnnouncePooledTxs([]byte{0, 1}, []uint32{10, 15}, toHashes(1, 42), 100) require.Equal(t, 2, len(requests)) txsMessage := requests[0].Data - assert.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + assert.Equal(t, sentryproto.MessageId_TRANSACTIONS_66, txsMessage.Id) assert.True(t, len(txsMessage.Data) > 0) txnHashesMessage := requests[1].Data - assert.Equal(t, sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, txnHashesMessage.Id) + assert.Equal(t, sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, txnHashesMessage.Id) assert.Equal(t, 76, len(txnHashesMessage.Data)) }) t.Run("sync with new peer", func(t *testing.T) { ctrl := gomock.NewController(t) - sentryServer := sentry.NewMockSentryServer(ctrl) + sentryServer := sentryproto.NewMockSentryServer(ctrl) times := 3 - requests := make([]*sentry.SendMessageByIdRequest, 0, times) + requests := make([]*sentryproto.SendMessageByIdRequest, 0, times) sentryServer.EXPECT(). SendMessageById(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, r *sentry.SendMessageByIdRequest) (*sentry.SentPeers, error) { + DoAndReturn(func(_ context.Context, r *sentryproto.SendMessageByIdRequest) (*sentryproto.SentPeers, error) { requests = append(requests, r) return nil, nil }). Times(times) + sentryServer.EXPECT().PeerById(gomock.Any(), gomock.Any()). 
+ DoAndReturn( + func(_ context.Context, r *sentryproto.PeerByIdRequest) (*sentryproto.PeerByIdReply, error) { + return &sentryproto.PeerByIdReply{ + Peer: &typesproto.PeerInfo{ + Id: r.PeerId.String(), + Caps: []string{"eth/68"}, + }}, nil + }).AnyTimes() + m := NewMockSentry(ctx, sentryServer) - send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) + send := NewSend(ctx, []sentryproto.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil, log.New()) expectPeers := toPeerIDs(1, 2, 42) send.PropagatePooledTxsToPeersList(expectPeers, []byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) require.Equal(t, 3, len(requests)) for i, req := range requests { assert.Equal(t, expectPeers[i], erigonlibtypes.PeerID(req.PeerId)) - assert.Equal(t, sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, req.Data.Id) + assert.Equal(t, sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, req.Data.Id) assert.True(t, len(req.Data.Data) > 0) } }) diff --git a/erigon-lib/txpool/send.go b/erigon-lib/txpool/send.go index 61ef47fca09..ab2d1bc2c1b 100644 --- a/erigon-lib/txpool/send.go +++ b/erigon-lib/txpool/send.go @@ -19,33 +19,30 @@ package txpool import ( "context" "fmt" + "math/rand" + "slices" "sync" "google.golang.org/grpc" - "github.com/erigontech/erigon-lib/direct" - sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon-lib/p2p/sentry" "github.com/erigontech/erigon-lib/rlp" types2 "github.com/erigontech/erigon-lib/types" ) -type SentryClient interface { - sentry.SentryClient - Protocol() uint -} - // Send - does send concrete P2P messages to Sentry. 
Same as Fetch but for outbound traffic // does not initiate any messages by self type Send struct { ctx context.Context pool Pool wg *sync.WaitGroup - sentryClients []direct.SentryClient // sentry clients that will be used for accessing the network + sentryClients []sentryproto.SentryClient // sentry clients that will be used for accessing the network logger log.Logger } -func NewSend(ctx context.Context, sentryClients []direct.SentryClient, pool Pool, logger log.Logger) *Send { +func NewSend(ctx context.Context, sentryClients []sentryproto.SentryClient, pool Pool, logger log.Logger) *Send { return &Send{ ctx: ctx, pool: pool, @@ -84,15 +81,15 @@ func (f *Send) BroadcastPooledTxs(rlps [][]byte, maxPeers uint64) (txSentTo []in // send them all at once. Then wait till end of array or this threshold hits again if i == l-1 || size >= p2pTxPacketLimit { txsData := types2.EncodeTransactions(rlps[prev:i+1], nil) - var txs66 *sentry.SendMessageToRandomPeersRequest + var txs66 *sentryproto.SendMessageToRandomPeersRequest for _, sentryClient := range f.sentryClients { - if !sentryClient.Ready() { + if ready, ok := sentryClient.(interface{ Ready() bool }); ok && !ready.Ready() { continue } if txs66 == nil { - txs66 = &sentry.SendMessageToRandomPeersRequest{ - Data: &sentry.OutboundMessageData{ - Id: sentry.MessageId_TRANSACTIONS_66, + txs66 = &sentryproto.SendMessageToRandomPeersRequest{ + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_TRANSACTIONS_66, Data: txsData, }, MaxPeers: maxPeers, @@ -144,15 +141,28 @@ func (f *Send) AnnouncePooledTxs(types []byte, sizes []uint32, hashes types2.Has panic(fmt.Sprintf("Serialised announcements encoding len mismatch, expected %d, got %d", jSize, s)) } for _, sentryClient := range f.sentryClients { - if !sentryClient.Ready() { + if ready, ok := sentryClient.(interface{ Ready() bool }); ok && !ready.Ready() { + continue + } + + protocols := sentry.Protocols(sentryClient) + + if len(protocols) == 0 { continue } - 
switch sentryClient.Protocol() { - case direct.ETH66, direct.ETH67: + + var protocolIndex int + + if len(protocols) > 1 { + protocolIndex = rand.Intn(len(protocols) - 1) + } + + switch protocols[protocolIndex] { + case 66, 67: if i > prevI { - req := &sentry.SendMessageToRandomPeersRequest{ - Data: &sentry.OutboundMessageData{ - Id: sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + req := &sentryproto.SendMessageToRandomPeersRequest{ + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, Data: iData, }, MaxPeers: maxPeers, @@ -167,12 +177,11 @@ func (f *Send) AnnouncePooledTxs(types []byte, sizes []uint32, hashes types2.Has } } } - case direct.ETH68: - + case 68: if j > prevJ { - req := &sentry.SendMessageToRandomPeersRequest{ - Data: &sentry.OutboundMessageData{ - Id: sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, + req := &sentryproto.SendMessageToRandomPeersRequest{ + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, Data: jData, }, MaxPeers: maxPeers, @@ -227,40 +236,43 @@ func (f *Send) PropagatePooledTxsToPeersList(peers []types2.PeerID, types []byte } for _, sentryClient := range f.sentryClients { - if !sentryClient.Ready() { + if ready, ok := sentryClient.(interface{ Ready() bool }); ok && !ready.Ready() { continue } for _, peer := range peers { - switch sentryClient.Protocol() { - case direct.ETH66, direct.ETH67: - if i > prevI { - req := &sentry.SendMessageByIdRequest{ - PeerId: peer, - Data: &sentry.OutboundMessageData{ - Id: sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, - Data: iData, - }, - } - if _, err := sentryClient.SendMessageById(f.ctx, req, &grpc.EmptyCallOption{}); err != nil { - f.logger.Debug("[txpool.send] PropagatePooledTxsToPeersList", "err", err) + protocols := sentry.PeerProtocols(sentryClient, peer) + if len(protocols) > 0 { + switch slices.Max(protocols) { + case 66, 67: + if i > prevI { + req := 
&sentryproto.SendMessageByIdRequest{ + PeerId: peer, + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + Data: iData, + }, + } + if _, err := sentryClient.SendMessageById(f.ctx, req, &grpc.EmptyCallOption{}); err != nil { + f.logger.Debug("[txpool.send] PropagatePooledTxsToPeersList", "err", err) + } } - } - case direct.ETH68: + case 68: - if j > prevJ { - req := &sentry.SendMessageByIdRequest{ - PeerId: peer, - Data: &sentry.OutboundMessageData{ - Id: sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, - Data: jData, - }, - } - if _, err := sentryClient.SendMessageById(f.ctx, req, &grpc.EmptyCallOption{}); err != nil { - f.logger.Debug("[txpool.send] PropagatePooledTxsToPeersList68", "err", err) + if j > prevJ { + req := &sentryproto.SendMessageByIdRequest{ + PeerId: peer, + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, + Data: jData, + }, + } + if _, err := sentryClient.SendMessageById(f.ctx, req, &grpc.EmptyCallOption{}); err != nil { + f.logger.Debug("[txpool.send] PropagatePooledTxsToPeersList68", "err", err) + } } - } + } } } } diff --git a/erigon-lib/txpool/txpoolutil/all_components.go b/erigon-lib/txpool/txpoolutil/all_components.go index 22bfddb43a8..8df37ad7af7 100644 --- a/erigon-lib/txpool/txpoolutil/all_components.go +++ b/erigon-lib/txpool/txpoolutil/all_components.go @@ -26,7 +26,7 @@ import ( "github.com/holiman/uint256" "github.com/erigontech/erigon-lib/chain" - "github.com/erigontech/erigon-lib/direct" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/kvcache" "github.com/erigontech/erigon-lib/kv/mdbx" @@ -101,7 +101,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB } func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB 
kv.RoDB, - sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient, feeCalculator txpool.FeeCalculator, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { + sentryClients []sentryproto.SentryClient, stateChangesClient txpool.StateChangesClient, feeCalculator txpool.FeeCalculator, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) { opts := mdbx.NewMDBX(logger).Label(kv.TxPoolDB).Path(cfg.DBDir). WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }). WriteMergeThreshold(3 * 8192). diff --git a/eth/backend.go b/eth/backend.go index 199e825baf8..e52358bff4f 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -75,6 +75,7 @@ import ( "github.com/erigontech/erigon-lib/kv/remotedbserver" "github.com/erigontech/erigon-lib/kv/temporal" "github.com/erigontech/erigon-lib/log/v3" + libsentry "github.com/erigontech/erigon-lib/p2p/sentry" libstate "github.com/erigontech/erigon-lib/state" "github.com/erigontech/erigon-lib/txpool" "github.com/erigontech/erigon-lib/txpool/txpoolcfg" @@ -375,7 +376,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } p2pConfig := stack.Config().P2P - var sentries []direct.SentryClient + var sentries []protosentry.SentryClient if len(p2pConfig.SentryAddr) > 0 { for _, addr := range p2pConfig.SentryAddr { sentryClient, err := sentry_multi_client.GrpcClient(backend.sentryCtx, addr) @@ -675,7 +676,8 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger config.Sync, stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPoolDB, nil, tmpdir, backend.blockReader), - stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, 
signatures, false, nil), stagedsync.StageExecuteBlocksCfg( + stagedsync.StageBorHeimdallCfg(backend.chainDB, snapDb, miner, *backend.chainConfig, heimdallClient, backend.blockReader, nil, nil, recents, signatures, false, nil), + stagedsync.StageExecuteBlocksCfg( backend.chainDB, config.Prune, config.BatchSize, @@ -1736,22 +1738,6 @@ func setBorDefaultTxPoolPriceLimit(chainConfig *chain.Config, config txpoolcfg.C } } -func polygonSyncSentry(sentries []direct.SentryClient) direct.SentryClient { - // TODO - pending sentry multi client refactor - // - sentry multi client should conform to the SentryClient interface and internally - // multiplex - // - for now we just use 1 sentry - var sentryClient direct.SentryClient - for _, client := range sentries { - if client.Protocol() == direct.ETH68 { - sentryClient = client - break - } - } - - if sentryClient == nil { - panic("nil sentryClient for polygon sync") - } - - return sentryClient +func polygonSyncSentry(sentries []protosentry.SentryClient) protosentry.SentryClient { + return libsentry.NewSentryMultiplexer(sentries) } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index bd6fdb3dc6d..b29aa8e074d 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -35,7 +35,6 @@ import ( "github.com/erigontech/erigon-lib/diagnostics" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/state" - "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/core/rawdb" "github.com/erigontech/erigon/core/rawdb/blockio" "github.com/erigontech/erigon/core/types" @@ -362,7 +361,7 @@ Loop: }) logger.Info(fmt.Sprintf("[%s] Processed", logPrefix), - "highest", headerInserter.GetHighest(), "age", common.PrettyAge(time.Unix(int64(headerInserter.GetHighestTimestamp()), 0)), + "highest", headerInserter.GetHighest(), "age", libcommon.PrettyAge(time.Unix(int64(headerInserter.GetHighestTimestamp()), 0)), "headers", headers, "in", 
secs, "blk/sec", uint64(float64(headers)/secs)) } diff --git a/eth/stagedsync/stage_polygon_sync.go b/eth/stagedsync/stage_polygon_sync.go index 9c97c9f07ef..9364970c3a3 100644 --- a/eth/stagedsync/stage_polygon_sync.go +++ b/eth/stagedsync/stage_polygon_sync.go @@ -32,7 +32,7 @@ import ( "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/generics" "github.com/erigontech/erigon-lib/common/metrics" - "github.com/erigontech/erigon-lib/direct" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/dbutils" "github.com/erigontech/erigon-lib/log/v3" @@ -58,7 +58,7 @@ func NewPolygonSyncStageCfg( chainConfig *chain.Config, db kv.RwDB, heimdallClient heimdall.HeimdallClient, - sentry direct.SentryClient, + sentry sentryproto.SentryClient, maxPeers int, statusDataProvider *sentry.StatusDataProvider, blockReader services.FullBlockReader, @@ -1336,7 +1336,7 @@ func (e *polygonSyncStageExecutionEngine) updateForkChoice(tx kv.RwTx, tip *type tipBlockNum := tip.Number.Uint64() tipHash := tip.Hash() - e.logger.Info(e.appendLogPrefix("update fork choice"), "block", tipBlockNum, "hash", tipHash) + e.logger.Info(e.appendLogPrefix("update fork choice"), "block", tipBlockNum, "age", common.PrettyAge(time.Unix(int64(tip.Time), 0)), "hash", tipHash) logPrefix := e.stageState.LogPrefix() logTicker := time.NewTicker(logInterval) diff --git a/p2p/peer.go b/p2p/peer.go index 68a27573c0e..ec59c235162 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -20,6 +20,7 @@ package p2p import ( + "encoding/hex" "errors" "fmt" "io" @@ -551,7 +552,7 @@ func (p *Peer) Info() *PeerInfo { // Assemble the generic peer metadata info := &PeerInfo{ Enode: p.Node().URLv4(), - ID: p.ID().String(), + ID: hex.EncodeToString(p.pubkey[:]), Name: p.Fullname(), Caps: caps, Protocols: make(map[string]interface{}), diff --git a/p2p/sentry/sentry_multi_client/broadcast.go 
b/p2p/sentry/sentry_multi_client/broadcast.go index 702f8d2f84f..3ae450af624 100644 --- a/p2p/sentry/sentry_multi_client/broadcast.go +++ b/p2p/sentry/sentry_multi_client/broadcast.go @@ -55,7 +55,7 @@ func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces [] } for _, sentry := range cs.sentries { - if !sentry.Ready() { + if ready, ok := sentry.(interface{ Ready() bool }); ok && !ready.Ready() { continue } @@ -92,7 +92,7 @@ func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, header *types.Head } for _, sentry := range cs.sentries { - if !sentry.Ready() { + if ready, ok := sentry.(interface{ Ready() bool }); ok && !ready.Ready() { continue } diff --git a/p2p/sentry/sentry_multi_client/sentry_api.go b/p2p/sentry/sentry_multi_client/sentry_api.go index 0213d9834d6..2e06d669007 100644 --- a/p2p/sentry/sentry_multi_client/sentry_api.go +++ b/p2p/sentry/sentry_multi_client/sentry_api.go @@ -43,7 +43,7 @@ func (cs *MultiClient) SetStatus(ctx context.Context) { } for _, sentry := range cs.sentries { - if !sentry.Ready() { + if ready, ok := sentry.(interface{ Ready() bool }); ok && !ready.Ready() { continue } @@ -56,7 +56,7 @@ func (cs *MultiClient) SetStatus(ctx context.Context) { func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.BodyRequest) (peerID [64]byte, ok bool) { // if sentry not found peers to send such message, try next one. stop if found. for i, ok, next := cs.randSentryIndex(); ok; i, ok = next() { - if !cs.sentries[i].Ready() { + if ready, ok := cs.sentries[i].(interface{ Ready() bool }); ok && !ready.Ready() { continue } @@ -96,7 +96,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownload.HeaderRequest) (peerID [64]byte, ok bool) { // if sentry not found peers to send such message, try next one. stop if found. 
for i, ok, next := cs.randSentryIndex(); ok; i, ok = next() { - if !cs.sentries[i].Ready() { + if ready, ok := cs.sentries[i].(interface{ Ready() bool }); ok && !ready.Ready() { continue } //log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", req.Hash, req.Number, req.Length)) @@ -160,7 +160,7 @@ func (cs *MultiClient) Penalize(ctx context.Context, penalties []headerdownload. Penalty: proto_sentry.PenaltyKind_Kick, // TODO: Extend penalty kinds } for i, ok, next := cs.randSentryIndex(); ok; i, ok = next() { - if !cs.sentries[i].Ready() { + if ready, ok := cs.sentries[i].(interface{ Ready() bool }); ok && !ready.Ready() { continue } diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go index 94e5827fb79..a57782bb9f7 100644 --- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go +++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "encoding/hex" - "errors" "fmt" "math/rand" "sort" @@ -33,16 +32,15 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "google.golang.org/protobuf/types/known/emptypb" "github.com/erigontech/erigon-lib/chain" "github.com/erigontech/erigon-lib/common/dbg" "github.com/erigontech/erigon-lib/direct" - "github.com/erigontech/erigon-lib/gointerfaces/grpcutil" proto_sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" proto_types "github.com/erigontech/erigon-lib/gointerfaces/typesproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" + libsentry "github.com/erigontech/erigon-lib/p2p/sentry" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/core/types" @@ -56,14 +54,6 @@ import ( "github.com/erigontech/erigon/turbo/stages/headerdownload" ) -type ( - SentryMessageStream grpc.ClientStream - 
SentryMessageStreamFactory func(context.Context, direct.SentryClient) (SentryMessageStream, error) - StatusDataFactory func(context.Context) (*proto_sentry.StatusData, error) - MessageFactory[T any] func() T - MessageHandler[T any] func(context.Context, T, direct.SentryClient) error -) - // StartStreamLoops starts message processing loops for all sentries. // The processing happens in several streams: // RecvMessage - processing incoming headers/bodies @@ -83,38 +73,38 @@ func (cs *MultiClient) StartStreamLoops(ctx context.Context) { func (cs *MultiClient) RecvUploadMessageLoop( ctx context.Context, - sentry direct.SentryClient, + sentry proto_sentry.SentryClient, wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ eth.ToProto[direct.ETH66][eth.GetBlockBodiesMsg], eth.ToProto[direct.ETH66][eth.GetReceiptsMsg], } - streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (SentryMessageStream, error) { + streamFactory := func(streamCtx context.Context, sentry proto_sentry.SentryClient) (grpc.ClientStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) } - SentryReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "RecvUploadMessage", streamFactory, MakeInboundMessage, cs.HandleInboundMessage, wg, cs.logger) + libsentry.ReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "RecvUploadMessage", streamFactory, MakeInboundMessage, cs.HandleInboundMessage, wg, cs.logger) } func (cs *MultiClient) RecvUploadHeadersMessageLoop( ctx context.Context, - sentry direct.SentryClient, + sentry proto_sentry.SentryClient, wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ eth.ToProto[direct.ETH66][eth.GetBlockHeadersMsg], } - streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (SentryMessageStream, error) { + streamFactory := func(streamCtx context.Context, sentry proto_sentry.SentryClient) (grpc.ClientStream, error) { return sentry.Messages(streamCtx, 
&proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) } - SentryReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "RecvUploadHeadersMessage", streamFactory, MakeInboundMessage, cs.HandleInboundMessage, wg, cs.logger) + libsentry.ReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "RecvUploadHeadersMessage", streamFactory, MakeInboundMessage, cs.HandleInboundMessage, wg, cs.logger) } func (cs *MultiClient) RecvMessageLoop( ctx context.Context, - sentry direct.SentryClient, + sentry proto_sentry.SentryClient, wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ @@ -123,154 +113,26 @@ func (cs *MultiClient) RecvMessageLoop( eth.ToProto[direct.ETH66][eth.NewBlockHashesMsg], eth.ToProto[direct.ETH66][eth.NewBlockMsg], } - streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (SentryMessageStream, error) { + streamFactory := func(streamCtx context.Context, sentry proto_sentry.SentryClient) (grpc.ClientStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) } - SentryReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "RecvMessage", streamFactory, MakeInboundMessage, cs.HandleInboundMessage, wg, cs.logger) + libsentry.ReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "RecvMessage", streamFactory, MakeInboundMessage, cs.HandleInboundMessage, wg, cs.logger) } func (cs *MultiClient) PeerEventsLoop( ctx context.Context, - sentry direct.SentryClient, + sentry proto_sentry.SentryClient, wg *sync.WaitGroup, ) { - streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (SentryMessageStream, error) { + streamFactory := func(streamCtx context.Context, sentry proto_sentry.SentryClient) (grpc.ClientStream, error) { return sentry.PeerEvents(streamCtx, &proto_sentry.PeerEventsRequest{}, grpc.WaitForReady(true)) } messageFactory := func() *proto_sentry.PeerEvent { return new(proto_sentry.PeerEvent) } - 
SentryReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "PeerEvents", streamFactory, messageFactory, cs.HandlePeerEvent, wg, cs.logger) -} - -func SentryReconnectAndPumpStreamLoop[TMessage interface{}]( - ctx context.Context, - sentryClient direct.SentryClient, - statusDataFactory StatusDataFactory, - streamName string, - streamFactory SentryMessageStreamFactory, - messageFactory MessageFactory[TMessage], - handleInboundMessage MessageHandler[TMessage], - wg *sync.WaitGroup, - logger log.Logger, -) { - for ctx.Err() == nil { - if _, err := sentryClient.HandShake(ctx, &emptypb.Empty{}, grpc.WaitForReady(true)); err != nil { - if errors.Is(err, context.Canceled) { - continue - } - if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { - time.Sleep(3 * time.Second) - continue - } - logger.Warn("HandShake error, sentry not ready yet", "stream", streamName, "err", err) - time.Sleep(time.Second) - continue - } - - statusData, err := statusDataFactory(ctx) - - if err != nil { - logger.Error("SentryReconnectAndPumpStreamLoop: statusDataFactory error", "stream", streamName, "err", err) - time.Sleep(time.Second) - continue - } - - if _, err := sentryClient.SetStatus(ctx, statusData); err != nil { - if errors.Is(err, context.Canceled) { - continue - } - if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { - time.Sleep(3 * time.Second) - continue - } - logger.Warn("Status error, sentry not ready yet", "stream", streamName, "err", err) - time.Sleep(time.Second) - continue - } - - if err := pumpStreamLoop(ctx, sentryClient, streamName, streamFactory, messageFactory, handleInboundMessage, wg, logger); err != nil { - if errors.Is(err, context.Canceled) { - continue - } - if isPeerNotFoundErr(err) { - continue - } - if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { - time.Sleep(3 * time.Second) - continue - } - logger.Warn("pumpStreamLoop failure", "stream", streamName, "err", err) - continue - } - } -} - -// pumpStreamLoop is normally run in 
a separate go-routine. -// It only exists until there are no more messages -// to be received (end of process, or interruption, or end of test). -// wg is used only in tests to avoid using waits, which is brittle. For non-test code wg == nil. -func pumpStreamLoop[TMessage interface{}]( - ctx context.Context, - sentry direct.SentryClient, - streamName string, - streamFactory SentryMessageStreamFactory, - messageFactory MessageFactory[TMessage], - handleInboundMessage MessageHandler[TMessage], - wg *sync.WaitGroup, - logger log.Logger, -) (err error) { - defer func() { - if rec := recover(); rec != nil { - err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) - } - }() // avoid crash because Erigon's core does many things - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - defer sentry.MarkDisconnected() - - // need to read all messages from Sentry as fast as we can, then: - // - can group them or process in batch - // - can have slow processing - reqs := make(chan TMessage, 256) - go func() { - for { - select { - case <-ctx.Done(): - return - case req := <-reqs: - if err := handleInboundMessage(ctx, req, sentry); err != nil { - logger.Debug("Handling incoming message", "stream", streamName, "err", err) - } - if wg != nil { - wg.Done() - } - } - } - }() - - stream, err := streamFactory(ctx, sentry) - if err != nil { - return err - } - - for ctx.Err() == nil { - req := messageFactory() - err := stream.RecvMsg(req) - if err != nil { - return err - } - - select { - case reqs <- req: - case <-ctx.Done(): - } - } - - return ctx.Err() + libsentry.ReconnectAndPumpStreamLoop(ctx, sentry, cs.makeStatusData, "PeerEvents", streamFactory, messageFactory, cs.HandlePeerEvent, wg, cs.logger) } // MultiClient - does handle request/response/subscriptions to multiple sentries @@ -279,7 +141,7 @@ type MultiClient struct { Hd *headerdownload.HeaderDownload Bd *bodydownload.BodyDownload IsMock bool - sentries []direct.SentryClient + sentries []proto_sentry.SentryClient 
ChainConfig *chain.Config db kv.RwDB Engine consensus.Engine @@ -304,7 +166,7 @@ func NewMultiClient( db kv.RwDB, chainConfig *chain.Config, engine consensus.Engine, - sentries []direct.SentryClient, + sentries []proto_sentry.SentryClient, syncCfg ethconfig.Sync, blockReader services.FullBlockReader, blockBufferSize int, @@ -369,9 +231,9 @@ func NewMultiClient( return cs, nil } -func (cs *MultiClient) Sentries() []direct.SentryClient { return cs.sentries } +func (cs *MultiClient) Sentries() []proto_sentry.SentryClient { return cs.sentries } -func (cs *MultiClient) newBlockHashes66(ctx context.Context, req *proto_sentry.InboundMessage, sentry direct.SentryClient) error { +func (cs *MultiClient) newBlockHashes66(ctx context.Context, req *proto_sentry.InboundMessage, sentry proto_sentry.SentryClient) error { if cs.disableBlockDownload { return nil } @@ -420,7 +282,7 @@ func (cs *MultiClient) newBlockHashes66(ctx context.Context, req *proto_sentry.I return nil } -func (cs *MultiClient) blockHeaders66(ctx context.Context, in *proto_sentry.InboundMessage, sentry direct.SentryClient) error { +func (cs *MultiClient) blockHeaders66(ctx context.Context, in *proto_sentry.InboundMessage, sentry proto_sentry.SentryClient) error { // Parse the entire packet from scratch var pkt eth.BlockHeadersPacket66 if err := rlp.DecodeBytes(in.Data, &pkt); err != nil { @@ -440,7 +302,7 @@ func (cs *MultiClient) blockHeaders66(ctx context.Context, in *proto_sentry.Inbo return cs.blockHeaders(ctx, pkt.BlockHeadersPacket, rlpStream, in.PeerId, sentry) } -func (cs *MultiClient) blockHeaders(ctx context.Context, pkt eth.BlockHeadersPacket, rlpStream *rlp.Stream, peerID *proto_types.H512, sentryClient direct.SentryClient) error { +func (cs *MultiClient) blockHeaders(ctx context.Context, pkt eth.BlockHeadersPacket, rlpStream *rlp.Stream, peerID *proto_types.H512, sentryClient proto_sentry.SentryClient) error { if cs.disableBlockDownload { return nil } @@ -519,7 +381,7 @@ func (cs *MultiClient) 
blockHeaders(ctx context.Context, pkt eth.BlockHeadersPac return nil } -func (cs *MultiClient) newBlock66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentryClient direct.SentryClient) error { +func (cs *MultiClient) newBlock66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentryClient proto_sentry.SentryClient) error { if cs.disableBlockDownload { return nil } @@ -574,7 +436,8 @@ func (cs *MultiClient) newBlock66(ctx context.Context, inreq *proto_sentry.Inbou Penalty: proto_sentry.PenaltyKind_Kick, // TODO: Extend penalty kinds } for _, sentry := range cs.sentries { - if !sentry.Ready() { + // TODO does this method need to be moved to the grpc api ? + if directSentry, ok := sentry.(direct.SentryClient); ok && !directSentry.Ready() { continue } if _, err1 := sentry.PenalizePeer(ctx, &outreq, &grpc.EmptyCallOption{}); err1 != nil { @@ -597,7 +460,7 @@ func (cs *MultiClient) newBlock66(ctx context.Context, inreq *proto_sentry.Inbou return nil } -func (cs *MultiClient) blockBodies66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentryClient direct.SentryClient) error { +func (cs *MultiClient) blockBodies66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentryClient proto_sentry.SentryClient) error { if cs.disableBlockDownload { return nil } @@ -615,11 +478,11 @@ func (cs *MultiClient) blockBodies66(ctx context.Context, inreq *proto_sentry.In return nil } -func (cs *MultiClient) receipts66(_ context.Context, _ *proto_sentry.InboundMessage, _ direct.SentryClient) error { +func (cs *MultiClient) receipts66(_ context.Context, _ *proto_sentry.InboundMessage, _ proto_sentry.SentryClient) error { return nil } -func (cs *MultiClient) getBlockHeaders66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { +func (cs *MultiClient) getBlockHeaders66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry proto_sentry.SentryClient) error { var query eth.GetBlockHeadersPacket66 if err := 
rlp.DecodeBytes(inreq.Data, &query); err != nil { return fmt.Errorf("decoding getBlockHeaders66: %w, data: %x", err, inreq.Data) @@ -667,7 +530,7 @@ func (cs *MultiClient) getBlockHeaders66(ctx context.Context, inreq *proto_sentr return nil } -func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { +func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry proto_sentry.SentryClient) error { var query eth.GetBlockBodiesPacket66 if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { return fmt.Errorf("decoding getBlockBodies66: %w, data: %x", err, inreq.Data) @@ -708,7 +571,7 @@ var ( EnableP2PReceipts = dbg.EnvBool("P2P_RECEIPTS", false) ) -func (cs *MultiClient) getReceipts66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentryClient direct.SentryClient) error { +func (cs *MultiClient) getReceipts66(ctx context.Context, inreq *proto_sentry.InboundMessage, sentryClient proto_sentry.SentryClient) error { if !EnableP2PReceipts { return nil } @@ -762,7 +625,7 @@ func MakeInboundMessage() *proto_sentry.InboundMessage { return new(proto_sentry.InboundMessage) } -func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message *proto_sentry.InboundMessage, sentry direct.SentryClient) (err error) { +func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message *proto_sentry.InboundMessage, sentry proto_sentry.SentryClient) (err error) { defer func() { if rec := recover(); rec != nil { err = fmt.Errorf("%+v, msgID=%s, trace: %s", rec, message.Id.String(), dbg.Stack()) @@ -784,7 +647,7 @@ func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message *proto_ return err } -func (cs *MultiClient) handleInboundMessage(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { +func (cs *MultiClient) handleInboundMessage(ctx context.Context, inreq *proto_sentry.InboundMessage, 
sentry proto_sentry.SentryClient) error { switch inreq.Id { // ========= eth 66 ========== @@ -809,7 +672,7 @@ func (cs *MultiClient) handleInboundMessage(ctx context.Context, inreq *proto_se } } -func (cs *MultiClient) HandlePeerEvent(ctx context.Context, event *proto_sentry.PeerEvent, sentryClient direct.SentryClient) error { +func (cs *MultiClient) HandlePeerEvent(ctx context.Context, event *proto_sentry.PeerEvent, sentryClient proto_sentry.SentryClient) error { eventID := event.EventId.String() peerID := sentry.ConvertH512ToPeerID(event.PeerId) peerIDStr := hex.EncodeToString(peerID[:]) diff --git a/p2p/sentry/sentrymultiplexer_test.go b/p2p/sentry/sentrymultiplexer_test.go new file mode 100644 index 00000000000..3cd4d445c96 --- /dev/null +++ b/p2p/sentry/sentrymultiplexer_test.go @@ -0,0 +1,381 @@ +package sentry_test + +import ( + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "net" + "sync" + "testing" + + "github.com/erigontech/erigon-lib/direct" + "github.com/erigontech/erigon-lib/gointerfaces" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/typesproto" + "github.com/erigontech/erigon-lib/p2p/sentry" + "github.com/erigontech/erigon/p2p/enode" + "github.com/erigontech/secp256k1" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" +) + +func newClient(ctrl *gomock.Controller, i int, caps []string) *direct.MockSentryClient { + client := direct.NewMockSentryClient(ctrl) + pk, _ := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader) + node := enode.NewV4(&pk.PublicKey, net.IPv4(127, 0, 0, byte(i)), 30001, 30001) + + if len(caps) == 0 { + caps = []string{"eth/68"} + } + + client.EXPECT().NodeInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(&typesproto.NodeInfoReply{ + Id: node.ID().String(), + Name: fmt.Sprintf("client-%d", i), + Enode: 
node.URLv4(), + Enr: node.String(), + Ports: &typesproto.NodeInfoPorts{ + Discovery: uint32(30000), + Listener: uint32(30001), + }, + ListenerAddr: fmt.Sprintf("127.0.0.%d", i), + }, nil).AnyTimes() + + client.EXPECT().HandShake(gomock.Any(), gomock.Any(), gomock.Any()).Return(&sentryproto.HandShakeReply{}, nil).AnyTimes() + + client.EXPECT().Peers(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*sentryproto.PeersReply, error) { + id := [64]byte{byte(i)} + return &sentryproto.PeersReply{ + Peers: []*typesproto.PeerInfo{ + { + Id: hex.EncodeToString(id[:]), + Caps: caps, + }, + }, + }, nil + }).AnyTimes() + + return client +} + +func TestCreateMultiplexer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var clients []sentryproto.SentryClient + + for i := 0; i < 10; i++ { + clients = append(clients, newClient(ctrl, i, nil)) + } + + mux := sentry.NewSentryMultiplexer(clients) + require.NotNil(t, mux) + + hs, err := mux.HandShake(context.Background(), &emptypb.Empty{}) + require.NotNil(t, hs) + require.NoError(t, err) + + info, err := mux.NodeInfo(context.Background(), &emptypb.Empty{}) + require.Nil(t, info) + require.Error(t, err) + + infos, err := mux.NodeInfos(context.Background()) + require.NoError(t, err) + require.Len(t, infos, 10) +} + +func TestStatus(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var clients []sentryproto.SentryClient + + var statusCount int + var mu sync.Mutex + + for i := 0; i < 10; i++ { + client := newClient(ctrl, i, nil) + client.EXPECT().SetStatus(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, sd *sentryproto.StatusData, co ...grpc.CallOption) (*sentryproto.SetStatusReply, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &sentryproto.SetStatusReply{}, nil + }) + client.EXPECT().PenalizePeer(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + 
func(ctx context.Context, sd *sentryproto.PenalizePeerRequest, co ...grpc.CallOption) (*emptypb.Empty, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &emptypb.Empty{}, nil + }) + client.EXPECT().PeerMinBlock(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, sd *sentryproto.PeerMinBlockRequest, co ...grpc.CallOption) (*emptypb.Empty, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &emptypb.Empty{}, nil + }) + + clients = append(clients, client) + } + + mux := sentry.NewSentryMultiplexer(clients) + require.NotNil(t, mux) + + hs, err := mux.HandShake(context.Background(), &emptypb.Empty{}) + require.NoError(t, err) + require.NotNil(t, hs) + + reply, err := mux.SetStatus(context.Background(), &sentryproto.StatusData{}) + require.NoError(t, err) + require.NotNil(t, reply) + require.Equal(t, 10, statusCount) + + statusCount = 0 + + empty, err := mux.PenalizePeer(context.Background(), &sentryproto.PenalizePeerRequest{}) + require.NoError(t, err) + require.NotNil(t, empty) + require.Equal(t, 10, statusCount) + + statusCount = 0 + + empty, err = mux.PeerMinBlock(context.Background(), &sentryproto.PeerMinBlockRequest{}) + require.NoError(t, err) + require.NotNil(t, empty) + require.Equal(t, 10, statusCount) +} + +func TestSend(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var clients []sentryproto.SentryClient + + var statusCount int + var mu sync.Mutex + + for i := 0; i < 10; i++ { + client := newClient(ctrl, i, nil) + client.EXPECT().SendMessageByMinBlock(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *sentryproto.SendMessageByMinBlockRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &sentryproto.SentPeers{}, nil + }).AnyTimes() + client.EXPECT().SendMessageById(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in 
*sentryproto.SendMessageByIdRequest, opts ...grpc.CallOption) (*sentryproto.SentPeers, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &sentryproto.SentPeers{}, nil + }).AnyTimes() + + clients = append(clients, client) + } + + mux := sentry.NewSentryMultiplexer(clients) + require.NotNil(t, mux) + + _, err := mux.HandShake(context.Background(), &emptypb.Empty{}) + require.NoError(t, err) + + sendReply, err := mux.SendMessageByMinBlock(context.Background(), &sentryproto.SendMessageByMinBlockRequest{}) + require.NoError(t, err) + require.NotNil(t, sendReply) + require.Equal(t, 1, statusCount) + + statusCount = 0 + + for i := byte(0); i < 10; i++ { + sendReply, err = mux.SendMessageById(context.Background(), &sentryproto.SendMessageByIdRequest{ + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_BLOCK_BODIES_65, + }, + PeerId: gointerfaces.ConvertHashToH512([64]byte{i}), + }) + require.NoError(t, err) + require.NotNil(t, sendReply) + require.Equal(t, 1, statusCount) + + statusCount = 0 + } + + sendReply, err = mux.SendMessageToRandomPeers(context.Background(), &sentryproto.SendMessageToRandomPeersRequest{ + Data: &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_BLOCK_BODIES_65, + }, + }) + require.NoError(t, err) + require.NotNil(t, sendReply) + require.Equal(t, 10, statusCount) + + statusCount = 0 + + sendReply, err = mux.SendMessageToAll(context.Background(), &sentryproto.OutboundMessageData{ + Id: sentryproto.MessageId_BLOCK_BODIES_65, + }) + require.NoError(t, err) + require.NotNil(t, sendReply) + require.Equal(t, 10, statusCount) +} + +func TestMessages(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var clients []sentryproto.SentryClient + + for i := 0; i < 10; i++ { + client := newClient(ctrl, i, nil) + client.EXPECT().Messages(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *sentryproto.MessagesRequest, opts ...grpc.CallOption) 
(sentryproto.Sentry_MessagesClient, error) { + ch := make(chan sentry.StreamReply[*sentryproto.InboundMessage], 16384) + streamServer := &sentry.SentryStreamS[*sentryproto.InboundMessage]{Ch: ch, Ctx: ctx} + + go func() { + for i := 0; i < 5; i++ { + streamServer.Send(&sentryproto.InboundMessage{}) + } + + streamServer.Close() + }() + + return &sentry.SentryStreamC[*sentryproto.InboundMessage]{Ch: ch, Ctx: ctx}, nil + }) + + clients = append(clients, client) + } + + mux := sentry.NewSentryMultiplexer(clients) + require.NotNil(t, mux) + + client, err := mux.Messages(context.Background(), &sentryproto.MessagesRequest{}) + require.NoError(t, err) + require.NotNil(t, client) + + var messageCount int + + for { + message, err := client.Recv() + + if err != nil { + require.ErrorIs(t, err, io.EOF) + break + } + + messageCount++ + require.NotNil(t, message) + } + + require.Equal(t, 50, messageCount) +} + +func TestPeers(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var clients []sentryproto.SentryClient + + var statusCount int + var mu sync.Mutex + + for i := 0; i < 10; i++ { + client := newClient(ctrl, i, nil) + client.EXPECT().AddPeer(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *sentryproto.AddPeerRequest, opts ...grpc.CallOption) (*sentryproto.AddPeerReply, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &sentryproto.AddPeerReply{}, nil + }) + client.EXPECT().PeerEvents(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *sentryproto.PeerEventsRequest, opts ...grpc.CallOption) (sentryproto.Sentry_PeerEventsClient, error) { + ch := make(chan sentry.StreamReply[*sentryproto.PeerEvent], 16384) + streamServer := &sentry.SentryStreamS[*sentryproto.PeerEvent]{Ch: ch, Ctx: ctx} + + go func() { + for i := 0; i < 5; i++ { + streamServer.Send(&sentryproto.PeerEvent{}) + } + + streamServer.Close() + }() + + return 
&sentry.SentryStreamC[*sentryproto.PeerEvent]{Ch: ch, Ctx: ctx}, nil + }) + client.EXPECT().PeerById(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *sentryproto.PeerByIdRequest, opts ...grpc.CallOption) (*sentryproto.PeerByIdReply, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &sentryproto.PeerByIdReply{}, nil + }) + client.EXPECT().PeerCount(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, in *sentryproto.PeerCountRequest, opts ...grpc.CallOption) (*sentryproto.PeerCountReply, error) { + mu.Lock() + defer mu.Unlock() + statusCount++ + return &sentryproto.PeerCountReply{}, nil + }) + + clients = append(clients, client) + } + + mux := sentry.NewSentryMultiplexer(clients) + require.NotNil(t, mux) + + _, err := mux.HandShake(context.Background(), &emptypb.Empty{}) + require.NoError(t, err) + + addPeerReply, err := mux.AddPeer(context.Background(), &sentryproto.AddPeerRequest{}) + require.NoError(t, err) + require.NotNil(t, addPeerReply) + require.Equal(t, 10, statusCount) + + client, err := mux.PeerEvents(context.Background(), &sentryproto.PeerEventsRequest{}) + require.NoError(t, err) + require.NotNil(t, client) + + var eventCount int + + for { + message, err := client.Recv() + + if err != nil { + require.ErrorIs(t, err, io.EOF) + break + } + + eventCount++ + require.NotNil(t, message) + } + + require.Equal(t, 50, eventCount) + + statusCount = 0 + + peerIdReply, err := mux.PeerById(context.Background(), &sentryproto.PeerByIdRequest{}) + require.NoError(t, err) + require.NotNil(t, peerIdReply) + require.Equal(t, 10, statusCount) + + statusCount = 0 + + peerCountReply, err := mux.PeerCount(context.Background(), &sentryproto.PeerCountRequest{}) + require.NoError(t, err) + require.NotNil(t, peerCountReply) + require.Equal(t, 10, statusCount) + + peersReply, err := mux.Peers(context.Background(), &emptypb.Empty{}) + require.NoError(t, err) + require.NotNil(t, peersReply) + 
require.Equal(t, 10, len(peersReply.GetPeers())) +} diff --git a/p2p/server.go b/p2p/server.go index 893743053ee..597ff51f0bb 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -38,7 +38,7 @@ import ( "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon/common" + libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/common/debug" "github.com/erigontech/erigon/common/mclock" "github.com/erigontech/erigon/crypto" @@ -850,7 +850,7 @@ running: case pd := <-srv.delpeer: // A peer disconnected. - d := common.PrettyDuration(mclock.Now() - pd.created) + d := libcommon.PrettyDuration(mclock.Now() - pd.created) delete(peers, pd.ID()) srv.logger.Trace("Removing p2p peer", "peercount", len(peers), "url", pd.Node(), "duration", d, "err", pd.err) srv.dialsched.peerRemoved(pd.rw) diff --git a/polygon/bor/bor.go b/polygon/bor/bor.go index 721a7ee28ff..062e9db08a5 100644 --- a/polygon/bor/bor.go +++ b/polygon/bor/bor.go @@ -44,7 +44,6 @@ import ( "github.com/erigontech/erigon-lib/common/length" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon/common" "github.com/erigontech/erigon/consensus" "github.com/erigontech/erigon/consensus/misc" "github.com/erigontech/erigon/core/rawdb" @@ -1212,7 +1211,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, blockWithReceipts *types.B go func() { // Wait until sealing is terminated or delay timeout. 
- c.logger.Info("[bor] Waiting for slot to sign and propagate", "number", number, "hash", header.Hash, "delay", common.PrettyDuration(delay), "TxCount", block.Transactions().Len(), "Signer", signer) + c.logger.Info("[bor] Waiting for slot to sign and propagate", "number", number, "hash", header.Hash, "delay", libcommon.PrettyDuration(delay), "TxCount", block.Transactions().Len(), "Signer", signer) select { case <-stop: @@ -1231,7 +1230,7 @@ func (c *Bor) Seal(chain consensus.ChainHeaderReader, blockWithReceipts *types.B c.logger.Info( "[bor] Sealed out-of-turn", "number", number, - "wiggle", common.PrettyDuration(wiggle), + "wiggle", libcommon.PrettyDuration(wiggle), "delay", delay, "headerDifficulty", header.Difficulty, "signer", signer.Hex(), @@ -1541,48 +1540,6 @@ func (c *Bor) CommitStates( } events := chain.Chain.BorEventsByBlock(header.Hash(), blockNum) - if enableBoreventsRemoteFallback && blockNum <= chain.Chain.FrozenBorBlocks() && len(events) == 50 { - // we still sometime could get 0 events from borevent file - var to time.Time - if c.config.IsIndore(blockNum) { - stateSyncDelay := c.config.CalculateStateSyncDelay(blockNum) - to = time.Unix(int64(header.Time-stateSyncDelay), 0) - } else { - pHeader := chain.Chain.GetHeaderByNumber(blockNum - c.config.CalculateSprintLength(blockNum)) - to = time.Unix(int64(pHeader.Time), 0) - } - - startEventID := chain.Chain.BorStartEventID(header.Hash(), blockNum) - log.Warn("[dbg] fallback to remote bor events", "blockNum", blockNum, "startEventID", startEventID, "events_from_db_or_snaps", len(events)) - remote, err := c.HeimdallClient.FetchStateSyncEvents(context.Background(), startEventID, to, 0) - if err != nil { - return err - } - if len(remote) > 0 { - chainID := c.chainConfig.ChainID.String() - - var merged []*heimdall.EventRecordWithTime - events = events[:0] - for _, event := range remote { - if event.ChainID != chainID { - continue - } - if event.Time.After(to) { - continue - } - merged = append(merged, event) 
- } - - for _, ev := range merged { - data, err := ev.MarshallBytes() - if err != nil { - panic(err) - } - - events = append(events, data) - } - } - } for _, event := range events { if err := c.stateReceiver.CommitState(event, syscall); err != nil { diff --git a/polygon/p2p/fetcher_base_test.go b/polygon/p2p/fetcher_base_test.go index 9bc1ed69b95..c13ac4a4ffc 100644 --- a/polygon/p2p/fetcher_base_test.go +++ b/polygon/p2p/fetcher_base_test.go @@ -31,12 +31,12 @@ import ( "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/direct" - sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" erigonlibtypes "github.com/erigontech/erigon-lib/gointerfaces/typesproto" "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon-lib/p2p/sentry" "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/eth/protocols/eth" - sentrymulticlient "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client" "github.com/erigontech/erigon/rlp" "github.com/erigontech/erigon/turbo/testlog" ) @@ -46,19 +46,19 @@ func TestFetcherFetchHeaders(t *testing.T) { peerId := PeerIdFromUint64(1) requestId := uint64(1234) - mockInboundMessages := []*sentry.InboundMessage{ + mockInboundMessages := []*sentryproto.InboundMessage{ { // should get filtered because it is from a different peer id PeerId: PeerIdFromUint64(2).H512(), }, { // should get filtered because it is from a different request id - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), Data: newMockBlockHeadersPacket66Bytes(t, requestId*2, 2), }, { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), Data: newMockBlockHeadersPacket66Bytes(t, requestId, 2), }, @@ -89,9 +89,9 @@ func TestFetcherFetchHeadersWithChunking(t *testing.T) { peerId := 
PeerIdFromUint64(1) mockHeaders := newMockBlockHeaders(1999) requestId1 := uint64(1234) - mockInboundMessages1 := []*sentry.InboundMessage{ + mockInboundMessages1 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // 1024 headers in first response Data: blockHeadersPacket66Bytes(t, requestId1, mockHeaders[:1024]), @@ -105,9 +105,9 @@ func TestFetcherFetchHeadersWithChunking(t *testing.T) { wantRequestAmount: 1024, } requestId2 := uint64(1235) - mockInboundMessages2 := []*sentry.InboundMessage{ + mockInboundMessages2 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // remaining 975 headers in second response Data: blockHeadersPacket66Bytes(t, requestId2, mockHeaders[1024:]), @@ -138,9 +138,9 @@ func TestFetcherFetchHeadersResponseTimeout(t *testing.T) { peerId := PeerIdFromUint64(1) requestId1 := uint64(1234) - mockInboundMessages1 := []*sentry.InboundMessage{ + mockInboundMessages1 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // requestId2 takes too long and causes response timeout Data: nil, @@ -156,9 +156,9 @@ func TestFetcherFetchHeadersResponseTimeout(t *testing.T) { responseDelay: 600 * time.Millisecond, } requestId2 := uint64(1235) - mockInboundMessages2 := []*sentry.InboundMessage{ + mockInboundMessages2 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // requestId2 takes too long and causes response timeout Data: nil, @@ -189,9 +189,9 @@ func TestFetcherFetchHeadersResponseTimeoutRetrySuccess(t *testing.T) { peerId := PeerIdFromUint64(1) mockHeaders := newMockBlockHeaders(1999) requestId1 := uint64(1234) - mockInboundMessages1 := []*sentry.InboundMessage{ + 
mockInboundMessages1 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // 1024 headers in first response Data: blockHeadersPacket66Bytes(t, requestId1, mockHeaders[:1024]), @@ -205,9 +205,9 @@ func TestFetcherFetchHeadersResponseTimeoutRetrySuccess(t *testing.T) { wantRequestAmount: 1024, } requestId2 := uint64(1235) - mockInboundMessages2 := []*sentry.InboundMessage{ + mockInboundMessages2 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // requestId2 takes too long and causes response timeout Data: nil, @@ -223,9 +223,9 @@ func TestFetcherFetchHeadersResponseTimeoutRetrySuccess(t *testing.T) { responseDelay: 600 * time.Millisecond, } requestId3 := uint64(1236) - mockInboundMessages3 := []*sentry.InboundMessage{ + mockInboundMessages3 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), // remaining 975 headers in third response Data: blockHeadersPacket66Bytes(t, requestId3, mockHeaders[1024:]), @@ -272,16 +272,16 @@ func TestFetcherFetchHeadersErrIncompleteResponse(t *testing.T) { peerId := PeerIdFromUint64(1) requestId1 := uint64(1234) requestId2 := uint64(1235) - mockInboundMessages1 := []*sentry.InboundMessage{ + mockInboundMessages1 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), Data: newMockBlockHeadersPacket66Bytes(t, requestId1, 2), }, } - mockInboundMessages2 := []*sentry.InboundMessage{ + mockInboundMessages2 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_HEADERS_66, + Id: sentryproto.MessageId_BLOCK_HEADERS_66, PeerId: peerId.H512(), Data: newMockBlockHeadersPacket66Bytes(t, requestId2, 0), }, @@ -327,9 +327,9 @@ func TestFetcherFetchBodies(t *testing.T) { 
mockHeaders[0].Hash(), mockHeaders[1].Hash(), } - mockInboundMessages1 := []*sentry.InboundMessage{ + mockInboundMessages1 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_BODIES_66, + Id: sentryproto.MessageId_BLOCK_BODIES_66, PeerId: peerId.H512(), Data: newMockBlockBodiesPacketBytes(t, requestId1, &types.Body{ Transactions: types.Transactions{ @@ -354,9 +354,9 @@ func TestFetcherFetchBodies(t *testing.T) { wantRequestPeerId: peerId, wantRequestHashes: mockHashes, } - mockInboundMessages2 := []*sentry.InboundMessage{ + mockInboundMessages2 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_BODIES_66, + Id: sentryproto.MessageId_BLOCK_BODIES_66, PeerId: peerId.H512(), Data: newMockBlockBodiesPacketBytes(t, requestId2, &types.Body{ Transactions: types.Transactions{ @@ -401,9 +401,9 @@ func TestFetcherFetchBodiesResponseTimeout(t *testing.T) { requestId2 := uint64(1235) mockHeaders := []*types.Header{{Number: big.NewInt(1)}} mockHashes := []common.Hash{mockHeaders[0].Hash()} - mockInboundMessages := []*sentry.InboundMessage{ + mockInboundMessages := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_BODIES_66, + Id: sentryproto.MessageId_BLOCK_BODIES_66, PeerId: peerId.H512(), Data: nil, // response timeout }, @@ -440,9 +440,9 @@ func TestFetcherFetchBodiesResponseTimeoutRetrySuccess(t *testing.T) { requestId2 := uint64(1235) mockHeaders := []*types.Header{{Number: big.NewInt(1)}} mockHashes := []common.Hash{mockHeaders[0].Hash()} - mockInboundMessages1 := []*sentry.InboundMessage{ + mockInboundMessages1 := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_BODIES_66, + Id: sentryproto.MessageId_BLOCK_BODIES_66, PeerId: peerId.H512(), Data: nil, // response timeout }, @@ -454,9 +454,9 @@ func TestFetcherFetchBodiesResponseTimeoutRetrySuccess(t *testing.T) { wantRequestPeerId: peerId, wantRequestHashes: mockHashes, } - mockInboundMessages2 := []*sentry.InboundMessage{ + mockInboundMessages2 := 
[]*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_BODIES_66, + Id: sentryproto.MessageId_BLOCK_BODIES_66, PeerId: peerId.H512(), Data: newMockBlockBodiesPacketBytes(t, requestId2, &types.Body{ Transactions: types.Transactions{ @@ -498,9 +498,9 @@ func TestFetcherFetchBodiesErrMissingBodies(t *testing.T) { requestId := uint64(1234) mockHeaders := []*types.Header{{Number: big.NewInt(1)}} mockHashes := []common.Hash{mockHeaders[0].Hash()} - mockInboundMessages := []*sentry.InboundMessage{ + mockInboundMessages := []*sentryproto.InboundMessage{ { - Id: sentry.MessageId_BLOCK_BODIES_66, + Id: sentryproto.MessageId_BLOCK_BODIES_66, PeerId: peerId.H512(), Data: newMockBlockBodiesPacketBytes(t, requestId), }, @@ -535,8 +535,8 @@ func newFetcherTest(t *testing.T, requestIdGenerator RequestIdGenerator) *fetche logger := testlog.Logger(t, log.LvlCrit) ctrl := gomock.NewController(t) sentryClient := direct.NewMockSentryClient(ctrl) - statusDataFactory := sentrymulticlient.StatusDataFactory(func(ctx context.Context) (*sentry.StatusData, error) { - return &sentry.StatusData{}, nil + statusDataFactory := sentry.StatusDataFactory(func(ctx context.Context) (*sentryproto.StatusData, error) { + return &sentryproto.StatusData{}, nil }) peerPenalizer := NewPeerPenalizer(sentryClient) messageListener := NewMessageListener(logger, sentryClient, statusDataFactory, peerPenalizer) @@ -563,7 +563,7 @@ type fetcherTest struct { sentryClient *direct.MockSentryClient messageListener MessageListener headersRequestResponseMocks map[uint64]requestResponseMock - peerEvents chan *delayedMessage[*sentry.PeerEvent] + peerEvents chan *delayedMessage[*sentryproto.PeerEvent] } func (ft *fetcherTest) run(f func(ctx context.Context, t *testing.T)) { @@ -614,8 +614,8 @@ func (ft *fetcherTest) mockSentryInboundMessagesStream(mocks ...requestResponseM ft.headersRequestResponseMocks[mock.requestId] = mock } - inboundMessageStreamChan := make(chan *delayedMessage[*sentry.InboundMessage], 
numInboundMessages) - mockSentryInboundMessagesStream := &mockSentryMessagesStream[*sentry.InboundMessage]{ + inboundMessageStreamChan := make(chan *delayedMessage[*sentryproto.InboundMessage], numInboundMessages) + mockSentryInboundMessagesStream := &mockSentryMessagesStream[*sentryproto.InboundMessage]{ ctx: ft.ctx, stream: inboundMessageStreamChan, } @@ -628,13 +628,13 @@ func (ft *fetcherTest) mockSentryInboundMessagesStream(mocks ...requestResponseM ft.sentryClient. EXPECT(). SendMessageById(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, req *sentry.SendMessageByIdRequest, _ ...grpc.CallOption) (*sentry.SentPeers, error) { + DoAndReturn(func(_ context.Context, req *sentryproto.SendMessageByIdRequest, _ ...grpc.CallOption) (*sentryproto.SentPeers, error) { var mock requestResponseMock var err error switch req.Data.Id { - case sentry.MessageId_GET_BLOCK_HEADERS_66: + case sentryproto.MessageId_GET_BLOCK_HEADERS_66: mock, err = ft.mockSendMessageByIdForHeaders(req) - case sentry.MessageId_GET_BLOCK_BODIES_66: + case sentryproto.MessageId_GET_BLOCK_BODIES_66: mock, err = ft.mockSendMessageByIdForBodies(req) default: return nil, fmt.Errorf("unexpected message id request sent %d", req.Data.Id) @@ -645,21 +645,21 @@ func (ft *fetcherTest) mockSentryInboundMessagesStream(mocks ...requestResponseM delete(ft.headersRequestResponseMocks, mock.requestId) for _, inboundMessage := range mock.mockResponseInboundMessages { - inboundMessageStreamChan <- &delayedMessage[*sentry.InboundMessage]{ + inboundMessageStreamChan <- &delayedMessage[*sentryproto.InboundMessage]{ message: inboundMessage, responseDelay: mock.responseDelay, } } - return &sentry.SentPeers{ + return &sentryproto.SentPeers{ Peers: []*erigonlibtypes.H512{req.PeerId}, }, nil }). 
AnyTimes() } -func (ft *fetcherTest) mockSendMessageByIdForHeaders(req *sentry.SendMessageByIdRequest) (requestResponseMock, error) { - if sentry.MessageId_GET_BLOCK_HEADERS_66 != req.Data.Id { +func (ft *fetcherTest) mockSendMessageByIdForHeaders(req *sentryproto.SendMessageByIdRequest) (requestResponseMock, error) { + if sentryproto.MessageId_GET_BLOCK_HEADERS_66 != req.Data.Id { return requestResponseMock{}, fmt.Errorf("MessageId_GET_BLOCK_HEADERS_66 != req.Data.Id - %v", req.Data.Id) } @@ -689,8 +689,8 @@ func (ft *fetcherTest) mockSendMessageByIdForHeaders(req *sentry.SendMessageById return mock, nil } -func (ft *fetcherTest) mockSendMessageByIdForBodies(req *sentry.SendMessageByIdRequest) (requestResponseMock, error) { - if sentry.MessageId_GET_BLOCK_BODIES_66 != req.Data.Id { +func (ft *fetcherTest) mockSendMessageByIdForBodies(req *sentryproto.SendMessageByIdRequest) (requestResponseMock, error) { + if sentryproto.MessageId_GET_BLOCK_BODIES_66 != req.Data.Id { return requestResponseMock{}, fmt.Errorf("MessageId_GET_BLOCK_BODIES_66 != req.Data.Id - %v", req.Data.Id) } @@ -723,11 +723,11 @@ func (ft *fetcherTest) mockSendMessageByIdForBodies(req *sentry.SendMessageByIdR } func (ft *fetcherTest) mockSentryPeerEventsStream() { - ft.peerEvents = make(chan *delayedMessage[*sentry.PeerEvent]) + ft.peerEvents = make(chan *delayedMessage[*sentryproto.PeerEvent]) ft.sentryClient. EXPECT(). PeerEvents(gomock.Any(), gomock.Any(), gomock.Any()). - Return(&mockSentryMessagesStream[*sentry.PeerEvent]{ + Return(&mockSentryMessagesStream[*sentryproto.PeerEvent]{ ctx: ft.ctx, stream: ft.peerEvents, }, nil). 
@@ -737,7 +737,7 @@ func (ft *fetcherTest) mockSentryPeerEventsStream() { type requestResponseMock struct { requestId uint64 responseDelay time.Duration - mockResponseInboundMessages []*sentry.InboundMessage + mockResponseInboundMessages []*sentryproto.InboundMessage // Common wantRequestPeerId *PeerId diff --git a/polygon/p2p/message_listener.go b/polygon/p2p/message_listener.go index 218e32e7f11..f9cc61c9083 100644 --- a/polygon/p2p/message_listener.go +++ b/polygon/p2p/message_listener.go @@ -25,16 +25,15 @@ import ( "github.com/erigontech/erigon-lib/log/v3" - "github.com/erigontech/erigon-lib/direct" - sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" + "github.com/erigontech/erigon-lib/p2p/sentry" "github.com/erigontech/erigon/eth/protocols/eth" - sentrymulticlient "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client" "github.com/erigontech/erigon/polygon/polygoncommon" "github.com/erigontech/erigon/rlp" ) type DecodedInboundMessage[TPacket any] struct { - *sentry.InboundMessage + *sentryproto.InboundMessage Decoded TPacket PeerId *PeerId } @@ -47,13 +46,13 @@ type MessageListener interface { RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc - RegisterPeerEventObserver(observer polygoncommon.Observer[*sentry.PeerEvent]) UnregisterFunc + RegisterPeerEventObserver(observer polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc } func NewMessageListener( logger log.Logger, - sentryClient direct.SentryClient, - statusDataFactory sentrymulticlient.StatusDataFactory, + sentryClient sentryproto.SentryClient, + 
statusDataFactory sentry.StatusDataFactory, peerPenalizer PeerPenalizer, ) MessageListener { return newMessageListener(logger, sentryClient, statusDataFactory, peerPenalizer) @@ -61,8 +60,8 @@ func NewMessageListener( func newMessageListener( logger log.Logger, - sentryClient direct.SentryClient, - statusDataFactory sentrymulticlient.StatusDataFactory, + sentryClient sentryproto.SentryClient, + statusDataFactory sentry.StatusDataFactory, peerPenalizer PeerPenalizer, ) *messageListener { return &messageListener{ @@ -74,21 +73,20 @@ func newMessageListener( newBlockHashesObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.NewBlockHashesPacket]](), blockHeadersObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.BlockHeadersPacket66]](), blockBodiesObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.BlockBodiesPacket66]](), - peerEventObservers: polygoncommon.NewObservers[*sentry.PeerEvent](), + peerEventObservers: polygoncommon.NewObservers[*sentryproto.PeerEvent](), } } type messageListener struct { - once sync.Once logger log.Logger - sentryClient direct.SentryClient - statusDataFactory sentrymulticlient.StatusDataFactory + sentryClient sentryproto.SentryClient + statusDataFactory sentry.StatusDataFactory peerPenalizer PeerPenalizer newBlockObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.NewBlockPacket]] newBlockHashesObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.NewBlockHashesPacket]] blockHeadersObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.BlockHeadersPacket66]] blockBodiesObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.BlockBodiesPacket66]] - peerEventObservers *polygoncommon.Observers[*sentry.PeerEvent] + peerEventObservers *polygoncommon.Observers[*sentryproto.PeerEvent] stopWg sync.WaitGroup } @@ -134,33 +132,33 @@ func (ml *messageListener) RegisterBlockBodiesObserver(observer polygoncommon.Ob return ml.blockBodiesObservers.Register(observer) } -func (ml 
*messageListener) RegisterPeerEventObserver(observer polygoncommon.Observer[*sentry.PeerEvent]) UnregisterFunc { +func (ml *messageListener) RegisterPeerEventObserver(observer polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc { return ml.peerEventObservers.Register(observer) } func (ml *messageListener) listenInboundMessages(ctx context.Context) { - streamFactory := func(ctx context.Context, sentryClient direct.SentryClient) (sentrymulticlient.SentryMessageStream, error) { - messagesRequest := sentry.MessagesRequest{ - Ids: []sentry.MessageId{ - sentry.MessageId_NEW_BLOCK_66, - sentry.MessageId_NEW_BLOCK_HASHES_66, - sentry.MessageId_BLOCK_HEADERS_66, - sentry.MessageId_BLOCK_BODIES_66, + streamFactory := func(ctx context.Context, sentryClient sentryproto.SentryClient) (grpc.ClientStream, error) { + messagesRequest := sentryproto.MessagesRequest{ + Ids: []sentryproto.MessageId{ + sentryproto.MessageId_NEW_BLOCK_66, + sentryproto.MessageId_NEW_BLOCK_HASHES_66, + sentryproto.MessageId_BLOCK_HEADERS_66, + sentryproto.MessageId_BLOCK_BODIES_66, }, } return sentryClient.Messages(ctx, &messagesRequest, grpc.WaitForReady(true)) } - streamMessages(ctx, ml, "InboundMessages", streamFactory, func(message *sentry.InboundMessage) error { + streamMessages(ctx, ml, "InboundMessages", streamFactory, func(message *sentryproto.InboundMessage) error { switch message.Id { - case sentry.MessageId_NEW_BLOCK_66: + case sentryproto.MessageId_NEW_BLOCK_66: return notifyInboundMessageObservers(ctx, ml.logger, ml.peerPenalizer, ml.newBlockObservers, message) - case sentry.MessageId_NEW_BLOCK_HASHES_66: + case sentryproto.MessageId_NEW_BLOCK_HASHES_66: return notifyInboundMessageObservers(ctx, ml.logger, ml.peerPenalizer, ml.newBlockHashesObservers, message) - case sentry.MessageId_BLOCK_HEADERS_66: + case sentryproto.MessageId_BLOCK_HEADERS_66: return notifyInboundMessageObservers(ctx, ml.logger, ml.peerPenalizer, ml.blockHeadersObservers, message) - case 
sentry.MessageId_BLOCK_BODIES_66: + case sentryproto.MessageId_BLOCK_BODIES_66: return notifyInboundMessageObservers(ctx, ml.logger, ml.peerPenalizer, ml.blockBodiesObservers, message) default: return nil @@ -169,14 +167,14 @@ func (ml *messageListener) listenInboundMessages(ctx context.Context) { } func (ml *messageListener) listenPeerEvents(ctx context.Context) { - streamFactory := func(ctx context.Context, sentryClient direct.SentryClient) (sentrymulticlient.SentryMessageStream, error) { - return sentryClient.PeerEvents(ctx, &sentry.PeerEventsRequest{}, grpc.WaitForReady(true)) + streamFactory := func(ctx context.Context, sentryClient sentryproto.SentryClient) (grpc.ClientStream, error) { + return sentryClient.PeerEvents(ctx, &sentryproto.PeerEventsRequest{}, grpc.WaitForReady(true)) } streamMessages(ctx, ml, "PeerEvents", streamFactory, ml.notifyPeerEventObservers) } -func (ml *messageListener) notifyPeerEventObservers(peerEvent *sentry.PeerEvent) error { +func (ml *messageListener) notifyPeerEventObservers(peerEvent *sentryproto.PeerEvent) error { // wait on all observers to finish processing the peer event before notifying them // with subsequent events in order to preserve the ordering of the sentry messages ml.peerEventObservers.NotifySync(peerEvent) @@ -187,16 +185,16 @@ func streamMessages[TMessage any]( ctx context.Context, ml *messageListener, name string, - streamFactory sentrymulticlient.SentryMessageStreamFactory, + streamFactory sentry.MessageStreamFactory, handler func(event *TMessage) error, ) { defer ml.stopWg.Done() - messageHandler := func(_ context.Context, event *TMessage, _ direct.SentryClient) error { + messageHandler := func(_ context.Context, event *TMessage, client sentryproto.SentryClient) error { return handler(event) } - sentrymulticlient.SentryReconnectAndPumpStreamLoop( + sentry.ReconnectAndPumpStreamLoop( ctx, ml.sentryClient, ml.statusDataFactory, @@ -214,7 +212,7 @@ func notifyInboundMessageObservers[TPacket any]( logger 
log.Logger, peerPenalizer PeerPenalizer, observers *polygoncommon.Observers[*DecodedInboundMessage[TPacket]], - message *sentry.InboundMessage, + message *sentryproto.InboundMessage, ) error { peerId := PeerIdFromH512(message.PeerId) diff --git a/polygon/p2p/message_listener_test.go b/polygon/p2p/message_listener_test.go index 25fd4cebc0c..45127126132 100644 --- a/polygon/p2p/message_listener_test.go +++ b/polygon/p2p/message_listener_test.go @@ -35,9 +35,9 @@ import ( "github.com/erigontech/erigon-lib/direct" sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/log/v3" + libsentry "github.com/erigontech/erigon-lib/p2p/sentry" "github.com/erigontech/erigon/core/types" "github.com/erigontech/erigon/eth/protocols/eth" - sentrymulticlient "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client" "github.com/erigontech/erigon/rlp" "github.com/erigontech/erigon/turbo/testlog" ) @@ -236,7 +236,7 @@ func newMessageListenerTest(t *testing.T) *messageListenerTest { inboundMessagesStream := make(chan *delayedMessage[*sentry.InboundMessage]) peerEventsStream := make(chan *delayedMessage[*sentry.PeerEvent]) sentryClient := direct.NewMockSentryClient(ctrl) - statusDataFactory := sentrymulticlient.StatusDataFactory(func(ctx context.Context) (*sentry.StatusData, error) { + statusDataFactory := libsentry.StatusDataFactory(func(ctx context.Context) (*sentry.StatusData, error) { return &sentry.StatusData{}, nil }) return &messageListenerTest{ diff --git a/polygon/p2p/message_sender.go b/polygon/p2p/message_sender.go index a4b4b372f4c..b435b48c439 100644 --- a/polygon/p2p/message_sender.go +++ b/polygon/p2p/message_sender.go @@ -20,7 +20,6 @@ import ( "context" "errors" - "github.com/erigontech/erigon-lib/direct" sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon/eth/protocols/eth" "github.com/erigontech/erigon/rlp" @@ -33,14 +32,14 @@ 
type MessageSender interface { SendGetBlockBodies(ctx context.Context, peerId *PeerId, req eth.GetBlockBodiesPacket66) error } -func NewMessageSender(sentryClient direct.SentryClient) MessageSender { +func NewMessageSender(sentryClient sentry.SentryClient) MessageSender { return &messageSender{ sentryClient: sentryClient, } } type messageSender struct { - sentryClient direct.SentryClient + sentryClient sentry.SentryClient } func (ms *messageSender) SendGetBlockHeaders(ctx context.Context, peerId *PeerId, req eth.GetBlockHeadersPacket66) error { diff --git a/polygon/p2p/peer_penalizer.go b/polygon/p2p/peer_penalizer.go index e0a696a1797..5841022673a 100644 --- a/polygon/p2p/peer_penalizer.go +++ b/polygon/p2p/peer_penalizer.go @@ -19,7 +19,6 @@ package p2p import ( "context" - "github.com/erigontech/erigon-lib/direct" sentry "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" ) @@ -27,14 +26,14 @@ type PeerPenalizer interface { Penalize(ctx context.Context, peerId *PeerId) error } -func NewPeerPenalizer(sentryClient direct.SentryClient) PeerPenalizer { +func NewPeerPenalizer(sentryClient sentry.SentryClient) PeerPenalizer { return &peerPenalizer{ sentryClient: sentryClient, } } type peerPenalizer struct { - sentryClient direct.SentryClient + sentryClient sentry.SentryClient } func (p *peerPenalizer) Penalize(ctx context.Context, peerId *PeerId) error { diff --git a/polygon/p2p/service.go b/polygon/p2p/service.go index ed595088737..a68c79e9e51 100644 --- a/polygon/p2p/service.go +++ b/polygon/p2p/service.go @@ -23,9 +23,9 @@ import ( "golang.org/x/sync/errgroup" - "github.com/erigontech/erigon-lib/direct" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/log/v3" - sentrymulticlient "github.com/erigontech/erigon/p2p/sentry/sentry_multi_client" + "github.com/erigontech/erigon-lib/p2p/sentry" ) //go:generate mockgen -typed=true -source=./service.go -destination=./service_mock.go 
-package=p2p . Service @@ -41,8 +41,8 @@ type Service interface { func NewService( maxPeers int, logger log.Logger, - sentryClient direct.SentryClient, - statusDataFactory sentrymulticlient.StatusDataFactory, + sentryClient sentryproto.SentryClient, + statusDataFactory sentry.StatusDataFactory, ) Service { fetcherConfig := FetcherConfig{ responseTimeout: 5 * time.Second, @@ -57,8 +57,8 @@ func newService( maxPeers int, fetcherConfig FetcherConfig, logger log.Logger, - sentryClient direct.SentryClient, - statusDataFactory sentrymulticlient.StatusDataFactory, + sentryClient sentryproto.SentryClient, + statusDataFactory sentry.StatusDataFactory, requestIdGenerator RequestIdGenerator, ) *service { peerPenalizer := NewPeerPenalizer(sentryClient) diff --git a/polygon/sync/service.go b/polygon/sync/service.go index f693819a553..5f305839d98 100644 --- a/polygon/sync/service.go +++ b/polygon/sync/service.go @@ -22,8 +22,8 @@ import ( "golang.org/x/sync/errgroup" "github.com/erigontech/erigon-lib/chain" - "github.com/erigontech/erigon-lib/direct" "github.com/erigontech/erigon-lib/gointerfaces/executionproto" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/log/v3" "github.com/erigontech/erigon/p2p/sentry" "github.com/erigontech/erigon/polygon/bor/borcfg" @@ -48,7 +48,7 @@ type service struct { func NewService( logger log.Logger, chainConfig *chain.Config, - sentryClient direct.SentryClient, + sentryClient sentryproto.SentryClient, maxPeers int, statusDataProvider *sentry.StatusDataProvider, executionClient executionproto.ExecutionClient, diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index fa759d2cfae..4fa9a03f437 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -326,7 +326,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK penalize := func(context.Context, []headerdownload.PenaltyItem) {} 
mock.SentryClient = direct.NewSentryClientDirect(direct.ETH68, mock) - sentries := []direct.SentryClient{mock.SentryClient} + sentries := []proto_sentry.SentryClient{mock.SentryClient} sendBodyRequest := func(context.Context, *bodydownload.BodyRequest) ([64]byte, bool) { return [64]byte{}, false } blockPropagator := func(Ctx context.Context, header *types.Header, body *types.RawBody, td *big.Int) {} diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 314c6c98179..a6e80484778 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -29,8 +29,8 @@ import ( libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/datadir" "github.com/erigontech/erigon-lib/common/dbg" - "github.com/erigontech/erigon-lib/direct" proto_downloader "github.com/erigontech/erigon-lib/gointerfaces/downloaderproto" + "github.com/erigontech/erigon-lib/gointerfaces/sentryproto" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/membatchwithdb" "github.com/erigontech/erigon-lib/kv/rawdbv3" @@ -697,7 +697,7 @@ func NewPolygonSyncStages( silkworm *silkworm.Silkworm, forkValidator *engine_helpers.ForkValidator, heimdallClient heimdall.HeimdallClient, - sentry direct.SentryClient, + sentry sentryproto.SentryClient, maxPeers int, statusDataProvider *sentry.StatusDataProvider, stopNode func() error,