all: fix spelling mistakes #25961

Merged 1 commit on Oct 11, 2022
2 changes: 1 addition & 1 deletion accounts/manager.go
@@ -257,7 +257,7 @@ func merge(slice []Wallet, wallets ...Wallet) []Wallet {
return slice
}

-// drop is the couterpart of merge, which looks up wallets from within the sorted
+// drop is the counterpart of merge, which looks up wallets from within the sorted
// cache and removes the ones specified.
func drop(slice []Wallet, wallets ...Wallet) []Wallet {
for _, wallet := range wallets {
2 changes: 1 addition & 1 deletion cmd/devp2p/internal/ethtest/snap.go
@@ -90,7 +90,7 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
// A 127 block old stateroot, expected to be served
{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
-// A root which is not actually an account root, but a storage orot
+// A root which is not actually an account root, but a storage root
{4000, storageRoot, zero, ffHash, 0, zero, zero},

// And some non-sensical requests
2 changes: 1 addition & 1 deletion cmd/evm/internal/t8ntool/transition.go
@@ -394,7 +394,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
g[addr] = genesisAccount
}

-// saveFile marshalls the object to the given file
+// saveFile marshals the object to the given file
func saveFile(baseDir, filename string, data interface{}) error {
b, err := json.MarshalIndent(data, "", " ")
if err != nil {
2 changes: 1 addition & 1 deletion consensus/clique/snapshot_test.go
@@ -86,7 +86,7 @@ func (ap *testerAccountPool) sign(header *types.Header, signer string) {
copy(header.Extra[len(header.Extra)-extraSeal:], sig)
}

-// testerVote represents a single block signed by a parcitular account, where
+// testerVote represents a single block signed by a particular account, where
// the account may or may not have cast a Clique vote.
type testerVote struct {
signer string
2 changes: 1 addition & 1 deletion core/bloombits/matcher.go
@@ -612,7 +612,7 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
return

case <-time.After(wait):
-// Throttling up, fetch whatever's available
+// Throttling up, fetch whatever is available
}
}
// Allocate as much as we can handle and request servicing
2 changes: 1 addition & 1 deletion core/state/dump.go
@@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
)

-// DumpConfig is a set of options to control what portions of the statewill be
+// DumpConfig is a set of options to control what portions of the state will be
// iterated and collected.
type DumpConfig struct {
SkipCode bool
2 changes: 1 addition & 1 deletion core/state/snapshot/conversion.go
@@ -136,7 +136,7 @@ func (stat *generateStats) progressAccounts(account common.Hash, done uint64) {
stat.head = account
}

-// finishAccounts updates the gemerator stats for the finished account range.
+// finishAccounts updates the generator stats for the finished account range.
func (stat *generateStats) finishAccounts(done uint64) {
stat.lock.Lock()
defer stat.lock.Unlock()
2 changes: 1 addition & 1 deletion core/state/snapshot/iterator_fast.go
@@ -276,7 +276,7 @@ func (fi *fastIterator) next(idx int) bool {
return false
}
// The elem we're placing it next to has the same value,
-// so whichever winds up on n+1 will need further iteraton
+// so whichever winds up on n+1 will need further iteration
clash = n + 1

return cur.priority < fi.iterators[n+1].priority
2 changes: 1 addition & 1 deletion core/state/snapshot/iterator_test.go
@@ -819,7 +819,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
// only spit out 200 values eventually.
//
// The value-fetching benchmark is easy on the binary iterator, since it never has to reach
-// down at any depth for retrieving the values -- all are on the toppmost layer
+// down at any depth for retrieving the values -- all are on the topmost layer
//
// BenchmarkAccountIteratorTraversal/binary_iterator_keys-6 2239 483674 ns/op
// BenchmarkAccountIteratorTraversal/binary_iterator_values-6 2403 501810 ns/op
2 changes: 1 addition & 1 deletion core/state/snapshot/metrics.go
@@ -36,7 +36,7 @@ var (
snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
// snapAccountTrieReadCounter measures time spent on the account trie iteration
snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
-// snapAccountSnapReadCounter measues time spent on the snapshot account iteration
+// snapAccountSnapReadCounter measures time spent on the snapshot account iteration
snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
// snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
4 changes: 2 additions & 2 deletions core/state/snapshot/snapshot_test.go
@@ -166,7 +166,7 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
t.Fatalf("failed to merge accumulator onto disk: %v", err)
}
-// Since the base layer was modified, ensure that data retrievald on the external reference fail
+// Since the base layer was modified, ensure that data retrievals on the external reference fail
if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
}
@@ -224,7 +224,7 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
}
-// Since the accumulator diff layer was modified, ensure that data retrievald on the external reference fail
+// Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail
if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
}
6 changes: 3 additions & 3 deletions core/state/statedb.go
@@ -667,7 +667,7 @@ func (s *StateDB) Copy() *StateDB {
// nil
if object, exist := s.stateObjects[addr]; exist {
// Even though the original object is dirty, we are not copying the journal,
-// so we need to make sure that anyside effect the journal would have caused
+// so we need to make sure that any side-effect the journal would have caused
// during a commit (or similar op) is already applied to the copy.
state.stateObjects[addr] = object.deepCopy(state)

@@ -796,8 +796,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// resurrect an account; but the snapshotter needs both events.
if s.snap != nil {
s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
-delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect)
-delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a ressurrect)
+delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
}
} else {
obj.finalise(true) // Prefetch slots in the background
2 changes: 1 addition & 1 deletion core/tx_list.go
@@ -469,7 +469,7 @@ func (h *priceHeap) Pop() interface{} {
}

// txPricedList is a price-sorted heap to allow operating on transactions pool
-// contents in a price-incrementing way. It's built opon the all transactions
+// contents in a price-incrementing way. It's built upon the all transactions
// in txpool but only interested in the remote part. It means only remote transactions
// will be considered for tracking, sorting, eviction, etc.
//
4 changes: 2 additions & 2 deletions core/tx_pool.go
@@ -65,7 +65,7 @@ var (
// configured for the transaction pool.
ErrUnderpriced = errors.New("transaction underpriced")

-// ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet
+// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
// another remote transaction.
ErrTxPoolOverflow = errors.New("txpool is full")

@@ -850,7 +850,7 @@ func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
-// a convenience wrapper aroundd AddLocals.
+// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
errs := pool.AddLocals([]*types.Transaction{tx})
return errs[0]
2 changes: 1 addition & 1 deletion core/tx_pool_test.go
@@ -2158,7 +2158,7 @@ func TestTransactionReplacementDynamicFee(t *testing.T) {
stages := []string{"pending", "queued"}
for _, stage := range stages {
// Since state is empty, 0 nonce txs are "executable" and can go
-// into pending immediately. 2 nonce txs are "happed
+// into pending immediately. 2 nonce txs are "gapped"
nonce := uint64(0)
if stage == "queued" {
nonce = 2
2 changes: 1 addition & 1 deletion core/vm/contracts.go
@@ -935,7 +935,7 @@ func (c *bls12381Pairing) Run(input []byte) ([]byte, error) {
return nil, errBLS12381G2PointSubgroup
}

-// Update pairing engine with G1 and G2 ponits
+// Update pairing engine with G1 and G2 points
e.AddPair(p1, p2)
}
// Prepare 32 byte output
2 changes: 1 addition & 1 deletion crypto/signature_cgo.go
@@ -48,7 +48,7 @@ func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
//
// This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must
-// be aware that the given digest cannot be chosen by an adversery. Common
+// be aware that the given digest cannot be chosen by an adversary. Common
// solution is to hash any input before calculating the signature.
//
// The produced signature is in the [R || S || V] format where V is 0 or 1.
2 changes: 1 addition & 1 deletion eth/catalyst/api.go
@@ -105,7 +105,7 @@ type ConsensusAPI struct {
// problematic, so we will only track the head chain segment of a bad
// chain to allow discarding progressing bad chains and side chains,
// without tracking too much bad data.
-invalidBlocksHits map[common.Hash]int // Emhemeral cache to track invalid blocks and their hit count
+invalidBlocksHits map[common.Hash]int // Ephemeral cache to track invalid blocks and their hit count
invalidTipsets map[common.Hash]*types.Header // Ephemeral cache to track invalid tipsets and their bad ancestor
invalidLock sync.Mutex // Protects the invalid maps from concurrent access

2 changes: 1 addition & 1 deletion eth/downloader/queue.go
@@ -632,7 +632,7 @@ func (q *queue) ExpireReceipts(peer string) int {
// lock is not obtained in here is that the parameters already need to access
// the queue, so they already need a lock anyway.
func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int {
-// Retrieve the request being expired and log an error if it's non-existnet,
+// Retrieve the request being expired and log an error if it's non-existent,
// as there's no order of events that should lead to such expirations.
req := pendPool[peer]
if req == nil {
2 changes: 1 addition & 1 deletion eth/downloader/skeleton.go
@@ -35,7 +35,7 @@ import (
// scratchHeaders is the number of headers to store in a scratch space to allow
// concurrent downloads. A header is about 0.5KB in size, so there is no worry
// about using too much memory. The only catch is that we can only validate gaps
-// afer they're linked to the head, so the bigger the scratch space, the larger
+// after they're linked to the head, so the bigger the scratch space, the larger
// potential for invalid headers.
//
// The current scratch space of 131072 headers is expected to use 64MB RAM.
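As an aside on the 64MB figure quoted in the skeleton.go comment above: with the ~0.5KB-per-header estimate, 131072 headers do work out to 64MB. A minimal sketch of that arithmetic, assuming a flat 512-byte header size (an approximation for illustration, not the actual encoded size):

```go
package main

import "fmt"

func main() {
	const scratchHeaders = 131072 // headers held in the scratch space
	const headerSizeBytes = 512   // assumed ~0.5KB per header, per the comment above
	fmt.Println(scratchHeaders*headerSizeBytes/(1024*1024), "MB") // prints: 64 MB
}
```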
4 changes: 2 additions & 2 deletions eth/fetcher/tx_fetcher.go
@@ -154,7 +154,7 @@ type TxFetcher struct {
// broadcast without needing explicit request/reply round trips.
waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
-waitslots map[string]map[common.Hash]struct{} // Waiting announcement sgroupped by peer (DoS protection)
+waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)

// Stage 2: Queue of transactions that waiting to be allocated to some peer
// to be retrieved directly.
@@ -218,7 +218,7 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
txAnnounceInMeter.Mark(int64(len(hashes)))

// Skip any transaction announcements that we already know of, or that we've
-// previously marked as cheap and discarded. This check is of course racey,
+// previously marked as cheap and discarded. This check is of course racy,
// because multiple concurrent notifies will still manage to pass it, but it's
// still valuable to check here because it runs concurrent to the internal
// loop, so anything caught here is time saved internally.
2 changes: 1 addition & 1 deletion eth/protocols/eth/dispatcher.go
@@ -203,7 +203,7 @@ func (p *Peer) dispatcher() {
}

case cancelOp := <-p.reqCancel:
-// Retrieve the pendign request to cancel and short circuit if it
+// Retrieve the pending request to cancel and short circuit if it
// has already been serviced and is not available anymore
req := pending[cancelOp.id]
if req == nil {
2 changes: 1 addition & 1 deletion eth/protocols/eth/handler_test.go
@@ -335,7 +335,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
}
// Run each of the tests and verify the results against the chain
for i, tt := range tests {
-// Collect the hashes to request, and the response to expectva
+// Collect the hashes to request, and the response to expect
var (
hashes []common.Hash
bodies []*BlockBody
2 changes: 1 addition & 1 deletion eth/protocols/eth/peer.go
@@ -188,7 +188,7 @@ func (p *Peer) markTransaction(hash common.Hash) {
// not be managed directly.
//
// The reasons this is public is to allow packages using this protocol to write
-// tests that directly send messages without having to do the asyn queueing.
+// tests that directly send messages without having to do the async queueing.
func (p *Peer) SendTransactions(txs types.Transactions) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
for _, tx := range txs {
4 changes: 2 additions & 2 deletions eth/protocols/snap/handler.go
@@ -139,7 +139,7 @@ func HandleMessage(backend Backend, peer *Peer) error {
}
defer msg.Discard()
start := time.Now()
-// Track the emount of time it takes to serve the request and run the handler
+// Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
defer func(start time.Time) {
@@ -343,7 +343,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
req.Bytes = softResponseLimit
}
// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
-// TODO(karalabe): - Logging locally is not ideal as remote faulst annoy the local user
+// TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
// TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

// Calculate the hard limit at which to abort, even if mid storage trie
2 changes: 1 addition & 1 deletion eth/protocols/snap/range_test.go
@@ -95,7 +95,7 @@ func TestHashRanges(t *testing.T) {
// meaningful space size for manual verification.
// - The head being 0xff...f0, we have 14 hashes left in the space
// - Chunking up 14 into 3 pieces is 4.(6), but we need the ceil of 5 to avoid a micro-last-chunk
-// - Since the range is not divisible, the last interval will be shrter, capped at 0xff...f
+// - Since the range is not divisible, the last interval will be shorter, capped at 0xff...f
// - The chunk ranges thus needs to be [..0, ..5], [..6, ..b], [..c, ..f]
{
head: common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0"),
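The ceiling division described in the range_test.go comment above (14 remaining hashes split over 3 chunks rounding up to 5, with the final interval coming up short) can be sketched as below; chunkSize is an illustrative helper for this note, not the code under test:

```go
package main

import "fmt"

// chunkSize rounds remaining/pieces up, so every chunk but the last covers the
// full span and the final, shorter chunk is simply capped at the end of the space.
func chunkSize(remaining, pieces uint64) uint64 {
	return (remaining + pieces - 1) / pieces
}

func main() {
	fmt.Println(chunkSize(14, 3)) // 5, i.e. the ceil of 14/3 from the comment above
}
```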
6 changes: 3 additions & 3 deletions eth/protocols/snap/sync.go
@@ -70,7 +70,7 @@ const (
// and waste round trip times. If it's too high, we're capping responses and
// waste bandwidth.
//
-// Depoyed bytecodes are currently capped at 24KB, so the minimum request
+// Deployed bytecodes are currently capped at 24KB, so the minimum request
// size should be maxRequestSize / 24K. Assuming that most contracts do not
// come close to that, requesting 4x should be a good approximation.
maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
@@ -87,8 +87,8 @@
trienodeHealRateMeasurementImpact = 0.005

// minTrienodeHealThrottle is the minimum divisor for throttling trie node
-// heal requests to avoid overloading the local node and exessively expanding
-// the state trie bedth wise.
+// heal requests to avoid overloading the local node and excessively expanding
+// the state trie breadth wise.
minTrienodeHealThrottle = 1

// maxTrienodeHealThrottle is the maximum divisor for throttling trie node
2 changes: 1 addition & 1 deletion eth/tracers/internal/tracetest/calltrace_test.go
@@ -138,7 +138,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
-// Retrieve the trace result and compare against the etalon
+// Retrieve the trace result and compare against the expected.
res, err := tracer.GetResult()
if err != nil {
t.Fatalf("failed to retrieve trace result: %v", err)
2 changes: 1 addition & 1 deletion eth/tracers/internal/tracetest/prestate_test.go
@@ -122,7 +122,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T, typ
if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
-// Retrieve the trace result and compare against the etalon
+// Retrieve the trace result and compare against the expected
res, err := tracer.GetResult()
if err != nil {
t.Fatalf("failed to retrieve trace result: %v", err)
2 changes: 1 addition & 1 deletion eth/tracers/internal/tracetest/util.go
@@ -6,7 +6,7 @@ import (
"strings"
"unicode"

-// Force-load native and js pacakges, to trigger registration
+// Force-load native and js packages, to trigger registration
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)
2 changes: 1 addition & 1 deletion ethclient/ethclient_test.go
@@ -392,7 +392,7 @@ func testTransactionInBlockInterrupted(t *testing.T, client *rpc.Client) {
t.Fatalf("unexpected error: %v", err)
}

-// Test tx in block interupted.
+// Test tx in block interrupted.
ctx, cancel := context.WithCancel(context.Background())
cancel()
tx, err := ec.TransactionInBlock(ctx, block.Hash(), 0)
4 changes: 2 additions & 2 deletions internal/ethapi/api.go
@@ -456,7 +456,7 @@ func (s *PersonalAccountAPI) signTransaction(ctx context.Context, args *Transact
// passwd isn't able to decrypt the key it fails.
func (s *PersonalAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) {
if args.Nonce == nil {
-// Hold the addresse's mutex around signing to prevent concurrent assignment of
+// Hold the mutex around signing to prevent concurrent assignment of
// the same nonce to multiple accounts.
s.nonceLock.LockAddr(args.from())
defer s.nonceLock.UnlockAddr(args.from())
@@ -1719,7 +1719,7 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr
}

if args.Nonce == nil {
-// Hold the addresse's mutex around signing to prevent concurrent assignment of
+// Hold the mutex around signing to prevent concurrent assignment of
// the same nonce to multiple accounts.
s.nonceLock.LockAddr(args.from())
defer s.nonceLock.UnlockAddr(args.from())
2 changes: 1 addition & 1 deletion internal/version/version.go
@@ -95,7 +95,7 @@ func Info() (version, vcs string) {
// versionInfo returns version information for the currently executing
// implementation.
//
-// Depending on how the code is instansiated, it returns different amounts of
+// Depending on how the code is instantiated, it returns different amounts of
// information. If it is unable to determine which module is related to our
// package it falls back to the hardcoded values in the params package.
func versionInfo(info *debug.BuildInfo) string {
2 changes: 1 addition & 1 deletion les/downloader/resultstore.go
@@ -142,7 +142,7 @@ func (r *resultStore) HasCompletedItems() bool {
// countCompleted returns the number of items ready for delivery, stopping at
// the first non-complete item.
//
-// The mthod assumes (at least) rlock is held.
+// The method assumes (at least) rlock is held.
func (r *resultStore) countCompleted() int {
// We iterate from the already known complete point, and see
// if any more has completed since last count
2 changes: 1 addition & 1 deletion les/fetcher_test.go
@@ -282,7 +282,7 @@ func testInvalidAnnounces(t *testing.T, protocol int) {
peer.cpeer.sendAnnounce(announce)
<-done // Wait syncing

-// Ensure the bad peer is evicited
+// Ensure the bad peer is evicted
if c.handler.backend.peers.len() != 0 {
t.Fatalf("Failed to evict invalid peer")
}